summaryrefslogtreecommitdiff
path: root/core/src/test
diff options
context:
space:
mode:
authorSimon Willnauer <simonw@apache.org>2015-06-05 13:12:03 +0200
committerSimon Willnauer <simonw@apache.org>2015-06-05 13:12:03 +0200
commit15a62448343fd24f8e63f43b1e4b16f50005e4a5 (patch)
tree7d04660f3f7aef0d679da3e6185af9cf378bf1d0 /core/src/test
parent7ccc193a666e2ae888e7ac93d677a2143e5e07c3 (diff)
create core module
Diffstat (limited to 'core/src/test')
-rw-r--r--core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java68
-rw-r--r--core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java62
-rw-r--r--core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTest.java240
-rw-r--r--core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java108
-rw-r--r--core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java152
-rw-r--r--core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java186
-rw-r--r--core/src/test/java/org/apache/lucene/util/SloppyMathTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java236
-rw-r--r--core/src/test/java/org/elasticsearch/NamingConventionTests.java174
-rw-r--r--core/src/test/java/org/elasticsearch/VersionTests.java195
-rw-r--r--core/src/test/java/org/elasticsearch/action/IndicesRequestTests.java902
-rw-r--r--core/src/test/java/org/elasticsearch/action/ListenerActionTests.java75
-rw-r--r--core/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/action/RejectionActionTests.java111
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java175
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java191
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTest.java77
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java171
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java64
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTest.java112
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java191
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java330
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java78
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java89
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java62
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java102
-rw-r--r--core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java37
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java48
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java53
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java364
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java188
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/bulk-log.json24
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json5
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk10.json15
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json5
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json5
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json7
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json5
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk6.json6
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk7.json6
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk8.json6
-rw-r--r--core/src/test/java/org/elasticsearch/action/bulk/simple-bulk9.json4
-rw-r--r--core/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java132
-rw-r--r--core/src/test/java/org/elasticsearch/action/count/CountRequestTests.java110
-rw-r--r--core/src/test/java/org/elasticsearch/action/count/CountResponseTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java101
-rw-r--r--core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTest.java87
-rw-r--r--core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java47
-rw-r--r--core/src/test/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptRequestTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java196
-rw-r--r--core/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json16
-rw-r--r--core/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json6
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java133
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json16
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json10
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json8
-rw-r--r--core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json6
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java76
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java456
-rw-r--r--core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java845
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java410
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java271
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java1360
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java205
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java328
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/multiRequest1.json13
-rw-r--r--core/src/test/java/org/elasticsearch/action/termvectors/multiRequest2.json26
-rw-r--r--core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java201
-rw-r--r--core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java1082
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java136
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java63
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/breaker/CircuitBreakerBenchmark.java189
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java85
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java88
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java97
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java123
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java66
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java72
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java55
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java262
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/mapping/ManyMappingsBenchmark.java154
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java158
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java199
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript1.java45
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript2.java45
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript3.java45
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript4.java45
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScriptPlugin.java43
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java166
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java334
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java101
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java136
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java83
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java45
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java54
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java49
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java74
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java72
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java72
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java166
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/CardinalityAggregationSearchBenchmark.java161
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/GlobalOrdinalsBenchmark.java245
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java224
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/IncludeExcludeAggregationSearchBenchmark.java130
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/PercentilesAggregationSearchBenchmark.java209
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java146
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/SubAggregationSearchCollectModeBenchmark.java316
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchAndIndexingBenchmark.java353
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java402
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java263
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java217
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java347
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java209
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/child/ParentChildIndexGenerator.java120
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java201
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java192
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/search/scroll/ScrollSearchBenchmark.java157
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java282
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java123
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java108
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java70
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java59
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java72
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java149
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java181
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java157
-rw-r--r--core/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java65
-rw-r--r--core/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java166
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java96
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/JNANativesTests.java47
-rw-r--r--core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java182
-rw-r--r--core/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java133
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java720
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityTests.java113
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java91
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java447
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java202
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityTest.java55
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java82
-rw-r--r--core/src/test/java/org/elasticsearch/bwcompat/UnicastBackwardsCompatibilityTest.java56
-rw-r--r--core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java241
-rw-r--r--core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/client/node/NodeClientTests.java49
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java194
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java141
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java176
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java97
-rw-r--r--core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java237
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java76
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceTests.java266
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java1049
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java624
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java533
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java81
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java334
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java290
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java211
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java76
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java141
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java158
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/ack/AckTests.java409
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java249
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java321
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java151
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java360
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java895
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java309
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java118
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java74
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java246
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java436
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java392
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java829
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java474
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTest.java100
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestBase.java189
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java633
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java155
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java247
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java146
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java114
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java166
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java581
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java169
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java539
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java343
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java124
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java144
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java94
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java217
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java152
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java106
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java416
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java36
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java91
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java188
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java414
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java170
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java174
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java178
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java923
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java97
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIntegrationTest.java77
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java334
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java202
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java44
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java222
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringTests.java114
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java162
-rw-r--r--core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java327
-rw-r--r--core/src/test/java/org/elasticsearch/codecs/CodecTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/common/Base64Test.java59
-rw-r--r--core/src/test/java/org/elasticsearch/common/BooleansTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/common/ChannelsTests.java291
-rw-r--r--core/src/test/java/org/elasticsearch/common/ParseFieldTests.java101
-rw-r--r--core/src/test/java/org/elasticsearch/common/PidFileTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/common/StringsTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/common/TableTests.java152
-rw-r--r--core/src/test/java/org/elasticsearch/common/UUIDTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTest.java148
-rw-r--r--core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java247
-rw-r--r--core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java181
-rw-r--r--core/src/test/java/org/elasticsearch/common/bytes/BytesReferenceTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java582
-rw-r--r--core/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java336
-rw-r--r--core/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java152
-rw-r--r--core/src/test/java/org/elasticsearch/common/cli/CliToolTests.java382
-rw-r--r--core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java60
-rw-r--r--core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java152
-rw-r--r--core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTests.java435
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java143
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java70
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java63
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java34
-rw-r--r--core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java30
-rw-r--r--core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java1003
-rw-r--r--core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java607
-rw-r--r--core/src/test/java/org/elasticsearch/common/hashing/MurmurHash3Tests.java55
-rw-r--r--core/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java176
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/StreamsTests.java97
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/rootdir.properties1
-rw-r--r--core/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java355
-rw-r--r--core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java258
-rw-r--r--core/src/test/java/org/elasticsearch/common/logging/jdk/JDKESLoggerTests.java119
-rw-r--r--core/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java173
-rw-r--r--core/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java165
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java148
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java325
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java137
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java342
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/index/ElasticsearchDirectoryReaderTests.java76
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java232
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java66
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java77
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java74
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java109
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java266
-rw-r--r--core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java283
-rw-r--r--core/src/test/java/org/elasticsearch/common/math/MathUtilsTests.java39
-rw-r--r--core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java164
-rw-r--r--core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTest.java167
-rw-r--r--core/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java29
-rw-r--r--core/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java29
-rw-r--r--core/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java39
-rw-r--r--core/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java29
-rw-r--r--core/src/test/java/org/elasticsearch/common/regex/RegexTests.java71
-rw-r--r--core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java109
-rw-r--r--core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java316
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java128
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java378
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java24
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java24
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json10
-rw-r--r--core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml8
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java83
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java148
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java200
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/RatioValueTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java69
-rw-r--r--core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java153
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java93
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java383
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java109
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java254
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTest.java141
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java132
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/LongHashTests.java210
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/MultiDataPathUpgraderTests.java290
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/SingleObjectCacheTests.java97
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java105
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java239
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java331
-rw-r--r--core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTest.java153
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java106
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java129
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java263
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentParserTests.java46
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java100
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java105
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java456
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTests.java524
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborFilteringGeneratorTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGeneratorBenchmark.java99
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonFilteringGeneratorTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java102
-rw-r--r--core/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java224
-rw-r--r--core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java83
-rw-r--r--core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java290
-rw-r--r--core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java264
-rw-r--r--core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java147
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java1083
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java218
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTest.java105
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java218
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java143
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java183
-rw-r--r--core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java139
-rw-r--r--core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java47
-rw-r--r--core/src/test/java/org/elasticsearch/document/BulkTests.java1002
-rw-r--r--core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java253
-rw-r--r--core/src/test/java/org/elasticsearch/document/ShardInfoTests.java153
-rw-r--r--core/src/test/java/org/elasticsearch/env/EnvironmentTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java384
-rw-r--r--core/src/test/java/org/elasticsearch/exists/SimpleExistsTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/explain/ExplainActionTests.java305
-rw-r--r--core/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java216
-rw-r--r--core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java194
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java323
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java125
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java385
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java249
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java562
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java358
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/QuorumGatewayTests.java181
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java163
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java485
-rw-r--r--core/src/test/java/org/elasticsearch/get/GetActionTests.java1348
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java151
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java223
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningDisabledIntegrationTest.java77
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningEnabledIntegrationTest.java75
-rw-r--r--core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTest.java215
-rw-r--r--core/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java726
-rw-r--r--core/src/test/java/org/elasticsearch/index/TransportIndexFailuresTest.java161
-rw-r--r--core/src/test/java/org/elasticsearch/index/VersionTypeTests.java222
-rw-r--r--core/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java122
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java62
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java205
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java274
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java49
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java66
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java71
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java96
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java63
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java255
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java63
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTest.java154
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java78
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java46
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java171
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java48
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java49
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java87
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/SnowballAnalyzerTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java111
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java116
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java178
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json37
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java241
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt2
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json29
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json31
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java41
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json46
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json23
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/stop.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java110
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json72
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt3
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt3
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/test1.json84
-rw-r--r--core/src/test/java/org/elasticsearch/index/analysis/test1.yml62
-rw-r--r--core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java105
-rw-r--r--core/src/test/java/org/elasticsearch/index/codec/CodecTests.java101
-rw-r--r--core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java78
-rw-r--r--core/src/test/java/org/elasticsearch/index/codec/postingformat/ElasticsearchPostingsFormatTest.java44
-rw-r--r--core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java60
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java52
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java73
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineIntegrationTest.java91
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java142
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java1897
-rw-r--r--core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java978
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java278
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java131
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java512
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java630
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java200
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java639
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java89
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingTests.java70
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FieldDataTests.java107
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java184
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java199
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java170
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java431
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java271
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesTests.java111
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/SortedSetDVStringFieldDataTests.java41
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java294
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java77
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java363
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java195
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java177
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/UidTests.java44
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java467
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/mapping.json56
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json56
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json56
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json55
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json11
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_type_in_root.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_default.json21
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json23
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json54
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json55
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/test1.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/all/type_numeric_detection_mapping.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java67
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java176
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java58
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java113
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java95
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java359
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java112
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java130
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java222
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java93
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java456
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json4
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java81
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java183
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json33
-rwxr-xr-xcore/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalIndexModule.java33
-rwxr-xr-xcore/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java242
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapperPlugin.java52
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java95
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationTests.java123
-rwxr-xr-xcore/src/test/java/org/elasticsearch/index/mapper/externalvalues/RegisterExternalTypes.java43
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java182
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java48
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java504
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java384
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java136
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperIntegrationTests.java71
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java143
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java186
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java126
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java98
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java124
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java220
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java513
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java274
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java195
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json4
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json11
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json27
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json32
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json25
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json32
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json55
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json50
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java351
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/null_value/NullValueTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java522
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java162
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java84
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json28
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java121
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java138
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json84
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json39
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json40
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json42
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/simple/test1.json39
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIntegrationTests.java86
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java118
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java101
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java305
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java522
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java792
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java352
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java241
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java48
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterTests.java242
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java208
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_create_index.json31
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json1
-rw-r--r--core/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java326
-rw-r--r--core/src/test/java/org/elasticsearch/index/merge/policy/VersionFieldUpgraderTest.java144
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java36
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeFormatTests.java167
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeTimezoneTests.java128
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java2535
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/TemplateQueryBuilderTest.java66
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java133
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java766
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java37
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/and-filter-cache.json21
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/and-filter-named.json26
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/and-filter.json25
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/and-filter2.json23
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/bool-filter.json35
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/bool.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/boosting-query.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/child-mapping.json12
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json11
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json11
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/constantScore-query.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/data.json43
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_filter_format.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_filter_format_invalid.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone_numeric_field.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_exclusive.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_inclusive.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_format.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_format_invalid.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone_numeric_field.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/disMax.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/disMax2.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/field3.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/filtered-query.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/filtered-query2.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/filtered-query3.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/filtered-query4.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/fquery-filter.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/fquery-with-empty-bool-query.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/function-score-query-causing-NPE.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json10
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json9
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/fuzzy.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geoShape-filter.json21
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geoShape-query.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json21
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance-named.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance1.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance10.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance11.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance12.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance2.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance3.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance4.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance5.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance6.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance7.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance8.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_distance9.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon1.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon2.json27
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon3.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon4.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_1.json20
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_2.json22
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_3.json12
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_4.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_5.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java64
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/has-child.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/mapping.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/match-with-fuzzy-transpositions.json1
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/match-without-fuzzy-transpositions.json1
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/matchAll.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/match_all_empty1.json3
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/match_all_empty2.json3
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/mlt-items.json22
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/mlt.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/not-filter.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/not-filter2.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/not-filter3.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/or-filter.json25
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/or-filter2.json23
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java99
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java64
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/prefix-boost.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/prefix-filter.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/prefix.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-fields-match.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-fields1.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-fields2.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-fields3.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-filter.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-regexp-max-determinized-states.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-regexp-too-many-determinized-states.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-timezone-incorrect.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query-timezone.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/query2.json6
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/range-filter-named.json20
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/range-filter.json19
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/range.json10
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/range2.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-boost.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json20
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json18
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-filter-max-determinized-states.json17
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-filter.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp-max-determinized-states.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/regexp.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/simple-query-string.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json13
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json12
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json16
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json7
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanContaining.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json29
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanFirst.json10
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanNear.json24
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanNot.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanOr.json21
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanOr2.json30
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanTerm.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/spanWithin.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/starColonStar.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/term-array-invalid.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/term-filter-named.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/term-filter.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/term-with-boost.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/term.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/terms-filter-named.json15
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/terms-filter.json14
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/terms-query.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/wildcard-boost.json8
-rw-r--r--core/src/test/java/org/elasticsearch/index/query/wildcard.json5
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java146
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java50
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java297
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java422
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/MockScorer.java102
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java248
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java265
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java68
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java153
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java175
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java627
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java355
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java92
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java91
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java353
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/IndexShardModuleTests.java54
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java379
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java81
-rw-r--r--core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java64
-rw-r--r--core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTest.java70
-rw-r--r--core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTest.java133
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java748
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogTests.java172
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTest.java83
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java139
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/LegacyVerificationTests.java124
-rw-r--r--core/src/test/java/org/elasticsearch/index/store/StoreTest.java1270
-rw-r--r--core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java171
-rw-r--r--core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java43
-rw-r--r--core/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java131
-rw-r--r--core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java1246
-rw-r--r--core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java168
-rw-r--r--core/src/test/java/org/elasticsearch/indexing/IndexActionTests.java218
-rw-r--r--core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java229
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesCustomDataPathTests.java145
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java271
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java915
-rw-r--r--core/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java169
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java47
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java55
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java36
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java40
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java36
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java42
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java30
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java33
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java35
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java205
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java280
-rw-r--r--core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java77
-rw-r--r--core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java102
-rw-r--r--core/src/test/java/org/elasticsearch/indices/flush/FlushTest.java226
-rw-r--r--core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java223
-rw-r--r--core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java136
-rw-r--r--core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java113
-rw-r--r--core/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java131
-rw-r--r--core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java44
-rw-r--r--core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java207
-rw-r--r--core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java175
-rw-r--r--core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java341
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java152
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java350
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerTests.java274
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java628
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java528
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java77
-rw-r--r--core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTest.java81
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java285
-rw-r--r--core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java425
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java392
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java369
-rw-r--r--core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java161
-rw-r--r--core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java1049
-rw-r--r--core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java302
-rw-r--r--core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java196
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java66
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringTests.java98
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java675
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template0.json11
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template1.json11
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template2.json13
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template3.json13
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template4.json13
-rw-r--r--core/src/test/java/org/elasticsearch/indices/template/template5.json15
-rw-r--r--core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerTests.java147
-rw-r--r--core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java158
-rw-r--r--core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java380
-rw-r--r--core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java178
-rw-r--r--core/src/test/java/org/elasticsearch/monitor/SigarTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java1415
-rw-r--r--core/src/test/java/org/elasticsearch/network/DirectBufferNetworkTests.java145
-rw-r--r--core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java154
-rw-r--r--core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java159
-rw-r--r--core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java115
-rw-r--r--core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledTest.java66
-rw-r--r--core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledTest.java64
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java386
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java387
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java74
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java267
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java2040
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java419
-rw-r--r--core/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java220
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginLuceneCheckerTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java526
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java58
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginServiceTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java58
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginTests.java61
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/SitePluginRelativePathConfigTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/SitePluginTests.java134
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java35
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties19
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/current/CurrentLucenePlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/current/es-plugin-test.properties21
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/newer/NewerLucenePlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/newer/es-plugin-test.properties21
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/old/OldLucenePlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/lucene/old/es-plugin-test.properties21
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java40
-rw-r--r--core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java46
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java131
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java181
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTest.java141
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java332
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/RelocationTests.java550
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java113
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java33
-rw-r--r--core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryTests.java151
-rw-r--r--core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java183
-rw-r--r--core/src/test/java/org/elasticsearch/rest/CorsRegexDefaultTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/rest/CorsRegexTests.java107
-rw-r--r--core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java426
-rw-r--r--core/src/test/java/org/elasticsearch/rest/NoOpClient.java54
-rw-r--r--core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java271
-rw-r--r--core/src/test/java/org/elasticsearch/rest/RestRequestTests.java109
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java58
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java268
-rw-r--r--core/src/test/java/org/elasticsearch/rest/action/support/RestTableTest.java82
-rw-r--r--core/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java158
-rw-r--r--core/src/test/java/org/elasticsearch/river/RiverTests.java168
-rw-r--r--core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java144
-rw-r--r--core/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java356
-rw-r--r--core/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java464
-rw-r--r--core/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java141
-rw-r--r--core/src/test/java/org/elasticsearch/script/GroovyScriptTests.java146
-rw-r--r--core/src/test/java/org/elasticsearch/script/GroovySecurityTests.java129
-rw-r--r--core/src/test/java/org/elasticsearch/script/IndexLookupTests.java1134
-rw-r--r--core/src/test/java/org/elasticsearch/script/IndexedScriptTests.java236
-rw-r--r--core/src/test/java/org/elasticsearch/script/NativeScriptTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptFieldTests.java184
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java89
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptModesTests.java319
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptParameterParserTest.java1269
-rw-r--r--core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java445
-rw-r--r--core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java392
-rw-r--r--core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java171
-rw-r--r--core/src/test/java/org/elasticsearch/script/mustache/MustacheTest.java56
-rw-r--r--core/src/test/java/org/elasticsearch/search/CountSearchTests.java65
-rw-r--r--core/src/test/java/org/elasticsearch/search/MultiValueModeTests.java736
-rw-r--r--core/src/test/java/org/elasticsearch/search/SearchServiceTests.java73
-rw-r--r--core/src/test/java/org/elasticsearch/search/SearchWithRejectionsTests.java90
-rw-r--r--core/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java65
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java70
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/AggregationsBinaryTests.java142
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java87
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java143
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java444
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/MetaDataTests.java117
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/MissingValueTests.java195
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTests.java82
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsTests.java170
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java399
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetTests.java176
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java1691
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java1445
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DedicatedAggregationTests.java55
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java1593
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java188
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java275
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java492
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java322
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java125
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java1272
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java1228
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java1527
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java390
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java212
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java557
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggTests.java60
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java1495
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedTests.java643
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java299
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java324
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java423
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTests.java119
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsBackwardCompatibilityTests.java119
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java747
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java441
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java1905
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorTests.java948
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountTests.java162
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java1048
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/agg-filter-with-empty-bool.json33
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java148
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java24
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java47
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java50
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java55
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java413
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java107
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java323
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java473
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java557
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java440
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java325
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java340
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java455
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java438
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java1412
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java449
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java326
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java203
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java137
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java24
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/package-info.java24
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java401
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeTests.java386
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java605
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java485
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java433
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java131
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java380
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java1445
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java586
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/support/MissingValuesTests.java297
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java108
-rw-r--r--core/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java163
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java107
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java160
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java375
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java128
-rw-r--r--core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java464
-rw-r--r--core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTest.java86
-rw-r--r--core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java225
-rw-r--r--core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java2056
-rw-r--r--core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java272
-rw-r--r--core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingTest.java179
-rw-r--r--core/src/test/java/org/elasticsearch/search/child/bool-query-with-empty-clauses.json19
-rw-r--r--core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java102
-rw-r--r--core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java102
-rw-r--r--core/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java828
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java969
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptPlugin.java41
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java121
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java141
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java166
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java801
-rw-r--r--core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java406
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java340
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java796
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java636
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java104
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java488
-rw-r--r--core/src/test/java/org/elasticsearch/search/geo/gzippedmap.gzbin0 -> 7734 bytes
-rw-r--r--core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java81
-rw-r--r--core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java39
-rw-r--r--core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java123
-rw-r--r--core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java2609
-rw-r--r--core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java113
-rw-r--r--core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java1217
-rw-r--r--core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java267
-rw-r--r--core/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java159
-rw-r--r--core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java591
-rw-r--r--core/src/test/java/org/elasticsearch/search/morelikethis/items.json25
-rw-r--r--core/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java120
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/ExistsMissingTests.java193
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java618
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java2576
-rw-r--r--core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringTests.java344
-rw-r--r--core/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java772
-rw-r--r--core/src/test/java/org/elasticsearch/search/scan/ScanContextTests.java116
-rw-r--r--core/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java106
-rw-r--r--core/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java87
-rw-r--r--core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java173
-rw-r--r--core/src/test/java/org/elasticsearch/search/scroll/DuelScrollTests.java229
-rw-r--r--core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java579
-rw-r--r--core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesTests.java113
-rw-r--r--core/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java240
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java2529
-rw-r--r--core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java149
-rw-r--r--core/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java103
-rw-r--r--core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java195
-rw-r--r--core/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java66
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java1170
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java197
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java1057
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java85
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java42
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java85
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java1285
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java334
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java545
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/context/GeoLocationContextMappingTest.java199
-rw-r--r--core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java401
-rw-r--r--core/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java53
-rw-r--r--core/src/test/java/org/elasticsearch/similarity/SimilarityTests.java79
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java256
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java1118
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java254
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java1897
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java250
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java59
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java85
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java57
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java321
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java42
-rw-r--r--core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryPlugin.java71
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java89
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java222
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java70
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java96
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java106
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java71
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java118
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java51
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java35
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java76
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java98
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java126
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java96
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java124
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java354
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java111
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java237
-rw-r--r--core/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java378
-rw-r--r--core/src/test/java/org/elasticsearch/test/BackgroundIndexer.java286
-rw-r--r--core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java300
-rw-r--r--core/src/test/java/org/elasticsearch/test/DummyShardLock.java37
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java120
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java287
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java2079
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java258
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java598
-rw-r--r--core/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java57
-rw-r--r--core/src/test/java/org/elasticsearch/test/ExternalNode.java250
-rw-r--r--core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java160
-rw-r--r--core/src/test/java/org/elasticsearch/test/InternalTestCluster.java1800
-rw-r--r--core/src/test/java/org/elasticsearch/test/MockLogAppender.java139
-rw-r--r--core/src/test/java/org/elasticsearch/test/SettingsSource.java44
-rw-r--r--core/src/test/java/org/elasticsearch/test/TestCluster.java214
-rw-r--r--core/src/test/java/org/elasticsearch/test/TestSearchContext.java663
-rw-r--r--core/src/test/java/org/elasticsearch/test/VersionUtils.java107
-rw-r--r--core/src/test/java/org/elasticsearch/test/XContentTestUtils.java104
-rw-r--r--core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java567
-rw-r--r--core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArraysModule.java32
-rw-r--r--core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java151
-rw-r--r--core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java32
-rw-r--r--core/src/test/java/org/elasticsearch/test/client/RandomizingClient.java69
-rw-r--r--core/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java167
-rw-r--r--core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java248
-rw-r--r--core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java177
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java96
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java129
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/LongGCDisruption.java110
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java92
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java59
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java202
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java58
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java66
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java42
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java83
-rw-r--r--core/src/test/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java164
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java102
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java227
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java81
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java46
-rw-r--r--core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java190
-rw-r--r--core/src/test/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java53
-rw-r--r--core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java303
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java33
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java59
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java805
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java261
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java145
-rw-r--r--core/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java62
-rw-r--r--core/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java46
-rw-r--r--core/src/test/java/org/elasticsearch/test/junit/annotations/Network.java34
-rw-r--r--core/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java41
-rw-r--r--core/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java118
-rw-r--r--core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java168
-rw-r--r--core/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java80
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java374
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/FakeRestRequest.java104
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java38
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/RestTestCandidate.java67
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java157
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/Stash.java128
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/RestClient.java240
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/RestException.java41
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java115
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java40
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java40
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java223
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java108
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java123
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java89
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java40
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java39
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java34
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java34
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java48
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java40
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java39
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java36
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java33
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java33
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java51
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java161
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java100
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java57
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java54
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java83
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java71
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/Assertion.java83
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/DoSection.java136
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java34
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java58
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java58
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java61
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java55
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java63
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java59
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java58
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java77
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java76
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/SetSection.java52
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java60
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java115
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/section/TestSection.java79
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java216
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java139
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java74
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/support/Features.java57
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java169
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java42
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java174
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java393
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java111
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java169
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java97
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java197
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java367
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java77
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java95
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java137
-rw-r--r--core/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java247
-rw-r--r--core/src/test/java/org/elasticsearch/test/search/MockSearchService.java76
-rw-r--r--core/src/test/java/org/elasticsearch/test/search/MockSearchServiceModule.java32
-rw-r--r--core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java305
-rw-r--r--core/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java44
-rw-r--r--core/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java32
-rw-r--r--core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java131
-rw-r--r--core/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java159
-rw-r--r--core/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterTests.java66
-rw-r--r--core/src/test/java/org/elasticsearch/test/test/TestScopeClusterTests.java63
-rw-r--r--core/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java83
-rw-r--r--core/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java67
-rw-r--r--core/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java180
-rw-r--r--core/src/test/java/org/elasticsearch/test/transport/MockTransportService.java497
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java223
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java100
-rw-r--r--core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java300
-rw-r--r--core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java144
-rw-r--r--core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java1220
-rw-r--r--core/src/test/java/org/elasticsearch/transport/ActionNamesTests.java57
-rw-r--r--core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportTests.java425
-rw-r--r--core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java107
-rw-r--r--core/src/test/java/org/elasticsearch/transport/TransportMessageTests.java96
-rw-r--r--core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java35
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java180
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java127
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java98
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java236
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java182
-rw-r--r--core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java51
-rw-r--r--core/src/test/java/org/elasticsearch/tribe/TribeTests.java426
-rw-r--r--core/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java118
-rw-r--r--core/src/test/java/org/elasticsearch/tribe/elasticsearch.yml3
-rw-r--r--core/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java233
-rw-r--r--core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java128
-rw-r--r--core/src/test/java/org/elasticsearch/update/UpdateNoopTests.java266
-rw-r--r--core/src/test/java/org/elasticsearch/update/UpdateTests.java1585
-rw-r--r--core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java295
-rw-r--r--core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java80
-rw-r--r--core/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java889
-rw-r--r--core/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java404
-rw-r--r--core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java110
-rw-r--r--core/src/test/resources/config/elasticsearch.json3
-rw-r--r--core/src/test/resources/config/elasticsearch.properties2
-rw-r--r--core/src/test/resources/config/elasticsearch.yaml3
-rwxr-xr-xcore/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff201
-rwxr-xr-xcore/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic62120
-rwxr-xr-xcore/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff201
-rwxr-xr-xcore/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic62120
-rw-r--r--core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml2
-rwxr-xr-xcore/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic62120
-rwxr-xr-xcore/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff201
-rwxr-xr-xcore/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff201
-rwxr-xr-xcore/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic62120
-rw-r--r--core/src/test/resources/jmeter/index-count.jmx240
-rw-r--r--core/src/test/resources/jmeter/index-get.jmx211
-rw-r--r--core/src/test/resources/jmeter/index-search.jmx240
-rw-r--r--core/src/test/resources/jmeter/index.jmx210
-rw-r--r--core/src/test/resources/jmeter/ping-single.jmx210
-rw-r--r--core/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf81
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zipbin0 -> 135467 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zipbin0 -> 94223 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zipbin0 -> 98605 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zipbin0 -> 93503 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zipbin0 -> 104290 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zipbin0 -> 102423 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zipbin0 -> 91860 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zipbin0 -> 116902 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zipbin0 -> 122419 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zipbin0 -> 113842 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zipbin0 -> 109714 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zipbin0 -> 92664 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zipbin0 -> 99697 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zipbin0 -> 98220 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zipbin0 -> 105743 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zipbin0 -> 87140 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zipbin0 -> 105754 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zipbin0 -> 100569 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zipbin0 -> 102509 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zipbin0 -> 90693 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zipbin0 -> 96880 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zipbin0 -> 94998 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zipbin0 -> 72960 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zipbin0 -> 97709 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zipbin0 -> 92364 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zipbin0 -> 50178 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zipbin0 -> 83020 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zipbin0 -> 107489 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.0.zipbin0 -> 84446 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zipbin0 -> 79469 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zipbin0 -> 105523 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zipbin0 -> 79181 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zipbin0 -> 84639 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zipbin0 -> 68731 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zipbin0 -> 96730 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zipbin0 -> 91527 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zipbin0 -> 100001 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zipbin0 -> 77474 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zipbin0 -> 74186 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zipbin0 -> 78601 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zipbin0 -> 97221 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zipbin0 -> 85899 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zipbin0 -> 89775 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.Beta1.zipbin0 -> 87691 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zipbin0 -> 83046 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zipbin0 -> 87691 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zipbin0 -> 95682 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zipbin0 -> 81554 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zipbin0 -> 85427 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zipbin0 -> 85609 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.0.zipbin0 -> 75708 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.1.zipbin0 -> 70588 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zipbin0 -> 94939 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zipbin0 -> 96973 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zipbin0 -> 86574 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zipbin0 -> 91930 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zipbin0 -> 91215 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zipbin0 -> 69023 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zipbin0 -> 92720 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zipbin0 -> 87101 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zipbin0 -> 44217 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zipbin0 -> 78523 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zipbin0 -> 102435 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.0.zipbin0 -> 78601 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zipbin0 -> 74774 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zipbin0 -> 99176 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zipbin0 -> 73470 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zipbin0 -> 78744 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zipbin0 -> 65028 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zipbin0 -> 92534 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zipbin0 -> 86675 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zipbin0 -> 96377 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zipbin0 -> 73731 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zipbin0 -> 70471 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zipbin0 -> 75578 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zipbin0 -> 93489 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zipbin0 -> 82553 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zipbin0 -> 86535 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.Beta1.zipbin0 -> 82970 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zipbin0 -> 79187 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zipbin0 -> 83618 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zipbin0 -> 91822 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zipbin0 -> 77470 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zipbin0 -> 82268 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zipbin0 -> 82469 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.0.zipbin0 -> 72023 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.1.zipbin0 -> 66707 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zipbin0 -> 89557 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/unsupported-0.20.6.zipbin0 -> 207883 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/bwcompat/unsupportedrepo-0.20.6.zipbin0 -> 164293 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zipbin0 -> 21330 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zipbin0 -> 21229 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/cluster/routing/issue_9023.zipbin0 -> 33438 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/cluster/routing/shard_routes.txt217
-rw-r--r--core/src/test/resources/org/elasticsearch/common/cli/tool-cmd1.help1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/cli/tool.help1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/dir/file2.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/file1.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/file2.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/subdir/file4.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file1.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file3.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/file2.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file4.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file5.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file1.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file3.txt1
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/log4j/config/logging.yml12
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/logging.yml10
-rw-r--r--core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/test3/logging.yml10
-rw-r--r--core/src/test/resources/org/elasticsearch/gateway/global-3.stbin0 -> 109 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/query/config/scripts/full-query-template.mustache6
-rw-r--r--core/src/test/resources/org/elasticsearch/index/query/config/scripts/storedTemplate.mustache3
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/legacy_translogs.zipbin0 -> 6807 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-invalid-first-byte.binarybin0 -> 91 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-v0.binarybin0 -> 91 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-body.binarybin0 -> 417 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binarybin0 -> 417 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-v1-truncated.binarybin0 -> 383 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/index/translog/translog-v1.binarybin0 -> 417 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties21
-rw-r--r--core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/anotherplugin/_site/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty0
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/dummy/_site/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/loading/jar/in-jar-plugin.jarbin0 -> 2039 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/loading/zip/in-zip-plugin.jarbin0 -> 2042 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_folder_file.zipbin0 -> 503 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_folder_site.zipbin0 -> 373 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_single_folder.zipbin0 -> 405 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_and_config.zipbin0 -> 681 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_only.zipbin0 -> 166 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_classfile.zipbin0 -> 191 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v1.zipbin0 -> 479 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v2.zipbin0 -> 1175 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v3.zipbin0 -> 1527 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_with_sourcefiles.zipbin0 -> 2412 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/plugin_without_folders.zipbin0 -> 221 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir/index.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir_without_index/page.html9
-rw-r--r--core/src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade/index-0.90.6.zipbin0 -> 85057 bytes
-rw-r--r--core/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression1
-rw-r--r--core/src/test/resources/org/elasticsearch/script/config/scripts/script1.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/script/config/scripts/script1.mustache1
-rw-r--r--core/src/test/resources/org/elasticsearch/script/config/scripts/script2.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/combine_script.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/init_script.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/map_script.groovy1
-rw-r--r--core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/reduce_script.groovy1
-rw-r--r--core/src/test/resources/packaging/scripts/20_tar_package.bats95
-rw-r--r--core/src/test/resources/packaging/scripts/25_tar_plugins.bats344
-rw-r--r--core/src/test/resources/packaging/scripts/30_deb_package.bats177
-rw-r--r--core/src/test/resources/packaging/scripts/40_rpm_package.bats142
-rw-r--r--core/src/test/resources/packaging/scripts/50_plugins.bats380
-rw-r--r--core/src/test/resources/packaging/scripts/50_systemd.bats146
-rw-r--r--core/src/test/resources/packaging/scripts/packaging_test_utils.bash464
1456 files changed, 499986 insertions, 0 deletions
diff --git a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
new file mode 100644
index 0000000000..9f95ec6147
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilterTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+/**
+ */
+
+public class TruncateTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+ return new TokenStreamComponents(t, new TruncateTokenFilter(t, 3));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "a bb ccc dddd eeeee");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("a"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("bb"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ccc"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("ddd"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("eee"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
new file mode 100644
index 0000000000..e8c074be47
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/analysis/miscellaneous/UniqueTokenFilterTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.analysis.miscellaneous;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class UniqueTokenFilterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleTest() throws IOException {
+ Analyzer analyzer = new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new MockTokenizer(MockTokenizer.WHITESPACE, false);
+ return new TokenStreamComponents(t, new UniqueTokenFilter(t));
+ }
+ };
+
+ TokenStream test = analyzer.tokenStream("test", "this test with test");
+ test.reset();
+ CharTermAttribute termAttribute = test.addAttribute(CharTermAttribute.class);
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("this"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("test"));
+
+ assertThat(test.incrementToken(), equalTo(true));
+ assertThat(termAttribute.toString(), equalTo("with"));
+
+ assertThat(test.incrementToken(), equalTo(false));
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTest.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTest.java
new file mode 100644
index 0000000000..634026a5df
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTest.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.lucene.queries;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.similarities.BM25Similarity;
+import org.apache.lucene.search.similarities.DefaultSimilarity;
+import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class BlendedTermQueryTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testBooleanQuery() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+ String[] firstNames = new String[]{
+ "simon", "paul"
+ };
+ String[] surNames = new String[]{
+ "willnauer", "simon"
+ };
+ for (int i = 0; i < surNames.length; i++) {
+ Document d = new Document();
+ d.add(new TextField("id", Integer.toString(i), Field.Store.YES));
+ d.add(new TextField("firstname", firstNames[i], Field.Store.NO));
+ d.add(new TextField("surname", surNames[i], Field.Store.NO));
+ w.addDocument(d);
+ }
+ int iters = scaledRandomIntBetween(25, 100);
+ for (int j = 0; j < iters; j++) {
+ Document d = new Document();
+ d.add(new TextField("id", Integer.toString(firstNames.length + j), Field.Store.YES));
+ d.add(new TextField("firstname", rarely() ? "some_other_name" :
+ "simon the sorcerer", Field.Store.NO)); // make sure length-norm is the tie-breaker
+ d.add(new TextField("surname", "bogus", Field.Store.NO));
+ w.addDocument(d);
+ }
+ w.commit();
+ DirectoryReader reader = DirectoryReader.open(w, true);
+ IndexSearcher searcher = setSimilarity(newSearcher(reader));
+
+ {
+ Term[] terms = new Term[]{new Term("firstname", "simon"), new Term("surname", "simon")};
+ BlendedTermQuery query = BlendedTermQuery.booleanBlendedQuery(terms, true);
+ TopDocs search = searcher.search(query, 3);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(3, scoreDocs.length);
+ assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+ }
+ {
+ BooleanQuery query = new BooleanQuery(false);
+ query.add(new TermQuery(new Term("firstname", "simon")), BooleanClause.Occur.SHOULD);
+ query.add(new TermQuery(new Term("surname", "simon")), BooleanClause.Occur.SHOULD);
+ TopDocs search = searcher.search(query, 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+
+ }
+ reader.close();
+ w.close();
+ dir.close();
+
+ }
+
+ @Test
+ public void testDismaxQuery() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
+ String[] username = new String[]{
+ "foo fighters", "some cool fan", "cover band"};
+ String[] song = new String[]{
+ "generator", "foo fighers - generator", "foo fighters generator"
+ };
+ final boolean omitNorms = random().nextBoolean();
+ FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
+ ft.setIndexOptions(random().nextBoolean() ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS);
+ ft.setOmitNorms(omitNorms);
+ ft.freeze();
+
+ FieldType ft1 = new FieldType(TextField.TYPE_NOT_STORED);
+ ft1.setIndexOptions(random().nextBoolean() ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS);
+ ft1.setOmitNorms(omitNorms);
+ ft1.freeze();
+ for (int i = 0; i < username.length; i++) {
+ Document d = new Document();
+ d.add(new TextField("id", Integer.toString(i), Field.Store.YES));
+ d.add(new Field("username", username[i], ft));
+ d.add(new Field("song", song[i], ft));
+ w.addDocument(d);
+ }
+ int iters = scaledRandomIntBetween(25, 100);
+ for (int j = 0; j < iters; j++) {
+ Document d = new Document();
+ d.add(new TextField("id", Integer.toString(username.length + j), Field.Store.YES));
+ d.add(new Field("username", "foo fighters", ft1));
+ d.add(new Field("song", "some bogus text to bump up IDF", ft1));
+ w.addDocument(d);
+ }
+ w.commit();
+ DirectoryReader reader = DirectoryReader.open(w, true);
+ IndexSearcher searcher = setSimilarity(newSearcher(reader));
+ {
+ String[] fields = new String[]{"username", "song"};
+ BooleanQuery query = new BooleanQuery(false);
+ query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f), BooleanClause.Occur.SHOULD);
+ query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "fighters"), 0.1f), BooleanClause.Occur.SHOULD);
+ query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "generator"), 0.1f), BooleanClause.Occur.SHOULD);
+ TopDocs search = searcher.search(query, 10);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+ }
+ {
+ BooleanQuery query = new BooleanQuery(false);
+ DisjunctionMaxQuery uname = new DisjunctionMaxQuery(0.0f);
+ uname.add(new TermQuery(new Term("username", "foo")));
+ uname.add(new TermQuery(new Term("song", "foo")));
+
+ DisjunctionMaxQuery s = new DisjunctionMaxQuery(0.0f);
+ s.add(new TermQuery(new Term("username", "fighers")));
+ s.add(new TermQuery(new Term("song", "fighers")));
+ DisjunctionMaxQuery gen = new DisjunctionMaxQuery(0f);
+ gen.add(new TermQuery(new Term("username", "generator")));
+ gen.add(new TermQuery(new Term("song", "generator")));
+ query.add(uname, BooleanClause.Occur.SHOULD);
+ query.add(s, BooleanClause.Occur.SHOULD);
+ query.add(gen, BooleanClause.Occur.SHOULD);
+ TopDocs search = searcher.search(query, 4);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+
+ }
+ reader.close();
+ w.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBasics() {
+ final int iters = scaledRandomIntBetween(5, 25);
+ for (int j = 0; j < iters; j++) {
+ String[] fields = new String[1 + random().nextInt(10)];
+ for (int i = 0; i < fields.length; i++) {
+ fields[i] = TestUtil.randomRealisticUnicodeString(random(), 1, 10);
+ }
+ String term = TestUtil.randomRealisticUnicodeString(random(), 1, 10);
+ Term[] terms = toTerms(fields, term);
+ boolean disableCoord = random().nextBoolean();
+ boolean useBoolean = random().nextBoolean();
+ float tieBreaker = random().nextFloat();
+ BlendedTermQuery query = useBoolean ? BlendedTermQuery.booleanBlendedQuery(terms, disableCoord) : BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker);
+ QueryUtils.check(query);
+ terms = toTerms(fields, term);
+ BlendedTermQuery query2 = useBoolean ? BlendedTermQuery.booleanBlendedQuery(terms, disableCoord) : BlendedTermQuery.dismaxBlendedQuery(terms, tieBreaker);
+ assertEquals(query, query2);
+ }
+ }
+
+ public Term[] toTerms(String[] fields, String term) {
+ Term[] terms = new Term[fields.length];
+ List<String> fieldsList = Arrays.asList(fields);
+ Collections.shuffle(fieldsList, random());
+ fields = fieldsList.toArray(new String[0]);
+ for (int i = 0; i < fields.length; i++) {
+ terms[i] = new Term(fields[i], term);
+ }
+ return terms;
+ }
+
+ public IndexSearcher setSimilarity(IndexSearcher searcher) {
+ Similarity similarity = random().nextBoolean() ? new BM25Similarity() : new DefaultSimilarity();
+ searcher.setSimilarity(similarity);
+ return searcher;
+ }
+
+ @Test
+ public void testExtractTerms() throws IOException {
+ Set<Term> terms = new HashSet<>();
+ int num = scaledRandomIntBetween(1, 10);
+ for (int i = 0; i < num; i++) {
+ terms.add(new Term(TestUtil.randomRealisticUnicodeString(random(), 1, 10), TestUtil.randomRealisticUnicodeString(random(), 1, 10)));
+ }
+
+ BlendedTermQuery blendedTermQuery = random().nextBoolean() ? BlendedTermQuery.dismaxBlendedQuery(terms.toArray(new Term[0]), random().nextFloat()) :
+ BlendedTermQuery.booleanBlendedQuery(terms.toArray(new Term[0]), random().nextBoolean());
+ Set<Term> extracted = new HashSet<>();
+ IndexSearcher searcher = new IndexSearcher(new MultiReader());
+ searcher.createNormalizedWeight(blendedTermQuery, false).extractTerms(extracted);
+ assertThat(extracted.size(), equalTo(terms.size()));
+ assertThat(extracted, containsInAnyOrder(terms.toArray(new Term[0])));
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
new file mode 100644
index 0000000000..2399502925
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPassageFormatterTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+
+public class CustomPassageFormatterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleFormat() {
+ String content = "This is a really cool highlighter. Postings highlighter gives nice snippets back. No matches here.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new DefaultEncoder());
+
+ Passage[] passages = new Passage[3];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 2; //lets include the whitespace at the end to make sure we trim it
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = end + 26;
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Passage passage3 = new Passage();
+ passage3.startOffset = passage2.endOffset;
+ passage3.endOffset = content.length();
+ passages[2] = passage3;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(3));
+ assertThat(fragments[0].getText(), equalTo("This is a really cool <em>highlighter</em>."));
+ assertThat(fragments[0].isHighlighted(), equalTo(true));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ assertThat(fragments[1].isHighlighted(), equalTo(true));
+ assertThat(fragments[2].getText(), equalTo("No matches here."));
+ assertThat(fragments[2].isHighlighted(), equalTo(false));
+ }
+
+ @Test
+ public void testHtmlEncodeFormat() {
+ String content = "<b>This is a really cool highlighter.</b> Postings highlighter gives nice snippets back.";
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<em>", "</em>", new SimpleHTMLEncoder());
+
+ Passage[] passages = new Passage[2];
+ String match = "highlighter";
+ BytesRef matchBytesRef = new BytesRef(match);
+
+ Passage passage1 = new Passage();
+ int start = content.indexOf(match);
+ int end = start + match.length();
+ passage1.startOffset = 0;
+ passage1.endOffset = end + 6; //lets include the whitespace at the end to make sure we trim it
+ passage1.addMatch(start, end, matchBytesRef);
+ passages[0] = passage1;
+
+ Passage passage2 = new Passage();
+ start = content.lastIndexOf(match);
+ end = start + match.length();
+ passage2.startOffset = passage1.endOffset;
+ passage2.endOffset = content.length();
+ passage2.addMatch(start, end, matchBytesRef);
+ passages[1] = passage2;
+
+ Snippet[] fragments = passageFormatter.format(passages, content);
+ assertThat(fragments, notNullValue());
+ assertThat(fragments.length, equalTo(2));
+ assertThat(fragments[0].getText(), equalTo("&lt;b&gt;This is a really cool <em>highlighter</em>.&lt;&#x2F;b&gt;"));
+ assertThat(fragments[1].getText(), equalTo("Postings <em>highlighter</em> gives nice snippets back."));
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
new file mode 100644
index 0000000000..8d04caae60
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighterTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elasticsearch licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.highlight.DefaultEncoder;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.search.highlight.HighlightUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class CustomPostingsHighlighterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomPostingsHighlighter() throws Exception { // four values of one field; each yields a snippet, returned in value order
+
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); // offsets in postings are required by the postings highlighter
+
+ //good position but only one match
+ final String firstValue = "This is a test. Just a test1 highlighting from postings highlighter.";
+ Field body = new Field("body", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ body.setStringValue(firstValue);
+
+ //two matches, not the best snippet due to its length though
+ final String secondValue = "This is the second highlighting value to perform highlighting on a longer text that gets scored lower.";
+ Field body2 = new Field("body", "", offsetsType);
+ doc.add(body2);
+ body2.setStringValue(secondValue);
+
+ //two matches and short, will be scored highest
+ final String thirdValue = "This is highlighting the third short highlighting value.";
+ Field body3 = new Field("body", "", offsetsType);
+ doc.add(body3);
+ body3.setStringValue(thirdValue);
+
+ //one match, same as first but at the end, will be scored lower due to its position
+ final String fourthValue = "Just a test4 highlighting from postings highlighter.";
+ Field body4 = new Field("body", "", offsetsType);
+ doc.add(body4);
+ body4.setStringValue(fourthValue);
+
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ String firstHlValue = "Just a test1 <b>highlighting</b> from postings highlighter.";
+ String secondHlValue = "This is the second <b>highlighting</b> value to perform <b>highlighting</b> on a longer text that gets scored lower.";
+ String thirdHlValue = "This is <b>highlighting</b> the third short <b>highlighting</b> value.";
+ String fourthHlValue = "Just a test4 <b>highlighting</b> from postings highlighter.";
+
+ IndexSearcher searcher = newSearcher(ir);
+ Query query = new TermQuery(new Term("body", "highlighting"));
+
+ TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ int docId = topDocs.scoreDocs[0].doc;
+
+ String fieldValue = firstValue + HighlightUtils.PARAGRAPH_SEPARATOR + secondValue + HighlightUtils.PARAGRAPH_SEPARATOR + thirdValue + HighlightUtils.PARAGRAPH_SEPARATOR + fourthValue;
+
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(null, new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder()), fieldValue, false);
+ Snippet[] snippets = highlighter.highlightField("body", query, searcher, docId, 5);
+
+ assertThat(snippets.length, equalTo(4));
+
+ assertThat(snippets[0].getText(), equalTo(firstHlValue));
+ assertThat(snippets[1].getText(), equalTo(secondHlValue));
+ assertThat(snippets[2].getText(), equalTo(thirdHlValue));
+ assertThat(snippets[3].getText(), equalTo(fourthHlValue));
+
+ ir.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNoMatchSize() throws Exception { // query only matches the "none" field, so "body" has no hits to highlight
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+ iwc.setMergePolicy(newLogMergePolicy());
+ RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+
+ FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+ offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+ Field body = new Field("body", "", offsetsType);
+ Field none = new Field("none", "", offsetsType);
+ Document doc = new Document();
+ doc.add(body);
+ doc.add(none);
+
+ String firstValue = "This is a test. Just a test highlighting from postings. Feel free to ignore.";
+ body.setStringValue(firstValue);
+ none.setStringValue(firstValue);
+ iw.addDocument(doc);
+
+ IndexReader ir = iw.getReader();
+ iw.close();
+
+ Query query = new TermQuery(new Term("none", "highlighting"));
+
+ IndexSearcher searcher = newSearcher(ir);
+ TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
+ assertThat(topDocs.totalHits, equalTo(1));
+ int docId = topDocs.scoreDocs[0].doc;
+
+ CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
+
+ CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(null, passageFormatter, firstValue, false); // last arg false: no fallback snippet without a match
+ Snippet[] snippets = highlighter.highlightField("body", query, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(0));
+
+ highlighter = new CustomPostingsHighlighter(null, passageFormatter, firstValue, true); // true: return the first sentence even without a match
+ snippets = highlighter.highlightField("body", query, searcher, docId, 5);
+ assertThat(snippets.length, equalTo(1));
+ assertThat(snippets[0].getText(), equalTo("This is a test."));
+
+ ir.close();
+ dir.close();
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java
new file mode 100644
index 0000000000..0df6add537
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIteratorTests.java
@@ -0,0 +1,186 @@
+/*
+Licensed to Elasticsearch under one or more contributor
+license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright
+ownership. Elasticsearch licenses this file to you under
+the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ *
+ http://www.apache.org/licenses/LICENSE-2.0
+ *
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+ */
+
+package org.apache.lucene.search.postingshighlight;
+
+import org.elasticsearch.search.highlight.HighlightUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.text.BreakIterator;
+import java.text.CharacterIterator;
+import java.text.StringCharacterIterator;
+import java.util.Locale;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class CustomSeparatorBreakIteratorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testBreakOnCustomSeparator() throws Exception { // walk forward, backward, and via following()/preceding()/next(n) over a separated string
+ Character separator = randomSeparator();
+ BreakIterator bi = new CustomSeparatorBreakIterator(separator);
+ String source = "this" + separator + "is" + separator + "the" + separator + "first" + separator + "sentence";
+ bi.setText(source);
+ assertThat(bi.current(), equalTo(0));
+ assertThat(bi.first(), equalTo(0));
+ assertThat(source.substring(bi.current(), bi.next()), equalTo("this" + separator)); // the separator belongs to the preceding chunk
+ assertThat(source.substring(bi.current(), bi.next()), equalTo("is" + separator));
+ assertThat(source.substring(bi.current(), bi.next()), equalTo("the" + separator));
+ assertThat(source.substring(bi.current(), bi.next()), equalTo("first" + separator));
+ assertThat(source.substring(bi.current(), bi.next()), equalTo("sentence"));
+ assertThat(bi.next(), equalTo(BreakIterator.DONE));
+
+ assertThat(bi.last(), equalTo(source.length()));
+ int current = bi.current();
+ assertThat(source.substring(bi.previous(), current), equalTo("sentence"));
+ current = bi.current();
+ assertThat(source.substring(bi.previous(), current), equalTo("first" + separator));
+ current = bi.current();
+ assertThat(source.substring(bi.previous(), current), equalTo("the" + separator));
+ current = bi.current();
+ assertThat(source.substring(bi.previous(), current), equalTo("is" + separator));
+ current = bi.current();
+ assertThat(source.substring(bi.previous(), current), equalTo("this" + separator));
+ assertThat(bi.previous(), equalTo(BreakIterator.DONE));
+ assertThat(bi.current(), equalTo(0));
+
+ assertThat(source.substring(0, bi.following(9)), equalTo("this" + separator + "is" + separator + "the" + separator));
+
+ assertThat(source.substring(0, bi.preceding(9)), equalTo("this" + separator + "is" + separator));
+
+ assertThat(bi.first(), equalTo(0));
+ assertThat(source.substring(0, bi.next(3)), equalTo("this" + separator + "is" + separator + "the" + separator)); // next(3) advances three boundaries at once
+ }
+
+ @Test
+ public void testSingleSentences() throws Exception { // with no separator present, behavior must match the JDK sentence iterator
+ BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+ BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+ assertSameBreaks("a", expected, actual);
+ assertSameBreaks("ab", expected, actual);
+ assertSameBreaks("abc", expected, actual);
+ assertSameBreaks("", expected, actual);
+ }
+
+ @Test
+ public void testSliceEnd() throws Exception { // text slice that stops before the end of the backing string
+ BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+ BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+ assertSameBreaks("a000", 0, 1, expected, actual);
+ assertSameBreaks("ab000", 0, 1, expected, actual);
+ assertSameBreaks("abc000", 0, 1, expected, actual);
+ assertSameBreaks("000", 0, 0, expected, actual);
+ }
+
+ @Test
+ public void testSliceStart() throws Exception { // text slice that starts past the beginning of the backing string
+ BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+ BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+ assertSameBreaks("000a", 3, 1, expected, actual);
+ assertSameBreaks("000ab", 3, 2, expected, actual);
+ assertSameBreaks("000abc", 3, 3, expected, actual);
+ assertSameBreaks("000", 3, 0, expected, actual);
+ }
+
+ @Test
+ public void testSliceMiddle() throws Exception { // text slice surrounded by extra characters on both sides
+ BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+ BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+ assertSameBreaks("000a000", 3, 1, expected, actual);
+ assertSameBreaks("000ab000", 3, 2, expected, actual);
+ assertSameBreaks("000abc000", 3, 3, expected, actual);
+ assertSameBreaks("000000", 3, 0, expected, actual);
+ }
+
+ /** the current position must be ignored, initial position is always first() */
+ @Test
+ public void testFirstPosition() throws Exception {
+ BreakIterator expected = BreakIterator.getSentenceInstance(Locale.ROOT);
+ BreakIterator actual = new CustomSeparatorBreakIterator(randomSeparator());
+ assertSameBreaks("000ab000", 3, 2, 4, expected, actual);
+ }
+
+ private static char randomSeparator() { // one of the separators actually used by the highlighter, or a plain space
+ return randomFrom(' ', HighlightUtils.NULL_SEPARATOR, HighlightUtils.PARAGRAPH_SEPARATOR);
+ }
+
+ private static void assertSameBreaks(String text, BreakIterator expected, BreakIterator actual) { // whole-string convenience overload
+ assertSameBreaks(new StringCharacterIterator(text),
+ new StringCharacterIterator(text),
+ expected,
+ actual);
+ }
+
+ private static void assertSameBreaks(String text, int offset, int length, BreakIterator expected, BreakIterator actual) { // slice overload, current == offset
+ assertSameBreaks(text, offset, length, offset, expected, actual);
+ }
+
+ private static void assertSameBreaks(String text, int offset, int length, int current, BreakIterator expected, BreakIterator actual) {
+ assertSameBreaks(new StringCharacterIterator(text, offset, offset + length, current),
+ new StringCharacterIterator(text, offset, offset + length, current),
+ expected,
+ actual);
+ }
+
+ /** Asserts that two breakiterators break the text the same way */
+ private static void assertSameBreaks(CharacterIterator one, CharacterIterator two, BreakIterator expected, BreakIterator actual) {
+ expected.setText(one);
+ actual.setText(two);
+
+ assertEquals(expected.current(), actual.current());
+
+ // next()
+ int v = expected.current();
+ while (v != BreakIterator.DONE) {
+ assertEquals(v = expected.next(), actual.next());
+ assertEquals(expected.current(), actual.current());
+ }
+
+ // first()
+ assertEquals(expected.first(), actual.first());
+ assertEquals(expected.current(), actual.current());
+ // last()
+ assertEquals(expected.last(), actual.last());
+ assertEquals(expected.current(), actual.current());
+
+ // previous()
+ v = expected.current();
+ while (v != BreakIterator.DONE) {
+ assertEquals(v = expected.previous(), actual.previous());
+ assertEquals(expected.current(), actual.current());
+ }
+
+ // following()
+ for (int i = one.getBeginIndex(); i <= one.getEndIndex(); i++) {
+ expected.first();
+ actual.first();
+ assertEquals(expected.following(i), actual.following(i));
+ assertEquals(expected.current(), actual.current());
+ }
+
+ // preceding()
+ for (int i = one.getBeginIndex(); i <= one.getEndIndex(); i++) {
+ expected.last();
+ actual.last();
+ assertEquals(expected.preceding(i), actual.preceding(i));
+ assertEquals(expected.current(), actual.current());
+ }
+ }
+}
diff --git a/core/src/test/java/org/apache/lucene/util/SloppyMathTests.java b/core/src/test/java/org/apache/lucene/util/SloppyMathTests.java
new file mode 100644
index 0000000000..61a74b49e0
--- /dev/null
+++ b/core/src/test/java/org/apache/lucene/util/SloppyMathTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.lucene.util;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class SloppyMathTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testAccuracy() { // exhaustive latitude/longitude sweep, second point randomized
+ for (double lat1 = -89; lat1 <= 89; lat1+=1) {
+ final double lon1 = randomLongitude();
+
+ for (double i = -180; i <= 180; i+=1) {
+ final double lon2 = i;
+ final double lat2 = randomLatitude();
+
+ assertAccurate(lat1, lon1, lat2, lon2);
+ }
+ }
+ }
+
+ @Test
+ public void testSloppyMath() { // same delta set across every supported distance unit
+ testSloppyMath(DistanceUnit.METERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.KILOMETERS, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.INCH, 0.01, 5, 45, 90);
+ testSloppyMath(DistanceUnit.MILES, 0.01, 5, 45, 90);
+ }
+
+ private static double maxError(double distance) { // tolerated error: 0.1% of the accurate distance
+ return distance / 1000.0;
+ }
+
+ private void testSloppyMath(DistanceUnit unit, double...deltaDeg) {
+ final double lat1 = randomLatitude();
+ final double lon1 = randomLongitude();
+ logger.info("testing SloppyMath with {} at \"{}, {}\"", unit, lat1, lon1);
+
+ for (int test = 0; test < deltaDeg.length; test++) {
+ for (int i = 0; i < 100; i++) {
+ // crop pole areas, since we know that the function
+ // is not accurate around lat(89°, 90°) and lat(-90°, -89°)
+ final double lat2 = Math.max(-89.0, Math.min(+89.0, lat1 + (random().nextDouble() - 0.5) * 2 * deltaDeg[test]));
+ final double lon2 = lon1 + (random().nextDouble() - 0.5) * 2 * deltaDeg[test];
+
+ final double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, unit);
+ final double dist = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, unit);
+
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", dist, closeTo(accurate, maxError(accurate)));
+ }
+ }
+ }
+
+ private static void assertAccurate(double lat1, double lon1, double lat2, double lon2) { // SLOPPY_ARC must agree with ARC within maxError
+ double accurate = GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ double sloppy = GeoDistance.SLOPPY_ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.METERS);
+ assertThat("distance between("+lat1+", "+lon1+") and ("+lat2+", "+lon2+"))", sloppy, closeTo(accurate, maxError(accurate)));
+ }
+
+ private static final double randomLatitude() {
+ // crop pole areas, since we know that the function
+ // is not accurate around lat(89°, 90°) and lat(-90°, -89°)
+ return (random().nextDouble() - 0.5) * 178.0;
+ }
+
+ private static final double randomLongitude() {
+ return (random().nextDouble() - 0.5) * 360.0;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/core/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
new file mode 100644
index 0000000000..b00c76ded6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.query.TestQueryParsingException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.junit.Test;
+
+import java.io.EOFException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ElasticsearchExceptionTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testStatus() { // RestStatus is derived from the innermost meaningful cause when wrapped
+ ElasticsearchException exception = new ElasticsearchException("test");
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new RuntimeException());
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new ElasticsearchException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+
+ exception = new RemoteTransportException("test", new IndexMissingException(new Index("test")));
+ assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND));
+
+ exception = new RemoteTransportException("test", new IllegalArgumentException("foobar"));
+ assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
+
+ exception = new RemoteTransportException("test", new IllegalStateException("foobar"));
+ assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR));
+ }
+
+ public void testGuessRootCause() { // root-cause extraction through nesting, transport wrappers and shard failures
+ {
+ ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IndexException(new Index("foo"), "index is closed", new RuntimeException("foobar"))));
+ ElasticsearchException[] rootCauses = exception.guessRootCauses();
+ assertEquals(rootCauses.length, 1);
+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "index_exception");
+ assertEquals(rootCauses[0].getMessage(), "index is closed");
+ ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 1));
+ ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 2));
+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1});
+ if (randomBoolean()) {
+ rootCauses = (randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex).guessRootCauses();
+ } else {
+ rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex);
+ }
+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception");
+ assertEquals(rootCauses[0].getMessage(), "foobar");
+
+ ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar"));
+ rootCauses = oneLevel.guessRootCauses();
+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception");
+ assertEquals(rootCauses[0].getMessage(), "foo");
+ }
+ {
+ ShardSearchFailure failure = new ShardSearchFailure(
+ new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null),
+ new SearchShardTarget("node_1", "foo", 1));
+ ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null),
+ new SearchShardTarget("node_1", "foo1", 1));
+ ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null),
+ new SearchShardTarget("node_1", "foo1", 2));
+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2});
+ final ElasticsearchException[] rootCauses = ex.guessRootCauses(); // failures on two distinct indices yield two root causes
+ assertEquals(rootCauses.length, 2);
+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception");
+ assertEquals(rootCauses[0].getMessage(), "foobar");
+ assertEquals(((QueryParsingException)rootCauses[0]).index().name(), "foo");
+ assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "test_query_parsing_exception");
+ assertEquals(rootCauses[1].getMessage(), "foobar");
+ assertEquals(((QueryParsingException) rootCauses[1]).getLineNumber(), 1);
+ assertEquals(((QueryParsingException) rootCauses[1]).getColumnNumber(), 2);
+
+ }
+
+ {
+ final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar"));
+ assertEquals(foobars.length, 1);
+ assertTrue(foobars[0] instanceof ElasticsearchException);
+ assertEquals(foobars[0].getMessage(), "foobar");
+ assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class);
+ assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception");
+ }
+
+ }
+
+ public void testDeduplicate() throws IOException { // identical shard failures on the same index are grouped into one entry
+ {
+ ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 1));
+ ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 2));
+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1});
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]}";
+ assertEquals(expected, builder.string());
+ }
+ {
+ ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 1));
+ ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null),
+ new SearchShardTarget("node_1", "foo1", 1));
+ ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null),
+ new SearchShardTarget("node_1", "foo1", 2));
+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2});
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}";
+ assertEquals(expected, builder.string());
+ }
+ }
+
+ public void testGetRootCause() {
+ Exception root = new RuntimeException("foobar");
+ ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", root)));
+ assertEquals(root, exception.getRootCause());
+ assertTrue(exception.contains(RuntimeException.class));
+ assertFalse(exception.contains(EOFException.class));
+ }
+
+ public void testToString() {
+ ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", new RuntimeException("foobar"))));
+ assertEquals("ElasticsearchException[foo]; nested: ElasticsearchException[bar]; nested: IllegalArgumentException[index is closed]; nested: RuntimeException[foobar];", exception.toString());
+ }
+
+ public void testToXContent() throws IOException { // JSON rendering of nested causes, wrappers and custom metadata fields
+ {
+ ElasticsearchException ex = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", new RuntimeException("foobar"))));
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ex.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+
+ String expected = "{\"type\":\"exception\",\"reason\":\"foo\",\"caused_by\":{\"type\":\"exception\",\"reason\":\"bar\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"index is closed\",\"caused_by\":{\"type\":\"runtime_exception\",\"reason\":\"foobar\"}}}}";
+ assertEquals(expected, builder.string());
+ }
+
+ {
+ Exception ex = new FileNotFoundException("foo not found");
+ if (randomBoolean()) {
+ // just a wrapper which is omitted
+ ex = new RemoteTransportException("foobar", ex);
+ }
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex);
+ builder.endObject();
+
+ String expected = "{\"type\":\"file_not_found_exception\",\"reason\":\"foo not found\"}";
+ assertEquals(expected, builder.string());
+ }
+
+ {
+ QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null);
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex);
+ builder.endObject();
+ String expected = "{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2,\"index\":\"foo\"}";
+ assertEquals(expected, builder.string());
+ }
+
+ { // test equivalence
+ ElasticsearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found"));
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex);
+ builder.endObject();
+
+ XContentBuilder otherBuilder = XContentFactory.jsonBuilder();
+
+ otherBuilder.startObject();
+ ex.toXContent(otherBuilder, ToXContent.EMPTY_PARAMS);
+ otherBuilder.endObject();
+ assertEquals(otherBuilder.string(), builder.string());
+ }
+ }
+
+ public void testSerializeElasticsearchException() throws IOException { // round-trip through the stream layer keeps index, message and position
+ BytesStreamOutput out = new BytesStreamOutput();
+ QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null);
+ out.writeThrowable(ex);
+
+ StreamInput in = StreamInput.wrap(out.bytes());
+ QueryParsingException e = in.readThrowable();
+ assertEquals(ex.index(), e.index());
+ assertEquals(ex.getMessage(), e.getMessage());
+ assertEquals(ex.getLineNumber(), e.getLineNumber());
+ assertEquals(ex.getColumnNumber(), e.getColumnNumber());
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/NamingConventionTests.java b/core/src/test/java/org/elasticsearch/NamingConventionTests.java
new file mode 100644
index 0000000000..db39c66f30
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/NamingConventionTests.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Sets;
+
+import junit.framework.TestCase;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.net.URISyntaxException;
+import java.nio.file.*;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Simple class that ensures that all subclasses concrete of ElasticsearchTestCase end with either Test | Tests
+ */
+public class NamingConventionTests extends ElasticsearchTestCase {
+
+ // see https://github.com/elasticsearch/elasticsearch/issues/9945
+ public void testNamingConventions()
+ throws ClassNotFoundException, IOException, URISyntaxException {
+ final Set<Class> notImplementing = new HashSet<>();
+ final Set<Class> pureUnitTest = new HashSet<>();
+ final Set<Class> missingSuffix = new HashSet<>();
+ String[] packages = {"org.elasticsearch", "org.apache.lucene"};
+ for (final String packageName : packages) {
+ final String path = "/" + packageName.replace('.', '/');
+ final Path startPath = getDataPath(path);
+ final Set<Path> ignore = Sets.newHashSet(PathUtils.get("/org/elasticsearch/stresstest"), PathUtils.get("/org/elasticsearch/benchmark/stress"));
+ Files.walkFileTree(startPath, new FileVisitor<Path>() {
+ private Path pkgPrefix = PathUtils.get(path).getParent();
+ @Override
+ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+ Path next = pkgPrefix.resolve(dir.getFileName());
+ if (ignore.contains(next)) {
+ return FileVisitResult.SKIP_SUBTREE;
+ }
+ pkgPrefix = next;
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ try {
+ String filename = file.getFileName().toString();
+ if (filename.endsWith(".class")) {
+ Class<?> clazz = loadClass(filename);
+ if (Modifier.isAbstract(clazz.getModifiers()) == false && Modifier.isInterface(clazz.getModifiers()) == false) {
+ if ((clazz.getName().endsWith("Tests") || clazz.getName().endsWith("Test"))) { // don't worry about the ones that match the pattern
+ if (isTestCase(clazz) == false) {
+ notImplementing.add(clazz);
+ }
+ } else if (isTestCase(clazz)) {
+ missingSuffix.add(clazz);
+ } else if (junit.framework.Test.class.isAssignableFrom(clazz) || hasTestAnnotation(clazz)) {
+ pureUnitTest.add(clazz);
+ }
+ }
+
+ }
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ return FileVisitResult.CONTINUE;
+ }
+
+ private boolean hasTestAnnotation(Class<?> clazz) {
+ for (Method method : clazz.getDeclaredMethods()) {
+ if (method.getAnnotation(Test.class) != null) {
+ return true;
+ }
+ }
+ return false;
+
+ }
+
+ private boolean isTestCase(Class<?> clazz) {
+ return ElasticsearchTestCase.class.isAssignableFrom(clazz) || ElasticsearchTestCase.class.isAssignableFrom(clazz) || ElasticsearchTokenStreamTestCase.class.isAssignableFrom(clazz) || LuceneTestCase.class.isAssignableFrom(clazz);
+ }
+
+ private Class<?> loadClass(String filename) throws ClassNotFoundException {
+ StringBuilder pkg = new StringBuilder();
+ for (Path p : pkgPrefix) {
+ pkg.append(p.getFileName().toString()).append(".");
+ }
+ pkg.append(filename.substring(0, filename.length() - 6));
+ return Thread.currentThread().getContextClassLoader().loadClass(pkg.toString());
+ }
+
+ @Override
+ public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
+ throw exc;
+ }
+
+ @Override
+ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
+ pkgPrefix = pkgPrefix.getParent();
+ return FileVisitResult.CONTINUE;
+ }
+ });
+
+ }
+ assertTrue(missingSuffix.remove(WrongName.class));
+ assertTrue(missingSuffix.remove(WrongNameTheSecond.class));
+ assertTrue(notImplementing.remove(NotImplementingTests.class));
+ assertTrue(notImplementing.remove(NotImplementingTest.class));
+ assertTrue(pureUnitTest.remove(PlainUnit.class));
+ assertTrue(pureUnitTest.remove(PlainUnitTheSecond.class));
+
+ String classesToSubclass = Joiner.on(',').join(
+ ElasticsearchTestCase.class.getSimpleName(),
+ ElasticsearchTestCase.class.getSimpleName(),
+ ElasticsearchTokenStreamTestCase.class.getSimpleName(),
+ LuceneTestCase.class.getSimpleName());
+ assertTrue("Not all subclasses of " + ElasticsearchTestCase.class.getSimpleName() +
+ " match the naming convention. Concrete classes must end with [Test|Tests]: " + missingSuffix.toString(),
+ missingSuffix.isEmpty());
+ assertTrue("Pure Unit-Test found must subclass one of [" + classesToSubclass +"] " + pureUnitTest.toString(),
+ pureUnitTest.isEmpty());
+ assertTrue("Classes ending with Test|Tests] must subclass [" + classesToSubclass +"] " + notImplementing.toString(),
+ notImplementing.isEmpty());
+ }
+
+ /*
+ * Some test the test classes
+ */
+
+ @Ignore
+ public static final class NotImplementingTests {}
+ @Ignore
+ public static final class NotImplementingTest {}
+
+ public static final class WrongName extends ElasticsearchTestCase {}
+
+ public static final class WrongNameTheSecond extends ElasticsearchTestCase {}
+
+ public static final class PlainUnit extends TestCase {}
+
+ public static final class PlainUnitTheSecond {
+ @Test
+ public void foo() {
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java
new file mode 100644
index 0000000000..5b8c8c5d4e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/VersionTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.lang.reflect.Modifier;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.Version.V_0_20_0;
+import static org.elasticsearch.Version.V_0_90_0;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+
+public class VersionTests extends ElasticsearchTestCase {
+
+ public void testMavenVersion() {
+ // maven sets this property to ensure that the latest version
+ // we use here is the version that is actually set to the project.version
+ // in maven
+ String property = System.getProperty("tests.version", null);
+ assumeTrue("tests.version is set", property != null);
+ assertEquals(property, Version.CURRENT.toString());
+ }
+
+ public void testVersionComparison() throws Exception {
+ assertThat(V_0_20_0.before(V_0_90_0), is(true));
+ assertThat(V_0_20_0.before(V_0_20_0), is(false));
+ assertThat(V_0_90_0.before(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.onOrBefore(V_0_90_0), is(true));
+ assertThat(V_0_20_0.onOrBefore(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrBefore(V_0_20_0), is(false));
+
+ assertThat(V_0_20_0.after(V_0_90_0), is(false));
+ assertThat(V_0_20_0.after(V_0_20_0), is(false));
+ assertThat(V_0_90_0.after(V_0_20_0), is(true));
+
+ assertThat(V_0_20_0.onOrAfter(V_0_90_0), is(false));
+ assertThat(V_0_20_0.onOrAfter(V_0_20_0), is(true));
+ assertThat(V_0_90_0.onOrAfter(V_0_20_0), is(true));
+ }
+
+ public void testVersionConstantPresent() {
+ assertThat(Version.CURRENT, sameInstance(Version.fromId(Version.CURRENT.id)));
+ assertThat(Version.CURRENT.luceneVersion, equalTo(org.apache.lucene.util.Version.LATEST));
+ final int iters = scaledRandomIntBetween(20, 100);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion(random());
+ assertThat(version, sameInstance(Version.fromId(version.id)));
+ assertThat(version.luceneVersion, sameInstance(Version.fromId(version.id).luceneVersion));
+ }
+ }
+
+ public void testCURRENTIsLatest() {
+ final int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion(random());
+ if (version != Version.CURRENT) {
+ assertThat("Version: " + version + " should be before: " + Version.CURRENT + " but wasn't", version.before(Version.CURRENT), is(true));
+ }
+ }
+ }
+
+ public void testVersionFromString() {
+ final int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion(random());
+ if (version.snapshot()) { // number doesn't include SNAPSHOT but the parser checks for that
+ assertEquals(Version.fromString(version.number()), version);
+ } else {
+ assertThat(Version.fromString(version.number()), sameInstance(version));
+ }
+ assertFalse(Version.fromString(version.number()).snapshot());
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testTooLongVersionFromString() {
+ Version.fromString("1.0.0.1.3");
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testTooShortVersionFromString() {
+ Version.fromString("1.0");
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testWrongVersionFromString() {
+ Version.fromString("WRONG.VERSION");
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testVersionNoPresentInSettings() {
+ Version.indexCreated(Settings.builder().build());
+ }
+
+ public void testIndexCreatedVersion() {
+ // an actual index has a IndexMetaData.SETTING_UUID
+ final Version version = randomFrom(Version.V_0_18_0, Version.V_0_90_13, Version.V_1_3_0);
+ assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
+ }
+
+ public void testMinCompatVersion() {
+ assertThat(Version.V_2_0_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0));
+ assertThat(Version.V_1_3_0.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0));
+ assertThat(Version.V_1_2_0.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0));
+ assertThat(Version.V_1_2_3.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0));
+ assertThat(Version.V_1_0_0_RC2.minimumCompatibilityVersion(), equalTo(Version.V_1_0_0_RC2));
+ }
+
+ public void testParseVersion() {
+ final int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Version version = randomVersion(random());
+ String stringVersion = version.toString();
+ if (version.snapshot() == false && random().nextBoolean()) {
+ version = new Version(version.id, true, version.luceneVersion);
+ }
+ Version parsedVersion = Version.fromString(version.toString());
+ assertEquals(version, parsedVersion);
+ assertEquals(version.snapshot(), parsedVersion.snapshot());
+ }
+ }
+
+ public void testParseLenient() {
+ // note this is just a silly sanity check, we test it in lucene
+ for (Version version : VersionUtils.allVersions()) {
+ org.apache.lucene.util.Version luceneVersion = version.luceneVersion;
+ String string = luceneVersion.toString().toUpperCase(Locale.ROOT)
+ .replaceFirst("^LUCENE_(\\d+)_(\\d+)$", "$1.$2");
+ assertThat(luceneVersion, Matchers.equalTo(Lucene.parseVersionLenient(string, null)));
+ }
+ }
+
+ public void testAllVersionsMatchId() throws Exception {
+ Map<String, Version> maxBranchVersions = new HashMap<>();
+ for (java.lang.reflect.Field field : Version.class.getDeclaredFields()) {
+ if (field.getName().endsWith("_ID")) {
+ assertTrue(field.getName() + " should be static", Modifier.isStatic(field.getModifiers()));
+ assertTrue(field.getName() + " should be final", Modifier.isFinal(field.getModifiers()));
+ int versionId = (Integer)field.get(Version.class);
+
+ String constantName = field.getName().substring(0, field.getName().length() - 3);
+ java.lang.reflect.Field versionConstant = Version.class.getField(constantName);
+ assertTrue(constantName + " should be static", Modifier.isStatic(versionConstant.getModifiers()));
+ assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers()));
+
+ Version v = (Version) versionConstant.get(Version.class);
+ logger.info("Checking " + v);
+ assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId));
+ assertEquals("Version " + constantName + " does not have correct id", versionId, v.id);
+ assertEquals("V_" + v.number().replace('.', '_'), constantName);
+
+ // only the latest version for a branch should be a snapshot (ie unreleased)
+ String branchName = "" + v.major + "." + v.minor;
+ Version maxBranchVersion = maxBranchVersions.get(branchName);
+ if (maxBranchVersion == null) {
+ maxBranchVersions.put(branchName, v);
+ } else if (v.after(maxBranchVersion)) {
+ assertFalse("Version " + maxBranchVersion + " cannot be a snapshot because version " + v + " exists", maxBranchVersion.snapshot());
+ maxBranchVersions.put(branchName, v);
+ }
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/core/src/test/java/org/elasticsearch/action/IndicesRequestTests.java
new file mode 100644
index 0000000000..a89180f076
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/IndicesRequestTests.java
@@ -0,0 +1,902 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexAction;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
+import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeAction;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
+import org.elasticsearch.action.bulk.BulkAction;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.delete.DeleteAction;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.exists.ExistsAction;
+import org.elasticsearch.action.exists.ExistsRequest;
+import org.elasticsearch.action.explain.ExplainAction;
+import org.elasticsearch.action.explain.ExplainRequest;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.MultiGetAction;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.percolate.MultiPercolateAction;
+import org.elasticsearch.action.percolate.MultiPercolateRequest;
+import org.elasticsearch.action.percolate.PercolateAction;
+import org.elasticsearch.action.percolate.PercolateRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.suggest.SuggestAction;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.action.termvectors.MultiTermVectorsAction;
+import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
+import org.elasticsearch.action.termvectors.TermVectorsAction;
+import org.elasticsearch.action.termvectors.TermVectorsRequest;
+import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.instanceOf;
+
+@ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2)
+@Slow
+public class IndicesRequestTests extends ElasticsearchIntegrationTest {
+
+ private final List<String> indices = new ArrayList<>();
+
+ @Override
+ protected int minimumNumberOfShards() {
+ //makes sure that a reduce is always needed when searching
+ return 2;
+ }
+
+ @Override
+ protected int minimumNumberOfReplicas() {
+ //makes sure that write operations get sent to the replica as well
+ //so we are able to intercept those messages and check them
+ return 1;
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, InterceptingTransportService.class.getName())
+ .build();
+ }
+
+ @Before
+ public void setup() {
+ int numIndices = iterations(1, 5);
+ for (int i = 0; i < numIndices; i++) {
+ indices.add("test" + i);
+ }
+ for (String index : indices) {
+ assertAcked(prepareCreate(index).addAlias(new Alias(index + "-alias")));
+ }
+ ensureGreen();
+ }
+
+ @After
+ public void cleanUp() {
+ assertAllRequestsHaveBeenConsumed();
+ indices.clear();
+ }
+
+ @Test
+ public void testGetFieldMappings() {
+ String getFieldMappingsShardAction = GetFieldMappingsAction.NAME + "[index][s]";
+ interceptTransportActions(getFieldMappingsShardAction);
+
+ GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest();
+ getFieldMappingsRequest.indices(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().getFieldMappings(getFieldMappingsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(getFieldMappingsRequest, getFieldMappingsShardAction);
+ }
+
+ @Test
+ public void testAnalyze() {
+ String analyzeShardAction = AnalyzeAction.NAME + "[s]";
+ interceptTransportActions(analyzeShardAction);
+
+ AnalyzeRequest analyzeRequest = new AnalyzeRequest(randomIndexOrAlias());
+ analyzeRequest.text("text");
+ internalCluster().clientNodeClient().admin().indices().analyze(analyzeRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(analyzeRequest, analyzeShardAction);
+ }
+
+ @Test
+ public void testIndex() {
+ String[] indexShardActions = new String[]{IndexAction.NAME, IndexAction.NAME + "[r]"};
+ interceptTransportActions(indexShardActions);
+
+ IndexRequest indexRequest = new IndexRequest(randomIndexOrAlias(), "type", "id").source("field", "value");
+ internalCluster().clientNodeClient().index(indexRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(indexRequest, indexShardActions);
+ }
+
+ @Test
+ public void testDelete() {
+ String[] deleteShardActions = new String[]{DeleteAction.NAME, DeleteAction.NAME + "[r]"};
+ interceptTransportActions(deleteShardActions);
+
+ DeleteRequest deleteRequest = new DeleteRequest(randomIndexOrAlias(), "type", "id");
+ internalCluster().clientNodeClient().delete(deleteRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(deleteRequest, deleteShardActions);
+ }
+
+ @Test
+ public void testUpdate() {
+ //update action goes to the primary, index op gets executed locally, then replicated
+ String[] updateShardActions = new String[]{UpdateAction.NAME, IndexAction.NAME + "[r]"};
+ interceptTransportActions(updateShardActions);
+
+ String indexOrAlias = randomIndexOrAlias();
+ client().prepareIndex(indexOrAlias, "type", "id").setSource("field", "value").get();
+ UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id").doc("field1", "value1");
+ UpdateResponse updateResponse = internalCluster().clientNodeClient().update(updateRequest).actionGet();
+ assertThat(updateResponse.isCreated(), equalTo(false));
+
+ clearInterceptedActions();
+ assertSameIndices(updateRequest, updateShardActions);
+ }
+
+ @Test
+ public void testUpdateUpsert() {
+ //update action goes to the primary, index op gets executed locally, then replicated
+ String[] updateShardActions = new String[]{UpdateAction.NAME, IndexAction.NAME + "[r]"};
+ interceptTransportActions(updateShardActions);
+
+ String indexOrAlias = randomIndexOrAlias();
+ UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id").upsert("field", "value").doc("field1", "value1");
+ UpdateResponse updateResponse = internalCluster().clientNodeClient().update(updateRequest).actionGet();
+ assertThat(updateResponse.isCreated(), equalTo(true));
+
+ clearInterceptedActions();
+ assertSameIndices(updateRequest, updateShardActions);
+ }
+
+ @Test
+ public void testUpdateDelete() {
+ //update action goes to the primary, delete op gets executed locally, then replicated
+ String[] updateShardActions = new String[]{UpdateAction.NAME, DeleteAction.NAME + "[r]"};
+ interceptTransportActions(updateShardActions);
+
+ String indexOrAlias = randomIndexOrAlias();
+ client().prepareIndex(indexOrAlias, "type", "id").setSource("field", "value").get();
+ UpdateRequest updateRequest = new UpdateRequest(indexOrAlias, "type", "id").script(new Script("ctx.op='delete'"));
+ UpdateResponse updateResponse = internalCluster().clientNodeClient().update(updateRequest).actionGet();
+ assertThat(updateResponse.isCreated(), equalTo(false));
+
+ clearInterceptedActions();
+ assertSameIndices(updateRequest, updateShardActions);
+ }
+
+ @Test
+ public void testBulk() {
+ String[] bulkShardActions = new String[]{BulkAction.NAME + "[s]", BulkAction.NAME + "[s][r]"};
+ interceptTransportActions(bulkShardActions);
+
+ List<String> indices = new ArrayList<>();
+ BulkRequest bulkRequest = new BulkRequest();
+ int numIndexRequests = iterations(1, 10);
+ for (int i = 0; i < numIndexRequests; i++) {
+ String indexOrAlias = randomIndexOrAlias();
+ bulkRequest.add(new IndexRequest(indexOrAlias, "type", "id").source("field", "value"));
+ indices.add(indexOrAlias);
+ }
+ int numDeleteRequests = iterations(1, 10);
+ for (int i = 0; i < numDeleteRequests; i++) {
+ String indexOrAlias = randomIndexOrAlias();
+ bulkRequest.add(new DeleteRequest(indexOrAlias, "type", "id"));
+ indices.add(indexOrAlias);
+ }
+ int numUpdateRequests = iterations(1, 10);
+ for (int i = 0; i < numUpdateRequests; i++) {
+ String indexOrAlias = randomIndexOrAlias();
+ bulkRequest.add(new UpdateRequest(indexOrAlias, "type", "id").doc("field1", "value1"));
+ indices.add(indexOrAlias);
+ }
+
+ internalCluster().clientNodeClient().bulk(bulkRequest).actionGet();
+
+ clearInterceptedActions();
+ assertIndicesSubset(indices, bulkShardActions);
+ }
+
+ @Test
+ public void testGet() {
+ String getShardAction = GetAction.NAME + "[s]";
+ interceptTransportActions(getShardAction);
+
+ GetRequest getRequest = new GetRequest(randomIndexOrAlias(), "type", "id");
+ internalCluster().clientNodeClient().get(getRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(getRequest, getShardAction);
+ }
+
+ @Test
+ public void testExplain() {
+ String explainShardAction = ExplainAction.NAME + "[s]";
+ interceptTransportActions(explainShardAction);
+
+ ExplainRequest explainRequest = new ExplainRequest(randomIndexOrAlias(), "type", "id").source(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery()));
+ internalCluster().clientNodeClient().explain(explainRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(explainRequest, explainShardAction);
+ }
+
+ @Test
+ public void testTermVector() {
+ String termVectorShardAction = TermVectorsAction.NAME + "[s]";
+ interceptTransportActions(termVectorShardAction);
+
+ TermVectorsRequest termVectorsRequest = new TermVectorsRequest(randomIndexOrAlias(), "type", "id");
+ internalCluster().clientNodeClient().termVectors(termVectorsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(termVectorsRequest, termVectorShardAction);
+ }
+
+ @Test
+ public void testMultiTermVector() {
+ String multiTermVectorsShardAction = MultiTermVectorsAction.NAME + "[shard][s]";
+ interceptTransportActions(multiTermVectorsShardAction);
+
+ List<String> indices = new ArrayList<>();
+ MultiTermVectorsRequest multiTermVectorsRequest = new MultiTermVectorsRequest();
+ int numDocs = iterations(1, 30);
+ for (int i = 0; i < numDocs; i++) {
+ String indexOrAlias = randomIndexOrAlias();
+ multiTermVectorsRequest.add(indexOrAlias, "type", Integer.toString(i));
+ indices.add(indexOrAlias);
+ }
+ internalCluster().clientNodeClient().multiTermVectors(multiTermVectorsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertIndicesSubset(indices, multiTermVectorsShardAction);
+ }
+
+ @Test
+ public void testMultiGet() {
+ String multiGetShardAction = MultiGetAction.NAME + "[shard][s]";
+ interceptTransportActions(multiGetShardAction);
+
+ List<String> indices = new ArrayList<>();
+ MultiGetRequest multiGetRequest = new MultiGetRequest();
+ int numDocs = iterations(1, 30);
+ for (int i = 0; i < numDocs; i++) {
+ String indexOrAlias = randomIndexOrAlias();
+ multiGetRequest.add(indexOrAlias, "type", Integer.toString(i));
+ indices.add(indexOrAlias);
+ }
+ internalCluster().clientNodeClient().multiGet(multiGetRequest).actionGet();
+
+ clearInterceptedActions();
+ assertIndicesSubset(indices, multiGetShardAction);
+ }
+
+ @Test
+ public void testExists() {
+ String existsShardAction = ExistsAction.NAME + "[s]";
+ interceptTransportActions(existsShardAction);
+
+ ExistsRequest existsRequest = new ExistsRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().exists(existsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(existsRequest, existsShardAction);
+ }
+
+ @Test
+ public void testFlush() {
+ // Verify flush shard-level requests preserve the originally requested indices.
+ String flushShardAction = FlushAction.NAME + "[s]";
+ interceptTransportActions(flushShardAction);
+
+ FlushRequest flushRequest = new FlushRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().flush(flushRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(flushRequest, flushShardAction);
+ }
+
+ @Test
+ public void testOptimize() {
+ // Verify optimize shard-level requests preserve the originally requested indices.
+ String optimizeShardAction = OptimizeAction.NAME + "[s]";
+ interceptTransportActions(optimizeShardAction);
+
+ OptimizeRequest optimizeRequest = new OptimizeRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().optimize(optimizeRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(optimizeRequest, optimizeShardAction);
+ }
+
+ @Test
+ public void testRefresh() {
+ // Verify refresh shard-level requests preserve the originally requested indices.
+ String refreshShardAction = RefreshAction.NAME + "[s]";
+ interceptTransportActions(refreshShardAction);
+
+ RefreshRequest refreshRequest = new RefreshRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().refresh(refreshRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(refreshRequest, refreshShardAction);
+ }
+
+ @Test
+ public void testClearCache() {
+ // Verify clear-cache shard-level requests preserve the originally requested indices.
+ String clearCacheAction = ClearIndicesCacheAction.NAME + "[s]";
+ interceptTransportActions(clearCacheAction);
+
+ ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().clearCache(clearIndicesCacheRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(clearIndicesCacheRequest, clearCacheAction);
+ }
+
+ @Test
+ public void testRecovery() {
+ // Verify recovery shard-level requests preserve the originally requested indices.
+ String recoveryAction = RecoveryAction.NAME + "[s]";
+ interceptTransportActions(recoveryAction);
+
+ RecoveryRequest recoveryRequest = new RecoveryRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().recoveries(recoveryRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(recoveryRequest, recoveryAction);
+ }
+
+ @Test
+ public void testSegments() {
+ // Verify segments shard-level requests preserve the originally requested indices.
+ String segmentsAction = IndicesSegmentsAction.NAME + "[s]";
+ interceptTransportActions(segmentsAction);
+
+ IndicesSegmentsRequest segmentsRequest = new IndicesSegmentsRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().segments(segmentsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(segmentsRequest, segmentsAction);
+ }
+
+ @Test
+ public void testIndicesStats() {
+ // Verify indices-stats shard-level requests preserve the originally requested indices.
+ String indicesStats = IndicesStatsAction.NAME + "[s]";
+ interceptTransportActions(indicesStats);
+
+ IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().stats(indicesStatsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(indicesStatsRequest, indicesStats);
+ }
+
+ @Test
+ public void testSuggest() {
+ // Verify suggest shard-level requests preserve the originally requested indices.
+ String suggestAction = SuggestAction.NAME + "[s]";
+ interceptTransportActions(suggestAction);
+
+ SuggestRequest suggestRequest = new SuggestRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().suggest(suggestRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(suggestRequest, suggestAction);
+ }
+
+ @Test
+ public void testValidateQuery() {
+ // Verify validate-query shard-level requests preserve the originally requested indices.
+ String validateQueryShardAction = ValidateQueryAction.NAME + "[s]";
+ interceptTransportActions(validateQueryShardAction);
+
+ ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().validateQuery(validateQueryRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(validateQueryRequest, validateQueryShardAction);
+ }
+
+ @Test
+ public void testPercolate() {
+ // Verify percolate shard-level requests preserve the originally requested indices.
+ String percolateShardAction = PercolateAction.NAME + "[s]";
+ interceptTransportActions(percolateShardAction);
+
+ // Index a doc so the "existing document" flavor of percolate has something to fetch.
+ client().prepareIndex("test-get", "type", "1").setSource("field","value").get();
+
+ PercolateRequest percolateRequest = new PercolateRequest().indices(randomIndicesOrAliases()).documentType("type");
+ // Randomly exercise both request flavors: existing-doc (get request) vs inline source.
+ if (randomBoolean()) {
+ percolateRequest.getRequest(new GetRequest("test-get", "type", "1"));
+ } else {
+ percolateRequest.source("\"field\":\"value\"");
+ }
+ internalCluster().clientNodeClient().percolate(percolateRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(percolateRequest, percolateShardAction);
+ }
+
+ @Test
+ public void testMultiPercolate() {
+ // Multi-percolate fans out per-shard sub-requests under this derived action name.
+ String multiPercolateShardAction = MultiPercolateAction.NAME + "[shard][s]";
+ interceptTransportActions(multiPercolateShardAction);
+
+ // Index a doc so the "existing document" flavor of percolate has something to fetch.
+ client().prepareIndex("test-get", "type", "1").setSource("field", "value").get();
+
+ // Accumulate every index/alias used across the inner percolate requests.
+ MultiPercolateRequest multiPercolateRequest = new MultiPercolateRequest();
+ List<String> indices = new ArrayList<>();
+ int numRequests = iterations(1, 30);
+ for (int i = 0; i < numRequests; i++) {
+ String[] indicesOrAliases = randomIndicesOrAliases();
+ Collections.addAll(indices, indicesOrAliases);
+ PercolateRequest percolateRequest = new PercolateRequest().indices(indicesOrAliases).documentType("type");
+ // Randomly exercise both request flavors: existing-doc (get request) vs inline source.
+ if (randomBoolean()) {
+ percolateRequest.getRequest(new GetRequest("test-get", "type", "1"));
+ } else {
+ percolateRequest.source("\"field\":\"value\"");
+ }
+ multiPercolateRequest.add(percolateRequest);
+ }
+
+ internalCluster().clientNodeClient().multiPercolate(multiPercolateRequest).actionGet();
+
+ clearInterceptedActions();
+ // Each shard request resolves only part of the indices, so subset is asserted.
+ assertIndicesSubset(indices, multiPercolateShardAction);
+ }
+
+ @Test
+ public void testOpenIndex() {
+ // Open-index is a master-level action; intercept the top-level action name itself.
+ interceptTransportActions(OpenIndexAction.NAME);
+
+ // Unique names are required because the same index must not be listed twice in the request.
+ OpenIndexRequest openIndexRequest = new OpenIndexRequest(randomUniqueIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().open(openIndexRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(openIndexRequest, OpenIndexAction.NAME);
+ }
+
+ @Test
+ public void testCloseIndex() {
+ // Close-index is a master-level action; intercept the top-level action name itself.
+ interceptTransportActions(CloseIndexAction.NAME);
+
+ CloseIndexRequest closeIndexRequest = new CloseIndexRequest(randomUniqueIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().close(closeIndexRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(closeIndexRequest, CloseIndexAction.NAME);
+ }
+
+ @Test
+ public void testDeleteIndex() {
+ // Delete-index is a master-level action; intercept the top-level action name itself.
+ interceptTransportActions(DeleteIndexAction.NAME);
+
+ String[] randomIndicesOrAliases = randomUniqueIndicesOrAliases();
+ DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(randomIndicesOrAliases);
+ // Deletion must be acknowledged before checking the intercepted request's indices.
+ assertAcked(internalCluster().clientNodeClient().admin().indices().delete(deleteIndexRequest).actionGet());
+
+ clearInterceptedActions();
+ assertSameIndices(deleteIndexRequest, DeleteIndexAction.NAME);
+ }
+
+ @Test
+ public void testGetMappings() {
+ // Verify the get-mappings request reaches the transport layer with its indices intact.
+ interceptTransportActions(GetMappingsAction.NAME);
+
+ GetMappingsRequest getMappingsRequest = new GetMappingsRequest().indices(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().getMappings(getMappingsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(getMappingsRequest, GetMappingsAction.NAME);
+ }
+
+ @Test
+ public void testPutMapping() {
+ // Verify the put-mapping request reaches the transport layer with its indices intact.
+ interceptTransportActions(PutMappingAction.NAME);
+
+ PutMappingRequest putMappingRequest = new PutMappingRequest(randomUniqueIndicesOrAliases()).type("type").source("field", "type=string");
+ internalCluster().clientNodeClient().admin().indices().putMapping(putMappingRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(putMappingRequest, PutMappingAction.NAME);
+ }
+
+ @Test
+ public void testGetSettings() {
+ // Verify the get-settings request reaches the transport layer with its indices intact.
+ interceptTransportActions(GetSettingsAction.NAME);
+
+ GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(randomIndicesOrAliases());
+ internalCluster().clientNodeClient().admin().indices().getSettings(getSettingsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(getSettingsRequest, GetSettingsAction.NAME);
+ }
+
+ @Test
+ public void testUpdateSettings() {
+ // Verify the update-settings request reaches the transport layer with its indices intact.
+ interceptTransportActions(UpdateSettingsAction.NAME);
+
+ // refresh_interval=-1 is a harmless dynamic setting change used only as a payload.
+ UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(randomIndicesOrAliases()).settings(Settings.builder().put("refresh_interval", -1));
+ internalCluster().clientNodeClient().admin().indices().updateSettings(updateSettingsRequest).actionGet();
+
+ clearInterceptedActions();
+ assertSameIndices(updateSettingsRequest, UpdateSettingsAction.NAME);
+ }
+
+ @Test
+ public void testSearchQueryThenFetch() throws Exception {
+ // QUERY_THEN_FETCH issues query then fetch phases; free-context may or may not be sent.
+ interceptTransportActions(SearchServiceTransportAction.QUERY_ACTION_NAME,
+ SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+
+ // Index one doc per targeted index/alias so the search is guaranteed to hit something.
+ String[] randomIndicesOrAliases = randomIndicesOrAliases();
+ for (int i = 0; i < randomIndicesOrAliases.length; i++) {
+ client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
+ }
+ refresh();
+
+ SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.QUERY_THEN_FETCH);
+ SearchResponse searchResponse = internalCluster().clientNodeClient().search(searchRequest).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0l));
+
+ clearInterceptedActions();
+ assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_ACTION_NAME, SearchServiceTransportAction.FETCH_ID_ACTION_NAME);
+ //free context messages are not necessarily sent, but if they are, check their indices
+ assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ }
+
+ @Test
+ public void testSearchDfsQueryThenFetch() throws Exception {
+ // DFS_QUERY_THEN_FETCH adds a DFS phase before query and fetch.
+ interceptTransportActions(SearchServiceTransportAction.DFS_ACTION_NAME, SearchServiceTransportAction.QUERY_ID_ACTION_NAME,
+ SearchServiceTransportAction.FETCH_ID_ACTION_NAME, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+
+ // Index one doc per targeted index/alias so the search is guaranteed to hit something.
+ String[] randomIndicesOrAliases = randomIndicesOrAliases();
+ for (int i = 0; i < randomIndicesOrAliases.length; i++) {
+ client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
+ }
+ refresh();
+
+ SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.DFS_QUERY_THEN_FETCH);
+ SearchResponse searchResponse = internalCluster().clientNodeClient().search(searchRequest).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0l));
+
+ clearInterceptedActions();
+ assertSameIndices(searchRequest, SearchServiceTransportAction.DFS_ACTION_NAME, SearchServiceTransportAction.QUERY_ID_ACTION_NAME,
+ SearchServiceTransportAction.FETCH_ID_ACTION_NAME);
+ //free context messages are not necessarily sent, but if they are, check their indices
+ assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ }
+
+ @Test
+ public void testSearchQueryAndFetch() throws Exception {
+ // QUERY_AND_FETCH combines query and fetch into a single per-shard round-trip.
+ interceptTransportActions(SearchServiceTransportAction.QUERY_FETCH_ACTION_NAME,
+ SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+
+ // Index one doc per targeted index/alias so the search is guaranteed to hit something.
+ String[] randomIndicesOrAliases = randomIndicesOrAliases();
+ for (int i = 0; i < randomIndicesOrAliases.length; i++) {
+ client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
+ }
+ refresh();
+
+ SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.QUERY_AND_FETCH);
+ SearchResponse searchResponse = internalCluster().clientNodeClient().search(searchRequest).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0l));
+
+ clearInterceptedActions();
+ assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_FETCH_ACTION_NAME);
+ //free context messages are not necessarily sent, but if they are, check their indices
+ assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ }
+
+ @Test
+ public void testSearchDfsQueryAndFetch() throws Exception {
+ // DFS_QUERY_AND_FETCH: DFS followed by a combined query+fetch phase.
+ interceptTransportActions(SearchServiceTransportAction.QUERY_QUERY_FETCH_ACTION_NAME,
+ SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+
+ // Index one doc per targeted index/alias so the search is guaranteed to hit something.
+ String[] randomIndicesOrAliases = randomIndicesOrAliases();
+ for (int i = 0; i < randomIndicesOrAliases.length; i++) {
+ client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
+ }
+ refresh();
+
+ SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.DFS_QUERY_AND_FETCH);
+ SearchResponse searchResponse = internalCluster().clientNodeClient().search(searchRequest).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0l));
+
+ clearInterceptedActions();
+ assertSameIndices(searchRequest, SearchServiceTransportAction.QUERY_QUERY_FETCH_ACTION_NAME);
+ //free context messages are not necessarily sent, but if they are, check their indices
+ assertSameIndicesOptionalRequests(searchRequest, SearchServiceTransportAction.FREE_CONTEXT_ACTION_NAME);
+ }
+
+ @Test
+ public void testSearchScan() throws Exception {
+ // SCAN requires a scroll; only the scan action itself is intercepted here.
+ interceptTransportActions(SearchServiceTransportAction.SCAN_ACTION_NAME);
+
+ // Index one doc per targeted index/alias so the search is guaranteed to hit something.
+ String[] randomIndicesOrAliases = randomIndicesOrAliases();
+ for (int i = 0; i < randomIndicesOrAliases.length; i++) {
+ client().prepareIndex(randomIndicesOrAliases[i], "type", "id-" + i).setSource("field", "value").get();
+ }
+ refresh();
+
+ SearchRequest searchRequest = new SearchRequest(randomIndicesOrAliases).searchType(SearchType.SCAN).scroll(new TimeValue(500));
+ SearchResponse searchResponse = internalCluster().clientNodeClient().search(searchRequest).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(0l));
+
+ // Release the scroll context so no search contexts leak between tests.
+ client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();
+
+ clearInterceptedActions();
+ assertSameIndices(searchRequest, SearchServiceTransportAction.SCAN_ACTION_NAME);
+ }
+
+ // Asserts that at least one request was intercepted per action and that each carries
+ // the same indices and indices options as the original request.
+ private static void assertSameIndices(IndicesRequest originalRequest, String... actions) {
+ assertSameIndices(originalRequest, false, actions);
+ }
+
+ // Same as assertSameIndices but tolerates the case where no request was intercepted
+ // (used for actions that are not necessarily sent, e.g. free-context).
+ private static void assertSameIndicesOptionalRequests(IndicesRequest originalRequest, String... actions) {
+ assertSameIndices(originalRequest, true, actions);
+ }
+
+ // Core assertion: for each action, consume the intercepted requests and check that every
+ // one is an IndicesRequest matching the original's indices and indices options.
+ // When optional is false, at least one request must have been intercepted per action.
+ private static void assertSameIndices(IndicesRequest originalRequest, boolean optional, String... actions) {
+ for (String action : actions) {
+ List<TransportRequest> requests = consumeTransportRequests(action);
+ if (!optional) {
+ assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0));
+ }
+ for (TransportRequest internalRequest : requests) {
+ assertThat(internalRequest, instanceOf(IndicesRequest.class));
+ // The class name in the message identifies which internal request type mismatched.
+ assertThat(internalRequest.getClass().getName(), ((IndicesRequest)internalRequest).indices(), equalTo(originalRequest.indices()));
+ assertThat(((IndicesRequest)internalRequest).indicesOptions(), equalTo(originalRequest.indicesOptions()));
+ }
+ }
+ }
+
+ // Array-based variant of the optional-requests assertion: checks indices only
+ // (no indices-options comparison), tolerating zero intercepted requests.
+ private static void assertSameIndicesOptionalRequests(String[] indices, String... actions) {
+ assertSameIndices(indices, true, actions);
+ }
+
+ // Array-based core assertion: compares only the indices of each intercepted request
+ // against the expected array; unlike the IndicesRequest variant it does not check options.
+ private static void assertSameIndices(String[] indices, boolean optional, String... actions) {
+ for (String action : actions) {
+ List<TransportRequest> requests = consumeTransportRequests(action);
+ if (!optional) {
+ assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0));
+ }
+ for (TransportRequest internalRequest : requests) {
+ assertThat(internalRequest, instanceOf(IndicesRequest.class));
+ assertThat(internalRequest.getClass().getName(), ((IndicesRequest)internalRequest).indices(), equalTo(indices));
+ }
+ }
+ }
+
+ // Asserts that every index named by each intercepted shard request is contained in the
+ // given list (shard requests may each cover only part of the original indices).
+ private static void assertIndicesSubset(List<String> indices, String... actions) {
+ //indices returned by each bulk shard request need to be a subset of the original indices
+ for (String action : actions) {
+ List<TransportRequest> requests = consumeTransportRequests(action);
+ assertThat("no internal requests intercepted for action [" + action + "]", requests.size(), greaterThan(0));
+ for (TransportRequest internalRequest : requests) {
+ assertThat(internalRequest, instanceOf(IndicesRequest.class));
+ for (String index : ((IndicesRequest) internalRequest).indices()) {
+ assertThat(indices, hasItem(index));
+ }
+ }
+ }
+ }
+
+ // Picks a random known index and, half the time, returns its "-alias" form instead,
+ // so that alias resolution is exercised by the tests.
+ private String randomIndexOrAlias() {
+ String index = randomFrom(indices);
+ if (randomBoolean()) {
+ return index + "-alias";
+ } else {
+ return index;
+ }
+ }
+
+ // Builds an array of random indices/aliases; duplicates are allowed, and the upper bound
+ // is twice the index count because every index also has an alias.
+ private String[] randomIndicesOrAliases() {
+ int count = randomIntBetween(1, indices.size() * 2); //every index has an alias
+ String[] indices = new String[count];
+ for (int i = 0; i < count; i++) {
+ indices[i] = randomIndexOrAlias();
+ }
+ return indices;
+ }
+
+ // Like randomIndicesOrAliases but guarantees distinct underlying indices, as needed by
+ // requests (open/close/delete/put-mapping) that reject duplicate index entries.
+ private String[] randomUniqueIndicesOrAliases() {
+ Set<String> uniqueIndices = new HashSet<>();
+ int count = randomIntBetween(1, this.indices.size());
+ while (uniqueIndices.size() < count) {
+ uniqueIndices.add(randomFrom(this.indices));
+ }
+ String[] indices = new String[count];
+ int i = 0;
+ for (String index : uniqueIndices) {
+ // Each chosen index is independently replaced by its alias half the time.
+ indices[i++] = randomBoolean() ? index + "-alias" : index;
+ }
+ return indices;
+ }
+
+ // Verifies that no intercepted requests are left unconsumed on any node's transport
+ // service, i.e. every test fully accounted for the requests it triggered.
+ private static void assertAllRequestsHaveBeenConsumed() {
+ Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
+ for (TransportService transportService : transportServices) {
+ assertThat(((InterceptingTransportService)transportService).requests.entrySet(), emptyIterable());
+ }
+ }
+
+ // Stops interception of all actions on every node's transport service.
+ private static void clearInterceptedActions() {
+ Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
+ for (TransportService transportService : transportServices) {
+ ((InterceptingTransportService) transportService).clearInterceptedActions();
+ }
+ }
+
+ // Registers the given action names for interception on every node's transport service.
+ private static void interceptTransportActions(String... actions) {
+ Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
+ for (TransportService transportService : transportServices) {
+ ((InterceptingTransportService) transportService).interceptTransportActions(actions);
+ }
+ }
+
+ // Gathers (and removes) all requests intercepted for the given action across every node,
+ // merging them into a single list; nodes with no matches contribute nothing.
+ private static List<TransportRequest> consumeTransportRequests(String action) {
+ List<TransportRequest> requests = new ArrayList<>();
+ Iterable<TransportService> transportServices = internalCluster().getInstances(TransportService.class);
+ for (TransportService transportService : transportServices) {
+ List<TransportRequest> transportRequests = ((InterceptingTransportService) transportService).consumeRequests(action);
+ if (transportRequests != null) {
+ requests.addAll(transportRequests);
+ }
+ }
+ return requests;
+ }
+
+ // TransportService that wraps every registered request handler so that incoming requests
+ // for intercepted actions are recorded before being delegated to the real handler.
+ // All access to 'actions' and 'requests' is synchronized on this instance.
+ public static class InterceptingTransportService extends TransportService {
+
+ // Action names currently being intercepted.
+ private final Set<String> actions = new HashSet<>();
+
+ // Intercepted requests, keyed by action name; entries are removed when consumed.
+ private final Map<String, List<TransportRequest>> requests = new HashMap<>();
+
+ @Inject
+ public InterceptingTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ super(settings, transport, threadPool);
+ }
+
+ // Removes and returns the recorded requests for an action; null if none were recorded.
+ synchronized List<TransportRequest> consumeRequests(String action) {
+ return requests.remove(action);
+ }
+
+ // Adds the given action names to the set of intercepted actions.
+ synchronized void interceptTransportActions(String... actions) {
+ Collections.addAll(this.actions, actions);
+ }
+
+ // Clears the set of intercepted actions; already-recorded requests are kept.
+ synchronized void clearInterceptedActions() {
+ actions.clear();
+ }
+
+ // Wraps every handler registration so all actions can potentially be intercepted.
+ @Override
+ public <Request extends TransportRequest> void registerRequestHandler(String action, Class<Request> request, String executor, boolean forceExecution, TransportRequestHandler<Request> handler) {
+ super.registerRequestHandler(action, request, executor, forceExecution, new InterceptingRequestHandler(action, handler));
+ }
+
+ // Handler decorator: records the request if its action is intercepted, then delegates.
+ private class InterceptingRequestHandler implements TransportRequestHandler {
+
+ private final TransportRequestHandler requestHandler;
+ private final String action;
+
+ InterceptingRequestHandler(String action, TransportRequestHandler requestHandler) {
+ this.requestHandler = requestHandler;
+ this.action = action;
+ }
+
+ @Override
+ public void messageReceived(TransportRequest request, TransportChannel channel) throws Exception {
+ // Record under the outer service's lock; the per-action list is created lazily.
+ synchronized (InterceptingTransportService.this) {
+ if (actions.contains(action)) {
+ List<TransportRequest> requestList = requests.get(action);
+ if (requestList == null) {
+ requestList = new ArrayList<>();
+ requestList.add(request);
+ requests.put(action, requestList);
+ } else {
+ requestList.add(request);
+ }
+ }
+ }
+ // Delegation happens outside the lock to avoid holding it during handling.
+ requestHandler.messageReceived(request, channel);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/ListenerActionTests.java b/core/src/test/java/org/elasticsearch/action/ListenerActionTests.java
new file mode 100644
index 0000000000..50cde99fcd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/ListenerActionTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ */
+/**
+ * Checks on which thread an ActionListener is invoked: client (non-data) nodes and
+ * transport clients are expected to dispatch listeners onto a dedicated "listener"
+ * thread pool, while other clients invoke them inline.
+ */
+public class ListenerActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void verifyThreadedListeners() throws Throwable {
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<Throwable> failure = new AtomicReference<>();
+ // Captures the name of the thread the listener callback ran on.
+ final AtomicReference<String> threadName = new AtomicReference<>();
+ Client client = client();
+
+ IndexRequest request = new IndexRequest("test", "type", "1");
+ if (randomBoolean()) {
+ // set the source, without it, we will have a verification failure
+ request.source("field1", "value1");
+ }
+
+ // Both success and failure paths record the thread name; either outcome is acceptable here.
+ client.index(request, new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse indexResponse) {
+ threadName.set(Thread.currentThread().getName());
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ threadName.set(Thread.currentThread().getName());
+ failure.set(e);
+ latch.countDown();
+ }
+ });
+
+ latch.await();
+
+ // Threaded listeners are expected for client nodes and transport clients only.
+ boolean shouldBeThreaded = DiscoveryNode.clientNode(client.settings()) || TransportClient.CLIENT_TYPE.equals(client.settings().get(Client.CLIENT_TYPE_SETTING));
+ if (shouldBeThreaded) {
+ assertTrue(threadName.get().contains("listener"));
+ } else {
+ assertFalse(threadName.get().contains("listener"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java b/core/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java
new file mode 100644
index 0000000000..83411fcd40
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Round-trip serialization tests for OriginalIndices: writes a random instance with a
+ * random wire version and verifies indices and indices options survive deserialization.
+ */
+public class OriginalIndicesTests extends ElasticsearchTestCase {
+
+ // The distinct IndicesOptions presets exercised by randomOriginalIndices().
+ private static final IndicesOptions[] indicesOptionsValues = new IndicesOptions[]{
+ IndicesOptions.lenientExpandOpen() , IndicesOptions.strictExpand(), IndicesOptions.strictExpandOpen(),
+ IndicesOptions.strictExpandOpenAndForbidClosed(), IndicesOptions.strictSingleIndexNoExpandForbidClosed()};
+
+ @Test
+ public void testOriginalIndicesSerialization() throws IOException {
+ int iterations = iterations(10, 30);
+ for (int i = 0; i < iterations; i++) {
+ OriginalIndices originalIndices = randomOriginalIndices();
+
+ // Serialize with a random version to cover backward-compatible wire formats.
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(randomVersion(random()));
+ OriginalIndices.writeOriginalIndices(originalIndices, out);
+
+ // Deserialize with the same version the bytes were written with.
+ StreamInput in = StreamInput.wrap(out.bytes());
+ in.setVersion(out.getVersion());
+ OriginalIndices originalIndices2 = OriginalIndices.readOriginalIndices(in);
+
+ assertThat(originalIndices2.indices(), equalTo(originalIndices.indices()));
+ assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions()));
+ }
+ }
+
+ // Builds an OriginalIndices with 0-10 random ascii index names and random options.
+ private static OriginalIndices randomOriginalIndices() {
+ int numIndices = randomInt(10);
+ String[] indices = new String[numIndices];
+ for (int j = 0; j < indices.length; j++) {
+ indices[j] = randomAsciiOfLength(randomIntBetween(1, 10));
+ }
+ IndicesOptions indicesOptions = randomFrom(indicesOptionsValues);
+ return new OriginalIndices(indices, indicesOptions);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/RejectionActionTests.java b/core/src/test/java/org/elasticsearch/action/RejectionActionTests.java
new file mode 100644
index 0000000000..273caa5229
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/RejectionActionTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Locale;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 2)
+/**
+ * Floods a cluster configured with tiny thread pools (size 1, queue 1) with concurrent
+ * searches and verifies every failure observed is a thread-pool rejection — nothing else.
+ */
+public class RejectionActionTests extends ElasticsearchIntegrationTest {
+
+ // Shrink the search/index/get pools so rejections are easy to provoke.
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("threadpool.search.size", 1)
+ .put("threadpool.search.queue_size", 1)
+ .put("threadpool.index.size", 1)
+ .put("threadpool.index.queue_size", 1)
+ .put("threadpool.get.size", 1)
+ .put("threadpool.get.queue_size", 1)
+ .build();
+ }
+
+
+ @Test
+ public void simulateSearchRejectionLoad() throws Throwable {
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "1").get();
+ }
+
+ // Fire a burst of async searches; each outcome (response or throwable) is collected.
+ int numberOfAsyncOps = randomIntBetween(200, 700);
+ final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps);
+ final CopyOnWriteArrayList<Object> responses = Lists.newCopyOnWriteArrayList();
+ for (int i = 0; i < numberOfAsyncOps; i++) {
+ client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field", "1"))
+ .execute(new ActionListener<SearchResponse>() {
+ @Override
+ public void onResponse(SearchResponse searchResponse) {
+ responses.add(searchResponse);
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ responses.add(e);
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ assertThat(responses.size(), equalTo(numberOfAsyncOps));
+
+ // validate all responses
+ for (Object response : responses) {
+ if (response instanceof SearchResponse) {
+ // Partial successes: any shard failure reason must mention a rejection.
+ SearchResponse searchResponse = (SearchResponse) response;
+ for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
+ assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
+ }
+ } else {
+ // Outright failures must unwrap to a search-phase failure whose shard reasons
+ // mention a rejection, or to an EsRejectedExecutionException directly.
+ Throwable t = (Throwable) response;
+ Throwable unwrap = ExceptionsHelper.unwrapCause(t);
+ if (unwrap instanceof SearchPhaseExecutionException) {
+ SearchPhaseExecutionException e = (SearchPhaseExecutionException) unwrap;
+ for (ShardSearchFailure failure : e.shardFailures()) {
+ assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected"));
+ }
+ } else if ((unwrap instanceof EsRejectedExecutionException) == false) {
+ throw new AssertionError("unexpected failure", (Throwable) response);
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java b/core/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
new file mode 100644
index 0000000000..4b3ad48749
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/HotThreadsTest.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ */
+@Slow
+public class HotThreadsTest extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {
+        /**
+         * This test just checks if nothing crashes or gets stuck etc.
+         */
+        createIndex("test");
+        final int iters = scaledRandomIntBetween(2, 20);
+        // Set to true by the async listener on any assertion failure; checked at the end of each iteration.
+        final AtomicBoolean hasErrors = new AtomicBoolean(false);
+        for (int i = 0; i < iters; i++) {
+            final String type;
+            NodesHotThreadsRequestBuilder nodesHotThreadsRequestBuilder = client().admin().cluster().prepareNodesHotThreads();
+            if (randomBoolean()) {
+                // Randomize the sampling interval; rarely use a long one (0.5s-5s) to cover slow sampling.
+                TimeValue timeValue = new TimeValue(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(20, 500));
+                nodesHotThreadsRequestBuilder.setInterval(timeValue);
+            }
+            if (randomBoolean()) {
+                // Randomize the number of busiest threads reported, occasionally far above the real thread count.
+                nodesHotThreadsRequestBuilder.setThreads(rarely() ? randomIntBetween(500, 5000) : randomIntBetween(1, 500));
+            }
+            nodesHotThreadsRequestBuilder.setIgnoreIdleThreads(randomBoolean());
+            if (randomBoolean()) {
+                // Randomly exercise each supported report type: cpu, wait, block.
+                switch (randomIntBetween(0, 2)) {
+                    case 2:
+                        type = "cpu";
+                        break;
+                    case 1:
+                        type = "wait";
+                        break;
+                    default:
+                        type = "block";
+                        break;
+                }
+                assertThat(type, notNullValue());
+                nodesHotThreadsRequestBuilder.setType(type);
+            } else {
+                type = null;
+            }
+            final CountDownLatch latch = new CountDownLatch(1);
+            nodesHotThreadsRequestBuilder.execute(new ActionListener<NodesHotThreadsResponse>() {
+                @Override
+                public void onResponse(NodesHotThreadsResponse nodeHotThreads) {
+                    boolean success = false;
+                    try {
+                        // Every node in the cluster must report a non-null hot-threads dump.
+                        assertThat(nodeHotThreads, notNullValue());
+                        Map<String, NodeHotThreads> nodesMap = nodeHotThreads.getNodesMap();
+                        assertThat(nodesMap.size(), equalTo(cluster().size()));
+                        for (NodeHotThreads ht : nodeHotThreads) {
+                            assertNotNull(ht.getHotThreads());
+                            //logger.info(ht.getHotThreads());
+                        }
+                        success = true;
+                    } finally {
+                        // Record failure before releasing the latch so the main thread sees it.
+                        if (!success) {
+                            hasErrors.set(true);
+                        }
+                        latch.countDown();
+                    }
+                }
+
+                @Override
+                public void onFailure(Throwable e) {
+                    logger.error("FAILED", e);
+                    hasErrors.set(true);
+                    latch.countDown();
+                    fail();
+                }
+            });
+
+            indexRandom(true,
+                    client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+                    client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+                    client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+            ensureSearchable();
+            // Keep the cluster busy with searches while the async hot-threads request is sampling,
+            // so there is actual thread activity to report; spins until the listener fires.
+            while(latch.getCount() > 0) {
+                assertHitCount(
+                        client().prepareSearch()
+                                .setQuery(matchAllQuery())
+                                .setPostFilter(
+                                        andQuery(
+                                                matchAllQuery(),
+                                                notQuery(andQuery(termQuery("field1", "value1"),
+                                                        termQuery("field1", "value2"))))).get(),
+                        3l);
+            }
+            latch.await();
+            assertThat(hasErrors.get(), is(false));
+        }
+    }
+
+    // NOTE(review): unlike testHotThreadsDontFail this method has no @Test annotation —
+    // presumably the randomized runner picks up public test* methods by name; confirm.
+    public void testIgnoreIdleThreads() throws ExecutionException, InterruptedException {
+
+        // First time, don't ignore idle threads:
+        NodesHotThreadsRequestBuilder builder = client().admin().cluster().prepareNodesHotThreads();
+        builder.setIgnoreIdleThreads(false);
+        builder.setThreads(Integer.MAX_VALUE);
+        NodesHotThreadsResponse response = builder.execute().get();
+
+        // Total length (in chars) of all per-node dumps, idle threads included.
+        int totSizeAll = 0;
+        for (NodeHotThreads node : response.getNodesMap().values()) {
+            totSizeAll += node.getHotThreads().length();
+        }
+
+        // Second time, do ignore idle threads:
+        builder = client().admin().cluster().prepareNodesHotThreads();
+        builder.setThreads(Integer.MAX_VALUE);
+
+        // Make sure default is true:
+        assertEquals(true, builder.request().ignoreIdleThreads());
+        response = builder.execute().get();
+
+        int totSizeIgnoreIdle = 0;
+        for (NodeHotThreads node : response.getNodesMap().values()) {
+            totSizeIgnoreIdle += node.getHotThreads().length();
+        }
+
+        // The filtered stacks should be smaller than unfiltered ones:
+        assertThat(totSizeIgnoreIdle, lessThan(totSizeAll));
+    }
+
+    // Checks that the default request parameters are echoed in the dump header of every node.
+    public void testTimestampAndParams() throws ExecutionException, InterruptedException {
+
+        NodesHotThreadsResponse response = client().admin().cluster().prepareNodesHotThreads().execute().get();
+
+        for (NodeHotThreads node : response.getNodesMap().values()) {
+            String result = node.getHotThreads();
+            assertTrue(result.indexOf("Hot threads at") != -1);
+            assertTrue(result.indexOf("interval=500ms") != -1);
+            assertTrue(result.indexOf("busiestThreads=3") != -1);
+            assertTrue(result.indexOf("ignoreIdleThreads=true") != -1);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java
new file mode 100644
index 0000000000..0fa72040da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories;
+
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only.
+ *
+ * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
+ */
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testPutRepositoryWithBlocks() {
+        logger.info("--> registering a repository is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                    .setType("fs")
+                    .setVerify(false)
+                    .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            // Always clear the block so the cluster is usable for the rest of the test / other tests.
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> registering a repository is allowed when the cluster is not read only");
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+    }
+
+    @Test
+    public void testVerifyRepositoryWithBlocks() {
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+        // This test checks that the Verify Repository operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet();
+            // Every data/master node must have taken part in the verification.
+            assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes()));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+
+    @Test
+    public void testDeleteRepositoryWithBlocks() {
+        // Register the repository up front (unverified) so there is something to delete.
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+        logger.info("--> deleting a repository is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks"), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> deleting a repository is allowed when the cluster is not read only");
+        assertAcked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks"));
+    }
+
+    @Test
+    public void testGetRepositoryWithBlocks() {
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+        // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            GetRepositoriesResponse response = client().admin().cluster().prepareGetRepositories("test-repo-blocks").execute().actionGet();
+            assertThat(response.repositories(), hasSize(1));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java
new file mode 100644
index 0000000000..5dab53c6bd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.cluster.snapshots;
+
+import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only.
+ *
+ * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
+ */
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class SnapshotBlocksTests extends ElasticsearchIntegrationTest {
+
+    protected static final String INDEX_NAME = "test-blocks-1";
+    protected static final String OTHER_INDEX_NAME = "test-blocks-2";
+    // Wildcard matching both indices above; used to snapshot them together.
+    protected static final String COMMON_INDEX_NAME_MASK = "test-blocks-*";
+    protected static final String REPOSITORY_NAME = "repo-" + INDEX_NAME;
+    protected static final String SNAPSHOT_NAME = "snapshot-0";
+
+    // Creates two populated indices, registers and verifies an fs repository,
+    // and takes an initial snapshot (SNAPSHOT_NAME) that the tests below operate on.
+    // NOTE(review): JUnit's @Before usually requires a public method — presumably the
+    // randomized runner accepts protected here; confirm.
+    @Before
+    protected void setUpRepository() throws Exception {
+        createIndex(INDEX_NAME, OTHER_INDEX_NAME);
+
+        int docs = between(10, 100);
+        for (int i = 0; i < docs; i++) {
+            client().prepareIndex(INDEX_NAME, "type").setSource("test", "init").execute().actionGet();
+        }
+        docs = between(10, 100);
+        for (int i = 0; i < docs; i++) {
+            client().prepareIndex(OTHER_INDEX_NAME, "type").setSource("test", "init").execute().actionGet();
+        }
+
+
+        logger.info("--> register a repository");
+        assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME)
+                .setType("fs")
+                .setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+        logger.info("--> verify the repository");
+        VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get();
+        assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes()));
+
+        logger.info("--> create a snapshot");
+        CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
+                .setIncludeGlobalState(true)
+                .setWaitForCompletion(true)
+                .execute().actionGet();
+        assertThat(snapshotResponse.status(), equalTo(RestStatus.OK));
+        ensureSearchable();
+    }
+
+    @Test
+    public void testCreateSnapshotWithBlocks() {
+        logger.info("--> creating a snapshot is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1"), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            // Always lift the cluster-wide block, even on assertion failure.
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> creating a snapshot is allowed when the cluster is not read only");
+        CreateSnapshotResponse response = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1")
+                .setWaitForCompletion(true)
+                .execute().actionGet();
+        assertThat(response.status(), equalTo(RestStatus.OK));
+    }
+
+    @Test
+    public void testCreateSnapshotWithIndexBlocks() {
+        // A read-only index can still be snapshotted: snapshots only read from the index.
+        logger.info("--> creating a snapshot is not blocked when an index is read only");
+        try {
+            enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY);
+            assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK));
+        } finally {
+            disableIndexBlock(INDEX_NAME, SETTING_READ_ONLY);
+        }
+
+        // A read-blocked index cannot be snapshotted — but only if it is part of the snapshot.
+        logger.info("--> creating a snapshot is blocked when an index is blocked for reads");
+        try {
+            enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ);
+            assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK);
+            logger.info("--> creating a snapshot is not blocked when an read-blocked index is not part of the snapshot");
+            assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK));
+        } finally {
+            disableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ);
+        }
+    }
+
+    @Test
+    public void testDeleteSnapshotWithBlocks() {
+        logger.info("--> deleting a snapshot is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> deleting a snapshot is allowed when the cluster is not read only");
+        DeleteSnapshotResponse response = client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME).execute().actionGet();
+        assertThat(response.isAcknowledged(), equalTo(true));
+    }
+
+    @Test
+    public void testRestoreSnapshotWithBlocks() {
+        // Delete both indices first so the restore below has something to recreate.
+        assertAcked(client().admin().indices().prepareDelete(INDEX_NAME, OTHER_INDEX_NAME));
+        assertFalse(client().admin().indices().prepareExists(INDEX_NAME, OTHER_INDEX_NAME).get().isExists());
+
+        logger.info("--> restoring a snapshot is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        // NOTE(review): log message says "creating" but this step restores the snapshot — the string
+        // looks copy-pasted from testCreateSnapshotWithBlocks; consider correcting it in a follow-up.
+        logger.info("--> creating a snapshot is allowed when the cluster is not read only");
+        RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
+                .setWaitForCompletion(true)
+                .execute().actionGet();
+        assertThat(response.status(), equalTo(RestStatus.OK));
+        assertTrue(client().admin().indices().prepareExists(INDEX_NAME).get().isExists());
+        assertTrue(client().admin().indices().prepareExists(OTHER_INDEX_NAME).get().isExists());
+    }
+
+    @Test
+    public void testGetSnapshotWithBlocks() {
+        // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet();
+            assertThat(response.getSnapshots(), hasSize(1));
+            assertThat(response.getSnapshots().get(0).name(), equalTo(SNAPSHOT_NAME));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+
+    @Test
+    public void testSnapshotStatusWithBlocks() {
+        // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(REPOSITORY_NAME)
+                    .setSnapshots(SNAPSHOT_NAME)
+                    .execute().actionGet();
+            assertThat(response.getSnapshots(), hasSize(1));
+            assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTest.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTest.java
new file mode 100644
index 0000000000..dd66049adf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.state;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Unit tests for the {@link ClusterStateRequest}.
+ */
+public class ClusterStateRequestTest extends ElasticsearchTestCase {
+
+    // Round-trips a randomized ClusterStateRequest through the wire format at a random
+    // compatible version and checks that every flag, the index list, and (for recent
+    // versions) the IndicesOptions survive serialization.
+    @Test
+    public void testSerialization() throws Exception {
+        int iterations = randomIntBetween(5, 20);
+        for (int i = 0; i < iterations; i++) {
+
+            IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+            ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()).metaData(randomBoolean())
+                    .nodes(randomBoolean()).blocks(randomBoolean()).indices("testindex", "testindex2").indicesOptions(indicesOptions);
+
+            // Serialize with a random wire version between the oldest compatible one and CURRENT.
+            Version testVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
+            BytesStreamOutput output = new BytesStreamOutput();
+            output.setVersion(testVersion);
+            clusterStateRequest.writeTo(output);
+
+            // Deserialize with the same version the bytes were written with.
+            StreamInput streamInput = StreamInput.wrap(output.bytes());
+            streamInput.setVersion(testVersion);
+            ClusterStateRequest deserializedCSRequest = new ClusterStateRequest();
+            deserializedCSRequest.readFrom(streamInput);
+
+            assertThat(deserializedCSRequest.routingTable(), equalTo(clusterStateRequest.routingTable()));
+            assertThat(deserializedCSRequest.metaData(), equalTo(clusterStateRequest.metaData()));
+            assertThat(deserializedCSRequest.nodes(), equalTo(clusterStateRequest.nodes()));
+            assertThat(deserializedCSRequest.blocks(), equalTo(clusterStateRequest.blocks()));
+            assertThat(deserializedCSRequest.indices(), equalTo(clusterStateRequest.indices()));
+
+            if (testVersion.onOrAfter(Version.V_1_5_0)) {
+                assertOptionsMatch(deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions());
+            } else {
+                // versions before V_1_5_0 use IndicesOptions.lenientExpandOpen()
+                assertOptionsMatch(deserializedCSRequest.indicesOptions(), IndicesOptions.lenientExpandOpen());
+            }
+        }
+    }
+
+    // Field-by-field equality check for the four IndicesOptions flags this request serializes.
+    private static void assertOptionsMatch(IndicesOptions in, IndicesOptions out) {
+        assertThat(in.ignoreUnavailable(), equalTo(out.ignoreUnavailable()));
+        assertThat(in.expandWildcardsClosed(), equalTo(out.expandWildcardsClosed()));
+        assertThat(in.expandWildcardsOpen(), equalTo(out.expandWildcardsOpen()));
+        assertThat(in.allowNoIndices(), equalTo(out.allowNoIndices()));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
new file mode 100644
index 0000000000..9945eceaa5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsTests.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.monitor.sigar.SigarService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.is;
+
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1, numClientNodes = 0)
+public class ClusterStatsTests extends ElasticsearchIntegrationTest {
+
+    // Asserts the node-role breakdown (total / master-only / data-only / master+data / client).
+    private void assertCounts(ClusterStatsNodes.Counts counts, int total, int masterOnly, int dataOnly, int masterData, int client) {
+        assertThat(counts.getTotal(), Matchers.equalTo(total));
+        assertThat(counts.getMasterOnly(), Matchers.equalTo(masterOnly));
+        assertThat(counts.getDataOnly(), Matchers.equalTo(dataOnly));
+        assertThat(counts.getMasterData(), Matchers.equalTo(masterData));
+        assertThat(counts.getClient(), Matchers.equalTo(client));
+    }
+
+    // Blocks until the cluster health reports exactly numNodes nodes.
+    private void waitForNodes(int numNodes) {
+        ClusterHealthResponse actionGet = client().admin().cluster()
+                .health(Requests.clusterHealthRequest().waitForEvents(Priority.LANGUID).waitForNodes(Integer.toString(numNodes))).actionGet();
+        assertThat(actionGet.isTimedOut(), is(false));
+    }
+
+    // Starts nodes with each role combination one by one and verifies the counts after each addition.
+    @Test
+    public void testNodeCounts() {
+        ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+        assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
+
+        // node.data=false -> master-only node
+        internalCluster().startNode(Settings.builder().put("node.data", false));
+        waitForNodes(2);
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
+
+        // node.master=false -> data-only node
+        internalCluster().startNode(Settings.builder().put("node.master", false));
+        waitForNodes(3);
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
+
+        // node.client=true -> client node
+        internalCluster().startNode(Settings.builder().put("node.client", true));
+        waitForNodes(4);
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
+    }
+
+
+    // Asserts the index/shard distribution (index count, total shards, primaries, replication factor).
+    private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, int total, int primaries, double replicationFactor) {
+        assertThat(stats.getIndices(), Matchers.equalTo(indices));
+        assertThat(stats.getTotal(), Matchers.equalTo(total));
+        assertThat(stats.getPrimaries(), Matchers.equalTo(primaries));
+        assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor));
+    }
+
+    @Test
+    public void testIndicesShardStats() {
+        ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+
+
+        // 2 primaries + 1 replica each, but only one node: replicas unassigned -> yellow, replication 0.
+        prepareCreate("test1").setSettings("number_of_shards", 2, "number_of_replicas", 1).get();
+        ensureYellow();
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
+        assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
+        assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
+        assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
+
+        // add another node, replicas should get assigned
+        internalCluster().startNode();
+        ensureGreen();
+        index("test1", "type", "1", "f", "f");
+        refresh(); // make the doc visible
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+        assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
+        assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
+
+        // second index: 3 primaries, no replicas -> 5 primaries, 7 shards total, replication 2/5.
+        prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
+        ensureGreen();
+        response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
+        assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2));
+        assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5);
+
+        assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5));
+        assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2));
+        assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3));
+
+        assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5));
+        assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3));
+        assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4));
+
+        assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5));
+        assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0));
+        assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0));
+
+    }
+
+    // Sanity checks that the aggregated stats contain plausible (non-zero / non-default) values.
+    @Test
+    public void testValuesSmokeScreen() throws IOException {
+        internalCluster().ensureAtMostNumDataNodes(5);
+        internalCluster().ensureAtLeastNumDataNodes(1);
+        SigarService sigarService = internalCluster().getInstance(SigarService.class);
+        assertAcked(prepareCreate("test1").setSettings(settingsBuilder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0).build()));
+        index("test1", "type", "1", "f", "f");
+        /*
+         * Ensure at least one shard is allocated otherwise the FS stats might
+         * return 0. This happens if the File#getTotalSpace() and friends is called
+         * on a directory that doesn't exist or has not yet been created.
+         */
+        ensureYellow("test1");
+        ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+        // Include the full response in every assertion message to ease debugging of rare failures.
+        String msg = response.toString();
+        assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
+        assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
+
+        assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
+        assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
+        if (sigarService.sigarAvailable()) {
+            // We only get those if we have sigar
+            assertThat(msg, response.nodesStats.getOs().getAvailableProcessors(), Matchers.greaterThan(0));
+            assertThat(msg, response.nodesStats.getOs().getAvailableMemory().bytes(), Matchers.greaterThan(0l));
+            assertThat(msg, response.nodesStats.getOs().getCpus().size(), Matchers.greaterThan(0));
+        }
+        assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
+        assertThat(msg, response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true));
+        assertThat(msg, response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0));
+
+        assertThat(msg, response.nodesStats.getProcess().count, Matchers.greaterThan(0));
+        // 0 happens when not supported on platform
+        assertThat(msg, response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L));
+        // these can be -1 if not supported on platform
+        assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+        assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L));
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java
new file mode 100644
index 0000000000..004f2c85da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class PendingTasksBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testPendingTasksWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ // This test checks that the Pending Cluster Tasks operation is never blocked, even if an index is read only or whatever.
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertNotNull(response.getPendingTasks());
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ try {
+ setClusterReadOnly(true);
+ PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertNotNull(response.getPendingTasks());
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java
new file mode 100644
index 0000000000..b93d7a2f72
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.cache.clear;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testClearIndicesCacheWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ NumShards numShards = getNumShards("test");
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+                ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).execute().actionGet();
+ assertNoFailures(clearIndicesCacheResponse);
+ assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+ // Request is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+                assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTest.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTest.java
new file mode 100644
index 0000000000..31576c38d0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.action.index.IndexRequestBuilderTest;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.NoOpClient;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class CreateIndexRequestBuilderTest extends ElasticsearchTestCase {
+
+ private static final String KEY = "my.settings.key";
+ private static final String VALUE = "my.settings.value";
+ private NoOpClient testClient;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ this.testClient = new NoOpClient(getTestName());
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ this.testClient.close();
+ super.tearDown();
+ }
+
+ /**
+ * test setting the source with available setters
+ */
+ @Test
+ public void testSetSource() throws IOException {
+ CreateIndexRequestBuilder builder = new CreateIndexRequestBuilder(this.testClient, CreateIndexAction.INSTANCE);
+ builder.setSource("{\""+KEY+"\" : \""+VALUE+"\"}");
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ XContentBuilder xContent = XContentFactory.jsonBuilder().startObject().field(KEY, VALUE).endObject();
+ xContent.close();
+ builder.setSource(xContent);
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ ByteArrayOutputStream docOut = new ByteArrayOutputStream();
+ XContentBuilder doc = XContentFactory.jsonBuilder(docOut).startObject().field(KEY, VALUE).endObject();
+ doc.close();
+ builder.setSource(docOut.toByteArray());
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ Map<String, String> settingsMap = new HashMap<>();
+ settingsMap.put(KEY, VALUE);
+ builder.setSettings(settingsMap);
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+ }
+
+ /**
+ * test setting the settings with available setters
+ */
+ @Test
+ public void testSetSettings() throws IOException {
+ CreateIndexRequestBuilder builder = new CreateIndexRequestBuilder(this.testClient, CreateIndexAction.INSTANCE);
+ builder.setSettings(KEY, VALUE);
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ builder.setSettings("{\""+KEY+"\" : \""+VALUE+"\"}");
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ builder.setSettings(Settings.builder().put(KEY, VALUE));
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ builder.setSettings(Settings.builder().put(KEY, VALUE).build());
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ Map<String, String> settingsMap = new HashMap<>();
+ settingsMap.put(KEY, VALUE);
+ builder.setSettings(settingsMap);
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+
+ XContentBuilder xContent = XContentFactory.jsonBuilder().startObject().field(KEY, VALUE).endObject();
+ xContent.close();
+ builder.setSettings(xContent);
+ assertEquals(VALUE, builder.request().settings().get(KEY));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java
new file mode 100644
index 0000000000..5b020f2ab3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.create;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ClusterScope(scope = Scope.TEST)
+public class CreateIndexTests extends ElasticsearchIntegrationTest{
+
+ @Test
+ public void testCreationDate_Given() {
+ prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4l)).get();
+ ClusterStateResponse response = client().admin().cluster().prepareState().get();
+ ClusterState state = response.getState();
+ assertThat(state, notNullValue());
+ MetaData metadata = state.getMetaData();
+ assertThat(metadata, notNullValue());
+ ImmutableOpenMap<String, IndexMetaData> indices = metadata.getIndices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.size(), equalTo(1));
+ IndexMetaData index = indices.get("test");
+ assertThat(index, notNullValue());
+ assertThat(index.creationDate(), equalTo(4l));
+ }
+
+ @Test
+ public void testCreationDate_Generated() {
+ long timeBeforeRequest = System.currentTimeMillis();
+ prepareCreate("test").get();
+ long timeAfterRequest = System.currentTimeMillis();
+ ClusterStateResponse response = client().admin().cluster().prepareState().get();
+ ClusterState state = response.getState();
+ assertThat(state, notNullValue());
+ MetaData metadata = state.getMetaData();
+ assertThat(metadata, notNullValue());
+ ImmutableOpenMap<String, IndexMetaData> indices = metadata.getIndices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.size(), equalTo(1));
+ IndexMetaData index = indices.get("test");
+ assertThat(index, notNullValue());
+ assertThat(index.creationDate(), allOf(lessThanOrEqualTo(timeAfterRequest), greaterThanOrEqualTo(timeBeforeRequest)));
+ }
+
+ @Test
+ public void testDoubleAddMapping() throws Exception {
+ try {
+ prepareCreate("test")
+ .addMapping("type1", "date", "type=date")
+ .addMapping("type1", "num", "type=integer");
+ fail("did not hit expected exception");
+ } catch (IllegalStateException ise) {
+ // expected
+ }
+ try {
+ prepareCreate("test")
+ .addMapping("type1", new HashMap<String,Object>())
+ .addMapping("type1", new HashMap<String,Object>());
+ fail("did not hit expected exception");
+ } catch (IllegalStateException ise) {
+ // expected
+ }
+ try {
+ prepareCreate("test")
+ .addMapping("type1", jsonBuilder())
+ .addMapping("type1", jsonBuilder());
+ fail("did not hit expected exception");
+ } catch (IllegalStateException ise) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testInvalidShardCountSettings() throws Exception {
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0))
+ .build())
+ .get();
+ fail("should have thrown an exception about the primary shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
+ }
+
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1))
+ .build())
+ .get();
+ fail("should have thrown an exception about the replica shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
+ }
+
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0))
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1))
+ .build())
+ .get();
+ fail("should have thrown an exception about the shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testCreateIndexWithBlocks() {
+ try {
+ setClusterReadOnly(true);
+ assertBlocked(prepareCreate("test"));
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+
+ @Test
+ public void testInvalidShardCountSettingsWithoutPrefix() throws Exception {
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0))
+ .build())
+ .get();
+ fail("should have thrown an exception about the shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
+ }
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
+ .build())
+ .get();
+ fail("should have thrown an exception about the shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
+ }
+ try {
+ prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0))
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
+ .build())
+ .get();
+ fail("should have thrown an exception about the shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true));
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java
new file mode 100644
index 0000000000..6973f63a22
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.delete;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class DeleteIndexBlocksTests extends ElasticsearchIntegrationTest{
+
+ @Test
+ public void testDeleteIndexWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ try {
+ setClusterReadOnly(true);
+ assertBlocked(client().admin().indices().prepareDelete("test"));
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java
new file mode 100644
index 0000000000..b9fa6bcd8b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class FlushBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFlushWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ NumShards numShards = getNumShards("test");
+
+ int docs = between(10, 100);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet();
+ }
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ FlushResponse response = client().admin().indices().prepareFlush("test").execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareFlush("test"));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Flushing all indices is blocked when the cluster is read-only
+ try {
+ FlushResponse response = client().admin().indices().prepareFlush().execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ setClusterReadOnly(true);
+ assertBlocked(client().admin().indices().prepareFlush());
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java
new file mode 100644
index 0000000000..d2e996d623
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java
@@ -0,0 +1,330 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.get;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.*;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GetIndexTests extends ElasticsearchIntegrationTest {
+
+ private static final String[] allFeatures = { "_alias", "_aliases", "_mapping", "_mappings", "_settings", "_warmer", "_warmers" };
+
+ @Override
+ protected void setupSuiteScopeCluster() throws Exception {
+ assertAcked(prepareCreate("idx").addAlias(new Alias("alias_idx")).addMapping("type1", "{\"type1\":{}}")
+ .setSettings(Settings.builder().put("number_of_shards", 1)).get());
+ ensureSearchable("idx");
+ assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("idx")).get());
+ createIndex("empty_idx");
+ ensureSearchable("idx", "empty_idx");
+ }
+
+ @Test
+ public void testSimple() {
+ GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("idx").get();
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertAliases(response, "idx");
+ assertMappings(response, "idx");
+ assertSettings(response, "idx");
+ assertWarmers(response, "idx");
+ }
+
+ @Test(expected=IndexMissingException.class)
+ public void testSimpleUnknownIndex() {
+ client().admin().indices().prepareGetIndex().addIndices("missing_idx").get();
+ }
+
+ @Test
+ public void testEmpty() {
+ GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("empty_idx").get();
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("empty_idx"));
+ assertEmptyAliases(response);
+ assertEmptyOrOnlyDefaultMappings(response, "empty_idx");
+ assertNonEmptySettings(response, "empty_idx");
+ assertEmptyWarmers(response);
+ }
+
+ @Test
+ public void testSimpleMapping() {
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
+ Feature.MAPPINGS);
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertMappings(response, "idx");
+ assertEmptyAliases(response);
+ assertEmptySettings(response);
+ assertEmptyWarmers(response);
+ }
+
+ @Test
+ public void testSimpleAlias() {
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
+ Feature.ALIASES);
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertAliases(response, "idx");
+ assertEmptyMappings(response);
+ assertEmptySettings(response);
+ assertEmptyWarmers(response);
+ }
+
+ @Test
+ public void testSimpleSettings() {
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
+ Feature.SETTINGS);
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertSettings(response, "idx");
+ assertEmptyAliases(response);
+ assertEmptyMappings(response);
+ assertEmptyWarmers(response);
+ }
+
+ @Test
+ public void testSimpleWarmer() {
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
+ Feature.WARMERS);
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertWarmers(response, "idx");
+ assertEmptyAliases(response);
+ assertEmptyMappings(response);
+ assertEmptySettings(response);
+ }
+
+ @Test
+ public void testSimpleMixedFeatures() {
+ int numFeatures = randomIntBetween(1, Feature.values().length);
+ List<Feature> features = new ArrayList<Feature>(numFeatures);
+ for (int i = 0; i < numFeatures; i++) {
+ features.add(randomFrom(Feature.values()));
+ }
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("idx"),
+ features.toArray(new Feature[features.size()]));
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ if (features.contains(Feature.ALIASES)) {
+ assertAliases(response, "idx");
+ } else {
+ assertEmptyAliases(response);
+ }
+ if (features.contains(Feature.MAPPINGS)) {
+ assertMappings(response, "idx");
+ } else {
+ assertEmptyMappings(response);
+ }
+ if (features.contains(Feature.SETTINGS)) {
+ assertSettings(response, "idx");
+ } else {
+ assertEmptySettings(response);
+ }
+ if (features.contains(Feature.WARMERS)) {
+ assertWarmers(response, "idx");
+ } else {
+ assertEmptyWarmers(response);
+ }
+ }
+
+ @Test
+ public void testEmptyMixedFeatures() {
+ int numFeatures = randomIntBetween(1, Feature.values().length);
+ List<Feature> features = new ArrayList<Feature>(numFeatures);
+ for (int i = 0; i < numFeatures; i++) {
+ features.add(randomFrom(Feature.values()));
+ }
+ GetIndexResponse response = runWithRandomFeatureMethod(client().admin().indices().prepareGetIndex().addIndices("empty_idx"),
+ features.toArray(new Feature[features.size()]));
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("empty_idx"));
+ assertEmptyAliases(response);
+ if (features.contains(Feature.MAPPINGS)) {
+ assertEmptyOrOnlyDefaultMappings(response, "empty_idx");
+ } else {
+ assertEmptyMappings(response);
+ }
+ if (features.contains(Feature.SETTINGS)) {
+ assertNonEmptySettings(response, "empty_idx");
+ } else {
+ assertEmptySettings(response);
+ }
+ assertEmptyWarmers(response);
+ }
+
+ @Test
+ public void testGetIndexWithBlocks() {
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("idx", block);
+ GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("idx")
+ .addFeatures(Feature.MAPPINGS, Feature.ALIASES).get();
+ String[] indices = response.indices();
+ assertThat(indices, notNullValue());
+ assertThat(indices.length, equalTo(1));
+ assertThat(indices[0], equalTo("idx"));
+ assertMappings(response, "idx");
+ assertAliases(response, "idx");
+ } finally {
+ disableIndexBlock("idx", block);
+ }
+ }
+
+ try {
+ enableIndexBlock("idx", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK);
+ } finally {
+ disableIndexBlock("idx", SETTING_BLOCKS_METADATA);
+ }
+ }
+
+ // Randomly exercises both feature-setting entry points (addFeatures vs.
+ // setFeatures) so both builder paths get coverage.
+ private GetIndexResponse runWithRandomFeatureMethod(GetIndexRequestBuilder requestBuilder, Feature... features) {
+ if (randomBoolean()) {
+ return requestBuilder.addFeatures(features).get();
+ } else {
+ return requestBuilder.setFeatures(features).get();
+ }
+ }
+
+ // Expects exactly one warmer, named "warmer1", registered on indexName.
+ private void assertWarmers(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, ImmutableList<Entry>> warmers = response.warmers();
+ assertThat(warmers, notNullValue());
+ assertThat(warmers.size(), equalTo(1));
+ ImmutableList<Entry> indexWarmers = warmers.get(indexName);
+ assertThat(indexWarmers, notNullValue());
+ assertThat(indexWarmers.size(), equalTo(1));
+ Entry warmer = indexWarmers.get(0);
+ assertThat(warmer, notNullValue());
+ assertThat(warmer.name(), equalTo("warmer1"));
+ }
+
+ // Expects settings for exactly one index, with index.number_of_shards == "1".
+ private void assertSettings(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, Settings> settings = response.settings();
+ assertThat(settings, notNullValue());
+ assertThat(settings.size(), equalTo(1));
+ Settings indexSettings = settings.get(indexName);
+ assertThat(indexSettings, notNullValue());
+ assertThat(indexSettings.get("index.number_of_shards"), equalTo("1"));
+ }
+
+ // Weaker check than assertSettings(): only verifies that some settings
+ // object exists for indexName, without asserting specific values.
+ private void assertNonEmptySettings(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, Settings> settings = response.settings();
+ assertThat(settings, notNullValue());
+ assertThat(settings.size(), equalTo(1));
+ Settings indexSettings = settings.get(indexName);
+ assertThat(indexSettings, notNullValue());
+ }
+
+ // Expects a "type1" mapping on indexName, optionally accompanied by a
+ // "_default_" mapping (hence the size 1-or-2 check).
+ private void assertMappings(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = response.mappings();
+ assertThat(mappings, notNullValue());
+ assertThat(mappings.size(), equalTo(1));
+ ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.get(indexName);
+ assertThat(indexMappings, notNullValue());
+ assertThat(indexMappings.size(), anyOf(equalTo(1), equalTo(2)));
+ if (indexMappings.size() == 2) {
+ MappingMetaData mapping = indexMappings.get("_default_");
+ assertThat(mapping, notNullValue());
+ }
+ MappingMetaData mapping = indexMappings.get("type1");
+ assertThat(mapping, notNullValue());
+ assertThat(mapping.type(), equalTo("type1"));
+ }
+
+ // Expects indexName to have either no mappings at all or only the
+ // "_default_" mapping.
+ private void assertEmptyOrOnlyDefaultMappings(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = response.mappings();
+ assertThat(mappings, notNullValue());
+ assertThat(mappings.size(), equalTo(1));
+ ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.get(indexName);
+ assertThat(indexMappings, notNullValue());
+ assertThat(indexMappings.size(), anyOf(equalTo(0), equalTo(1)));
+ if (indexMappings.size() == 1) {
+ MappingMetaData mapping = indexMappings.get("_default_");
+ assertThat(mapping, notNullValue());
+ }
+ }
+
+ // Expects exactly one alias, named "alias_idx", on indexName.
+ private void assertAliases(GetIndexResponse response, String indexName) {
+ ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases = response.aliases();
+ assertThat(aliases, notNullValue());
+ assertThat(aliases.size(), equalTo(1));
+ ImmutableList<AliasMetaData> indexAliases = aliases.get(indexName);
+ assertThat(indexAliases, notNullValue());
+ assertThat(indexAliases.size(), equalTo(1));
+ AliasMetaData alias = indexAliases.get(0);
+ assertThat(alias, notNullValue());
+ assertThat(alias.alias(), equalTo("alias_idx"));
+ }
+
+ // Warmers section must be present but empty (feature not requested).
+ private void assertEmptyWarmers(GetIndexResponse response) {
+ assertThat(response.warmers(), notNullValue());
+ assertThat(response.warmers().isEmpty(), equalTo(true));
+ }
+
+ // Settings section must be present but empty (feature not requested).
+ private void assertEmptySettings(GetIndexResponse response) {
+ assertThat(response.settings(), notNullValue());
+ assertThat(response.settings().isEmpty(), equalTo(true));
+ }
+
+ // Mappings section must be present but empty (feature not requested).
+ private void assertEmptyMappings(GetIndexResponse response) {
+ assertThat(response.mappings(), notNullValue());
+ assertThat(response.mappings().isEmpty(), equalTo(true));
+ }
+
+ // Aliases section must be present but empty (feature not requested).
+ private void assertEmptyAliases(GetIndexResponse response) {
+ assertThat(response.aliases(), notNullValue());
+ assertThat(response.aliases().isEmpty(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
new file mode 100644
index 0000000000..f5c73082bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequestTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.mapping.put;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+public class PutMappingRequestTests extends ElasticsearchTestCase {
+
+ // Walks PutMappingRequest.validate() through its failure modes in order:
+ // missing type, empty type, missing source, empty source, then success.
+ public void testValidation() {
+ PutMappingRequest r = new PutMappingRequest("myindex");
+ ActionRequestValidationException ex = r.validate();
+ assertNotNull("type validation should fail", ex);
+ assertTrue(ex.getMessage().contains("type is missing"));
+
+ r.type("");
+ ex = r.validate();
+ assertNotNull("type validation should fail", ex);
+ assertTrue(ex.getMessage().contains("type is empty"));
+
+ r.type("mytype");
+ ex = r.validate();
+ assertNotNull("source validation should fail", ex);
+ assertTrue(ex.getMessage().contains("source is missing"));
+
+ r.source("");
+ ex = r.validate();
+ assertNotNull("source validation should fail", ex);
+ assertTrue(ex.getMessage().contains("source is empty"));
+
+ // validate() returns null once type and source are both set and non-empty
+ r.source("somevalidmapping");
+ ex = r.validate();
+ assertNull("validation should succeed", ex);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java
new file mode 100644
index 0000000000..47b5fbe0da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.optimize;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class OptimizeBlocksTests extends ElasticsearchIntegrationTest {
+
+ // Optimize must succeed under per-index read/write blocks, be rejected
+ // under read-only/metadata blocks, and be rejected cluster-wide when the
+ // whole cluster is read-only.
+ @Test
+ public void testOptimizeWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ NumShards numShards = getNumShards("test");
+
+ int docs = between(10, 100);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet();
+ }
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ OptimizeResponse response = client().admin().indices().prepareOptimize("test").execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+ } finally {
+ // always clear the block so the next iteration starts clean
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareOptimize("test"));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Optimizing all indices is blocked when the cluster is read-only
+ try {
+ OptimizeResponse response = client().admin().indices().prepareOptimize().execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ setClusterReadOnly(true);
+ // fixed: previously asserted prepareFlush() was blocked (copy/paste from
+ // the flush test); this test must assert that *optimize* is blocked
+ assertBlocked(client().admin().indices().prepareOptimize());
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java
new file mode 100644
index 0000000000..fc83f96eb3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.refresh;
+
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class RefreshBlocksTests extends ElasticsearchIntegrationTest {
+
+ // Refresh must succeed under per-index read/write blocks, be rejected
+ // under read-only/metadata blocks, and be rejected cluster-wide when the
+ // whole cluster is read-only.
+ @Test
+ public void testRefreshWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ NumShards numShards = getNumShards("test");
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ RefreshResponse response = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+ } finally {
+ // always clear the block so the next iteration starts clean
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareRefresh("test"));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Refreshing all indices is blocked when the cluster is read-only
+ try {
+ RefreshResponse response = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ setClusterReadOnly(true);
+ assertBlocked(client().admin().indices().prepareRefresh());
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java
new file mode 100644
index 0000000000..a3f2f9f104
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class IndicesSegmentsBlocksTests extends ElasticsearchIntegrationTest {
+
+ // Segments requests must succeed under read/write/read-only blocks and be
+ // rejected only when the index metadata block is set.
+ @Test
+ public void testIndicesSegmentsWithBlocks() {
+ createIndex("test-blocks");
+ ensureGreen("test-blocks");
+
+ // flush after indexing so there are committed segments to report
+ int docs = between(10, 100);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet();
+ }
+ client().admin().indices().prepareFlush("test-blocks").get();
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test-blocks", blockSetting);
+ IndicesSegmentResponse response = client().admin().indices().prepareSegments("test-blocks").execute().actionGet();
+ assertNoFailures(response);
+ } finally {
+ // always clear the block so the next iteration starts clean
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareSegments("test-blocks"));
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
new file mode 100644
index 0000000000..df9fbda67c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.segments;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
+public class IndicesSegmentsRequestTests extends ElasticsearchSingleNodeTest {
+
+ // Creates and fills the "test" index before each test; merging is disabled
+ // so the segment layout stays predictable.
+ @Before
+ public void setupIndex() {
+ Settings settings = Settings.builder()
+ // don't allow any merges so that the num docs is the expected segments
+ .put("index.merge.policy.segments_per_tier", 1000000f)
+ .build();
+ createIndex("test", settings);
+
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ for (int j = 0; j < numDocs; ++j) {
+ String id = Integer.toString(j);
+ client().prepareIndex("test", "type1", id).setSource("text", "sometext").get();
+ }
+ client().admin().indices().prepareFlush("test").get();
+ }
+
+ // By default the per-segment RAM tree is not returned.
+ public void testBasic() {
+ IndicesSegmentResponse rsp = client().admin().indices().prepareSegments("test").get();
+ List<Segment> segments = rsp.getIndices().get("test").iterator().next().getShards()[0].getSegments();
+ assertNull(segments.get(0).ramTree);
+ }
+
+ // setVerbose(true) makes the response include the per-segment RAM tree.
+ public void testVerbose() {
+ IndicesSegmentResponse rsp = client().admin().indices().prepareSegments("test").setVerbose(true).get();
+ List<Segment> segments = rsp.getIndices().get("test").iterator().next().getShards()[0].getSegments();
+ assertNotNull(segments.get(0).ramTree);
+ }
+
+ /**
+ * with the default IndicesOptions inherited from BroadcastOperationRequest this will raise an exception
+ */
+ @Test(expected=org.elasticsearch.indices.IndexClosedException.class)
+ public void testRequestOnClosedIndex() {
+ client().admin().indices().prepareClose("test").get();
+ client().admin().indices().prepareSegments("test").get();
+ }
+
+ /**
+ * setting the "ignoreUnavailable" option prevents IndexClosedException
+ */
+ public void testRequestOnClosedIndexIgnoreUnavailable() {
+ client().admin().indices().prepareClose("test").get();
+ IndicesOptions defaultOptions = new IndicesSegmentsRequest().indicesOptions();
+ // keep the request's defaults but flip ignoreUnavailable/allow* flags on
+ IndicesOptions testOptions = IndicesOptions.fromOptions(true, true, true, false, defaultOptions);
+ IndicesSegmentResponse rsp = client().admin().indices().prepareSegments("test").setIndicesOptions(testOptions).get();
+ assertEquals(0, rsp.getIndices().size());
+ }
+
+ /**
+ * by default IndicesOptions setting IndicesSegmentsRequest should not throw exception when no index present
+ */
+ public void testAllowNoIndex() {
+ client().admin().indices().prepareDelete("test").get();
+ IndicesSegmentResponse rsp = client().admin().indices().prepareSegments().get();
+ assertEquals(0, rsp.getIndices().size());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java
new file mode 100644
index 0000000000..d6dba10e69
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class IndicesStatsBlocksTests extends ElasticsearchIntegrationTest {
+
+ // Stats requests must succeed under read/write/read-only blocks and fail
+ // with a ClusterBlockException when the index metadata block is set.
+ // NOTE(review): siblings use assertBlocked() for the blocked case; the
+ // explicit try/catch here is kept to preserve the recorded patch exactly.
+ @Test
+ public void testIndicesStatsWithBlocks() {
+ createIndex("ro");
+ ensureGreen("ro");
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("ro", blockSetting);
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("ro").execute().actionGet();
+ assertNotNull(indicesStatsResponse.getIndex("ro"));
+ } finally {
+ // always clear the block so the next iteration starts clean
+ disableIndexBlock("ro", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ try {
+ enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+ client().admin().indices().prepareStats("ro").execute().actionGet();
+ // fixed: message said "Exists should fail" — copy/paste from an exists test
+ fail("Stats should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true");
+ } catch (ClusterBlockException e) {
+ // Ok, a ClusterBlockException is expected
+ } finally {
+ disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java
new file mode 100644
index 0000000000..a8c40a8ce4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.CommitStats;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class IndicesStatsTests extends ElasticsearchSingleNodeTest {
+
+ // An index with no documents reports zero memory for every segment stat.
+ public void testSegmentStatsEmptyIndex() {
+ createIndex("test");
+ IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
+ SegmentsStats stats = rsp.getTotal().getSegments();
+ assertEquals(0, stats.getTermsMemoryInBytes());
+ assertEquals(0, stats.getStoredFieldsMemoryInBytes());
+ assertEquals(0, stats.getTermVectorsMemoryInBytes());
+ assertEquals(0, stats.getNormsMemoryInBytes());
+ assertEquals(0, stats.getDocValuesMemoryInBytes());
+ }
+
+ // Indexing a stored, doc-values, term-vector field makes every segment
+ // memory stat non-zero, and stats from multiple segments accumulate.
+ public void testSegmentStats() throws Exception {
+ // mapping enables every feature whose memory usage is asserted below
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("doc_values", true)
+ .field("store", true)
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping));
+ ensureGreen("test");
+ client().prepareIndex("test", "doc", "1").setSource("foo", "bar").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
+ SegmentsStats stats = rsp.getIndex("test").getTotal().getSegments();
+ assertThat(stats.getTermsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getNormsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0l));
+
+ // now check multiple segments stats are merged together
+ client().prepareIndex("test", "doc", "2").setSource("foo", "bar").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ rsp = client().admin().indices().prepareStats("test").get();
+ SegmentsStats stats2 = rsp.getIndex("test").getTotal().getSegments();
+ assertThat(stats2.getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
+ assertThat(stats2.getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
+ assertThat(stats2.getTermVectorsMemoryInBytes(), greaterThan(stats.getTermVectorsMemoryInBytes()));
+ assertThat(stats2.getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
+ assertThat(stats2.getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
+ }
+
+ // Every shard must report commit stats with a positive generation, an id,
+ // and translog generation/uuid entries in the commit user data.
+ public void testCommitStats() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+
+ IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
+ for (ShardStats shardStats : rsp.getIndex("test").getShards()) {
+ final CommitStats commitStats = shardStats.getCommitStats();
+ assertNotNull(commitStats);
+ assertThat(commitStats.getGeneration(), greaterThan(0l));
+ assertThat(commitStats.getId(), notNullValue());
+ assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
+ assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));
+
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
new file mode 100644
index 0000000000..67ed2bee12
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/warmer/put/PutWarmerRequestTests.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.admin.indices.warmer.put;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.hasSize;
+
+public class PutWarmerRequestTests extends ElasticsearchTestCase {
+
+ // A PutWarmerRequest without a search request must produce exactly one
+ // validation error mentioning the missing search request.
+ @Test // issue 4196
+ public void testThatValidationWithoutSpecifyingSearchRequestFails() {
+ PutWarmerRequest putWarmerRequest = new PutWarmerRequest("foo");
+ ActionRequestValidationException validationException = putWarmerRequest.validate();
+ assertThat(validationException.validationErrors(), hasSize(1));
+ assertThat(validationException.getMessage(), containsString("search request is missing"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
new file mode 100644
index 0000000000..ea25cfb767
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkIntegrationTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+
+public class BulkIntegrationTests extends ElasticsearchIntegrationTest {
+
+ // A bulk index request against a nonexistent index must auto-create the
+ // index and its mapping; the mapping appears asynchronously, hence assertBusy.
+ @Test
+ public void testBulkIndexCreatesMapping() throws Exception {
+ String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json");
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ bulkBuilder.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+ bulkBuilder.get();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings().get();
+ assertTrue(mappingsResponse.getMappings().containsKey("logstash-2014.03.30"));
+ assertTrue(mappingsResponse.getMappings().get("logstash-2014.03.30").containsKey("logs"));
+ }
+ });
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java
new file mode 100644
index 0000000000..800a49453c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+// Fresh, initially empty cluster per test so the node-level
+// action.auto_create_index setting below actually takes effect.
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class BulkProcessorClusterSettingsTests extends ElasticsearchIntegrationTest {
+
+    /**
+     * With action.auto_create_index disabled, bulk items addressing a missing
+     * index must fail individually while items on existing indices succeed.
+     */
+    @Test
+    public void testBulkProcessorAutoCreateRestrictions() throws Exception {
+        // See issue #8125
+        Settings settings = Settings.settingsBuilder().put("action.auto_create_index", false).build();
+
+        internalCluster().startNode(settings);
+
+        createIndex("willwork");
+        client().admin().cluster().prepareHealth("willwork").setWaitForGreenStatus().execute().actionGet();
+
+        BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
+        bulkRequestBuilder.add(client().prepareIndex("willwork", "type1", "1").setSource("{\"foo\":1}"));
+        // "wontwork" was never created and must not be auto-created.
+        bulkRequestBuilder.add(client().prepareIndex("wontwork", "type1", "2").setSource("{\"foo\":2}"));
+        bulkRequestBuilder.add(client().prepareIndex("willwork", "type1", "3").setSource("{\"foo\":3}"));
+        BulkResponse br = bulkRequestBuilder.get();
+        BulkItemResponse[] responses = br.getItems();
+        // A bulk response reports per-item status; the request as a whole succeeds.
+        assertEquals(3, responses.length);
+        assertFalse("Operation on existing index should succeed", responses[0].isFailed());
+        assertTrue("Missing index should have been flagged", responses[1].isFailed());
+        assertEquals("[wontwork] no such index", responses[1].getFailureMessage());
+        assertFalse("Operation on existing index should succeed", responses[2].isFailed());
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
new file mode 100644
index 0000000000..ad1e9d9771
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
@@ -0,0 +1,364 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for {@link BulkProcessor}: flush triggers (action count,
+ * explicit flush), concurrent-request handling, failure propagation, and
+ * close/awaitClose semantics. Listener callbacks are tracked via the inner
+ * {@link BulkProcessorTestListener}, which counts down the supplied latches.
+ */
+public class BulkProcessorTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testThatBulkProcessorCountIsCorrect() throws InterruptedException {
+
+        final CountDownLatch latch = new CountDownLatch(1);
+        BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
+
+        int numDocs = randomIntBetween(10, 100);
+        try (BulkProcessor processor = BulkProcessor.builder(client(), listener).setName("foo")
+                //let's make sure that the bulk action limit trips, one single execution will index all the documents
+                .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .build()) {
+
+            MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
+
+            // Wait for the single expected bulk execution to complete.
+            latch.await();
+
+            assertThat(listener.beforeCounts.get(), equalTo(1));
+            assertThat(listener.afterCounts.get(), equalTo(1));
+            assertThat(listener.bulkFailures.size(), equalTo(0));
+            assertResponseItems(listener.bulkItems, numDocs);
+            assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
+        }
+    }
+
+    @Test
+    public void testBulkProcessorFlush() throws InterruptedException {
+        final CountDownLatch latch = new CountDownLatch(1);
+        BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
+
+        int numDocs = randomIntBetween(10, 100);
+
+        try (BulkProcessor processor = BulkProcessor.builder(client(), listener).setName("foo")
+                //let's make sure that this bulk won't be automatically flushed
+                .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100))
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
+
+            MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
+
+            // No threshold is reached, so nothing should execute on its own.
+            assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false));
+            //we really need an explicit flush as none of the bulk thresholds was reached
+            processor.flush();
+            latch.await();
+
+            assertThat(listener.beforeCounts.get(), equalTo(1));
+            assertThat(listener.afterCounts.get(), equalTo(1));
+            assertThat(listener.bulkFailures.size(), equalTo(0));
+            assertResponseItems(listener.bulkItems, numDocs);
+            assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
+        }
+    }
+
+    @Test
+    public void testBulkProcessorConcurrentRequests() throws Exception {
+        int bulkActions = randomIntBetween(10, 100);
+        int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
+        int concurrentRequests = randomIntBetween(0, 10);
+
+        // Full bulks triggered by the action-count threshold...
+        int expectedBulkActions = numDocs / bulkActions;
+
+        final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
+        // ...plus one trailing partial bulk flushed on close, if any remainder.
+        int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
+        final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);
+
+        BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);
+
+        MultiGetRequestBuilder multiGetRequestBuilder;
+
+        try (BulkProcessor processor = BulkProcessor.builder(client(), listener)
+                .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions)
+                //set interval and size to high values
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
+
+            multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
+
+            latch.await();
+
+            assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions));
+            assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions));
+            assertThat(listener.bulkFailures.size(), equalTo(0));
+            assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions));
+        }
+
+        // close() flushes the remainder; wait for every bulk to be accounted for.
+        closeLatch.await();
+
+        assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions));
+        assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions));
+        assertThat(listener.bulkFailures.size(), equalTo(0));
+        assertThat(listener.bulkItems.size(), equalTo(numDocs));
+
+        Set<String> ids = new HashSet<>();
+        for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
+            assertThat(bulkItemResponse.isFailed(), equalTo(false));
+            assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+            assertThat(bulkItemResponse.getType(), equalTo("test"));
+            //with concurrent requests > 1 we can't rely on the order of the bulk requests
+            assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs)));
+            //we do want to check that we don't get duplicate ids back
+            assertThat(ids.add(bulkItemResponse.getId()), equalTo(true));
+        }
+
+        assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
+    }
+
+    @Test
+    //https://github.com/elasticsearch/elasticsearch/issues/5038
+    public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
+        //we create a transport client with no nodes to make sure it throws NoNodeAvailableException
+        Settings settings = Settings.builder()
+                .put("path.home", createTempDir().toString())
+                .build();
+        Client transportClient = TransportClient.builder().settings(settings).build();
+
+        int bulkActions = randomIntBetween(10, 100);
+        int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
+        int concurrentRequests = randomIntBetween(0, 10);
+
+        int expectedBulkActions = numDocs / bulkActions;
+
+        final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
+        int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
+        final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);
+
+        BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);
+
+        try (BulkProcessor processor = BulkProcessor.builder(transportClient, listener)
+                .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions)
+                //set interval and size to high values
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
+
+            indexDocs(transportClient, processor, numDocs);
+
+            latch.await();
+
+            // Every bulk fails (no nodes), so failures match executions and no
+            // item responses are ever collected.
+            assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions));
+            assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions));
+            assertThat(listener.bulkFailures.size(), equalTo(expectedBulkActions));
+            assertThat(listener.bulkItems.size(), equalTo(0));
+        }
+
+        closeLatch.await();
+
+        assertThat(listener.bulkFailures.size(), equalTo(totalExpectedBulkActions));
+        assertThat(listener.bulkItems.size(), equalTo(0));
+        transportClient.close();
+    }
+
+    @Test
+    public void testBulkProcessorWaitOnClose() throws Exception {
+        BulkProcessorTestListener listener = new BulkProcessorTestListener();
+
+        int numDocs = randomIntBetween(10, 100);
+        BulkProcessor processor = BulkProcessor.builder(client(), listener).setName("foo")
+                //let's make sure that the bulk action limit trips, one single execution will index all the documents
+                .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs)
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(randomIntBetween(1, 10),
+                        (ByteSizeUnit)RandomPicks.randomFrom(getRandom(), ByteSizeUnit.values())))
+                .build();
+
+        MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
+        assertThat(processor.isOpen(), is(true));
+        // awaitClose must drain pending bulks and report completion...
+        assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
+        if (randomBoolean()) { // check if we can call it multiple times
+            if (randomBoolean()) {
+                // ...and be idempotent when invoked again (or via close()).
+                assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true));
+            } else {
+                processor.close();
+            }
+        }
+        assertThat(processor.isOpen(), is(false));
+
+        assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1));
+        assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1));
+        assertThat(listener.bulkFailures.size(), equalTo(0));
+        assertResponseItems(listener.bulkItems, numDocs);
+        assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
+    }
+
+    @Test
+    public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception {
+        // "test-ro" is write-blocked, so every item routed to it must fail;
+        // items routed to "test" (auto-created) must succeed.
+        createIndex("test-ro");
+        assertAcked(client().admin().indices().prepareUpdateSettings("test-ro")
+                .setSettings(Settings.builder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true)));
+        ensureGreen();
+
+        int bulkActions = randomIntBetween(10, 100);
+        int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
+        int concurrentRequests = randomIntBetween(0, 10);
+
+        int expectedBulkActions = numDocs / bulkActions;
+
+        final CountDownLatch latch = new CountDownLatch(expectedBulkActions);
+        int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1;
+        final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions);
+
+        int testDocs = 0;
+        int testReadOnlyDocs = 0;
+        MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
+        BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch);
+
+        try (BulkProcessor processor = BulkProcessor.builder(client(), listener)
+                .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions)
+                //set interval and size to high values
+                .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) {
+
+            // Randomly interleave writable and read-only targets; ids are
+            // numbered independently per index.
+            for (int i = 1; i <= numDocs; i++) {
+                if (randomBoolean()) {
+                    testDocs++;
+                    processor.add(new IndexRequest("test", "test", Integer.toString(testDocs)).source("field", "value"));
+                    multiGetRequestBuilder.add("test", "test", Integer.toString(testDocs));
+                } else {
+                    testReadOnlyDocs++;
+                    processor.add(new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs)).source("field", "value"));
+                }
+            }
+        }
+
+        closeLatch.await();
+
+        assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions));
+        assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions));
+        assertThat(listener.bulkFailures.size(), equalTo(0));
+        assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs));
+
+        Set<String> ids = new HashSet<>();
+        Set<String> readOnlyIds = new HashSet<>();
+        for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
+            assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro")));
+            assertThat(bulkItemResponse.getType(), equalTo("test"));
+            if (bulkItemResponse.getIndex().equals("test")) {
+                assertThat(bulkItemResponse.isFailed(), equalTo(false));
+                //with concurrent requests > 1 we can't rely on the order of the bulk requests
+                assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs)));
+                //we do want to check that we don't get duplicate ids back
+                assertThat(ids.add(bulkItemResponse.getId()), equalTo(true));
+            } else {
+                assertThat(bulkItemResponse.isFailed(), equalTo(true));
+                //with concurrent requests > 1 we can't rely on the order of the bulk requests
+                assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs)));
+                //we do want to check that we don't get duplicate ids back
+                assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true));
+            }
+        }
+
+        assertMultiGetResponse(multiGetRequestBuilder.get(), testDocs);
+    }
+
+    // Feeds numDocs index requests (ids 1..numDocs) into the processor and
+    // returns a multi-get builder covering the same ids for later verification.
+    private static MultiGetRequestBuilder indexDocs(Client client, BulkProcessor processor, int numDocs) {
+        MultiGetRequestBuilder multiGetRequestBuilder = client.prepareMultiGet();
+        for (int i = 1; i <= numDocs; i++) {
+            processor.add(new IndexRequest("test", "test", Integer.toString(i)).source("field", randomRealisticUnicodeOfLengthBetween(1, 30)));
+            multiGetRequestBuilder.add("test", "test", Integer.toString(i));
+        }
+        return multiGetRequestBuilder;
+    }
+
+    // Asserts numDocs successful item responses with ids in insertion order
+    // (only valid when a single bulk execution carried all documents).
+    private static void assertResponseItems(List<BulkItemResponse> bulkItemResponses, int numDocs) {
+        assertThat(bulkItemResponses.size(), is(numDocs));
+        int i = 1;
+        for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
+            assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+            assertThat(bulkItemResponse.getType(), equalTo("test"));
+            assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++)));
+            assertThat(bulkItemResponse.isFailed(), equalTo(false));
+        }
+    }
+
+    // Asserts the multi-get found all numDocs documents, ids 1..numDocs in order.
+    private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) {
+        assertThat(multiGetResponse.getResponses().length, equalTo(numDocs));
+        int i = 1;
+        for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) {
+            assertThat(multiGetItemResponse.getIndex(), equalTo("test"));
+            assertThat(multiGetItemResponse.getType(), equalTo("test"));
+            assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++)));
+        }
+    }
+
+    /**
+     * Listener that records every callback. Thread-safe collections and atomics
+     * are used because bulks may execute concurrently; each after-callback
+     * (success or failure) counts down all supplied latches.
+     */
+    private static class BulkProcessorTestListener implements BulkProcessor.Listener {
+
+        private final CountDownLatch[] latches;
+        private final AtomicInteger beforeCounts = new AtomicInteger();
+        private final AtomicInteger afterCounts = new AtomicInteger();
+        private final List<BulkItemResponse> bulkItems = new CopyOnWriteArrayList<>();
+        private final List<Throwable> bulkFailures = new CopyOnWriteArrayList<>();
+
+        private BulkProcessorTestListener(CountDownLatch... latches) {
+            this.latches = latches;
+        }
+
+        @Override
+        public void beforeBulk(long executionId, BulkRequest request) {
+            beforeCounts.incrementAndGet();
+        }
+
+        @Override
+        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
+            bulkItems.addAll(Arrays.asList(response.getItems()));
+            afterCounts.incrementAndGet();
+            for (CountDownLatch latch : latches) {
+                latch.countDown();
+            }
+        }
+
+        @Override
+        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
+            bulkFailures.add(failure);
+            afterCounts.incrementAndGet();
+            for (CountDownLatch latch : latches) {
+                latch.countDown();
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
new file mode 100644
index 0000000000..6220958a0c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkRequestTests.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.bulk;
+
+import com.google.common.base.Charsets;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Unit tests for {@link BulkRequest} parsing of newline-delimited bulk JSON:
+ * well-formed payloads (simple-bulk*.json fixtures), update-request fields,
+ * explicit-index restrictions, iterable add, and malformed-line error messages.
+ */
+public class BulkRequestTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testSimpleBulk1() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+        // translate Windows line endings (\r\n) to standard ones (\n)
+        if (Constants.WINDOWS) {
+            bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
+        }
+        BulkRequest bulkRequest = new BulkRequest();
+        // NOTE(review): length() is the char count used as a byte length; the
+        // ASCII fixtures make this safe here — confirm before reuse.
+        bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+        assertThat(bulkRequest.numberOfActions(), equalTo(3));
+        assertThat(((IndexRequest) bulkRequest.requests().get(0)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value1\" }").toBytes()));
+        assertThat(bulkRequest.requests().get(1), instanceOf(DeleteRequest.class));
+        assertThat(((IndexRequest) bulkRequest.requests().get(2)).source().toBytes(), equalTo(new BytesArray("{ \"field1\" : \"value3\" }").toBytes()));
+    }
+
+    @Test
+    public void testSimpleBulk2() throws Exception {
+        // Fixture omits _index/_type entirely; actions must still parse.
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk2.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+        assertThat(bulkRequest.numberOfActions(), equalTo(3));
+    }
+
+    @Test
+    public void testSimpleBulk3() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk3.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+        assertThat(bulkRequest.numberOfActions(), equalTo(3));
+    }
+
+    @Test
+    public void testSimpleBulk4() throws Exception {
+        // Exercises update-action parsing: id, retry_on_conflict, doc, script
+        // (inline source, lang, params) and upsert payload.
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk4.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+        assertThat(bulkRequest.numberOfActions(), equalTo(4));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(0)).id(), equalTo("1"));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(0)).retryOnConflict(), equalTo(2));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(0)).doc().source().toUtf8(), equalTo("{\"field\":\"value\"}"));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(1)).id(), equalTo("0"));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(1)).type(), equalTo("type1"));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(1)).index(), equalTo("index1"));
+        Script script = ((UpdateRequest) bulkRequest.requests().get(1)).script();
+        assertThat(script, notNullValue());
+        assertThat(script.getScript(), equalTo("counter += param1"));
+        assertThat(script.getLang(), equalTo("js"));
+        Map<String, Object> scriptParams = script.getParams();
+        assertThat(scriptParams, notNullValue());
+        assertThat(scriptParams.size(), equalTo(1));
+        assertThat(((Integer) scriptParams.get("param1")), equalTo(1));
+        assertThat(((UpdateRequest) bulkRequest.requests().get(1)).upsertRequest().source().toUtf8(), equalTo("{\"counter\":1}"));
+    }
+
+    @Test
+    public void testBulkAllowExplicitIndex() throws Exception {
+        // With allowExplicitIndex=false, a payload that names its own _index
+        // must be rejected...
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
+        try {
+            new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), null, null, false);
+            fail();
+        } catch (Exception e) {
+            // expected: explicit index is not allowed
+        }
+
+        // ...while a payload without _index parses fine against a default index.
+        bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk5.json");
+        new BulkRequest().add(new BytesArray(bulkAction.getBytes(Charsets.UTF_8)), "test", null, false);
+    }
+
+    @Test
+    public void testBulkAddIterable() {
+        // add(Iterable) must preserve request order and concrete types.
+        BulkRequest bulkRequest = Requests.bulkRequest();
+        List<ActionRequest> requests = new ArrayList<>();
+        requests.add(new IndexRequest("test", "test", "id").source("field", "value"));
+        requests.add(new UpdateRequest("test", "test", "id").doc("field", "value"));
+        requests.add(new DeleteRequest("test", "test", "id"));
+        bulkRequest.add(requests);
+        assertThat(bulkRequest.requests().size(), equalTo(3));
+        assertThat(bulkRequest.requests().get(0), instanceOf(IndexRequest.class));
+        assertThat(bulkRequest.requests().get(1), instanceOf(UpdateRequest.class));
+        assertThat(bulkRequest.requests().get(2), instanceOf(DeleteRequest.class));
+    }
+
+    @Test
+    public void testSimpleBulk6() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk6.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        try {
+            bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+            fail("should have thrown an exception about the wrong format of line 1");
+        } catch (IllegalArgumentException e) {
+            assertThat("message contains error about the wrong format of line 1: " + e.getMessage(),
+                    e.getMessage().contains("Malformed action/metadata line [1], expected a simple value for field [_source] but found [START_OBJECT]"), equalTo(true));
+        }
+    }
+
+    @Test
+    public void testSimpleBulk7() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk7.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        try {
+            bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+            fail("should have thrown an exception about the wrong format of line 5");
+        } catch (IllegalArgumentException e) {
+            assertThat("message contains error about the wrong format of line 5: " + e.getMessage(),
+                    e.getMessage().contains("Malformed action/metadata line [5], expected a simple value for field [_unkown] but found [START_ARRAY]"), equalTo(true));
+        }
+    }
+
+    @Test
+    public void testSimpleBulk8() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk8.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        try {
+            bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+            fail("should have thrown an exception about the unknown paramater _foo");
+        } catch (IllegalArgumentException e) {
+            assertThat("message contains error about the unknown paramater _foo: " + e.getMessage(),
+                    e.getMessage().contains("Action/metadata line [3] contains an unknown parameter [_foo]"), equalTo(true));
+        }
+    }
+
+    @Test
+    public void testSimpleBulk9() throws Exception {
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk9.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        try {
+            bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+            fail("should have thrown an exception about the wrong format of line 3");
+        } catch (IllegalArgumentException e) {
+            assertThat("message contains error about the wrong format of line 3: " + e.getMessage(),
+                    e.getMessage().contains("Malformed action/metadata line [3], expected START_OBJECT or END_OBJECT but found [START_ARRAY]"), equalTo(true));
+        }
+    }
+
+    @Test
+    public void testSimpleBulk10() throws Exception {
+        // Null _index/_type/_id in the fixture fall back to defaults; all nine
+        // actions still parse.
+        String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk10.json");
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(bulkAction.getBytes(Charsets.UTF_8), 0, bulkAction.length(), null, null);
+        assertThat(bulkRequest.numberOfActions(), equalTo(9));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/bulk-log.json b/core/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
new file mode 100644
index 0000000000..9c3663c3f6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/bulk-log.json
@@ -0,0 +1,24 @@
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
+{"index":{"_index":"logstash-2014.03.30","_type":"logs"}}
+{"message":"in24.inetnebr.com--[01/Aug2/1995:00:00:01-0400]\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"2001839","@version":"1","@timestamp":"2014-03-30T12:38:10.048Z","host":["romeo","in24.inetnebr.com"],"monthday":1,"month":8,"year":1995,"time":"00:00:01","tz":"-0400","request":"\"GET/shuttle/missions/sts-68/news/sts-68-mcc-05.txtHTTP/1.0\"","httpresponse":"200","size":1839,"rtime":"1995-08-01T00:00:01.000Z"}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
new file mode 100644
index 0000000000..cf76477187
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk.json
@@ -0,0 +1,5 @@
+{ "index":{"_index":"test","_type":"type1","_id":"1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk10.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk10.json
new file mode 100644
index 0000000000..3556dc261b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk10.json
@@ -0,0 +1,15 @@
+{ "index" : {"_index":null, "_type":"type1", "_id":"0"} }
+{ "field1" : "value1" }
+{ "index" : {"_index":"test", "_type":null, "_id":"0"} }
+{ "field1" : "value1" }
+{ "index" : {"_index":"test", "_type":"type1", "_id":null} }
+{ "field1" : "value1" }
+{ "delete" : {"_index":null, "_type":"type1", "_id":"0"} }
+{ "delete" : {"_index":"test", "_type":null, "_id":"0"} }
+{ "delete" : {"_index":"test", "_type":"type1", "_id":null} }
+{ "create" : {"_index":null, "_type":"type1", "_id":"0"} }
+{ "field1" : "value1" }
+{ "create" : {"_index":"test", "_type":null, "_id":"0"} }
+{ "field1" : "value1" }
+{ "create" : {"_index":"test", "_type":"type1", "_id":null} }
+{ "field1" : "value1" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
new file mode 100644
index 0000000000..7cd4f9932d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk2.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
new file mode 100644
index 0000000000..7cd4f9932d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk3.json
@@ -0,0 +1,5 @@
+{ "index":{ } }
+{ "field1" : "value1" }
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
new file mode 100644
index 0000000000..8b916b8fee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk4.json
@@ -0,0 +1,7 @@
+{ "update" : {"_id" : "1", "_retry_on_conflict" : 2} }
+{ "doc" : {"field" : "value"} }
+{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1" } }
+{ "script" : "counter += param1", "lang" : "js", "params" : {"param1" : 1}, "upsert" : {"counter" : 1}}
+{ "delete" : { "_id" : "2" } }
+{ "create" : { "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
new file mode 100644
index 0000000000..6ad5ff3052
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk5.json
@@ -0,0 +1,5 @@
+{ "index": {"_type": "type1","_id": "1"} }
+{ "field1" : "value1" }
+{ "delete" : { "_type" : "type1", "_id" : "2" } }
+{ "create" : { "_type" : "type1", "_id" : "3" } }
+{ "field1" : "value3" }
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk6.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk6.json
new file mode 100644
index 0000000000..e9c9796559
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk6.json
@@ -0,0 +1,6 @@
+{"index": {"_index": "test", "_type": "doc", "_source": {"hello": "world"}, "_id": 0}}
+{"field1": "value0"}
+{"index": {"_index": "test", "_type": "doc", "_id": 1}}
+{"field1": "value1"}
+{"index": {"_index": "test", "_type": "doc", "_id": 2}}
+{"field1": "value2"}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk7.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk7.json
new file mode 100644
index 0000000000..a642d9ce4f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk7.json
@@ -0,0 +1,6 @@
+{"index": {"_index": "test", "_type": "doc", "_id": 0}}
+{"field1": "value0"}
+{"index": {"_index": "test", "_type": "doc", "_id": 1}}
+{"field1": "value1"}
+{"index": {"_index": "test", "_type": "doc", "_id": 2, "_unkown": ["foo", "bar"]}}
+{"field1": "value2"}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk8.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk8.json
new file mode 100644
index 0000000000..c1a94b1d15
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk8.json
@@ -0,0 +1,6 @@
+{"index": {"_index": "test", "_type": "doc", "_id": 0}}
+{"field1": "value0"}
+{"index": {"_index": "test", "_type": "doc", "_id": 1, "_foo": "bar"}}
+{"field1": "value1"}
+{"index": {"_index": "test", "_type": "doc", "_id": 2}}
+{"field1": "value2"}
diff --git a/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk9.json b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk9.json
new file mode 100644
index 0000000000..ebdbf75011
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/bulk/simple-bulk9.json
@@ -0,0 +1,4 @@
+{"index": {}}
+{"field1": "value0"}
+{"index": ["bar"] }
+{"field1": "value1"}
diff --git a/core/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java b/core/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java
new file mode 100644
index 0000000000..98ab1632e2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class CountRequestBuilderTests extends ElasticsearchTestCase {
+
+ private static Client client;
+
+ @BeforeClass
+ public static void initClient() {
+ //this client will not be hit by any request, but it needs to be a non null proper client
+ //that is why we create it but we don't add any transport address to it
+ Settings settings = Settings.builder()
+ .put("path.home", createTempDir().toString())
+ .build();
+ client = TransportClient.builder().settings(settings).build();
+ }
+
+ @AfterClass
+ public static void closeClient() {
+ client.close();
+ client = null;
+ }
+
+ @Test
+ public void testEmptySourceToString() {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().toString()));
+ }
+
+ @Test
+ public void testQueryBuilderQueryToString() {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ countRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+ assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery()).toString()));
+ }
+
+ @Test
+ public void testStringQueryToString() {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ String query = "{ \"match_all\" : {} }";
+ countRequestBuilder.setQuery(new BytesArray(query));
+ assertThat(countRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }"));
+ }
+
+ @Test
+ public void testXContentBuilderQueryToString() throws IOException {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+ xContentBuilder.startObject();
+ xContentBuilder.startObject("match_all");
+ xContentBuilder.endObject();
+ xContentBuilder.endObject();
+ countRequestBuilder.setQuery(xContentBuilder);
+ assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().setQuery(xContentBuilder.bytes()).toString()));
+ }
+
+ @Test
+ public void testStringSourceToString() {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ String query = "{ \"query\": { \"match_all\" : {} } }";
+ countRequestBuilder.setSource(new BytesArray(query));
+ assertThat(countRequestBuilder.toString(), equalTo("{ \"query\": { \"match_all\" : {} } }"));
+ }
+
+ @Test
+ public void testXContentBuilderSourceToString() throws IOException {
+ CountRequestBuilder countRequestBuilder = client.prepareCount();
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+ xContentBuilder.startObject();
+ xContentBuilder.startObject("match_all");
+ xContentBuilder.endObject();
+ xContentBuilder.endObject();
+ countRequestBuilder.setSource(xContentBuilder.bytes());
+ assertThat(countRequestBuilder.toString(), equalTo(XContentHelper.convertToJson(xContentBuilder.bytes(), false, true)));
+ }
+
+ @Test
+ public void testThatToStringDoesntWipeSource() {
+ String source = "{\n" +
+ " \"query\" : {\n" +
+ " \"match\" : {\n" +
+ " \"field\" : {\n" +
+ " \"query\" : \"value\"" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }";
+ CountRequestBuilder countRequestBuilder = client.prepareCount().setSource(new BytesArray(source));
+ String preToString = countRequestBuilder.request().source().toUtf8();
+ assertThat(countRequestBuilder.toString(), equalTo(source));
+ String postToString = countRequestBuilder.request().source().toUtf8();
+ assertThat(preToString, equalTo(postToString));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/count/CountRequestTests.java b/core/src/test/java/org/elasticsearch/action/count/CountRequestTests.java
new file mode 100644
index 0000000000..a972ff56d1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/count/CountRequestTests.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.QuerySourceBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.CoreMatchers.nullValue;
+
+public class CountRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testToSearchRequest() {
+ CountRequest countRequest;
+ if (randomBoolean()) {
+ countRequest = new CountRequest(randomStringArray());
+ } else {
+ countRequest = new CountRequest();
+ }
+ if (randomBoolean()) {
+ countRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
+ }
+ if (randomBoolean()) {
+ countRequest.types(randomStringArray());
+ }
+ if (randomBoolean()) {
+ countRequest.routing(randomStringArray());
+ }
+ if (randomBoolean()) {
+ countRequest.preference(randomAsciiOfLengthBetween(1, 10));
+ }
+ if (randomBoolean()) {
+ countRequest.source(new QuerySourceBuilder().setQuery(QueryBuilders.termQuery("field", "value")));
+ }
+ if (randomBoolean()) {
+ countRequest.minScore(randomFloat());
+ }
+ if (randomBoolean()) {
+ countRequest.terminateAfter(randomIntBetween(1, 1000));
+ }
+
+ SearchRequest searchRequest = countRequest.toSearchRequest();
+ assertThat(searchRequest.indices(), equalTo(countRequest.indices()));
+ assertThat(searchRequest.indicesOptions(), equalTo(countRequest.indicesOptions()));
+ assertThat(searchRequest.types(), equalTo(countRequest.types()));
+ assertThat(searchRequest.routing(), equalTo(countRequest.routing()));
+ assertThat(searchRequest.preference(), equalTo(countRequest.preference()));
+
+ if (countRequest.source() == null) {
+ assertThat(searchRequest.source(), nullValue());
+ } else {
+ Map<String, Object> sourceMap = XContentHelper.convertToMap(searchRequest.source(), false).v2();
+ assertThat(sourceMap.size(), equalTo(1));
+ assertThat(sourceMap.get("query"), notNullValue());
+ }
+
+ Map<String, Object> extraSourceMap = XContentHelper.convertToMap(searchRequest.extraSource(), false).v2();
+ int count = 1;
+ assertThat((Integer)extraSourceMap.get("size"), equalTo(0));
+ if (countRequest.minScore() == CountRequest.DEFAULT_MIN_SCORE) {
+ assertThat(extraSourceMap.get("min_score"), nullValue());
+ } else {
+ assertThat(((Number)extraSourceMap.get("min_score")).floatValue(), equalTo(countRequest.minScore()));
+ count++;
+ }
+ if (countRequest.terminateAfter() == SearchContext.DEFAULT_TERMINATE_AFTER) {
+ assertThat(extraSourceMap.get("terminate_after"), nullValue());
+ } else {
+ assertThat((Integer)extraSourceMap.get("terminate_after"), equalTo(countRequest.terminateAfter()));
+ count++;
+ }
+ assertThat(extraSourceMap.size(), equalTo(count));
+ }
+
+ private static String[] randomStringArray() {
+ int count = randomIntBetween(1, 5);
+ String[] indices = new String[count];
+ for (int i = 0; i < count; i++) {
+ indices[i] = randomAsciiOfLengthBetween(1, 10);
+ }
+ return indices;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/count/CountResponseTests.java b/core/src/test/java/org/elasticsearch/action/count/CountResponseTests.java
new file mode 100644
index 0000000000..bbe6c64edf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/count/CountResponseTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.count;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.internal.InternalSearchHits;
+import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class CountResponseTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFromSearchResponse() {
+ InternalSearchResponse internalSearchResponse = new InternalSearchResponse(new InternalSearchHits(null, randomLong(), randomFloat()), null, null, randomBoolean(), randomBoolean());
+ ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[randomIntBetween(0, 5)];
+ for (int i = 0; i < shardSearchFailures.length; i++) {
+ shardSearchFailures[i] = new ShardSearchFailure(new IllegalArgumentException());
+ }
+ SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, randomIntBetween(0, 100), randomIntBetween(0, 100), randomIntBetween(0, 100), shardSearchFailures);
+
+ CountResponse countResponse = new CountResponse(searchResponse);
+ assertThat(countResponse.getTotalShards(), equalTo(searchResponse.getTotalShards()));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(searchResponse.getSuccessfulShards()));
+ assertThat(countResponse.getFailedShards(), equalTo(searchResponse.getFailedShards()));
+ assertThat(countResponse.getShardFailures(), equalTo((ShardOperationFailedException[])searchResponse.getShardFailures()));
+ assertThat(countResponse.getCount(), equalTo(searchResponse.getHits().totalHits()));
+ assertThat(countResponse.terminatedEarly(), equalTo(searchResponse.isTerminatedEarly()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
new file mode 100644
index 0000000000..073d259663
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.get;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class MultiGetShardRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSerialization() throws IOException {
+ MultiGetRequest multiGetRequest = new MultiGetRequest();
+ if (randomBoolean()) {
+ multiGetRequest.preference(randomAsciiOfLength(randomIntBetween(1, 10)));
+ }
+ if (randomBoolean()) {
+ multiGetRequest.realtime(false);
+ }
+ if (randomBoolean()) {
+ multiGetRequest.refresh(true);
+ }
+ multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean());
+
+ MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0);
+ int numItems = iterations(10, 30);
+ for (int i = 0; i < numItems; i++) {
+ MultiGetRequest.Item item = new MultiGetRequest.Item("alias-" + randomAsciiOfLength(randomIntBetween(1, 10)), "type", "id-" + i);
+ if (randomBoolean()) {
+ int numFields = randomIntBetween(1, 5);
+ String[] fields = new String[numFields];
+ for (int j = 0; j < fields.length; j++) {
+ fields[j] = randomAsciiOfLength(randomIntBetween(1, 10));
+ }
+ item.fields(fields);
+ }
+ if (randomBoolean()) {
+ item.version(randomIntBetween(1, Integer.MAX_VALUE));
+ item.versionType(randomFrom(VersionType.values()));
+ }
+ if (randomBoolean()) {
+ item.fetchSourceContext(new FetchSourceContext(randomBoolean()));
+ }
+ multiGetShardRequest.add(0, item);
+ }
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(randomVersion(random()));
+ multiGetShardRequest.writeTo(out);
+
+ StreamInput in = StreamInput.wrap(out.bytes());
+ in.setVersion(out.getVersion());
+ MultiGetShardRequest multiGetShardRequest2 = new MultiGetShardRequest();
+ multiGetShardRequest2.readFrom(in);
+
+ assertThat(multiGetShardRequest2.index(), equalTo(multiGetShardRequest.index()));
+ assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
+ assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
+ assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
+ assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
+ assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
+ for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
+ MultiGetRequest.Item item = multiGetShardRequest.items.get(i);
+ MultiGetRequest.Item item2 = multiGetShardRequest2.items.get(i);
+ assertThat(item2.index(), equalTo(item.index()));
+ assertThat(item2.type(), equalTo(item.type()));
+ assertThat(item2.id(), equalTo(item.id()));
+ assertThat(item2.fields(), equalTo(item.fields()));
+ assertThat(item2.version(), equalTo(item.version()));
+ assertThat(item2.versionType(), equalTo(item.versionType()));
+ assertThat(item2.fetchSourceContext(), equalTo(item.fetchSourceContext()));
+ }
+ assertThat(multiGetShardRequest2.indices(), equalTo(multiGetShardRequest.indices()));
+ assertThat(multiGetShardRequest2.indicesOptions(), equalTo(multiGetShardRequest.indicesOptions()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTest.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTest.java
new file mode 100644
index 0000000000..478e12051d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTest.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.rest.NoOpClient;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.util.HashMap;
+import java.util.Map;
+
+public class IndexRequestBuilderTest extends ElasticsearchTestCase {
+
+ private static final String EXPECTED_SOURCE = "{\"SomeKey\":\"SomeValue\"}";
+ private NoOpClient testClient;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ this.testClient = new NoOpClient(getTestName());
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ this.testClient.close();
+ super.tearDown();
+ }
+
+ /**
+ * test setting the source for the request with different available setters
+ */
+ @Test
+ public void testSetSource() throws Exception {
+ IndexRequestBuilder indexRequestBuilder = new IndexRequestBuilder(this.testClient, IndexAction.INSTANCE);
+ Map<String, String> source = new HashMap<>();
+ source.put("SomeKey", "SomeValue");
+ indexRequestBuilder.setSource(source);
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+
+ indexRequestBuilder.setSource(source, XContentType.JSON);
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+
+ indexRequestBuilder.setSource("SomeKey", "SomeValue");
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+
+ // force the Object... setter
+ indexRequestBuilder.setSource((Object) "SomeKey", "SomeValue");
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+
+ ByteArrayOutputStream docOut = new ByteArrayOutputStream();
+ XContentBuilder doc = XContentFactory.jsonBuilder(docOut).startObject().field("SomeKey", "SomeValue").endObject();
+ doc.close();
+ indexRequestBuilder.setSource(docOut.toByteArray());
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+
+ doc = XContentFactory.jsonBuilder().startObject().field("SomeKey", "SomeValue").endObject();
+ doc.close();
+ indexRequestBuilder.setSource(doc);
+ assertEquals(EXPECTED_SOURCE, XContentHelper.convertToJson(indexRequestBuilder.request().source(), true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
new file mode 100644
index 0000000000..a8aead541c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.index;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class IndexRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIndexRequestOpTypeFromString() throws Exception {
+ String create = "create";
+ String index = "index";
+ String createUpper = "CREATE";
+ String indexUpper = "INDEX";
+
+ assertThat(IndexRequest.OpType.fromString(create), equalTo(IndexRequest.OpType.CREATE));
+ assertThat(IndexRequest.OpType.fromString(index), equalTo(IndexRequest.OpType.INDEX));
+ assertThat(IndexRequest.OpType.fromString(createUpper), equalTo(IndexRequest.OpType.CREATE));
+ assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX));
+ }
+
+ @Test(expected= IllegalArgumentException.class)
+ public void testReadBogusString(){
+ String foobar = "foobar";
+ IndexRequest.OpType.fromString(foobar);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptRequestTests.java b/core/src/test/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptRequestTests.java
new file mode 100644
index 0000000000..3ff5cf9392
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/indexedscripts/get/GetIndexedScriptRequestTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.indexedscripts.get;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class GetIndexedScriptRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testGetIndexedScriptRequestSerialization() throws IOException {
+ GetIndexedScriptRequest request = new GetIndexedScriptRequest("lang", "id");
+ if (randomBoolean()) {
+ request.version(randomIntBetween(1, Integer.MAX_VALUE));
+ request.versionType(randomFrom(VersionType.values()));
+ }
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(randomVersion(random()));
+ request.writeTo(out);
+
+ StreamInput in = StreamInput.wrap(out.bytes());
+ in.setVersion(out.getVersion());
+ GetIndexedScriptRequest request2 = new GetIndexedScriptRequest();
+ request2.readFrom(in);
+
+ assertThat(request2.id(), equalTo(request.id()));
+ assertThat(request2.scriptLang(), equalTo(request.scriptLang()));
+ assertThat(request2.version(), equalTo(request.version()));
+ assertThat(request2.versionType(), equalTo(request.versionType()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java b/core/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
new file mode 100644
index 0000000000..50f915bfe6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.percolate;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiPercolatorRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseBulkRequests() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json");
+ MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length);
+
+ assertThat(request.requests().size(), equalTo(8));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ Map sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index2"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index3"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index4"));
+ assertThat(percolateRequest.indices()[1], equalTo("my-index5"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map()));
+
+ percolateRequest = request.requests().get(3);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index6"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("1"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index6"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(4);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index7"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed()));
+ assertThat(percolateRequest.onlyCount(), equalTo(true));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().id(), equalTo("2"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().index(), equalTo("my-index7"));
+ assertThat(percolateRequest.getRequest().routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.getRequest().preference(), equalTo("_local"));
+
+ percolateRequest = request.requests().get(5);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index8"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("primary"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value4").map()));
+
+ percolateRequest = request.requests().get(6);
+ assertThat(percolateRequest.indices()[0], equalTo("percolate-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("other-type"));
+ assertThat(percolateRequest.routing(), equalTo("percolate-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index9"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().routing(), nullValue());
+ assertThat(percolateRequest.getRequest().preference(), nullValue());
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), nullValue());
+
+ percolateRequest = request.requests().get(7);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index10"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), nullValue());
+ assertThat(percolateRequest.preference(), nullValue());
+ assertThat(percolateRequest.getRequest(), notNullValue());
+ assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index10"));
+ assertThat(percolateRequest.getRequest().type(), equalTo("my-type1"));
+ assertThat(percolateRequest.getRequest().routing(), nullValue());
+ assertThat(percolateRequest.getRequest().preference(), nullValue());
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, false, true, false, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), nullValue());
+ }
+
+ @Test
+ public void testParseBulkRequests_defaults() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate2.json");
+ MultiPercolateRequest request = new MultiPercolateRequest();
+ request.indices("my-index1").documentType("my-type1").indicesOptions(IndicesOptions.lenientExpandOpen());
+ request.add(data, 0, data.length);
+
+ assertThat(request.requests().size(), equalTo(3));
+ PercolateRequest percolateRequest = request.requests().get(0);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ Map sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value1").map()));
+
+ percolateRequest = request.requests().get(1);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.routing(), equalTo("my-routing-1"));
+ assertThat(percolateRequest.preference(), equalTo("_local"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map()));
+
+ percolateRequest = request.requests().get(2);
+ assertThat(percolateRequest.indices()[0], equalTo("my-index1"));
+ assertThat(percolateRequest.documentType(), equalTo("my-type1"));
+ assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
+ assertThat(percolateRequest.onlyCount(), equalTo(false));
+ assertThat(percolateRequest.getRequest(), nullValue());
+ assertThat(percolateRequest.source(), notNullValue());
+ sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map();
+ assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map()));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json b/core/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
new file mode 100644
index 0000000000..44079390bf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json
@@ -0,0 +1,16 @@
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : false}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"indices" : ["my-index2", "my-index3"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"count" : {"indices" : ["my-index4", "my-index5"], "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : "open,closed"}}
+{"doc" : {"field1" : "value3"}}
+{"percolate" : {"id" : "1", "index" : "my-index6", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "expand_wildcards" : ["open", "closed"]}}
+{}
+{"count" : {"id" : "2", "index" : "my-index7", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local"}}
+{}
+{"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}}
+{"doc" : {"field1" : "value4"}}
+{"percolate" : {"id" : "3", "index" : "my-index9", "type" : "my-type1", "percolate_index": "percolate-index1", "percolate_type": "other-type", "percolate_preference": "_local", "percolate_routing": "percolate-routing-1"}}
+{}
+{"percolate" : {"id" : "4", "index" : "my-index10", "type" : "my-type1", "allow_no_indices": false, "expand_wildcards" : ["open"]}}
+{}
diff --git a/core/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json b/core/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
new file mode 100644
index 0000000000..fa676cf618
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/percolate/mpercolate2.json
@@ -0,0 +1,6 @@
+{"percolate" : {"routing" : "my-routing-1", "preference" : "_local"}}
+{"doc" : {"field1" : "value1"}}
+{"percolate" : {"index" : "my-index1", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "_local", "ignore_unavailable" : true}}
+{"doc" : {"field1" : "value2"}}
+{"percolate" : {}}
+{"doc" : {"field1" : "value3"}}
diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
new file mode 100644
index 0000000000..ee520760b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class MultiSearchRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void simpleAdd() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
+ assertThat(request.requests().size(), equalTo(8));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(2).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(request.requests().get(3).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(3).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(request.requests().get(4).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(4).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed())));
+ assertThat(request.requests().get(5).indices(), nullValue());
+ assertThat(request.requests().get(5).types().length, equalTo(0));
+ assertThat(request.requests().get(6).indices(), nullValue());
+ assertThat(request.requests().get(6).types().length, equalTo(0));
+ assertThat(request.requests().get(6).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH));
+ assertThat(request.requests().get(7).indices(), nullValue());
+ assertThat(request.requests().get(7).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd2() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
+ assertThat(request.requests().size(), equalTo(5));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(0).types().length, equalTo(0));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices(), nullValue());
+ assertThat(request.requests().get(2).types().length, equalTo(0));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH));
+ assertThat(request.requests().get(4).indices(), nullValue());
+ assertThat(request.requests().get(4).types().length, equalTo(0));
+ }
+
+ @Test
+ public void simpleAdd3() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
+ assertThat(request.requests().size(), equalTo(4));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
+ assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
+ assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
+ assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(2).types()[0], equalTo("type2"));
+ assertThat(request.requests().get(2).types()[1], equalTo("type1"));
+ assertThat(request.requests().get(3).indices(), nullValue());
+ assertThat(request.requests().get(3).types().length, equalTo(0));
+ assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH));
+ }
+
+ @Test
+ public void simpleAdd4() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json");
+ MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null);
+ assertThat(request.requests().size(), equalTo(3));
+ assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
+ assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(0).queryCache(), equalTo(true));
+ assertThat(request.requests().get(0).preference(), nullValue());
+ assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
+ assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
+ assertThat(request.requests().get(1).types()[0], equalTo("type1"));
+ assertThat(request.requests().get(1).queryCache(), nullValue());
+ assertThat(request.requests().get(1).preference(), equalTo("_local"));
+ assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
+ assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
+ assertThat(request.requests().get(2).types()[0], equalTo("type2"));
+ assertThat(request.requests().get(2).types()[1], equalTo("type1"));
+ assertThat(request.requests().get(2).routing(), equalTo("123"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java
new file mode 100644
index 0000000000..900c278780
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class SearchRequestBuilderTests extends ElasticsearchTestCase {
+
+ private static Client client;
+
+ @BeforeClass
+ public static void initClient() {
+ //this client will not be hit by any request, but it needs to be a non null proper client
+ //that is why we create it but we don't add any transport address to it
+ Settings settings = Settings.builder()
+ .put("path.home", createTempDir().toString())
+ .build();
+ client = TransportClient.builder().settings(settings).build();
+ }
+
+ @AfterClass
+ public static void closeClient() {
+ client.close();
+ client = null;
+ }
+
+ @Test
+ public void testEmptySourceToString() {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().toString()));
+ }
+
+ @Test
+ public void testQueryBuilderQueryToString() {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ searchRequestBuilder.setQuery(QueryBuilders.matchAllQuery());
+ assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString()));
+ }
+
+ @Test
+ public void testXContentBuilderQueryToString() throws IOException {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+ xContentBuilder.startObject();
+ xContentBuilder.startObject("match_all");
+ xContentBuilder.endObject();
+ xContentBuilder.endObject();
+ searchRequestBuilder.setQuery(xContentBuilder);
+ assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(xContentBuilder).toString()));
+ }
+
+ @Test
+ public void testStringQueryToString() {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ String query = "{ \"match_all\" : {} }";
+ searchRequestBuilder.setQuery(query);
+ assertThat(searchRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }"));
+ }
+
+ @Test
+ public void testStringSourceToString() {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ String source = "{ \"query\" : { \"match_all\" : {} } }";
+ searchRequestBuilder.setSource(source);
+ assertThat(searchRequestBuilder.toString(), equalTo(source));
+ }
+
+ @Test
+ public void testXContentBuilderSourceToString() throws IOException {
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch();
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
+ xContentBuilder.startObject();
+ xContentBuilder.startObject("query");
+ xContentBuilder.startObject("match_all");
+ xContentBuilder.endObject();
+ xContentBuilder.endObject();
+ xContentBuilder.endObject();
+ searchRequestBuilder.setSource(xContentBuilder);
+ assertThat(searchRequestBuilder.toString(), equalTo(XContentHelper.convertToJson(xContentBuilder.bytes(), false, true)));
+ }
+
+ @Test
+ public void testThatToStringDoesntWipeRequestSource() {
+ String source = "{\n" +
+ " \"query\" : {\n" +
+ " \"match\" : {\n" +
+ " \"field\" : {\n" +
+ " \"query\" : \"value\"" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }";
+ SearchRequestBuilder searchRequestBuilder = client.prepareSearch().setSource(source);
+ String preToString = searchRequestBuilder.request().source().toUtf8();
+ assertThat(searchRequestBuilder.toString(), equalTo(source));
+ String postToString = searchRequestBuilder.request().source().toUtf8();
+ assertThat(preToString, equalTo(postToString));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
new file mode 100644
index 0000000000..3d98f37515
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json
@@ -0,0 +1,16 @@
+{"index":"test", "ignore_unavailable" : true, "expand_wildcards" : "open,closed"}
+{"query" : {"match_all" : {}}}
+{"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]}
+{"query" : {"match_all" : {}}}
+{"index":"test", "ignore_unavailable" : false, "expand_wildcards" : ["open"]}
+{"query" : {"match_all" : {}}}
+{"index":"test", "ignore_unavailable" : true, "allow_no_indices": true, "expand_wildcards" : ["open", "closed"]}
+{"query" : {"match_all" : {}}}
+{"index":"test", "ignore_unavailable" : true, "allow_no_indices": false, "expand_wildcards" : ["closed"]}
+{"query" : {"match_all" : {}}}
+{}
+{"query" : {"match_all" : {}}}
+{"search_type" : "dfs_query_then_fetch"}
+{"query" : {"match_all" : {}}}
+
+{"query" : {"match_all" : {}}}
diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json b/core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
new file mode 100644
index 0000000000..e2e06d9f95
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json
@@ -0,0 +1,10 @@
+{"index":"test"}
+{"query" : {"match_all" : {}}}
+{"index" : "test", "type" : "type1"}
+{"query" : {"match_all" : {}}}
+{}
+{"query" : {"match_all" : {}}}
+{"search_type" : "dfs_query_then_fetch"}
+{"query" : {"match_all" : {}}}
+
+{"query" : {"match_all" : {}}}
diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json b/core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
new file mode 100644
index 0000000000..6416720a92
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json
@@ -0,0 +1,8 @@
+{"index":["test0", "test1"]}
+{"query" : {"match_all" : {}}}
+{"index" : "test2,test3", "type" : "type1"}
+{"query" : {"match_all" : {}}}
+{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ]}
+{"query" : {"match_all" : {}}}
+{"search_type" : "dfs_query_then_fetch"}
+{"query" : {"match_all" : {}}}
diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json b/core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json
new file mode 100644
index 0000000000..ab6b8206b0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json
@@ -0,0 +1,6 @@
+{"index":["test0", "test1"], "query_cache": true}
+{"query" : {"match_all" : {}}}
+{"index" : "test2,test3", "type" : "type1", "preference": "_local"}
+{"query" : {"match_all" : {}}}
+{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"}
+{"query" : {"match_all" : {}}}
diff --git a/core/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/core/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java
new file mode 100644
index 0000000000..13657fb06d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class IndicesOptionsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSerialization() throws Exception {
+ int iterations = randomIntBetween(5, 20);
+ for (int i = 0; i < iterations; i++) {
+ IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+
+ BytesStreamOutput output = new BytesStreamOutput();
+ Version outputVersion = randomVersion(random());
+ output.setVersion(outputVersion);
+ indicesOptions.writeIndicesOptions(output);
+
+ StreamInput streamInput = StreamInput.wrap(output.bytes());
+ streamInput.setVersion(randomVersion(random()));
+ IndicesOptions indicesOptions2 = IndicesOptions.readIndicesOptions(streamInput);
+
+ assertThat(indicesOptions2.ignoreUnavailable(), equalTo(indicesOptions.ignoreUnavailable()));
+ assertThat(indicesOptions2.allowNoIndices(), equalTo(indicesOptions.allowNoIndices()));
+ assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen()));
+ assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed()));
+
+ assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices()));
+ assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices()));
+ }
+ }
+
+ @Test
+ public void testFromOptions() {
+ int iterations = randomIntBetween(5, 20);
+ for (int i = 0; i < iterations; i++) {
+ boolean ignoreUnavailable = randomBoolean();
+ boolean allowNoIndices = randomBoolean();
+ boolean expandToOpenIndices = randomBoolean();
+ boolean expandToClosedIndices = randomBoolean();
+ boolean allowAliasesToMultipleIndices = randomBoolean();
+ boolean forbidClosedIndices = randomBoolean();
+ IndicesOptions indicesOptions = IndicesOptions.fromOptions(
+ ignoreUnavailable, allowNoIndices,expandToOpenIndices, expandToClosedIndices,
+ allowAliasesToMultipleIndices, forbidClosedIndices
+ );
+
+ assertThat(indicesOptions.ignoreUnavailable(), equalTo(ignoreUnavailable));
+ assertThat(indicesOptions.allowNoIndices(), equalTo(allowNoIndices));
+ assertThat(indicesOptions.expandWildcardsOpen(), equalTo(expandToOpenIndices));
+ assertThat(indicesOptions.expandWildcardsClosed(), equalTo(expandToClosedIndices));
+ assertThat(indicesOptions.allowAliasesToMultipleIndices(), equalTo(allowAliasesToMultipleIndices));
+ assertThat(indicesOptions.allowAliasesToMultipleIndices(), equalTo(allowAliasesToMultipleIndices));
+ assertThat(indicesOptions.forbidClosedIndices(), equalTo(forbidClosedIndices));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java b/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java
new file mode 100644
index 0000000000..2c97caf6e8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/ListenableActionFutureTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transports;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class ListenableActionFutureTests extends ElasticsearchTestCase {
+
+ public void testListenerIsCallableFromNetworkThreads() throws Throwable {
+ ThreadPool threadPool = new ThreadPool("testListenerIsCallableFromNetworkThreads");
+ try {
+ final PlainListenableActionFuture<Object> future = new PlainListenableActionFuture<>(threadPool);
+ final CountDownLatch listenerCalled = new CountDownLatch(1);
+ final AtomicReference<Throwable> error = new AtomicReference<>();
+ final Object response = new Object();
+ future.addListener(new ActionListener<Object>() {
+ @Override
+ public void onResponse(Object o) {
+ listenerCalled.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ error.set(e);
+ listenerCalled.countDown();
+ }
+ });
+ Thread networkThread = new Thread(new AbstractRunnable() {
+ @Override
+ public void onFailure(Throwable t) {
+ error.set(t);
+ listenerCalled.countDown();
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ future.onResponse(response);
+ }
+ }, Transports.TEST_MOCK_TRANSPORT_THREAD_PREFIX + "_testListenerIsCallableFromNetworkThread");
+ networkThread.start();
+ networkThread.join();
+ listenerCalled.await();
+ if (error.get() != null) {
+ throw error.get();
+ }
+ } finally {
+ ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java
new file mode 100644
index 0000000000..a5a5bc4512
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.support;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.CoreMatchers.*;
+
+public class TransportActionFilterChainTests extends ElasticsearchTestCase {
+
+ private AtomicInteger counter;
+
+ @Before
+ public void init() throws Exception {
+ counter = new AtomicInteger();
+ }
+
+ @Test
+ public void testActionFiltersRequest() throws ExecutionException, InterruptedException {
+
+ int numFilters = randomInt(10);
+ Set<Integer> orders = new HashSet<>(numFilters);
+ while (orders.size() < numFilters) {
+ orders.add(randomInt(10));
+ }
+
+ Set<ActionFilter> filters = new HashSet<>();
+ for (Integer order : orders) {
+ filters.add(new RequestTestFilter(order, randomFrom(RequestOperation.values())));
+ }
+
+ String actionName = randomAsciiOfLength(randomInt(30));
+ ActionFilters actionFilters = new ActionFilters(filters);
+ TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters) {
+ @Override
+ protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
+ listener.onResponse(new TestResponse());
+ }
+ };
+
+ ArrayList<ActionFilter> actionFiltersByOrder = Lists.newArrayList(filters);
+ Collections.sort(actionFiltersByOrder, new Comparator<ActionFilter>() {
+ @Override
+ public int compare(ActionFilter o1, ActionFilter o2) {
+ return Integer.compare(o1.order(), o2.order());
+ }
+ });
+
+ List<ActionFilter> expectedActionFilters = Lists.newArrayList();
+ boolean errorExpected = false;
+ for (ActionFilter filter : actionFiltersByOrder) {
+ RequestTestFilter testFilter = (RequestTestFilter) filter;
+ expectedActionFilters.add(testFilter);
+ if (testFilter.callback == RequestOperation.LISTENER_FAILURE) {
+ errorExpected = true;
+ }
+ if (!(testFilter.callback == RequestOperation.CONTINUE_PROCESSING) ) {
+ break;
+ }
+ }
+
+ PlainListenableActionFuture<TestResponse> future = new PlainListenableActionFuture<>(null);
+ transportAction.execute(new TestRequest(), future);
+ try {
+ assertThat(future.get(), notNullValue());
+ assertThat("shouldn't get here if an error is expected", errorExpected, equalTo(false));
+ } catch(Throwable t) {
+ assertThat("shouldn't get here if an error is not expected " + t.getMessage(), errorExpected, equalTo(true));
+ }
+
+ List<RequestTestFilter> testFiltersByLastExecution = Lists.newArrayList();
+ for (ActionFilter actionFilter : actionFilters.filters()) {
+ testFiltersByLastExecution.add((RequestTestFilter) actionFilter);
+ }
+ Collections.sort(testFiltersByLastExecution, new Comparator<RequestTestFilter>() {
+ @Override
+ public int compare(RequestTestFilter o1, RequestTestFilter o2) {
+ return Integer.compare(o1.executionToken, o2.executionToken);
+ }
+ });
+
+ ArrayList<RequestTestFilter> finalTestFilters = Lists.newArrayList();
+ for (ActionFilter filter : testFiltersByLastExecution) {
+ RequestTestFilter testFilter = (RequestTestFilter) filter;
+ finalTestFilters.add(testFilter);
+ if (!(testFilter.callback == RequestOperation.CONTINUE_PROCESSING) ) {
+ break;
+ }
+ }
+
+ assertThat(finalTestFilters.size(), equalTo(expectedActionFilters.size()));
+ for (int i = 0; i < finalTestFilters.size(); i++) {
+ RequestTestFilter testFilter = finalTestFilters.get(i);
+ assertThat(testFilter, equalTo(expectedActionFilters.get(i)));
+ assertThat(testFilter.runs.get(), equalTo(1));
+ assertThat(testFilter.lastActionName, equalTo(actionName));
+ }
+ }
+
+ @Test
+ public void testActionFiltersResponse() throws ExecutionException, InterruptedException {
+
+ int numFilters = randomInt(10);
+ Set<Integer> orders = new HashSet<>(numFilters);
+ while (orders.size() < numFilters) {
+ orders.add(randomInt(10));
+ }
+
+ Set<ActionFilter> filters = new HashSet<>();
+ for (Integer order : orders) {
+ filters.add(new ResponseTestFilter(order, randomFrom(ResponseOperation.values())));
+ }
+
+ String actionName = randomAsciiOfLength(randomInt(30));
+ ActionFilters actionFilters = new ActionFilters(filters);
+ TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters) {
+ @Override
+ protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
+ listener.onResponse(new TestResponse());
+ }
+ };
+
+ ArrayList<ActionFilter> actionFiltersByOrder = Lists.newArrayList(filters);
+ Collections.sort(actionFiltersByOrder, new Comparator<ActionFilter>() {
+ @Override
+ public int compare(ActionFilter o1, ActionFilter o2) {
+ return Integer.compare(o2.order(), o1.order());
+ }
+ });
+
+ List<ActionFilter> expectedActionFilters = Lists.newArrayList();
+ boolean errorExpected = false;
+ for (ActionFilter filter : actionFiltersByOrder) {
+ ResponseTestFilter testFilter = (ResponseTestFilter) filter;
+ expectedActionFilters.add(testFilter);
+ if (testFilter.callback == ResponseOperation.LISTENER_FAILURE) {
+ errorExpected = true;
+ }
+ if (testFilter.callback != ResponseOperation.CONTINUE_PROCESSING) {
+ break;
+ }
+ }
+
+ PlainListenableActionFuture<TestResponse> future = new PlainListenableActionFuture<>(null);
+ transportAction.execute(new TestRequest(), future);
+ try {
+ assertThat(future.get(), notNullValue());
+ assertThat("shouldn't get here if an error is expected", errorExpected, equalTo(false));
+ } catch(Throwable t) {
+ assertThat("shouldn't get here if an error is not expected " + t.getMessage(), errorExpected, equalTo(true));
+ }
+
+ List<ResponseTestFilter> testFiltersByLastExecution = Lists.newArrayList();
+ for (ActionFilter actionFilter : actionFilters.filters()) {
+ testFiltersByLastExecution.add((ResponseTestFilter) actionFilter);
+ }
+ Collections.sort(testFiltersByLastExecution, new Comparator<ResponseTestFilter>() {
+ @Override
+ public int compare(ResponseTestFilter o1, ResponseTestFilter o2) {
+ return Integer.compare(o1.executionToken, o2.executionToken);
+ }
+ });
+
+ ArrayList<ResponseTestFilter> finalTestFilters = Lists.newArrayList();
+ for (ActionFilter filter : testFiltersByLastExecution) {
+ ResponseTestFilter testFilter = (ResponseTestFilter) filter;
+ finalTestFilters.add(testFilter);
+ if (testFilter.callback != ResponseOperation.CONTINUE_PROCESSING) {
+ break;
+ }
+ }
+
+ assertThat(finalTestFilters.size(), equalTo(expectedActionFilters.size()));
+ for (int i = 0; i < finalTestFilters.size(); i++) {
+ ResponseTestFilter testFilter = finalTestFilters.get(i);
+ assertThat(testFilter, equalTo(expectedActionFilters.get(i)));
+ assertThat(testFilter.runs.get(), equalTo(1));
+ assertThat(testFilter.lastActionName, equalTo(actionName));
+ }
+ }
+
+ @Test
+ public void testTooManyContinueProcessingRequest() throws ExecutionException, InterruptedException {
+
+ final int additionalContinueCount = randomInt(10);
+
+ RequestTestFilter testFilter = new RequestTestFilter(randomInt(), new RequestCallback() {
+ @Override
+ public void execute(final String action, final ActionRequest actionRequest, final ActionListener actionListener, final ActionFilterChain actionFilterChain) {
+ for (int i = 0; i <= additionalContinueCount; i++) {
+ actionFilterChain.proceed(action, actionRequest, actionListener);
+ }
+ }
+ });
+
+ Set<ActionFilter> filters = new HashSet<>();
+ filters.add(testFilter);
+
+ String actionName = randomAsciiOfLength(randomInt(30));
+ ActionFilters actionFilters = new ActionFilters(filters);
+ TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters) {
+ @Override
+ protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
+ listener.onResponse(new TestResponse());
+ }
+ };
+
+ final CountDownLatch latch = new CountDownLatch(additionalContinueCount + 1);
+ final AtomicInteger responses = new AtomicInteger();
+ final List<Throwable> failures = new CopyOnWriteArrayList<>();
+
+ transportAction.execute(new TestRequest(), new ActionListener<TestResponse>() {
+ @Override
+ public void onResponse(TestResponse testResponse) {
+ responses.incrementAndGet();
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ failures.add(e);
+ latch.countDown();
+ }
+ });
+
+ if (!latch.await(10, TimeUnit.SECONDS)) {
+ fail("timeout waiting for the filter to notify the listener as many times as expected");
+ }
+
+ assertThat(testFilter.runs.get(), equalTo(1));
+ assertThat(testFilter.lastActionName, equalTo(actionName));
+
+ assertThat(responses.get(), equalTo(1));
+ assertThat(failures.size(), equalTo(additionalContinueCount));
+ for (Throwable failure : failures) {
+ assertThat(failure, instanceOf(IllegalStateException.class));
+ }
+ }
+
+ @Test
+ public void testTooManyContinueProcessingResponse() throws ExecutionException, InterruptedException {
+
+ final int additionalContinueCount = randomInt(10);
+
+ ResponseTestFilter testFilter = new ResponseTestFilter(randomInt(), new ResponseCallback() {
+ @Override
+ public void execute(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ for (int i = 0; i <= additionalContinueCount; i++) {
+ chain.proceed(action, response, listener);
+ }
+ }
+ });
+
+ Set<ActionFilter> filters = new HashSet<>();
+ filters.add(testFilter);
+
+ String actionName = randomAsciiOfLength(randomInt(30));
+ ActionFilters actionFilters = new ActionFilters(filters);
+ TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters) {
+ @Override
+ protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
+ listener.onResponse(new TestResponse());
+ }
+ };
+
+ final CountDownLatch latch = new CountDownLatch(additionalContinueCount + 1);
+ final AtomicInteger responses = new AtomicInteger();
+ final List<Throwable> failures = new CopyOnWriteArrayList<>();
+
+ transportAction.execute(new TestRequest(), new ActionListener<TestResponse>() {
+ @Override
+ public void onResponse(TestResponse testResponse) {
+ responses.incrementAndGet();
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ failures.add(e);
+ latch.countDown();
+ }
+ });
+
+ if (!latch.await(10, TimeUnit.SECONDS)) {
+ fail("timeout waiting for the filter to notify the listener as many times as expected");
+ }
+
+ assertThat(testFilter.runs.get(), equalTo(1));
+ assertThat(testFilter.lastActionName, equalTo(actionName));
+
+ assertThat(responses.get(), equalTo(1));
+ assertThat(failures.size(), equalTo(additionalContinueCount));
+ for (Throwable failure : failures) {
+ assertThat(failure, instanceOf(IllegalStateException.class));
+ }
+ }
+
+ private class RequestTestFilter implements ActionFilter {
+ private final RequestCallback callback;
+ private final int order;
+ AtomicInteger runs = new AtomicInteger();
+ volatile String lastActionName;
+ volatile int executionToken = Integer.MAX_VALUE; //the filters that don't run will go last in the sorted list
+
+ RequestTestFilter(int order, RequestCallback callback) {
+ this.order = order;
+ this.callback = callback;
+ }
+
+ @Override
+ public int order() {
+ return order;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void apply(String action, ActionRequest actionRequest, ActionListener actionListener, ActionFilterChain actionFilterChain) {
+ this.runs.incrementAndGet();
+ this.lastActionName = action;
+ this.executionToken = counter.incrementAndGet();
+ this.callback.execute(action, actionRequest, actionListener, actionFilterChain);
+ }
+
+ @Override
+ public void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ chain.proceed(action, response, listener);
+ }
+ }
+
+ private class ResponseTestFilter implements ActionFilter {
+ private final ResponseCallback callback;
+ private final int order;
+ AtomicInteger runs = new AtomicInteger();
+ volatile String lastActionName;
+ volatile int executionToken = Integer.MAX_VALUE; //the filters that don't run will go last in the sorted list
+
+ ResponseTestFilter(int order, ResponseCallback callback) {
+ this.order = order;
+ this.callback = callback;
+ }
+
+ @Override
+ public int order() {
+ return order;
+ }
+
+ @Override
+ public void apply(String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
+ chain.proceed(action, request, listener);
+ }
+
+ @Override
+ public void apply(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ this.runs.incrementAndGet();
+ this.lastActionName = action;
+ this.executionToken = counter.incrementAndGet();
+ this.callback.execute(action, response, listener, chain);
+ }
+ }
+
+ private static enum RequestOperation implements RequestCallback {
+ CONTINUE_PROCESSING {
+ @Override
+ public void execute(String action, ActionRequest actionRequest, ActionListener actionListener, ActionFilterChain actionFilterChain) {
+ actionFilterChain.proceed(action, actionRequest, actionListener);
+ }
+ },
+ LISTENER_RESPONSE {
+ @Override
+ @SuppressWarnings("unchecked")
+ public void execute(String action, ActionRequest actionRequest, ActionListener actionListener, ActionFilterChain actionFilterChain) {
+ actionListener.onResponse(new TestResponse());
+ }
+ },
+ LISTENER_FAILURE {
+ @Override
+ public void execute(String action, ActionRequest actionRequest, ActionListener actionListener, ActionFilterChain actionFilterChain) {
+ actionListener.onFailure(new ElasticsearchTimeoutException(""));
+ }
+ }
+ }
+
+ private static enum ResponseOperation implements ResponseCallback {
+ CONTINUE_PROCESSING {
+ @Override
+ public void execute(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ chain.proceed(action, response, listener);
+ }
+ },
+ LISTENER_RESPONSE {
+ @Override
+ @SuppressWarnings("unchecked")
+ public void execute(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ listener.onResponse(new TestResponse());
+ }
+ },
+ LISTENER_FAILURE {
+ @Override
+ public void execute(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain) {
+ listener.onFailure(new ElasticsearchTimeoutException(""));
+ }
+ }
+ }
+
+ private static interface RequestCallback {
+ void execute(String action, ActionRequest actionRequest, ActionListener actionListener, ActionFilterChain actionFilterChain);
+ }
+
+ private static interface ResponseCallback {
+ void execute(String action, ActionResponse response, ActionListener listener, ActionFilterChain chain);
+ }
+
+ private static class TestRequest extends ActionRequest {
+ @Override
+ public ActionRequestValidationException validate() {
+ return null;
+ }
+ }
+
+ private static class TestResponse extends ActionResponse {
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java
new file mode 100644
index 0000000000..432e21248f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java
@@ -0,0 +1,845 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.support.replication;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.index.CorruptIndexException;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionWriteResponse;
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.support.ActionFilter;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateObserver;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.index.shard.IndexShardNotStartedException;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.cluster.TestClusterService;
+import org.elasticsearch.test.transport.CapturingTransport;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseOptions;
+import org.elasticsearch.transport.TransportService;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.hamcrest.Matchers.*;
+
+public class ShardReplicationTests extends ElasticsearchTestCase {
+
+ private static ThreadPool threadPool;
+
+ private TestClusterService clusterService;
+ private TransportService transportService;
+ private CapturingTransport transport;
+ private Action action;
+    /*
+ * TransportReplicationAction needs an instance of IndexShard to count operations.
+ * indexShards is reset to null before each test and will be initialized upon request in the tests.
+ */
+
+ @BeforeClass
+ public static void beforeClass() {
+ threadPool = new ThreadPool("ShardReplicationTests");
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ transport = new CapturingTransport();
+ clusterService = new TestClusterService(threadPool);
+ transportService = new TransportService(transport, threadPool);
+ transportService.start();
+ action = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool);
+ count.set(1);
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
+ threadPool = null;
+ }
+
+ <T> void assertListenerThrows(String msg, PlainActionFuture<T> listener, Class<?> klass) throws InterruptedException {
+ try {
+ listener.get();
+ fail(msg);
+ } catch (ExecutionException ex) {
+ assertThat(ex.getCause(), instanceOf(klass));
+ }
+ }
+
+ @Test
+ public void testBlocks() throws ExecutionException, InterruptedException {
+ Request request = new Request();
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+ ClusterBlocks.Builder block = ClusterBlocks.builder()
+ .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ assertFalse("primary phase should stop execution", primaryPhase.checkBlocks());
+ assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class);
+
+ block = ClusterBlocks.builder()
+ .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
+ listener = new PlainActionFuture<>();
+ primaryPhase = action.new PrimaryPhase(new Request().timeout("5ms"), listener);
+ assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks());
+ assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class);
+
+
+ listener = new PlainActionFuture<>();
+ primaryPhase = action.new PrimaryPhase(new Request(), listener);
+ assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks());
+ assertFalse("primary phase should wait on retryable block", listener.isDone());
+
+ block = ClusterBlocks.builder()
+ .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
+ clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
+ assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener, ClusterBlockException.class);
+ assertIndexShardUninitialized();
+ }
+
+ public void assertIndexShardUninitialized() {
+ assertEquals(1, count.get());
+ }
+
+ ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) {
+ int assignedReplicas = randomIntBetween(0, numberOfReplicas);
+ return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
+ }
+
+ ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) {
+ ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
+ // no point in randomizing - node assignment later on does it too.
+ for (int i = 0; i < assignedReplicas; i++) {
+ replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
+ }
+ for (int i = assignedReplicas; i < replicaStates.length; i++) {
+ replicaStates[i] = ShardRoutingState.UNASSIGNED;
+ }
+ return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
+ }
+
+ ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
+ final int numberOfReplicas = replicaStates.length;
+
+ int numberOfNodes = numberOfReplicas + 1;
+ if (primaryState == ShardRoutingState.RELOCATING) {
+ numberOfNodes++;
+ }
+ for (ShardRoutingState state : replicaStates) {
+ if (state == ShardRoutingState.RELOCATING) {
+ numberOfNodes++;
+ }
+ }
+ numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures
+ final ShardId shardId = new ShardId(index, 0);
+ DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+ Set<String> unassignedNodes = new HashSet<>();
+ for (int i = 0; i < numberOfNodes + 1; i++) {
+ final DiscoveryNode node = newNode(i);
+ discoBuilder = discoBuilder.put(node);
+ unassignedNodes.add(node.id());
+ }
+ discoBuilder.localNodeId(newNode(0).id());
+ discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
+ IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
+ .put(SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
+ .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
+
+ RoutingTable.Builder routing = new RoutingTable.Builder();
+ routing.addAsNew(indexMetaData);
+ IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId, false);
+
+ String primaryNode = null;
+ String relocatingNode = null;
+ if (primaryState != ShardRoutingState.UNASSIGNED) {
+ if (primaryLocal) {
+ primaryNode = newNode(0).id();
+ unassignedNodes.remove(primaryNode);
+ } else {
+ primaryNode = selectAndRemove(unassignedNodes);
+ }
+ if (primaryState == ShardRoutingState.RELOCATING) {
+ relocatingNode = selectAndRemove(unassignedNodes);
+ }
+ }
+ indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, 0, primaryNode, relocatingNode, true, primaryState, 0));
+
+ for (ShardRoutingState replicaState : replicaStates) {
+ String replicaNode = null;
+ relocatingNode = null;
+ if (replicaState != ShardRoutingState.UNASSIGNED) {
+ assert primaryNode != null : "a replica is assigned but the primary isn't";
+ replicaNode = selectAndRemove(unassignedNodes);
+ if (replicaState == ShardRoutingState.RELOCATING) {
+ relocatingNode = selectAndRemove(unassignedNodes);
+ }
+ }
+ indexShardRoutingBuilder.addShard(
+ new ImmutableShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState, 0));
+ }
+
+ ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
+ state.nodes(discoBuilder);
+ state.metaData(MetaData.builder().put(indexMetaData, false).generateUuidIfNeeded());
+ state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build())));
+ return state.build();
+ }
+
+ private String selectAndRemove(Set<String> strings) {
+ String selection = randomFrom(strings.toArray(new String[strings.size()]));
+ strings.remove(selection);
+ return selection;
+ }
+
+ @Test
+ public void testNotStartedPrimary() throws InterruptedException, ExecutionException {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+        // no replicas in order to skip the replication part
+ clusterService.setState(state(index, true,
+ randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
+
+ logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+
+ Request request = new Request(shardId).timeout("1ms");
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ primaryPhase.run();
+ assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class);
+
+ request = new Request(shardId);
+ listener = new PlainActionFuture<>();
+ primaryPhase = action.new PrimaryPhase(request, listener);
+ primaryPhase.run();
+ assertFalse("unassigned primary didn't cause a retry", listener.isDone());
+
+ clusterService.setState(state(index, true, ShardRoutingState.STARTED));
+ logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint());
+
+ listener.get();
+ assertTrue("request wasn't processed on primary, despite of it being assigned", request.processedOnPrimary.get());
+ assertIndexShardCounter(1);
+ }
+
+ @Test
+ public void testRoutingToPrimary() {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+
+ clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3));
+
+ logger.debug("using state: \n{}", clusterService.state().prettyPrint());
+
+ final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+ final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
+ Request request = new Request(shardId);
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ assertTrue(primaryPhase.checkBlocks());
+ primaryPhase.routeRequestOrPerformLocally(shardRoutingTable.primaryShard(), shardRoutingTable.shardsIt());
+ if (primaryNodeId.equals(clusterService.localNode().id())) {
+ logger.info("--> primary is assigned locally, testing for execution");
+ assertTrue("request failed to be processed on a local primary", request.processedOnPrimary.get());
+ if (transport.capturedRequests().length > 0) {
+ assertIndexShardCounter(2);
+ } else {
+ assertIndexShardCounter(1);
+ }
+ } else {
+ logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId);
+ final List<CapturingTransport.CapturedRequest> capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId);
+ assertThat(capturedRequests, notNullValue());
+ assertThat(capturedRequests.size(), equalTo(1));
+ assertThat(capturedRequests.get(0).action, equalTo("testAction"));
+ assertIndexShardUninitialized();
+ }
+ }
+
+ @Test
+ public void testWriteConsistency() throws ExecutionException, InterruptedException {
+ action = new ActionWithConsistency(Settings.EMPTY, "testActionWithConsistency", transportService, clusterService, threadPool);
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+ final int assignedReplicas = randomInt(2);
+ final int unassignedReplicas = randomInt(2);
+ final int totalShards = 1 + assignedReplicas + unassignedReplicas;
+ final boolean passesWriteConsistency;
+ Request request = new Request(shardId).consistencyLevel(randomFrom(WriteConsistencyLevel.values()));
+ switch (request.consistencyLevel()) {
+ case ONE:
+ passesWriteConsistency = true;
+ break;
+ case DEFAULT:
+ case QUORUM:
+ if (totalShards <= 2) {
+ passesWriteConsistency = true; // primary is enough
+ } else {
+ passesWriteConsistency = assignedReplicas + 1 >= (totalShards / 2) + 1;
+ }
+ break;
+ case ALL:
+ passesWriteConsistency = unassignedReplicas == 0;
+ break;
+ default:
+ throw new RuntimeException("unknown consistency level [" + request.consistencyLevel() + "]");
+ }
+ ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
+ for (int i = 0; i < assignedReplicas; i++) {
+ replicaStates[i] = randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
+ }
+ for (int i = assignedReplicas; i < replicaStates.length; i++) {
+ replicaStates[i] = ShardRoutingState.UNASSIGNED;
+ }
+
+ clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates));
+ logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}",
+ request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry",
+ clusterService.state().prettyPrint());
+
+ final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ if (passesWriteConsistency) {
+ assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), nullValue());
+ primaryPhase.run();
+ assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get());
+ if (assignedReplicas > 0) {
+ assertIndexShardCounter(2);
+ } else {
+ assertIndexShardCounter(1);
+ }
+ } else {
+ assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), notNullValue());
+ primaryPhase.run();
+ assertFalse("operations should not have been perform, consistency level is *NOT* met", request.processedOnPrimary.get());
+ assertIndexShardUninitialized();
+ for (int i = 0; i < replicaStates.length; i++) {
+ replicaStates[i] = ShardRoutingState.STARTED;
+ }
+ clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates));
+ assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get());
+ assertIndexShardCounter(2);
+ }
+ }
+
+ @Test
+ public void testReplication() throws ExecutionException, InterruptedException {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+
+ clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
+
+ final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+ int assignedReplicas = 0;
+ int totalShards = 0;
+ for (ShardRouting shard : shardRoutingTable) {
+ totalShards++;
+ if (shard.primary() == false && shard.assignedToNode()) {
+ assignedReplicas++;
+ }
+ if (shard.relocating()) {
+ assignedReplicas++;
+ totalShards++;
+ }
+ }
+
+ runReplicateTest(shardRoutingTable, assignedReplicas, totalShards);
+ }
+
+ @Test
+ public void testReplicationWithShadowIndex() throws ExecutionException, InterruptedException {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+
+ ClusterState state = stateWithStartedPrimary(index, true, randomInt(5));
+ MetaData.Builder metaData = MetaData.builder(state.metaData());
+ Settings.Builder settings = Settings.builder().put(metaData.get(index).settings());
+ settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
+ metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
+ clusterService.setState(ClusterState.builder(state).metaData(metaData));
+
+ final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+ int assignedReplicas = 0;
+ int totalShards = 0;
+ for (ShardRouting shard : shardRoutingTable) {
+ totalShards++;
+ if (shard.primary() && shard.relocating()) {
+ assignedReplicas++;
+ totalShards++;
+ }
+ }
+ runReplicateTest(shardRoutingTable, assignedReplicas, totalShards);
+ }
+
+
+ protected void runReplicateTest(IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException {
+ final ShardRouting primaryShard = shardRoutingTable.primaryShard();
+ final ShardIterator shardIt = shardRoutingTable.shardsIt();
+ final ShardId shardId = shardIt.shardId();
+ final Request request = new Request();
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+ logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());
+
+ final TransportReplicationAction<Request, Request, Response>.InternalRequest internalRequest = action.new InternalRequest(request);
+ internalRequest.concreteIndex(shardId.index().name());
+ Releasable reference = getOrCreateIndexShardOperationsCounter();
+ assertIndexShardCounter(2);
+ TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
+ action.new ReplicationPhase(shardIt, request,
+ new Response(), new ClusterStateObserver(clusterService, logger),
+ primaryShard, internalRequest, listener, reference);
+
+ assertThat(replicationPhase.totalShards(), equalTo(totalShards));
+ assertThat(replicationPhase.pending(), equalTo(assignedReplicas));
+ replicationPhase.run();
+ final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests();
+ transport.clear();
+ assertThat(capturedRequests.length, equalTo(assignedReplicas));
+ if (assignedReplicas > 0) {
+ assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false));
+ }
+ int pending = replicationPhase.pending();
+ int criticalFailures = 0; // failures that should fail the shard
+ int successfull = 1;
+ for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) {
+ if (randomBoolean()) {
+ Throwable t;
+ if (randomBoolean()) {
+ t = new CorruptIndexException("simulated", (String) null);
+ criticalFailures++;
+ } else {
+ t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
+ }
+ logger.debug("--> simulating failure on {} with [{}]", capturedRequest.node, t.getClass().getSimpleName());
+ transport.handleResponse(capturedRequest.requestId, t);
+ } else {
+ successfull++;
+ transport.handleResponse(capturedRequest.requestId, TransportResponse.Empty.INSTANCE);
+ }
+ pending--;
+ assertThat(replicationPhase.pending(), equalTo(pending));
+ assertThat(replicationPhase.successful(), equalTo(successfull));
+ }
+ assertThat(listener.isDone(), equalTo(true));
+ Response response = listener.get();
+ final ActionWriteResponse.ShardInfo shardInfo = response.getShardInfo();
+ assertThat(shardInfo.getFailed(), equalTo(criticalFailures));
+ assertThat(shardInfo.getFailures(), arrayWithSize(criticalFailures));
+ assertThat(shardInfo.getSuccessful(), equalTo(successfull));
+ assertThat(shardInfo.getTotal(), equalTo(totalShards));
+
+ assertThat("failed to see enough shard failures", transport.capturedRequests().length, equalTo(criticalFailures));
+ for (CapturingTransport.CapturedRequest capturedRequest : transport.capturedRequests()) {
+ assertThat(capturedRequest.action, equalTo(ShardStateAction.SHARD_FAILED_ACTION_NAME));
+ }
+ // all replicas have responded so the counter should be decreased again
+ assertIndexShardCounter(1);
+ }
+
+ @Test
+ public void testCounterOnPrimary() throws InterruptedException, ExecutionException, IOException {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+ // no replica, we only want to test on primary
+ clusterService.setState(state(index, true,
+ ShardRoutingState.STARTED));
+ logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+ Request request = new Request(shardId).timeout("100ms");
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+ /**
+ * Execute an action that is stuck in shard operation until a latch is counted down.
+ * That way we can start the operation, check if the counter was incremented and then unblock the operation
+ * again to see if the counter is decremented afterwards.
+ * TODO: I could also write an action that asserts that the counter is 2 in the shard operation.
+ * However, this failure would only become apparent once listener.get is called. Seems a little implicit.
+ * */
+ action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
+ final TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ Thread t = new Thread() {
+ public void run() {
+ primaryPhase.run();
+ }
+ };
+ t.start();
+ // shard operation should be ongoing, so the counter is at 2
+ // we have to wait here because increment happens in thread
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(@Nullable Object input) {
+ return (count.get() == 2);
+ }
+ });
+
+ assertIndexShardCounter(2);
+ assertThat(transport.capturedRequests().length, equalTo(0));
+ ((ActionWithDelay) action).countDownLatch.countDown();
+ t.join();
+ listener.get();
+ // operation finished, counter back to 0
+ assertIndexShardCounter(1);
+ assertThat(transport.capturedRequests().length, equalTo(0));
+ }
+
+ @Test
+ public void testCounterIncrementedWhileReplicationOngoing() throws InterruptedException, ExecutionException, IOException {
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+ // one replica to make sure replication is attempted
+ clusterService.setState(state(index, true,
+ ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+ logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+ Request request = new Request(shardId).timeout("100ms");
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ primaryPhase.run();
+ assertIndexShardCounter(2);
+ assertThat(transport.capturedRequests().length, equalTo(1));
+ // try once with successful response
+ transport.handleResponse(transport.capturedRequests()[0].requestId, TransportResponse.Empty.INSTANCE);
+ assertIndexShardCounter(1);
+ transport.clear();
+ request = new Request(shardId).timeout("100ms");
+ primaryPhase = action.new PrimaryPhase(request, listener);
+ primaryPhase.run();
+ assertIndexShardCounter(2);
+ assertThat(transport.capturedRequests().length, equalTo(1));
+ // try with failure response
+ transport.handleResponse(transport.capturedRequests()[0].requestId, new CorruptIndexException("simulated", (String) null));
+ assertIndexShardCounter(1);
+ }
+
+ @Test
+ public void testReplicasCounter() throws Exception {
+ final ShardId shardId = new ShardId("test", 0);
+ clusterService.setState(state(shardId.index().getName(), true,
+ ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+ action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
+ final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
+ Thread t = new Thread() {
+ public void run() {
+ try {
+ replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel());
+ } catch (Exception e) {
+ }
+ }
+ };
+ t.start();
+ // shard operation should be ongoing, so the counter is at 2
+ // we have to wait here because increment happens in thread
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(@Nullable Object input) {
+ return count.get() == 2;
+ }
+ });
+ ((ActionWithDelay) action).countDownLatch.countDown();
+ t.join();
+ // operation should have finished and counter decreased because no outstanding replica requests
+ assertIndexShardCounter(1);
+ // now check if this also works if operation throws exception
+ action = new ActionWithExceptions(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
+ final Action.ReplicaOperationTransportHandler replicaOperationTransportHandlerForException = action.new ReplicaOperationTransportHandler();
+ try {
+ replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel());
+ fail();
+ } catch (Throwable t2) {
+ }
+ assertIndexShardCounter(1);
+ }
+
+ @Test
+ public void testCounterDecrementedIfShardOperationThrowsException() throws InterruptedException, ExecutionException, IOException {
+ action = new ActionWithExceptions(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
+ final String index = "test";
+ final ShardId shardId = new ShardId(index, 0);
+ clusterService.setState(state(index, true,
+ ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+ logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+ Request request = new Request(shardId).timeout("100ms");
+ PlainActionFuture<Response> listener = new PlainActionFuture<>();
+ TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+ primaryPhase.run();
+ // no replica request should have been sent yet
+ assertThat(transport.capturedRequests().length, equalTo(0));
+        // no matter if the operation is retried or not, counter must be back to 1
+ assertIndexShardCounter(1);
+ }
+
+ private void assertIndexShardCounter(int expected) {
+ assertThat(count.get(), equalTo(expected));
+ }
+
+ private final AtomicInteger count = new AtomicInteger(0);
+
+ /*
+    * Returns testIndexShardOperationsCounter, initializing it if it was not already created in this test run.
+ * */
+ private synchronized Releasable getOrCreateIndexShardOperationsCounter() {
+ count.incrementAndGet();
+ return new Releasable() {
+ @Override
+ public void close() {
+ count.decrementAndGet();
+ }
+ };
+ }
+
+ static class Request extends ReplicationRequest<Request> {
+ int shardId;
+ public AtomicBoolean processedOnPrimary = new AtomicBoolean();
+ public AtomicInteger processedOnReplicas = new AtomicInteger();
+
+ Request() {
+ this.operationThreaded(randomBoolean());
+ }
+
+ Request(ShardId shardId) {
+ this();
+ this.shardId = shardId.id();
+ this.index(shardId.index().name());
+ // keep things simple
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeVInt(shardId);
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardId = in.readVInt();
+ }
+ }
+
+ static class Response extends ActionWriteResponse {
+ }
+
+ class Action extends TransportReplicationAction<Request, Request, Response> {
+
+ Action(Settings settings, String actionName, TransportService transportService,
+ ClusterService clusterService,
+ ThreadPool threadPool) {
+ super(settings, actionName, transportService, clusterService, null, threadPool,
+ new ShardStateAction(settings, clusterService, transportService, null, null), null,
+ new ActionFilters(new HashSet<ActionFilter>()), Request.class, Request.class, ThreadPool.Names.SAME);
+ }
+
+ @Override
+ protected Response newResponseInstance() {
+ return new Response();
+ }
+
+ @Override
+ protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
+ boolean executedBefore = shardRequest.request.processedOnPrimary.getAndSet(true);
+ assert executedBefore == false : "request has already been executed on the primary";
+ return new Tuple<>(new Response(), shardRequest.request);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ShardId shardId, Request request) {
+ request.processedOnReplicas.incrementAndGet();
+ }
+
+ @Override
+ protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
+ return clusterState.getRoutingTable().index(request.concreteIndex()).shard(request.request().shardId).shardsIt();
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return false;
+ }
+
+ @Override
+ protected boolean resolveIndex() {
+ return false;
+ }
+
+ @Override
+ protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
+ return getOrCreateIndexShardOperationsCounter();
+ }
+ }
+
+ class ActionWithConsistency extends Action {
+
+ ActionWithConsistency(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, actionName, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected boolean checkWriteConsistency() {
+ return true;
+ }
+ }
+
+ static DiscoveryNode newNode(int nodeId) {
+ return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ /*
+ * Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails.
+ * */
+ class ActionWithExceptions extends Action {
+
+ ActionWithExceptions(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) throws IOException {
+ super(settings, actionName, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
+ return throwException(shardRequest.shardId);
+ }
+
+ private Tuple<Response, Request> throwException(ShardId shardId) {
+ try {
+ if (randomBoolean()) {
+ // throw a generic exception
+ // for testing on replica this will actually cause an NPE because it will make the shard fail but
+ // for this we need an IndicesService which is null.
+ throw new ElasticsearchException("simulated");
+ } else {
+ // throw an exception which will cause retry on primary and be ignored on replica
+ throw new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
+ }
+ } catch (Exception e) {
+ logger.info("throwing ", e);
+ throw e;
+ }
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) {
+ throwException(shardRequest.internalShardId);
+ }
+ }
+
+ /**
+ * Delays the operation until countDownLatch is counted down
+ */
+ class ActionWithDelay extends Action {
+ CountDownLatch countDownLatch = new CountDownLatch(1);
+
+ ActionWithDelay(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) throws IOException {
+ super(settings, actionName, transportService, clusterService, threadPool);
+ }
+
+ @Override
+ protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
+ awaitLatch();
+ return new Tuple<>(new Response(), shardRequest.request);
+ }
+
+ private void awaitLatch() throws InterruptedException {
+ countDownLatch.await();
+ countDownLatch = new CountDownLatch(1);
+ }
+
+ @Override
+ protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) {
+ try {
+ awaitLatch();
+ } catch (InterruptedException e) {
+ }
+ }
+
+ }
+
+ /*
+ * Transport channel that is needed for replica operation testing.
+ * */
+ public TransportChannel createTransportChannel() {
+ return new TransportChannel() {
+
+ @Override
+ public String action() {
+ return null;
+ }
+
+ @Override
+ public String getProfileName() {
+ return "";
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response) throws IOException {
+ }
+
+ @Override
+ public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
+ }
+
+ @Override
+ public void sendResponse(Throwable error) throws IOException {
+ }
+ };
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java
new file mode 100644
index 0000000000..31d5af8d8b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTests.java
@@ -0,0 +1,410 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvectors;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.common.inject.internal.Join;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public abstract class AbstractTermVectorsTests extends ElasticsearchIntegrationTest {
+
+ protected static class TestFieldSetting {
+ final public String name;
+ final public boolean storedOffset;
+ final public boolean storedPayloads;
+ final public boolean storedPositions;
+
+ public TestFieldSetting(String name, boolean storedOffset, boolean storedPayloads, boolean storedPositions) {
+ this.name = name;
+ this.storedOffset = storedOffset;
+ this.storedPayloads = storedPayloads;
+ this.storedPositions = storedPositions;
+ }
+
+ public void addToMappings(XContentBuilder mappingsBuilder) throws IOException {
+ mappingsBuilder.startObject(name);
+ mappingsBuilder.field("type", "string");
+ String tv_settings;
+ if (storedPositions && storedOffset && storedPayloads) {
+ tv_settings = "with_positions_offsets_payloads";
+ } else if (storedPositions && storedOffset) {
+ tv_settings = "with_positions_offsets";
+ } else if (storedPayloads) {
+ tv_settings = "with_positions_payloads";
+ } else if (storedPositions) {
+ tv_settings = "with_positions";
+ } else if (storedOffset) {
+ tv_settings = "with_offsets";
+ } else {
+ tv_settings = "yes";
+ }
+
+ mappingsBuilder.field("term_vector", tv_settings);
+
+ if (storedPayloads) {
+ mappingsBuilder.field("analyzer", "tv_test");
+ }
+
+ mappingsBuilder.endObject();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("name: ").append(name).append(" tv_with:");
+ if (storedPayloads) {
+ sb.append("payloads,");
+ }
+ if (storedOffset) {
+ sb.append("offsets,");
+ }
+ if (storedPositions) {
+ sb.append("positions,");
+ }
+ return sb.toString();
+ }
+ }
+
+ protected static class TestDoc {
+ final public String id;
+ final public TestFieldSetting[] fieldSettings;
+ final public String[] fieldContent;
+ public String index = "test";
+ public String alias = "alias";
+ public String type = "type1";
+
+ public TestDoc(String id, TestFieldSetting[] fieldSettings, String[] fieldContent) {
+ this.id = id;
+ assertEquals(fieldSettings.length, fieldContent.length);
+ this.fieldSettings = fieldSettings;
+ this.fieldContent = fieldContent;
+ }
+
+ public TestDoc index(String index) {
+ this.index = index;
+ return this;
+ }
+
+ public TestDoc alias(String alias) {
+ this.alias = alias;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+
+ StringBuilder sb = new StringBuilder("index:").append(index).append(" type:").append(type).append(" id:").append(id);
+ for (int i = 0; i < fieldSettings.length; i++) {
+ TestFieldSetting f = fieldSettings[i];
+ sb.append("\n").append("Field: ").append(f).append("\n content:").append(fieldContent[i]);
+ }
+ sb.append("\n");
+
+ return sb.toString();
+ }
+ }
+
+ protected static class TestConfig {
+ final public TestDoc doc;
+ final public String[] selectedFields;
+ final public boolean requestPositions;
+ final public boolean requestOffsets;
+ final public boolean requestPayloads;
+ public Class expectedException = null;
+
+ public TestConfig(TestDoc doc, String[] selectedFields, boolean requestPositions, boolean requestOffsets, boolean requestPayloads) {
+ this.doc = doc;
+ this.selectedFields = selectedFields;
+ this.requestPositions = requestPositions;
+ this.requestOffsets = requestOffsets;
+ this.requestPayloads = requestPayloads;
+ }
+
+ public TestConfig expectedException(Class exceptionClass) {
+ this.expectedException = exceptionClass;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ String requested = "";
+ if (requestOffsets) {
+ requested += "offsets,";
+ }
+ if (requestPositions) {
+ requested += "position,";
+ }
+ if (requestPayloads) {
+ requested += "payload,";
+ }
+ Locale aLocale = new Locale("en", "US");
+ return String.format(aLocale, "(doc: %s\n requested: %s, fields: %s)", doc, requested,
+ selectedFields == null ? "NULL" : Join.join(",", selectedFields));
+ }
+ }
+
+ protected void createIndexBasedOnFieldSettings(String index, String alias, TestFieldSetting[] fieldSettings) throws IOException {
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("type1").startObject("properties");
+ for (TestFieldSetting field : fieldSettings) {
+ field.addToMappings(mappingBuilder);
+ }
+ mappingBuilder.endObject().endObject().endObject();
+ Settings.Builder settings = Settings.settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase");
+ assertAcked(prepareCreate(index).addMapping("type1", mappingBuilder).setSettings(settings).addAlias(new Alias(alias)));
+
+ ensureYellow();
+ }
+
+ /**
+ * Generate test documents. The returned documents are already indexed.
+ */
+ protected TestDoc[] generateTestDocs(String index, TestFieldSetting[] fieldSettings) {
+ String[] fieldContentOptions = new String[]{"Generating a random permutation of a sequence (such as when shuffling cards).",
+ "Selecting a random sample of a population (important in statistical sampling).",
+ "Allocating experimental units via random assignment to a treatment or control condition.",
+ "Generating random numbers: see Random number generation.",
+ "Transforming a data stream (such as when using a scrambler in telecommunications)."};
+
+ String[] contentArray = new String[fieldSettings.length];
+ Map<String, Object> docSource = new HashMap<>();
+ int totalShards = getNumShards(index).numPrimaries;
+ TestDoc[] testDocs = new TestDoc[totalShards];
+ // this method wants to send one doc to each shard
+ for (int i = 0; i < totalShards; i++) {
+ docSource.clear();
+ for (int j = 0; j < contentArray.length; j++) {
+ contentArray[j] = fieldContentOptions[randomInt(fieldContentOptions.length - 1)];
+ docSource.put(fieldSettings[j].name, contentArray[j]);
+ }
+ final String id = routingKeyForShard(index, "type", i);
+ TestDoc doc = new TestDoc(id, fieldSettings, contentArray.clone());
+ index(doc.index, doc.type, doc.id, docSource);
+ testDocs[i] = doc;
+ }
+
+ refresh();
+ return testDocs;
+
+ }
+
+ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs, TestFieldSetting[] fieldSettings) {
+ ArrayList<TestConfig> configs = new ArrayList<>();
+ for (int i = 0; i < numberOfTests; i++) {
+
+ ArrayList<String> selectedFields = null;
+ if (randomBoolean()) {
+ // used field selection
+ selectedFields = new ArrayList<>();
+ if (randomBoolean()) {
+ selectedFields.add("Doesnt_exist"); // this will be ignored.
+ }
+ for (TestFieldSetting field : fieldSettings)
+ if (randomBoolean()) {
+ selectedFields.add(field.name);
+ }
+
+ if (selectedFields.size() == 0) {
+ selectedFields = null; // 0 length set is not supported.
+ }
+
+ }
+ TestConfig config = new TestConfig(testDocs[randomInt(testDocs.length - 1)], selectedFields == null ? null
+ : selectedFields.toArray(new String[]{}), randomBoolean(), randomBoolean(), randomBoolean());
+
+ configs.add(config);
+ }
+ // always adds a test that fails
+ configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist").alias("doesn't_exist"),
+ new String[]{"doesnt_exist"}, true, true, true).expectedException(IndexMissingException.class));
+
+ refresh();
+
+ return configs.toArray(new TestConfig[configs.size()]);
+ }
+
+ protected TestFieldSetting[] getFieldSettings() {
+ return new TestFieldSetting[]{new TestFieldSetting("field_with_positions", false, false, true),
+ new TestFieldSetting("field_with_offsets", true, false, false),
+ new TestFieldSetting("field_with_only_tv", false, false, false),
+ new TestFieldSetting("field_with_positions_offsets", false, false, true),
+ new TestFieldSetting("field_with_positions_payloads", false, true, true)
+ };
+ }
+
+ protected DirectoryReader indexDocsWithLucene(TestDoc[] testDocs) throws IOException {
+ Map<String, Analyzer> mapping = new HashMap<>();
+ for (TestFieldSetting field : testDocs[0].fieldSettings) {
+ if (field.storedPayloads) {
+ mapping.put(field.name, new Analyzer() {
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer tokenizer = new StandardTokenizer();
+ TokenFilter filter = new LowerCaseFilter(tokenizer);
+ filter = new TypeAsPayloadTokenFilter(filter);
+ return new TokenStreamComponents(tokenizer, filter);
+ }
+
+ });
+ }
+ }
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new StandardAnalyzer(CharArraySet.EMPTY_SET), mapping);
+
+ Directory dir = new RAMDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(wrapper);
+
+ conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ for (TestDoc doc : testDocs) {
+ Document d = new Document();
+ d.add(new Field("id", doc.id, StringField.TYPE_STORED));
+ for (int i = 0; i < doc.fieldContent.length; i++) {
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ TestFieldSetting fieldSetting = doc.fieldSettings[i];
+
+ type.setStoreTermVectorOffsets(fieldSetting.storedOffset);
+ type.setStoreTermVectorPayloads(fieldSetting.storedPayloads);
+ type.setStoreTermVectorPositions(fieldSetting.storedPositions || fieldSetting.storedPayloads || fieldSetting.storedOffset);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ d.add(new Field(fieldSetting.name, doc.fieldContent[i], type));
+ }
+ writer.updateDocument(new Term("id", doc.id), d);
+ writer.commit();
+ }
+ writer.close();
+
+ return DirectoryReader.open(dir);
+ }
+
+ protected void validateResponse(TermVectorsResponse esResponse, Fields luceneFields, TestConfig testConfig) throws IOException {
+ assertThat(esResponse.getIndex(), equalTo(testConfig.doc.index));
+ TestDoc testDoc = testConfig.doc;
+ HashSet<String> selectedFields = testConfig.selectedFields == null ? null : new HashSet<>(
+ Arrays.asList(testConfig.selectedFields));
+ Fields esTermVectorFields = esResponse.getFields();
+ for (TestFieldSetting field : testDoc.fieldSettings) {
+ Terms esTerms = esTermVectorFields.terms(field.name);
+ if (selectedFields != null && !selectedFields.contains(field.name)) {
+ assertNull(esTerms);
+ continue;
+ }
+
+ assertNotNull(esTerms);
+
+ Terms luceneTerms = luceneFields.terms(field.name);
+ TermsEnum esTermEnum = esTerms.iterator();
+ TermsEnum luceneTermEnum = luceneTerms.iterator();
+
+ while (esTermEnum.next() != null) {
+ assertNotNull(luceneTermEnum.next());
+
+ assertThat(esTermEnum.totalTermFreq(), equalTo(luceneTermEnum.totalTermFreq()));
+ PostingsEnum esDocsPosEnum = esTermEnum.postings(null, null, PostingsEnum.POSITIONS);
+ PostingsEnum luceneDocsPosEnum = luceneTermEnum.postings(null, null, PostingsEnum.POSITIONS);
+ if (luceneDocsPosEnum == null) {
+ // test we expect that...
+ assertFalse(field.storedOffset);
+ assertFalse(field.storedPayloads);
+ assertFalse(field.storedPositions);
+ continue;
+ }
+
+ String currentTerm = esTermEnum.term().utf8ToString();
+
+ assertThat("Token mismatch for field: " + field.name, currentTerm, equalTo(luceneTermEnum.term().utf8ToString()));
+
+ esDocsPosEnum.nextDoc();
+ luceneDocsPosEnum.nextDoc();
+
+ int freq = esDocsPosEnum.freq();
+ assertThat(freq, equalTo(luceneDocsPosEnum.freq()));
+ for (int i = 0; i < freq; i++) {
+ String failDesc = " (field:" + field.name + " term:" + currentTerm + ")";
+ int lucenePos = luceneDocsPosEnum.nextPosition();
+ int esPos = esDocsPosEnum.nextPosition();
+ if (field.storedPositions && testConfig.requestPositions) {
+ assertThat("Position test failed" + failDesc, lucenePos, equalTo(esPos));
+ } else {
+ assertThat("Missing position test failed" + failDesc, esPos, equalTo(-1));
+ }
+ if (field.storedOffset && testConfig.requestOffsets) {
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.startOffset(), equalTo(esDocsPosEnum.startOffset()));
+ assertThat("Offset test failed" + failDesc, luceneDocsPosEnum.endOffset(), equalTo(esDocsPosEnum.endOffset()));
+ } else {
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.startOffset(), equalTo(-1));
+ assertThat("Missing offset test failed" + failDesc, esDocsPosEnum.endOffset(), equalTo(-1));
+ }
+ if (field.storedPayloads && testConfig.requestPayloads) {
+ assertThat("Payload test failed" + failDesc, luceneDocsPosEnum.getPayload(), equalTo(esDocsPosEnum.getPayload()));
+ } else {
+ assertThat("Missing payload test failed" + failDesc, esDocsPosEnum.getPayload(), equalTo(null));
+ }
+ }
+ }
+ assertNull("Es returned terms are done but lucene isn't", luceneTermEnum.next());
+ }
+ }
+
+ protected TermVectorsRequestBuilder getRequestForConfig(TestConfig config) {
+ return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id).setPayloads(config.requestPayloads)
+ .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true)
+ .setSelectedFields(config.selectedFields);
+ }
+
+ protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException {
+ IndexSearcher searcher = new IndexSearcher(directoryReader);
+ TopDocs search = searcher.search(new TermQuery(new Term("id", doc.id)), 1);
+
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ assertEquals(1, scoreDocs.length);
+ return directoryReader.getTermVectors(scoreDocs[0].doc);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java
new file mode 100644
index 0000000000..7021885b2d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvectors;
+
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.BytesStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class GetTermVectorsCheckDocFreqTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ @Override
+ public Settings indexSettings() {
+ return Settings.builder()
+ .put(super.indexSettings())
+ .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")
+ .build();
+ }
+
+ @Test
+ public void testSimpleTermVectors() throws IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .field("analyzer", "tv_test")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+ int numDocs = 15;
+ for (int i = 0; i < numDocs; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+ // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+ // 31the34 35lazy39 40dog43
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+ String[] values = { "brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the" };
+ int[] freq = { 1, 1, 1, 1, 1, 1, 1, 2 };
+ int[][] pos = { { 2 }, { 8 }, { 3 }, { 4 }, { 7 }, { 5 }, { 1 }, { 0, 6 } };
+ int[][] startOffset = { { 10 }, { 40 }, { 16 }, { 20 }, { 35 }, { 26 }, { 4 }, { 0, 31 } };
+ int[][] endOffset = { { 15 }, { 43 }, { 19 }, { 25 }, { 39 }, { 30 }, { 9 }, { 3, 34 } };
+ for (int i = 0; i < numDocs; i++) {
+ checkAllInfo(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutTermStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ checkWithoutFieldStatistics(numDocs, values, freq, pos, startOffset, endOffset, i);
+ }
+ }
+
+ private void checkWithoutFieldStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(true).setFieldStatistics(false).setSelectedFields();
+ TermVectorsResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
+ assertThat(terms.getDocCount(), Matchers.equalTo(-1));
+ assertThat(terms.getSumDocFreq(), equalTo((long) -1));
+ TermsEnum iterator = terms.iterator();
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+ xBuilder.startObject();
+ response.toXContent(xBuilder, null);
+ xBuilder.endObject();
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8().replaceFirst("\"took\":\\d+,", "");;
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
+ private void checkWithoutTermStatistics(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset,
+ int i) throws IOException {
+ TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setTermStatistics(false).setFieldStatistics(true).setSelectedFields();
+ assertThat(resp.request().termStatistics(), equalTo(false));
+ TermVectorsResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator();
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+
+ assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq()));
+
+ PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(-1));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+ xBuilder.startObject();
+ response.toXContent(xBuilder, null);
+ xBuilder.endObject();
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8().replaceFirst("\"took\":\\d+,", "");;
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+
+ }
+
+ private void checkAllInfo(int numDocs, String[] values, int[] freq, int[][] pos, int[][] startOffset, int[][] endOffset, int i)
+ throws IOException {
+ TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i)).setPayloads(true).setOffsets(true)
+ .setPositions(true).setFieldStatistics(true).setTermStatistics(true).setSelectedFields();
+ assertThat(resp.request().fieldStatistics(), equalTo(true));
+ TermVectorsResponse response = resp.execute().actionGet();
+ assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+ Fields fields = response.getFields();
+ assertThat(fields.size(), equalTo(1));
+ Terms terms = fields.terms("field");
+ assertThat(terms.size(), equalTo(8l));
+ assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
+ assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
+ assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
+ TermsEnum iterator = terms.iterator();
+ for (int j = 0; j < values.length; j++) {
+ String string = values[j];
+ BytesRef next = iterator.next();
+ assertThat(next, Matchers.notNullValue());
+ assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+ assertThat(next, Matchers.notNullValue());
+ if (string.equals("the")) {
+ assertThat("expected ttf of " + string, numDocs * 2, equalTo((int) iterator.totalTermFreq()));
+ } else {
+ assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq()));
+ }
+
+ PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+ assertThat(docsAndPositions.nextDoc(), equalTo(0));
+ assertThat(freq[j], equalTo(docsAndPositions.freq()));
+ assertThat(iterator.docFreq(), equalTo(numDocs));
+ int[] termPos = pos[j];
+ int[] termStartOffset = startOffset[j];
+ int[] termEndOffset = endOffset[j];
+ assertThat(termPos.length, equalTo(freq[j]));
+ assertThat(termStartOffset.length, equalTo(freq[j]));
+ assertThat(termEndOffset.length, equalTo(freq[j]));
+ for (int k = 0; k < freq[j]; k++) {
+ int nextPosition = docsAndPositions.nextPosition();
+ assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+ assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+ assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+ assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+ }
+ }
+ assertThat(iterator.next(), Matchers.nullValue());
+
+ XContentBuilder xBuilder = XContentFactory.jsonBuilder();
+ xBuilder.startObject();
+ response.toXContent(xBuilder, ToXContent.EMPTY_PARAMS);
+ xBuilder.endObject();
+ BytesStream bytesStream = xBuilder.bytesStream();
+ String utf8 = bytesStream.bytes().toUtf8().replaceFirst("\"took\":\\d+,", "");;
+ String expectedString = "{\"_index\":\"test\",\"_type\":\"type1\",\"_id\":\""
+ + i
+ + "\",\"_version\":1,\"found\":true,\"term_vectors\":{\"field\":{\"field_statistics\":{\"sum_doc_freq\":120,\"doc_count\":15,\"sum_ttf\":135},\"terms\":{\"brown\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":2,\"start_offset\":10,\"end_offset\":15,\"payload\":\"d29yZA==\"}]},\"dog\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":8,\"start_offset\":40,\"end_offset\":43,\"payload\":\"d29yZA==\"}]},\"fox\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":3,\"start_offset\":16,\"end_offset\":19,\"payload\":\"d29yZA==\"}]},\"jumps\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":4,\"start_offset\":20,\"end_offset\":25,\"payload\":\"d29yZA==\"}]},\"lazy\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":7,\"start_offset\":35,\"end_offset\":39,\"payload\":\"d29yZA==\"}]},\"over\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":5,\"start_offset\":26,\"end_offset\":30,\"payload\":\"d29yZA==\"}]},\"quick\":{\"doc_freq\":15,\"ttf\":15,\"term_freq\":1,\"tokens\":[{\"position\":1,\"start_offset\":4,\"end_offset\":9,\"payload\":\"d29yZA==\"}]},\"the\":{\"doc_freq\":15,\"ttf\":30,\"term_freq\":2,\"tokens\":[{\"position\":0,\"start_offset\":0,\"end_offset\":3,\"payload\":\"d29yZA==\"},{\"position\":6,\"start_offset\":31,\"end_offset\":34,\"payload\":\"d29yZA==\"}]}}}}}";
+ assertThat(utf8, equalTo(expectedString));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java
new file mode 100644
index 0000000000..7eec3a65d2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java
@@ -0,0 +1,1360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvectors;
+
+import com.carrotsearch.hppc.ObjectIntHashMap;
+
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.hamcrest.Matcher;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+@Slow
+public class GetTermVectorsTests extends AbstractTermVectorsTests {
+
+    // Requests term vectors for 20 ids that were never indexed (only id "666"
+    // exists) and verifies each response comes back with isExists()==false,
+    // the concrete index name resolved from the alias, and no serialization
+    // failure when rendered to JSON.
+    @Test
+    public void testNoSuchDoc() throws Exception {
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
+                .startObject("properties")
+                        .startObject("field")
+                            .field("type", "string")
+                            .field("term_vector", "with_positions_offsets_payloads")
+                        .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("type1", mapping));
+
+        ensureYellow();
+
+        client().prepareIndex("test", "type1", "666").setSource("field", "foo bar").execute().actionGet();
+        refresh();
+        for (int i = 0; i < 20; i++) {
+            ActionFuture<TermVectorsResponse> termVector = client().termVectors(new TermVectorsRequest(indexOrAlias(), "type1", "" + i));
+            TermVectorsResponse actionGet = termVector.actionGet();
+            assertThat(actionGet, notNullValue());
+            assertThat(actionGet.getIndex(), equalTo("test"));
+            assertThat(actionGet.isExists(), equalTo(false));
+            // check response is nevertheless serializable to json
+            actionGet.toXContent(jsonBuilder().startObject(), ToXContent.EMPTY_PARAMS);
+        }
+    }
+
+    // Regression test: a mapped field whose indexed content produced no terms
+    // (the value "?" analyzes to nothing) must yield a null Terms object for
+    // that field instead of triggering an NPE in the term vectors service.
+    @Test
+    public void testExistingFieldWithNoTermVectorsNoNPE() throws Exception {
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
+                .startObject("properties")
+                        .startObject("existingfield")
+                            .field("type", "string")
+                            .field("term_vector", "with_positions_offsets_payloads")
+                        .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("type1", mapping));
+
+        ensureYellow();
+
+        // when indexing a field that simply has a question mark, the term vectors will be null
+        client().prepareIndex("test", "type1", "0").setSource("existingfield", "?").execute().actionGet();
+        refresh();
+        ActionFuture<TermVectorsResponse> termVector = client().termVectors(new TermVectorsRequest(indexOrAlias(), "type1", "0")
+                .selectedFields(new String[]{"existingfield"}));
+
+        // lets see if the null term vectors are caught...
+        TermVectorsResponse actionGet = termVector.actionGet();
+        assertThat(actionGet, notNullValue());
+        assertThat(actionGet.isExists(), equalTo(true));
+        assertThat(actionGet.getIndex(), equalTo("test"));
+        assertThat(actionGet.getFields().terms("existingfield"), nullValue());
+    }
+
+    // Regression test: requesting term vectors for a field that is mapped but
+    // absent from the stored document must return null Terms for that field
+    // (no NPE), even with term/field statistics and dfs enabled.
+    @Test
+    public void testExistingFieldButNotInDocNPE() throws Exception {
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
+                .startObject("properties")
+                        .startObject("existingfield")
+                            .field("type", "string")
+                            .field("term_vector", "with_positions_offsets_payloads")
+                        .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("type1", mapping));
+
+        ensureYellow();
+
+        // when indexing a field that simply has a question mark, the term vectors will be null
+        client().prepareIndex("test", "type1", "0").setSource("anotherexistingfield", 1).execute().actionGet();
+        refresh();
+        ActionFuture<TermVectorsResponse> termVectors = client().termVectors(new TermVectorsRequest(indexOrAlias(), "type1", "0")
+                .selectedFields(randomBoolean() ? new String[]{"existingfield"} : null)
+                .termStatistics(true)
+                .fieldStatistics(true)
+                .dfs(true));
+
+        // lets see if the null term vectors are caught...
+        TermVectorsResponse actionGet = termVectors.actionGet();
+        assertThat(actionGet, notNullValue());
+        assertThat(actionGet.isExists(), equalTo(true));
+        assertThat(actionGet.getIndex(), equalTo("test"));
+        assertThat(actionGet.getFields().terms("existingfield"), nullValue());
+    }
+
+    // Verifies which mapping configurations can produce term vectors: fields
+    // 0-3 (non-string or index=no) must yield null Terms even when the
+    // document exists, while fields 4-5 (indexed strings) must yield Terms.
+    @Test
+    public void testNotIndexedField() throws Exception {
+        // must be of type string and indexed.
+        assertAcked(prepareCreate("test")
+                .addAlias(new Alias("alias"))
+                .addMapping("type1",
+                        "field0", "type=integer,", // no tvs
+                        "field1", "type=string,index=no", // no tvs
+                        "field2", "type=string,index=no,store=yes", // no tvs
+                        "field3", "type=string,index=no,term_vector=yes", // no tvs
+                        "field4", "type=string,index=not_analyzed", // yes tvs
+                        "field5", "type=string,index=analyzed")); // yes tvs
+
+        ensureYellow();
+
+        // doc i carries only "field"+i, so each request below probes exactly
+        // one mapping configuration
+        List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+        for (int i = 0; i < 6; i++) {
+            indexBuilders.add(client().prepareIndex()
+                    .setIndex("test")
+                    .setType("type1")
+                    .setId(String.valueOf(i))
+                    .setSource("field" + i, i));
+        }
+        indexRandom(true, indexBuilders);
+
+        for (int i = 0; i < 4; i++) {
+            TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), "type1", String.valueOf(i))
+                    .setSelectedFields("field" + i)
+                    .get();
+            assertThat(resp, notNullValue());
+            assertThat(resp.isExists(), equalTo(true));
+            assertThat(resp.getIndex(), equalTo("test"));
+            assertThat("field" + i + " :", resp.getFields().terms("field" + i), nullValue());
+        }
+
+        for (int i = 4; i < 6; i++) {
+            TermVectorsResponse resp = client().prepareTermVectors(indexOrAlias(), "type1", String.valueOf(i))
+                    .setSelectedFields("field" + i).get();
+            assertThat(resp.getIndex(), equalTo("test"));
+            assertThat("field" + i + " :", resp.getFields().terms("field" + i), notNullValue());
+        }
+    }
+
+    // Indexes the "quick brown fox" sentence into a field storing positions,
+    // offsets and payloads (the type_as_payload filter attaches "word" as the
+    // payload of every token) and asserts the full term vector contents via
+    // checkBrownFoxTermVector.
+    @Test
+    public void testSimpleTermVectors() throws IOException {
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
+                .startObject("properties")
+                        .startObject("field")
+                            .field("type", "string")
+                            .field("term_vector", "with_positions_offsets_payloads")
+                            .field("analyzer", "tv_test")
+                        .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(prepareCreate("test").addMapping("type1", mapping)
+                .addAlias(new Alias("alias"))
+                .setSettings(settingsBuilder()
+                        .put(indexSettings())
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+        ensureYellow();
+        for (int i = 0; i < 10; i++) {
+            client().prepareIndex("test", "type1", Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+                            // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+                            // 31the34 35lazy39 40dog43
+                            .endObject()).execute().actionGet();
+            refresh();
+        }
+        for (int i = 0; i < 10; i++) {
+            TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), "type1", Integer.toString(i)).setPayloads(true)
+                    .setOffsets(true).setPositions(true).setSelectedFields();
+            TermVectorsResponse response = resp.execute().actionGet();
+            assertThat(response.getIndex(), equalTo("test"));
+            assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+            Fields fields = response.getFields();
+            assertThat(fields.size(), equalTo(1));
+            // payloads are stored here, so check them too
+            checkBrownFoxTermVector(fields, "field", true);
+        }
+    }
+
+    // Randomizes both what is STORED (one of 7 term-vector configurations) and
+    // what is REQUESTED (positions/offsets/payloads flags), then verifies that
+    // a value is only returned when it was both requested and stored, and that
+    // the sentinel values (-1 position/offset, null payload) appear otherwise.
+    @Test
+    public void testRandomSingleTermVectors() throws IOException {
+        FieldType ft = new FieldType();
+        int config = randomInt(6);
+        boolean storePositions = false;
+        boolean storeOffsets = false;
+        boolean storePayloads = false;
+        boolean storeTermVectors = false;
+        // config 0 stores nothing; 1-6 enumerate the valid combinations of
+        // term vectors with positions/offsets/payloads
+        switch (config) {
+        case 0: {
+            // do nothing
+            break;
+        }
+        case 1: {
+            storeTermVectors = true;
+            break;
+        }
+        case 2: {
+            storeTermVectors = true;
+            storePositions = true;
+            break;
+        }
+        case 3: {
+            storeTermVectors = true;
+            storeOffsets = true;
+            break;
+        }
+        case 4: {
+            storeTermVectors = true;
+            storePositions = true;
+            storeOffsets = true;
+            break;
+        }
+        case 5: {
+            storeTermVectors = true;
+            storePositions = true;
+            storePayloads = true;
+            break;
+        }
+        case 6: {
+            storeTermVectors = true;
+            storePositions = true;
+            storeOffsets = true;
+            storePayloads = true;
+            break;
+        }
+        }
+        ft.setStoreTermVectors(storeTermVectors);
+        ft.setStoreTermVectorOffsets(storeOffsets);
+        ft.setStoreTermVectorPayloads(storePayloads);
+        ft.setStoreTermVectorPositions(storePositions);
+
+        // translate the Lucene FieldType into the mapping's term_vector string
+        String optionString = AbstractFieldMapper.termVectorOptionsToString(ft);
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1")
+                .startObject("properties")
+                        .startObject("field")
+                            .field("type", "string")
+                            .field("term_vector", optionString)
+                            .field("analyzer", "tv_test")
+                        .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(prepareCreate("test").addMapping("type1", mapping)
+                .setSettings(settingsBuilder()
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+        ensureYellow();
+        for (int i = 0; i < 10; i++) {
+            client().prepareIndex("test", "type1", Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog")
+                            // 0the3 4quick9 10brown15 16fox19 20jumps25 26over30
+                            // 31the34 35lazy39 40dog43
+                            .endObject()).execute().actionGet();
+            refresh();
+        }
+        // expected terms (sorted) with per-term frequency, positions and offsets
+        String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+        int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+        int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+        int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+        int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+
+        boolean isPayloadRequested = randomBoolean();
+        boolean isOffsetRequested = randomBoolean();
+        boolean isPositionsRequested = randomBoolean();
+        String infoString = createInfoString(isPositionsRequested, isOffsetRequested, isPayloadRequested, optionString);
+        for (int i = 0; i < 10; i++) {
+            TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(i))
+                    .setPayloads(isPayloadRequested).setOffsets(isOffsetRequested).setPositions(isPositionsRequested).setSelectedFields();
+            TermVectorsResponse response = resp.execute().actionGet();
+            assertThat(infoString + "doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+            Fields fields = response.getFields();
+            assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
+            if (ft.storeTermVectors()) {
+                Terms terms = fields.terms("field");
+                assertThat(terms.size(), equalTo(8l));
+                TermsEnum iterator = terms.iterator();
+                for (int j = 0; j < values.length; j++) {
+                    String string = values[j];
+                    BytesRef next = iterator.next();
+                    assertThat(infoString, next, notNullValue());
+                    assertThat(infoString + "expected " + string, string, equalTo(next.utf8ToString()));
+                    assertThat(infoString, next, notNullValue());
+                    // do not test ttf or doc frequency, because here we have
+                    // many shards and do not know how documents are distributed
+                    PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+                    // docs and pos only returns something if positions or
+                    // payloads or offsets are stored / requestd Otherwise use
+                    // DocsEnum?
+                    assertThat(infoString, docsAndPositions.nextDoc(), equalTo(0));
+                    assertThat(infoString, freq[j], equalTo(docsAndPositions.freq()));
+                    int[] termPos = pos[j];
+                    int[] termStartOffset = startOffset[j];
+                    int[] termEndOffset = endOffset[j];
+                    if (isPositionsRequested && storePositions) {
+                        assertThat(infoString, termPos.length, equalTo(freq[j]));
+                    }
+                    if (isOffsetRequested && storeOffsets) {
+                        assertThat(termStartOffset.length, equalTo(freq[j]));
+                        assertThat(termEndOffset.length, equalTo(freq[j]));
+                    }
+                    for (int k = 0; k < freq[j]; k++) {
+                        int nextPosition = docsAndPositions.nextPosition();
+                        // only return something useful if requested and stored
+                        if (isPositionsRequested && storePositions) {
+                            assertThat(infoString + "positions for term: " + string, nextPosition, equalTo(termPos[k]));
+                        } else {
+                            assertThat(infoString + "positions for term: ", nextPosition, equalTo(-1));
+                        }
+                        // only return something useful if requested and stored
+                        if (isPayloadRequested && storePayloads) {
+                            assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef(
+                                    "word")));
+                        } else {
+                            assertThat(infoString + "payloads for term: " + string, docsAndPositions.getPayload(), equalTo(null));
+                        }
+                        // only return something useful if requested and stored
+                        if (isOffsetRequested && storeOffsets) {
+
+                            assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(),
+                                    equalTo(termStartOffset[k]));
+                            assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+                        } else {
+                            assertThat(infoString + "startOffsets term: " + string, docsAndPositions.startOffset(), equalTo(-1));
+                            assertThat(infoString + "endOffsets term: " + string, docsAndPositions.endOffset(), equalTo(-1));
+                        }
+
+                    }
+                }
+                assertThat(iterator.next(), nullValue());
+            }
+        }
+    }
+
+    // Builds a human-readable summary of the stored term-vector configuration
+    // and the requested flags; prepended to assertion messages so a random
+    // failure reports the exact store/request combination.
+    private String createInfoString(boolean isPositionsRequested, boolean isOffsetRequested, boolean isPayloadRequested,
+                                    String optionString) {
+        StringBuilder info = new StringBuilder();
+        info.append("Store config: ").append(optionString).append("\n");
+        info.append("Requested: pos-").append(isPositionsRequested ? "yes" : "no");
+        info.append(", offsets-").append(isOffsetRequested ? "yes" : "no");
+        info.append(", payload- ").append(isPayloadRequested ? "yes" : "no").append("\n");
+        return info.toString();
+    }
+
+    // Duel test: indexes the same randomly-configured docs into both
+    // Elasticsearch and a plain Lucene index, then compares the term vectors
+    // returned by the API against those read directly from Lucene for 20
+    // generated request configurations.
+    @Test
+    public void testDuelESLucene() throws Exception {
+        TestFieldSetting[] testFieldSettings = getFieldSettings();
+        createIndexBasedOnFieldSettings("test", "alias", testFieldSettings);
+        //we generate as many docs as many shards we have
+        TestDoc[] testDocs = generateTestDocs("test", testFieldSettings);
+
+        DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+        TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+        for (TestConfig test : testConfigs) {
+            try {
+                TermVectorsRequestBuilder request = getRequestForConfig(test);
+                // some configs are expected to fail; assert the exception and
+                // move on instead of comparing term vectors
+                if (test.expectedException != null) {
+                    assertThrows(request, test.expectedException);
+                    continue;
+                }
+
+                TermVectorsResponse response = request.get();
+                Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+                validateResponse(response, luceneTermVectors, test);
+            } catch (Throwable t) {
+                // wrap so the failing random config is visible in the report
+                throw new Exception("Test exception while running " + test.toString(), t);
+            }
+        }
+    }
+
+    // Generates random tokens with random payloads, encodes them into a single
+    // string using the delimited_payload_filter syntax (token<delimiter>payload),
+    // indexes it, and verifies the payloads returned by the term vectors API
+    // match the ones generated (an empty generated payload must come back null).
+    @Test
+    public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOException {
+        //create the test document
+        int encoding = randomIntBetween(0, 2);
+        String encodingString = "";
+        if (encoding == 0) {
+            encodingString = "float";
+        }
+        if (encoding == 1) {
+            encodingString = "int";
+        }
+        if (encoding == 2) {
+            encodingString = "identity";
+        }
+        String[] tokens = crateRandomTokens();
+        Map<String, List<BytesRef>> payloads = createPayloads(tokens, encoding);
+        // delimiter must not occur in any token, see createRandomDelimiter
+        String delimiter = createRandomDelimiter(tokens);
+        String queryString = createString(tokens, payloads, encoding, delimiter.charAt(0));
+        //create the mapping
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+                .startObject("field").field("type", "string").field("term_vector", "with_positions_offsets_payloads")
+                .field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
+        assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+                settingsBuilder()
+                        .put(indexSettings())
+                        .put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
+                        .putArray("index.analysis.analyzer.payload_test.filter", "my_delimited_payload_filter")
+                        .put("index.analysis.filter.my_delimited_payload_filter.delimiter", delimiter)
+                        .put("index.analysis.filter.my_delimited_payload_filter.encoding", encodingString)
+                        .put("index.analysis.filter.my_delimited_payload_filter.type", "delimited_payload_filter")));
+        ensureYellow();
+
+        client().prepareIndex("test", "type1", Integer.toString(1))
+                .setSource(jsonBuilder().startObject().field("field", queryString).endObject()).execute().actionGet();
+        refresh();
+        TermVectorsRequestBuilder resp = client().prepareTermVectors("test", "type1", Integer.toString(1)).setPayloads(true).setOffsets(true)
+                .setPositions(true).setSelectedFields();
+        TermVectorsResponse response = resp.execute().actionGet();
+        assertThat("doc id 1 doesn't exists but should", response.isExists(), equalTo(true));
+        Fields fields = response.getFields();
+        assertThat(fields.size(), equalTo(1));
+        Terms terms = fields.terms("field");
+        TermsEnum iterator = terms.iterator();
+        while (iterator.next() != null) {
+            String term = iterator.term().utf8ToString();
+            PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+            assertThat(docsAndPositions.nextDoc(), equalTo(0));
+            List<BytesRef> curPayloads = payloads.get(term);
+            assertThat(term, curPayloads, notNullValue());
+            assertNotNull(docsAndPositions);
+            for (int k = 0; k < docsAndPositions.freq(); k++) {
+                docsAndPositions.nextPosition();
+                if (docsAndPositions.getPayload()!=null){
+                    String infoString = "\nterm: " + term + " has payload \n"+ docsAndPositions.getPayload().toString() + "\n but should have payload \n"+curPayloads.get(k).toString();
+                    assertThat(infoString, docsAndPositions.getPayload(), equalTo(curPayloads.get(k)));
+                } else {
+                    // a null payload is only acceptable when the generated
+                    // payload for this occurrence was empty
+                    String infoString = "\nterm: " + term + " has no payload but should have payload \n"+curPayloads.get(k).toString();
+                    assertThat(infoString, curPayloads.get(k).length, equalTo(0));
+                }
+            }
+        }
+        // NOTE(review): the while loop above already exhausted the enum; this
+        // calls next() again on an exhausted TermsEnum — presumably returns
+        // null here, but that is not guaranteed by the TermsEnum contract.
+        assertThat(iterator.next(), nullValue());
+    }
+
+    // Draws random single-character strings until one is found that is not
+    // whitespace and does not occur inside any of the given tokens, so it can
+    // safely act as the payload delimiter.
+    private String createRandomDelimiter(String[] tokens) {
+        while (true) {
+            String candidate = randomUnicodeOfLength(1);
+            if (Character.isWhitespace(candidate.charAt(0))) {
+                continue;
+            }
+            boolean collides = false;
+            for (String token : tokens) {
+                if (token.contains(candidate)) {
+                    collides = true;
+                    break;
+                }
+            }
+            if (!collides) {
+                return candidate;
+            }
+        }
+    }
+
+    // Serializes tokens and their payloads into the text format understood by
+    // the delimited_payload_filter: each token is followed by the delimiter
+    // and its encoded payload (omitted when the payload is empty), separated
+    // by spaces. payloadCounter tracks which occurrence of a repeated token
+    // we are on, so the k-th occurrence gets the k-th payload from the list.
+    private String createString(String[] tokens, Map<String, List<BytesRef>> payloads, int encoding, char delimiter) {
+        String resultString = "";
+        ObjectIntHashMap<String> payloadCounter = new ObjectIntHashMap<>();
+        for (String token : tokens) {
+            if (!payloadCounter.containsKey(token)) {
+                payloadCounter.putIfAbsent(token, 0);
+            } else {
+                payloadCounter.put(token, payloadCounter.get(token) + 1);
+            }
+            resultString = resultString + token;
+            BytesRef payload = payloads.get(token).get(payloadCounter.get(token));
+            if (payload.length > 0) {
+                resultString = resultString + delimiter;
+                // encoding selects how the raw payload bytes are rendered:
+                // 0 = float, 1 = int, 2 = identity (utf8 text)
+                switch (encoding) {
+                case 0: {
+                    resultString = resultString + Float.toString(PayloadHelper.decodeFloat(payload.bytes, payload.offset));
+                    break;
+                }
+                case 1: {
+                    resultString = resultString + Integer.toString(PayloadHelper.decodeInt(payload.bytes, payload.offset));
+                    break;
+                }
+                case 2: {
+                    resultString = resultString + payload.utf8ToString();
+                    break;
+                }
+                default: {
+                    throw new ElasticsearchException("unsupported encoding type");
+                }
+                }
+            }
+            resultString = resultString + " ";
+        }
+        return resultString;
+    }
+
+    // For every token occurrence, randomly generates either an encoded payload
+    // (float, int, or random-unicode identity depending on 'encoding') or an
+    // empty BytesRef meaning "no payload". The list per token is ordered by
+    // occurrence, matching the consumption order in createString.
+    private Map<String, List<BytesRef>> createPayloads(String[] tokens, int encoding) {
+        Map<String, List<BytesRef>> payloads = new HashMap<>();
+        for (String token : tokens) {
+            if (payloads.get(token) == null) {
+                payloads.put(token, new ArrayList<BytesRef>());
+            }
+            boolean createPayload = randomBoolean();
+            if (createPayload) {
+                switch (encoding) {
+                case 0: {
+                    float theFloat = randomFloat();
+                    payloads.get(token).add(new BytesRef(PayloadHelper.encodeFloat(theFloat)));
+                    break;
+                }
+                case 1: {
+                    payloads.get(token).add(new BytesRef(PayloadHelper.encodeInt(randomInt())));
+                    break;
+                }
+                case 2: {
+                    String payload = randomUnicodeOfLengthBetween(50, 100);
+                    // whitespace would break the whitespace tokenizer, so
+                    // replace any whitespace character with 'w'
+                    for (int c = 0; c < payload.length(); c++) {
+                        if (Character.isWhitespace(payload.charAt(c))) {
+                            payload = payload.replace(payload.charAt(c), 'w');
+                        }
+                    }
+                    payloads.get(token).add(new BytesRef(payload));
+                    break;
+                }
+                default: {
+                    throw new ElasticsearchException("unsupported encoding type");
+                }
+                }
+            } else {
+                // empty payload marks "no payload for this occurrence"
+                payloads.get(token).add(new BytesRef());
+            }
+        }
+        return payloads;
+    }
+
+    // Returns 3-15 tokens drawn with repetition from a fixed 4-word
+    // vocabulary; duplicates are intentional so repeated tokens get multiple
+    // payload occurrences.
+    // NOTE(review): method name has a typo ("crate" -> "create"); kept as-is
+    // because it is referenced by name elsewhere in this class.
+    private String[] crateRandomTokens() {
+        String[] tokens = { "the", "quick", "brown", "fox" };
+        int numTokensWithDuplicates = randomIntBetween(3, 15);
+        String[] finalTokens = new String[numTokensWithDuplicates];
+        for (int i = 0; i < numTokensWithDuplicates; i++) {
+            finalTokens[i] = tokens[randomIntBetween(0, tokens.length - 1)];
+        }
+        return finalTokens;
+    }
+
+    // like testSimpleTermVectors but we create fields with no term vectors
+    // Half the fields (randomly) store no term vectors, forcing the API to
+    // generate them on the fly; every field must still produce the full brown
+    // fox term vector. Payloads are skipped in the check because on-the-fly
+    // generation uses MemoryIndex, which does not support them.
+    @Test
+    public void testSimpleTermVectorsWithGenerate() throws IOException {
+        String[] fieldNames = new String[10];
+        for (int i = 0; i < fieldNames.length; i++) {
+            fieldNames[i] = "field" + String.valueOf(i);
+        }
+
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1").startObject("properties");
+        XContentBuilder source = jsonBuilder().startObject();
+        for (String field : fieldNames) {
+            mapping.startObject(field)
+                    .field("type", "string")
+                    .field("term_vector", randomBoolean() ? "with_positions_offsets_payloads" : "no")
+                    .field("analyzer", "tv_test")
+                    .endObject();
+            source.field(field, "the quick brown fox jumps over the lazy dog");
+        }
+        mapping.endObject().endObject().endObject();
+        source.endObject();
+
+        assertAcked(prepareCreate("test")
+                .addMapping("type1", mapping)
+                .setSettings(settingsBuilder()
+                        .put(indexSettings())
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .putArray("index.analysis.analyzer.tv_test.filter", "type_as_payload", "lowercase")));
+
+        ensureGreen();
+
+        for (int i = 0; i < 10; i++) {
+            client().prepareIndex("test", "type1", Integer.toString(i))
+                    .setSource(source)
+                    .execute().actionGet();
+            refresh();
+        }
+
+        for (int i = 0; i < 10; i++) {
+            TermVectorsResponse response = client().prepareTermVectors("test", "type1", Integer.toString(i))
+                    .setPayloads(true)
+                    .setOffsets(true)
+                    .setPositions(true)
+                    .setSelectedFields(fieldNames)
+                    .execute().actionGet();
+            assertThat("doc id: " + i + " doesn't exists but should", response.isExists(), equalTo(true));
+            Fields fields = response.getFields();
+            assertThat(fields.size(), equalTo(fieldNames.length));
+            for (String fieldName : fieldNames) {
+                // MemoryIndex does not support payloads
+                checkBrownFoxTermVector(fields, fieldName, false);
+            }
+        }
+    }
+
+    // Asserts that the given field contains exactly the term vector of
+    // "the quick brown fox jumps over the lazy dog": 8 sorted terms with the
+    // hard-coded frequencies, positions and offsets below. When withPayloads
+    // is set, every position must additionally carry the "word" payload
+    // attached by the type_as_payload filter.
+    private void checkBrownFoxTermVector(Fields fields, String fieldName, boolean withPayloads) throws IOException {
+        String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"};
+        int[] freq = {1, 1, 1, 1, 1, 1, 1, 2};
+        int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}};
+        int[][] startOffset = {{10}, {40}, {16}, {20}, {35}, {26}, {4}, {0, 31}};
+        int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
+
+        Terms terms = fields.terms(fieldName);
+        assertThat(terms.size(), equalTo(8l));
+        TermsEnum iterator = terms.iterator();
+        for (int j = 0; j < values.length; j++) {
+            String string = values[j];
+            BytesRef next = iterator.next();
+            assertThat(next, notNullValue());
+            assertThat("expected " + string, string, equalTo(next.utf8ToString()));
+            assertThat(next, notNullValue());
+            // do not test ttf or doc frequency, because here we have many
+            // shards and do not know how documents are distributed
+            PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL);
+            assertThat(docsAndPositions.nextDoc(), equalTo(0));
+            assertThat(freq[j], equalTo(docsAndPositions.freq()));
+            int[] termPos = pos[j];
+            int[] termStartOffset = startOffset[j];
+            int[] termEndOffset = endOffset[j];
+            assertThat(termPos.length, equalTo(freq[j]));
+            assertThat(termStartOffset.length, equalTo(freq[j]));
+            assertThat(termEndOffset.length, equalTo(freq[j]));
+            for (int k = 0; k < freq[j]; k++) {
+                int nextPosition = docsAndPositions.nextPosition();
+                assertThat("term: " + string, nextPosition, equalTo(termPos[k]));
+                assertThat("term: " + string, docsAndPositions.startOffset(), equalTo(termStartOffset[k]));
+                assertThat("term: " + string, docsAndPositions.endOffset(), equalTo(termEndOffset[k]));
+                if (withPayloads) {
+                    assertThat("term: " + string, docsAndPositions.getPayload(), equalTo(new BytesRef("word")));
+                }
+            }
+        }
+        // all 8 expected terms consumed; the enum must be exhausted
+        assertThat(iterator.next(), nullValue());
+    }
+
+    // Indexes identical documents into one index that stores term vectors and
+    // one that does not, then verifies the on-the-fly generated term vectors
+    // from the second index are identical (terms, stats, positions, offsets)
+    // to the stored ones from the first.
+    @Test
+    public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionException, InterruptedException {
+        // setup indices
+        String[] indexNames = new String[] {"with_tv", "without_tv"};
+        assertAcked(prepareCreate(indexNames[0])
+                .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets,analyzer=keyword"));
+        assertAcked(prepareCreate(indexNames[1])
+                .addMapping("type1", "field1", "type=string,term_vector=no,analyzer=keyword"));
+        ensureGreen();
+
+        // index documents with and without term vectors
+        String[] content = new String[]{
+                "Generating a random permutation of a sequence (such as when shuffling cards).",
+                "Selecting a random sample of a population (important in statistical sampling).",
+                "Allocating experimental units via random assignment to a treatment or control condition.",
+                "Generating random numbers: see Random number generation.",
+                "Selecting a random sample of a population (important in statistical sampling).",
+                "Allocating experimental units via random assignment to a treatment or control condition.",
+                "Transforming a data stream (such as when using a scrambler in telecommunications)."};
+
+        List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+        for (String indexName : indexNames) {
+            for (int id = 0; id < content.length; id++) {
+                indexBuilders.add(client().prepareIndex()
+                        .setIndex(indexName)
+                        .setType("type1")
+                        .setId(String.valueOf(id))
+                        .setSource("field1", content[id]));
+            }
+        }
+        indexRandom(true, indexBuilders);
+
+        // request tvs and compare from each index
+        for (int id = 0; id < content.length; id++) {
+            Fields[] fields = new Fields[2];
+            for (int j = 0; j < indexNames.length; j++) {
+                // use prepareTermVectors (plural) for consistency with every
+                // other call site in this class; prepareTermVector is the
+                // deprecated singular variant
+                TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id))
+                        .setOffsets(true)
+                        .setPositions(true)
+                        .setSelectedFields("field1")
+                        .get();
+                assertThat("doc with index: " + indexNames[j] + ", type1 and id: " + id, resp.isExists(), equalTo(true));
+                fields[j] = resp.getFields();
+            }
+            compareTermVectors("field1", fields[0], fields[1]);
+        }
+    }
+
+    // Asserts that two term vectors for the same field are identical: same
+    // number of terms, and per term the same bytes, doc freq, total term
+    // freq, per-doc freq, and per-occurrence positions and offsets.
+    private void compareTermVectors(String fieldName, Fields fields0, Fields fields1) throws IOException {
+        Terms terms0 = fields0.terms(fieldName);
+        Terms terms1 = fields1.terms(fieldName);
+        assertThat(terms0, notNullValue());
+        assertThat(terms1, notNullValue());
+        assertThat(terms0.size(), equalTo(terms1.size()));
+
+        TermsEnum iter0 = terms0.iterator();
+        TermsEnum iter1 = terms1.iterator();
+        for (int i = 0; i < terms0.size(); i++) {
+            BytesRef next0 = iter0.next();
+            assertThat(next0, notNullValue());
+            BytesRef next1 = iter1.next();
+            assertThat(next1, notNullValue());
+
+            // compare field value
+            String string0 = next0.utf8ToString();
+            String string1 = next1.utf8ToString();
+            assertThat("expected: " + string0, string0, equalTo(string1));
+
+            // compare df and ttf
+            assertThat("term: " + string0, iter0.docFreq(), equalTo(iter1.docFreq()));
+            assertThat("term: " + string0, iter0.totalTermFreq(), equalTo(iter1.totalTermFreq()));
+
+            // compare freq and docs
+            PostingsEnum docsAndPositions0 = iter0.postings(null, null, PostingsEnum.ALL);
+            PostingsEnum docsAndPositions1 = iter1.postings(null, null, PostingsEnum.ALL);
+            assertThat("term: " + string0, docsAndPositions0.nextDoc(), equalTo(docsAndPositions1.nextDoc()));
+            assertThat("term: " + string0, docsAndPositions0.freq(), equalTo(docsAndPositions1.freq()));
+
+            // compare position, start offsets and end offsets
+            for (int j = 0; j < docsAndPositions0.freq(); j++) {
+                assertThat("term: " + string0, docsAndPositions0.nextPosition(), equalTo(docsAndPositions1.nextPosition()));
+                assertThat("term: " + string0, docsAndPositions0.startOffset(), equalTo(docsAndPositions1.startOffset()));
+                assertThat("term: " + string0, docsAndPositions0.endOffset(), equalTo(docsAndPositions1.endOffset()));
+            }
+        }
+        // both enums must be exhausted after terms0.size() terms
+        assertThat(iter0.next(), nullValue());
+        assertThat(iter1.next(), nullValue());
+    }
+
+    // Verifies that a wildcard field selection ("field*") resolves to all 25
+    // fields and that term vectors are returned for every one — including
+    // those mapped with term_vector=no, which must be generated on the fly.
+    @Test
+    public void testSimpleWildCards() throws IOException {
+        int numFields = 25;
+
+        XContentBuilder mapping = jsonBuilder().startObject().startObject("type1").startObject("properties");
+        XContentBuilder source = jsonBuilder().startObject();
+        for (int i = 0; i < numFields; i++) {
+            mapping.startObject("field" + i)
+                    .field("type", "string")
+                    .field("term_vector", randomBoolean() ? "yes" : "no")
+                    .endObject();
+            source.field("field" + i, "some text here");
+        }
+        source.endObject();
+        mapping.endObject().endObject().endObject();
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("type1", mapping));
+        ensureGreen();
+
+        client().prepareIndex("test", "type1", "0").setSource(source).get();
+        refresh();
+
+        TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "type1", "0").setSelectedFields("field*").get();
+        assertThat("Doc doesn't exists but should", response.isExists(), equalTo(true));
+        assertThat(response.getIndex(), equalTo("test"));
+        assertThat("All term vectors should have been generated", response.getFields().size(), equalTo(numFields));
+    }
+
+    // Compares term vectors of an indexed document against those of an
+    // "artificial" document (same content passed via setDoc, never indexed):
+    // terms, statistics, positions and offsets must all match. Routing pins
+    // the artificial request to the shard holding the existing doc so the
+    // field/term statistics come from the same shard.
+    @Test
+    public void testArtificialVsExisting() throws ExecutionException, InterruptedException, IOException {
+        // setup indices
+        Settings.Builder settings = settingsBuilder()
+                .put(indexSettings())
+                .put("index.analysis.analyzer", "standard");
+        assertAcked(prepareCreate("test")
+                .setSettings(settings)
+                .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets"));
+        ensureGreen();
+
+        // index documents existing document
+        String[] content = new String[]{
+                "Generating a random permutation of a sequence (such as when shuffling cards).",
+                "Selecting a random sample of a population (important in statistical sampling).",
+                "Allocating experimental units via random assignment to a treatment or control condition.",
+                "Generating random numbers: see Random number generation."};
+
+        List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+        for (int i = 0; i < content.length; i++) {
+            indexBuilders.add(client().prepareIndex()
+                    .setIndex("test")
+                    .setType("type1")
+                    .setId(String.valueOf(i))
+                    .setSource("field1", content[i]));
+        }
+        indexRandom(true, indexBuilders);
+
+        for (int i = 0; i < content.length; i++) {
+            // request tvs from existing document
+            TermVectorsResponse respExisting = client().prepareTermVectors("test", "type1", String.valueOf(i))
+                    .setOffsets(true)
+                    .setPositions(true)
+                    .setFieldStatistics(true)
+                    .setTermStatistics(true)
+                    .get();
+            assertThat("doc with index: test, type1 and id: existing", respExisting.isExists(), equalTo(true));
+
+            // request tvs from artificial document
+            TermVectorsResponse respArtificial = client().prepareTermVectors()
+                    .setIndex("test")
+                    .setType("type1")
+                    .setRouting(String.valueOf(i)) // ensure we get the stats from the same shard as existing doc
+                    .setDoc(jsonBuilder()
+                            .startObject()
+                                .field("field1", content[i])
+                            .endObject())
+                    .setOffsets(true)
+                    .setPositions(true)
+                    .setFieldStatistics(true)
+                    .setTermStatistics(true)
+                    .get();
+            assertThat("doc with index: test, type1 and id: " + String.valueOf(i), respArtificial.isExists(), equalTo(true));
+
+            // compare existing tvs with artificial
+            compareTermVectors("field1", respExisting.getFields(), respArtificial.getFields());
+        }
+    }
+
+ @Test
+ public void testArtificialNoDoc() throws IOException {
+ // setup indices
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "standard");
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "field1", "type=string"));
+ ensureGreen();
+
+ // request tvs from artificial document
+ String text = "the quick brown fox jumps over the lazy dog";
+ TermVectorsResponse resp = client().prepareTermVectors()
+ .setIndex("test")
+ .setType("type1")
+ .setDoc(jsonBuilder()
+ .startObject()
+ .field("field1", text)
+ .endObject())
+ .setOffsets(true)
+ .setPositions(true)
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .get();
+ assertThat(resp.isExists(), equalTo(true));
+ checkBrownFoxTermVector(resp.getFields(), "field1", false);
+ }
+
+ @Test
+ public void testArtificialNonExistingField() throws Exception {
+ // setup indices
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "standard");
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "field1", "type=string"));
+ ensureGreen();
+
+ // index just one doc
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ indexBuilders.add(client().prepareIndex()
+ .setIndex("test")
+ .setType("type1")
+ .setId("1")
+ .setRouting("1")
+ .setSource("field1", "some text"));
+ indexRandom(true, indexBuilders);
+
+ // request tvs from artificial document
+ XContentBuilder doc = jsonBuilder()
+ .startObject()
+ .field("field1", "the quick brown fox jumps over the lazy dog")
+ .field("non_existing", "the quick brown fox jumps over the lazy dog")
+ .endObject();
+
+ for (int i = 0; i < 2; i++) {
+ TermVectorsResponse resp = client().prepareTermVectors()
+ .setIndex("test")
+ .setType("type1")
+ .setDoc(doc)
+ .setRouting("" + i)
+ .setOffsets(true)
+ .setPositions(true)
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .get();
+ assertThat(resp.isExists(), equalTo(true));
+ checkBrownFoxTermVector(resp.getFields(), "field1", false);
+ // we should have created a mapping for this field
+ waitForMappingOnMaster("test", "type1", "non_existing");
+ // and return the generated term vectors
+ checkBrownFoxTermVector(resp.getFields(), "non_existing", false);
+ }
+ }
+
+ @Test
+ public void testPerFieldAnalyzer() throws IOException {
+ int numFields = 25;
+
+ // setup mapping and document source
+ Set<String> withTermVectors = new HashSet<>();
+ XContentBuilder mapping = jsonBuilder().startObject().startObject("type1").startObject("properties");
+ XContentBuilder source = jsonBuilder().startObject();
+ for (int i = 0; i < numFields; i++) {
+ String fieldName = "field" + i;
+ if (randomBoolean()) {
+ withTermVectors.add(fieldName);
+ }
+ mapping.startObject(fieldName)
+ .field("type", "string")
+ .field("term_vector", withTermVectors.contains(fieldName) ? "yes" : "no")
+ .endObject();
+ source.field(fieldName, "some text here");
+ }
+ source.endObject();
+ mapping.endObject().endObject().endObject();
+
+ // setup indices with mapping
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "standard");
+ assertAcked(prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .setSettings(settings)
+ .addMapping("type1", mapping));
+ ensureGreen();
+
+ // index a single document with prepared source
+ client().prepareIndex("test", "type1", "0").setSource(source).get();
+ refresh();
+
+ // create random per_field_analyzer and selected fields
+ Map<String, String> perFieldAnalyzer = new HashMap<>();
+ Set<String> selectedFields = new HashSet<>();
+ for (int i = 0; i < numFields; i++) {
+ if (randomBoolean()) {
+ perFieldAnalyzer.put("field" + i, "keyword");
+ }
+ if (randomBoolean()) {
+ perFieldAnalyzer.put("non_existing" + i, "keyword");
+ }
+ if (randomBoolean()) {
+ selectedFields.add("field" + i);
+ }
+ if (randomBoolean()) {
+ selectedFields.add("non_existing" + i);
+ }
+ }
+
+ // selected fields not specified
+ TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), "type1", "0")
+ .setPerFieldAnalyzer(perFieldAnalyzer)
+ .get();
+
+ // should return all fields that have terms vectors, some with overridden analyzer
+ checkAnalyzedFields(response.getFields(), withTermVectors, perFieldAnalyzer);
+
+ // selected fields specified including some not in the mapping
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "0")
+ .setSelectedFields(selectedFields.toArray(Strings.EMPTY_ARRAY))
+ .setPerFieldAnalyzer(perFieldAnalyzer)
+ .get();
+
+ // should return only the specified valid fields, with some with overridden analyzer
+ checkAnalyzedFields(response.getFields(), selectedFields, perFieldAnalyzer);
+ }
+
+ private void checkAnalyzedFields(Fields fieldsObject, Set<String> fieldNames, Map<String, String> perFieldAnalyzer) throws IOException {
+ Set<String> validFields = new HashSet<>();
+ for (String fieldName : fieldNames){
+ if (fieldName.startsWith("non_existing")) {
+ assertThat("Non existing field\"" + fieldName + "\" should not be returned!", fieldsObject.terms(fieldName), nullValue());
+ continue;
+ }
+ Terms terms = fieldsObject.terms(fieldName);
+ assertThat("Existing field " + fieldName + "should have been returned", terms, notNullValue());
+ // check overridden by keyword analyzer ...
+ if (perFieldAnalyzer.containsKey(fieldName)) {
+ TermsEnum iterator = terms.iterator();
+ assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here"));
+ assertThat(iterator.next(), nullValue());
+ }
+ validFields.add(fieldName);
+ }
+ // ensure no other fields are returned
+ assertThat("More fields than expected are returned!", fieldsObject.size(), equalTo(validFields.size()));
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+ @Test
+ public void testDfs() throws ExecutionException, InterruptedException, IOException {
+ logger.info("Setting up the index ...");
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "standard")
+ .put("index.number_of_shards", randomIntBetween(2, 10)); // we need at least 2 shards
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "text", "type=string"));
+ ensureGreen();
+
+ int numDocs = scaledRandomIntBetween(25, 100);
+ logger.info("Indexing {} documents...", numDocs);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("test", "type1", i + "").setSource("text", "cat"));
+ }
+ indexRandom(true, builders);
+
+ XContentBuilder expectedStats = jsonBuilder()
+ .startObject()
+ .startObject("text")
+ .startObject("field_statistics")
+ .field("sum_doc_freq", numDocs)
+ .field("doc_count", numDocs)
+ .field("sum_ttf", numDocs)
+ .endObject()
+ .startObject("terms")
+ .startObject("cat")
+ .field("doc_freq", numDocs)
+ .field("ttf", numDocs)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ logger.info("Without dfs 'cat' should appear strictly less than {} times.", numDocs);
+ TermVectorsResponse response = client().prepareTermVectors("test", "type1", randomIntBetween(0, numDocs - 1) + "")
+ .setSelectedFields("text")
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .get();
+ checkStats(response.getFields(), expectedStats, false);
+
+ logger.info("With dfs 'cat' should appear exactly {} times.", numDocs);
+ response = client().prepareTermVectors("test", "type1", randomIntBetween(0, numDocs - 1) + "")
+ .setSelectedFields("text")
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .setDfs(true)
+ .get();
+ checkStats(response.getFields(), expectedStats, true);
+ }
+
+ private void checkStats(Fields fields, XContentBuilder xContentBuilder, boolean isEqual) throws IOException {
+ Map<String, Object> stats = JsonXContent.jsonXContent.createParser(xContentBuilder.bytes()).map();
+ assertThat("number of fields expected:", fields.size(), equalTo(stats.size()));
+ for (String fieldName : fields) {
+ logger.info("Checking field statistics for field: {}", fieldName);
+ Terms terms = fields.terms(fieldName);
+ Map<String, Integer> fieldStatistics = getFieldStatistics(stats, fieldName);
+ String msg = "field: " + fieldName + " ";
+ assertThat(msg + "sum_doc_freq:",
+ (int) terms.getSumDocFreq(),
+ equalOrLessThanTo(fieldStatistics.get("sum_doc_freq"), isEqual));
+ assertThat(msg + "doc_count:",
+ terms.getDocCount(),
+ equalOrLessThanTo(fieldStatistics.get("doc_count"), isEqual));
+ assertThat(msg + "sum_ttf:",
+ (int) terms.getSumTotalTermFreq(),
+ equalOrLessThanTo(fieldStatistics.get("sum_ttf"), isEqual));
+
+ final TermsEnum termsEnum = terms.iterator();
+ BytesRef text;
+ while((text = termsEnum.next()) != null) {
+ String term = text.utf8ToString();
+ logger.info("Checking term statistics for term: ({}, {})", fieldName, term);
+ Map<String, Integer> termStatistics = getTermStatistics(stats, fieldName, term);
+ msg = "term: (" + fieldName + "," + term + ") ";
+ assertThat(msg + "doc_freq:",
+ termsEnum.docFreq(),
+ equalOrLessThanTo(termStatistics.get("doc_freq"), isEqual));
+ assertThat(msg + "ttf:",
+ (int) termsEnum.totalTermFreq(),
+ equalOrLessThanTo(termStatistics.get("ttf"), isEqual));
+ }
+ }
+ }
+
+ private Map<String, Integer> getFieldStatistics(Map<String, Object> stats, String fieldName) throws IOException {
+ return (Map<String, Integer>) ((Map<String, Object>) stats.get(fieldName)).get("field_statistics");
+ }
+
+ private Map<String, Integer> getTermStatistics(Map<String, Object> stats, String fieldName, String term) {
+ return (Map<String, Integer>) ((Map<String, Object>) ((Map<String, Object>) stats.get(fieldName)).get("terms")).get(term);
+ }
+
+ private Matcher<Integer> equalOrLessThanTo(Integer value, boolean isEqual) {
+ if (isEqual) {
+ return equalTo(value);
+ }
+ return lessThan(value);
+ }
+
+ @Test
+ public void testTermVectorsWithVersion() {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ TermVectorsResponse response = client().prepareTermVectors("test", "type1", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ // From Lucene index:
+ refresh();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ logger.info("--> index doc 1 again, so increasing the version");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ // From translog:
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(2).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ // From Lucene index:
+ refresh();
+
+ // version 0 means ignore version, which is the default
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+ }
+
+ @Test
+ public void testFilterLength() throws ExecutionException, InterruptedException, IOException {
+ logger.info("Setting up the index ...");
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "keyword");
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "tags", "type=string"));
+ ensureYellow();
+
+ int numTerms = scaledRandomIntBetween(10, 50);
+ logger.info("Indexing one document with tags of increasing length ...");
+ List<String> tags = new ArrayList<>();
+ for (int i = 0; i < numTerms; i++) {
+ String tag = "a";
+ for (int j = 0; j < i; j++) {
+ tag += "a";
+ }
+ tags.add(tag);
+ }
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("tags", tags));
+
+ logger.info("Checking best tags by longest to shortest size ...");
+ TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings();
+ filterSettings.maxNumTerms = numTerms;
+ TermVectorsResponse response;
+ for (int i = 0; i < numTerms; i++) {
+ filterSettings.minWordLength = numTerms - i;
+ response = client().prepareTermVectors("test", "type1", "1")
+ .setSelectedFields("tags")
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .setFilterSettings(filterSettings)
+ .get();
+ checkBestTerms(response.getFields().terms("tags"), tags.subList((numTerms - i - 1), numTerms));
+ }
+ }
+
+ @Test
+ public void testFilterTermFreq() throws ExecutionException, InterruptedException, IOException {
+ logger.info("Setting up the index ...");
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "keyword");
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "tags", "type=string"));
+ ensureYellow();
+
+ logger.info("Indexing one document with tags of increasing frequencies ...");
+ int numTerms = scaledRandomIntBetween(10, 50);
+ List<String> tags = new ArrayList<>();
+ List<String> uniqueTags = new ArrayList<>();
+ String tag;
+ for (int i = 0; i < numTerms; i++) {
+ tag = "tag_" + i;
+ tags.add(tag);
+ for (int j = 0; j < i; j++) {
+ tags.add(tag);
+ }
+ uniqueTags.add(tag);
+ }
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("tags", tags));
+
+ logger.info("Checking best tags by highest to lowest term freq ...");
+ TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings();
+ TermVectorsResponse response;
+ for (int i = 0; i < numTerms; i++) {
+ filterSettings.maxNumTerms = i + 1;
+ response = client().prepareTermVectors("test", "type1", "1")
+ .setSelectedFields("tags")
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .setFilterSettings(filterSettings)
+ .get();
+ checkBestTerms(response.getFields().terms("tags"), uniqueTags.subList((numTerms - i - 1), numTerms));
+ }
+ }
+
+ @Test
+ public void testFilterDocFreq() throws ExecutionException, InterruptedException, IOException {
+ logger.info("Setting up the index ...");
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer", "keyword")
+ .put("index.number_of_shards", 1); // no dfs
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type1", "tags", "type=string"));
+ ensureYellow();
+
+ int numDocs = scaledRandomIntBetween(10, 50); // as many terms as there are docs
+ logger.info("Indexing {} documents with tags of increasing dfs ...", numDocs);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ List<String> tags = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ tags.add("tag_" + i);
+ builders.add(client().prepareIndex("test", "type1", i + "").setSource("tags", tags));
+ }
+ indexRandom(true, builders);
+
+ logger.info("Checking best terms by highest to lowest idf ...");
+ TermVectorsRequest.FilterSettings filterSettings = new TermVectorsRequest.FilterSettings();
+ TermVectorsResponse response;
+ for (int i = 0; i < numDocs; i++) {
+ filterSettings.maxNumTerms = i + 1;
+ response = client().prepareTermVectors("test", "type1", (numDocs - 1) + "")
+ .setSelectedFields("tags")
+ .setFieldStatistics(true)
+ .setTermStatistics(true)
+ .setFilterSettings(filterSettings)
+ .get();
+ checkBestTerms(response.getFields().terms("tags"), tags.subList((numDocs - i - 1), numDocs));
+ }
+ }
+
+ private void checkBestTerms(Terms terms, List<String> expectedTerms) throws IOException {
+ final TermsEnum termsEnum = terms.iterator();
+ List<String> bestTerms = new ArrayList<>();
+ BytesRef text;
+ while((text = termsEnum.next()) != null) {
+ bestTerms.add(text.utf8ToString());
+ }
+ Collections.sort(expectedTerms);
+ Collections.sort(bestTerms);
+ assertArrayEquals(expectedTerms.toArray(), bestTerms.toArray());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java
new file mode 100644
index 0000000000..2fdd8df354
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsTests.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvectors;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class MultiTermVectorsTests extends AbstractTermVectorsTests {
+
+ @Test
+ public void testDuelESLucene() throws Exception {
+ AbstractTermVectorsTests.TestFieldSetting[] testFieldSettings = getFieldSettings();
+ createIndexBasedOnFieldSettings("test", "alias", testFieldSettings);
+ //we generate as many docs as many shards we have
+ TestDoc[] testDocs = generateTestDocs("test", testFieldSettings);
+
+ DirectoryReader directoryReader = indexDocsWithLucene(testDocs);
+ AbstractTermVectorsTests.TestConfig[] testConfigs = generateTestConfigs(20, testDocs, testFieldSettings);
+
+ MultiTermVectorsRequestBuilder requestBuilder = client().prepareMultiTermVectors();
+ for (AbstractTermVectorsTests.TestConfig test : testConfigs) {
+ requestBuilder.add(getRequestForConfig(test).request());
+ }
+
+ MultiTermVectorsItemResponse[] responseItems = requestBuilder.get().getResponses();
+
+ for (int i = 0; i < testConfigs.length; i++) {
+ TestConfig test = testConfigs[i];
+ try {
+ MultiTermVectorsItemResponse item = responseItems[i];
+ if (test.expectedException != null) {
+ assertTrue(item.isFailed());
+ continue;
+ } else if (item.isFailed()) {
+ fail(item.getFailure().getMessage());
+ }
+ Fields luceneTermVectors = getTermVectorsFromLucene(directoryReader, test.doc);
+ validateResponse(item.getResponse(), luceneTermVectors, test);
+ } catch (Throwable t) {
+ throw new Exception("Test exception while running " + test.toString(), t);
+ }
+ }
+
+ }
+
+ @Test
+ public void testMissingIndexThrowsMissingIndex() throws Exception {
+ TermVectorsRequestBuilder requestBuilder = client().prepareTermVectors("testX", "typeX", Integer.toString(1));
+ MultiTermVectorsRequestBuilder mtvBuilder = client().prepareMultiTermVectors();
+ mtvBuilder.add(requestBuilder.request());
+ MultiTermVectorsResponse response = mtvBuilder.execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getFailure().getMessage(), equalTo("[" + response.getResponses()[0].getIndex() + "] missing"));
+ }
+
+ @Test
+ public void testMultiTermVectorsWithVersion() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ MultiTermVectorsResponse response = client().prepareMultiTermVectors().add(indexOrAlias(), "type1", "1").get();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).get();
+ }
+
+ // Version from translog
+ response = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[0].getResponse().getFields().terms("field"), new String[]{"value1"});
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[1].getResponse().getFields().terms("field"), new String[]{"value1"});
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+ //Version from Lucene index
+ refresh();
+ response = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[0].getResponse().getFields().terms("field"), new String[]{"value1"});
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[1].getResponse().getFields().terms("field"), new String[]{"value1"});
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).get();
+ }
+
+ // Version from translog
+ response = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(Versions.MATCH_ANY))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(1))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(2))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[0].getResponse().getFields().terms("field"), new String[]{"value2"});
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[2].getResponse().getFields().terms("field"), new String[]{"value2"});
+
+
+ //Version from Lucene index
+ refresh();
+ response = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(Versions.MATCH_ANY))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(1))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").selectedFields("field").version(2))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[0].getResponse().getFields().terms("field"), new String[]{"value2"});
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ checkTermTexts(response.getResponses()[2].getResponse().getFields().terms("field"), new String[]{"value2"});
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+ private void checkTermTexts(Terms terms, String[] expectedTexts) throws IOException {
+ final TermsEnum termsEnum = terms.iterator();
+ for (String expectedText : expectedTexts) {
+ assertThat(termsEnum.next().utf8ToString(), equalTo(expectedText));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
new file mode 100644
index 0000000000..dab38d997f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java
@@ -0,0 +1,328 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.termvectors;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.TypeParsers;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TermVectorsUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void streamResponse() throws Exception {
+
+ TermVectorsResponse outResponse = new TermVectorsResponse("a", "b", "c");
+ outResponse.setExists(true);
+ writeStandardTermVector(outResponse);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorsResponse inResponse = new TermVectorsResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+
+ // see if correct
+ checkIfStandardTermVector(inResponse);
+
+ outResponse = new TermVectorsResponse("a", "b", "c");
+ writeEmptyTermVector(outResponse);
+ // write
+ outBuffer = new ByteArrayOutputStream();
+ out = new OutputStreamStreamOutput(outBuffer);
+ outResponse.writeTo(out);
+
+ // read
+ esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ esBuffer = new InputStreamStreamInput(esInBuffer);
+ inResponse = new TermVectorsResponse("a", "b", "c");
+ inResponse.readFrom(esBuffer);
+ assertTrue(inResponse.isExists());
+
+ }
+
+ private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ type.setStoreTermVectorOffsets(true);
+ type.setStoreTermVectorPayloads(false);
+ type.setStoreTermVectorPositions(true);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields fields = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
+ outResponse.setFields(fields, null, flags, fields);
+ outResponse.setExists(true);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
+
+ Directory dir = newDirectory();
+ IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
+
+ conf.setOpenMode(OpenMode.CREATE);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ FieldType type = new FieldType(TextField.TYPE_STORED);
+ type.setStoreTermVectorOffsets(true);
+ type.setStoreTermVectorPayloads(false);
+ type.setStoreTermVectorPositions(true);
+ type.setStoreTermVectors(true);
+ type.freeze();
+ Document d = new Document();
+ d.add(new Field("id", "abc", StringField.TYPE_STORED));
+ d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
+ d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
+
+ writer.updateDocument(new Term("id", "abc"), d);
+ writer.commit();
+ writer.close();
+ DirectoryReader dr = DirectoryReader.open(dir);
+ IndexSearcher s = new IndexSearcher(dr);
+ TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
+ ScoreDoc[] scoreDocs = search.scoreDocs;
+ int doc = scoreDocs[0].doc;
+ Fields termVectors = dr.getTermVectors(doc);
+ EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
+ outResponse.setFields(termVectors, null, flags, termVectors);
+ dr.close();
+ dir.close();
+
+ }
+
+ private void checkIfStandardTermVector(TermVectorsResponse inResponse) throws IOException {
+
+ Fields fields = inResponse.getFields();
+ assertThat(fields.terms("title"), Matchers.notNullValue());
+ assertThat(fields.terms("desc"), Matchers.notNullValue());
+ assertThat(fields.size(), equalTo(2));
+ }
+
+ @Test
+ public void testRestRequestParsing() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : [\"a\", \"b\",\"c\"], \"offsets\":false, \"positions\":false, \"payloads\":true}");
+
+ TermVectorsRequest tvr = new TermVectorsRequest(null, null, null);
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorsRequest.parseRequest(tvr, parser);
+
+ Set<String> fields = tvr.selectedFields();
+ assertThat(fields.contains("a"), equalTo(true));
+ assertThat(fields.contains("b"), equalTo(true));
+ assertThat(fields.contains("c"), equalTo(true));
+ assertThat(tvr.offsets(), equalTo(false));
+ assertThat(tvr.positions(), equalTo(false));
+ assertThat(tvr.payloads(), equalTo(true));
+ String additionalFields = "b,c ,d, e ";
+ RestTermVectorsAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields().size(), equalTo(5));
+ assertThat(fields.contains("d"), equalTo(true));
+ assertThat(fields.contains("e"), equalTo(true));
+
+ additionalFields = "";
+ RestTermVectorsAction.addFieldStringsFromParameter(tvr, additionalFields);
+
+ inputBytes = new BytesArray(" {\"offsets\":false, \"positions\":false, \"payloads\":true}");
+ tvr = new TermVectorsRequest(null, null, null);
+ parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorsRequest.parseRequest(tvr, parser);
+ additionalFields = "";
+ RestTermVectorsAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields(), equalTo(null));
+ additionalFields = "b,c ,d, e ";
+ RestTermVectorsAction.addFieldStringsFromParameter(tvr, additionalFields);
+ assertThat(tvr.selectedFields().size(), equalTo(4));
+
+ }
+
+ @Test
+ public void testRequestParsingThrowsException() throws Exception {
+ BytesReference inputBytes = new BytesArray(
+ " {\"fields\" : \"a, b,c \", \"offsets\":false, \"positions\":false, \"payloads\":true, \"meaningless_term\":2}");
+ TermVectorsRequest tvr = new TermVectorsRequest(null, null, null);
+ boolean threwException = false;
+ try {
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(inputBytes);
+ TermVectorsRequest.parseRequest(tvr, parser);
+ } catch (Exception e) {
+ threwException = true;
+ }
+ assertThat(threwException, equalTo(true));
+
+ }
+
+ @Test
+ public void streamRequest() throws IOException {
+
+ for (int i = 0; i < 10; i++) {
+ TermVectorsRequest request = new TermVectorsRequest("index", "type", "id");
+ request.offsets(random().nextBoolean());
+ request.fieldStatistics(random().nextBoolean());
+ request.payloads(random().nextBoolean());
+ request.positions(random().nextBoolean());
+ request.termStatistics(random().nextBoolean());
+ String parent = random().nextBoolean() ? "someParent" : null;
+ request.parent(parent);
+ String pref = random().nextBoolean() ? "somePreference" : null;
+ request.preference(pref);
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ request.writeTo(out);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+ TermVectorsRequest req2 = new TermVectorsRequest(null, null, null);
+ req2.readFrom(esBuffer);
+
+ assertThat(request.offsets(), equalTo(req2.offsets()));
+ assertThat(request.fieldStatistics(), equalTo(req2.fieldStatistics()));
+ assertThat(request.payloads(), equalTo(req2.payloads()));
+ assertThat(request.positions(), equalTo(req2.positions()));
+ assertThat(request.termStatistics(), equalTo(req2.termStatistics()));
+ assertThat(request.preference(), equalTo(pref));
+ assertThat(request.routing(), equalTo(parent));
+
+ }
+ }
+
+ @Test
+ public void testFieldTypeToTermVectorString() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(false);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(true);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat("with_positions_payloads", equalTo(ftOpts));
+ AllFieldMapper.Builder builder = new AllFieldMapper.Builder();
+ boolean exceptiontrown = false;
+ try {
+ TypeParsers.parseTermVector("", ftOpts, builder);
+ } catch (MapperParsingException e) {
+ exceptiontrown = true;
+ }
+ assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, equalTo(false));
+ }
+
+ @Test
+ public void testTermVectorStringGenerationWithoutPositions() throws Exception {
+ FieldType ft = new FieldType();
+ ft.setStoreTermVectorOffsets(true);
+ ft.setStoreTermVectorPayloads(true);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorPositions(false);
+ String ftOpts = AbstractFieldMapper.termVectorOptionsToString(ft);
+ assertThat(ftOpts, equalTo("with_offsets"));
+ }
+
+ @Test
+ public void testMultiParser() throws Exception {
+ byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json");
+ BytesReference bytes = new BytesArray(data);
+ MultiTermVectorsRequest request = new MultiTermVectorsRequest();
+ request.add(new TermVectorsRequest(), bytes);
+ checkParsedParameters(request);
+
+ data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest2.json");
+ bytes = new BytesArray(data);
+ request = new MultiTermVectorsRequest();
+ request.add(new TermVectorsRequest(), bytes);
+ checkParsedParameters(request);
+
+ }
+ void checkParsedParameters(MultiTermVectorsRequest request) {
+ Set<String> ids = new HashSet<>();
+ ids.add("1");
+ ids.add("2");
+ Set<String> fields = new HashSet<>();
+ fields.add("a");
+ fields.add("b");
+ fields.add("c");
+ for (TermVectorsRequest singleRequest : request.requests) {
+ assertThat(singleRequest.index(), equalTo("testidx"));
+ assertThat(singleRequest.type(), equalTo("test"));
+ assertThat(singleRequest.payloads(), equalTo(false));
+ assertThat(singleRequest.positions(), equalTo(false));
+ assertThat(singleRequest.offsets(), equalTo(false));
+ assertThat(singleRequest.termStatistics(), equalTo(true));
+ assertThat(singleRequest.fieldStatistics(), equalTo(false));
+ assertThat(singleRequest.id(),Matchers.anyOf(Matchers.equalTo("1"), Matchers.equalTo("2")));
+ assertThat(singleRequest.selectedFields(), equalTo(fields));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest1.json b/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest1.json
new file mode 100644
index 0000000000..fcb5e3a927
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest1.json
@@ -0,0 +1,13 @@
+{
+ "ids": ["1","2"],
+ "parameters": {
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads":false,
+ "offsets":false,
+ "positions":false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type":"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest2.json b/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest2.json
new file mode 100644
index 0000000000..a0709effe7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/termvectors/multiRequest2.json
@@ -0,0 +1,26 @@
+{
+ "docs": [
+ {
+ "_id": "1",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ },
+ {
+ "_id": "2",
+ "field_statistics": false,
+ "term_statistics": true,
+ "payloads": false,
+ "offsets": false,
+ "positions": false,
+ "fields":["a","b","c"],
+ "_index": "testidx",
+ "_type": "test"
+ }
+ ]
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
new file mode 100644
index 0000000000..4292b7df61
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.update;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testUpdateRequest() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type", "1");
+ // simple script
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .endObject());
+ Script script = request.script();
+ assertThat(script, notNullValue());
+ assertThat(script.getScript(), equalTo("script1"));
+ assertThat(script.getType(), equalTo(ScriptType.INLINE));
+ assertThat(script.getLang(), nullValue());
+ Map<String, Object> params = script.getParams();
+ assertThat(params, nullValue());
+
+ // script with params
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject().startObject("script").field("inline", "script1").startObject("params")
+ .field("param1", "value1").endObject().endObject().endObject());
+ script = request.script();
+ assertThat(script, notNullValue());
+ assertThat(script.getScript(), equalTo("script1"));
+ assertThat(script.getType(), equalTo(ScriptType.INLINE));
+ assertThat(script.getLang(), nullValue());
+ params = script.getParams();
+ assertThat(params, notNullValue());
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("param1").toString(), equalTo("value1"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
+ .endObject().field("inline", "script1").endObject().endObject());
+ script = request.script();
+ assertThat(script, notNullValue());
+ assertThat(script.getScript(), equalTo("script1"));
+ assertThat(script.getType(), equalTo(ScriptType.INLINE));
+ assertThat(script.getLang(), nullValue());
+ params = script.getParams();
+ assertThat(params, notNullValue());
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("param1").toString(), equalTo("value1"));
+
+ // script with params and upsert
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject().startObject("script").startObject("params").field("param1", "value1")
+ .endObject().field("inline", "script1").endObject().startObject("upsert").field("field1", "value1").startObject("compound")
+ .field("field2", "value2").endObject().endObject().endObject());
+ script = request.script();
+ assertThat(script, notNullValue());
+ assertThat(script.getScript(), equalTo("script1"));
+ assertThat(script.getType(), equalTo(ScriptType.INLINE));
+ assertThat(script.getLang(), nullValue());
+ params = script.getParams();
+ assertThat(params, notNullValue());
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("param1").toString(), equalTo("value1"));
+ Map<String, Object> upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject().startObject("upsert").field("field1", "value1").startObject("compound")
+ .field("field2", "value2").endObject().endObject().startObject("script").startObject("params").field("param1", "value1")
+ .endObject().field("inline", "script1").endObject().endObject());
+ script = request.script();
+ assertThat(script, notNullValue());
+ assertThat(script.getScript(), equalTo("script1"));
+ assertThat(script.getType(), equalTo(ScriptType.INLINE));
+ assertThat(script.getLang(), nullValue());
+ params = script.getParams();
+ assertThat(params, notNullValue());
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ // script with doc
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject().startObject("doc").field("field1", "value1").startObject("compound")
+ .field("field2", "value2").endObject().endObject().endObject());
+ Map<String, Object> doc = request.doc().sourceAsMap();
+ assertThat(doc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testUpdateRequestOldAPI() throws Exception {
+ UpdateRequest request = new UpdateRequest("test", "type", "1");
+ // simple script
+ request.source(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject());
+ assertThat(request.scriptString(), equalTo("script1"));
+
+ // script with params
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .field("script", "script1")
+ .startObject("params").field("param1", "value1").endObject()
+ .endObject());
+ assertThat(request.scriptString(), notNullValue());
+ assertThat(request.scriptString(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.scriptString(), notNullValue());
+ assertThat(request.scriptString(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+
+ // script with params and upsert
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ assertThat(request.scriptString(), notNullValue());
+ assertThat(request.scriptString(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ Map<String, Object> upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.scriptString(), notNullValue());
+ assertThat(request.scriptString(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("params").field("param1", "value1").endObject()
+ .startObject("upsert").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .field("script", "script1")
+ .endObject());
+ assertThat(request.scriptString(), notNullValue());
+ assertThat(request.scriptString(), equalTo("script1"));
+ assertThat(request.scriptParams().get("param1").toString(), equalTo("value1"));
+ upsertDoc = XContentHelper.convertToMap(request.upsertRequest().source(), true).v2();
+ assertThat(upsertDoc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) upsertDoc.get("compound")).get("field2").toString(), equalTo("value2"));
+
+ // script with doc
+ request = new UpdateRequest("test", "type", "1");
+ request.source(XContentFactory.jsonBuilder().startObject()
+ .startObject("doc").field("field1", "value1").startObject("compound").field("field2", "value2").endObject().endObject()
+ .endObject());
+ Map<String, Object> doc = request.doc().sourceAsMap();
+ assertThat(doc.get("field1").toString(), equalTo("value1"));
+ assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
new file mode 100644
index 0000000000..461c3272b3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java
@@ -0,0 +1,1082 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.aliases;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesMissingException;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.CollectionAssertions.hasKey;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@Slow
+public class IndexAliasesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work now");
+ IndexResponse indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test"));
+
+ logger.info("--> creating index [test_x]");
+ createIndex("test_x");
+
+ ensureGreen();
+
+ logger.info("--> remove [alias1], Aliasing index [test_x] with [alias1]");
+ assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1").addAlias("test_x", "alias1"));
+
+ logger.info("--> indexing against [alias1], should work against [test_x]");
+ indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getIndex(), equalTo("test_x"));
+ }
+
+ @Test
+ public void testFailedFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ //invalid filter, invalid json
+ IndicesAliasesRequestBuilder indicesAliasesRequestBuilder = admin().indices().prepareAliases().addAlias("test", "alias1", "abcde");
+ try {
+ indicesAliasesRequestBuilder.get();
+ fail("put alias should have been failed due to invalid filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
+ }
+
+ //valid json , invalid filter
+ indicesAliasesRequestBuilder = admin().indices().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }");
+ try {
+ indicesAliasesRequestBuilder.get();
+ fail("put alias should have been failed due to invalid filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]"));
+ }
+ }
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(prepareCreate("test").addMapping("type", "user", "type=string"));
+
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and filter [user:kimchy]");
+ QueryBuilder filter = termQuery("user", "kimchy");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", filter));
+
+ // For now just making sure that filter was stored with the alias
+ logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]");
+ ClusterState clusterState = admin().cluster().prepareState().get().getState();
+ IndexMetaData indexMd = clusterState.metaData().index("test");
+ assertThat(indexMd.aliases().get("alias1").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}"));
+
+ }
+
+ @Test
+ public void testEmptyFilter() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> aliasing index [test] with [alias1] and empty filter");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1", "{}"));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesSingleIndex() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(prepareCreate("test").addMapping("type1", "id", "type=string", "name", "type=string"));
+
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "foos", termQuery("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "bars", termQuery("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "tests", termQuery("name", "test")));
+
+ logger.info("--> indexing against [test]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "foo test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "bar test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(source("3", "baz test")).refresh(true)).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(source("4", "something else")).refresh(true)).actionGet();
+
+ logger.info("--> checking single filtering alias search");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ logger.info("--> checking single filtering alias wildcard search");
+ searchResponse = client().prepareSearch("fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1");
+
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchAllQuery()).addSort("_uid", SortOrder.ASC).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3");
+
+ logger.info("--> checking single filtering alias search with global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name")))
+ .get();
+ assertSearchResponse(searchResponse);
+ Global global = searchResponse.getAggregations().get("global");
+ Terms terms = global.getAggregations().get("test");
+ assertThat(terms.getBuckets().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with global facets and sort");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addAggregation(AggregationBuilders.global("global").subAggregation(AggregationBuilders.terms("test").field("name")))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertSearchResponse(searchResponse);
+ global = searchResponse.getAggregations().get("global");
+ terms = global.getAggregations().get("test");
+ assertThat(terms.getBuckets().size(), equalTo(4));
+
+ logger.info("--> checking single filtering alias search with non-global facets");
+ searchResponse = client().prepareSearch("tests").setQuery(QueryBuilders.matchQuery("name", "bar"))
+ .addAggregation(AggregationBuilders.terms("test").field("name"))
+ .addSort("_uid", SortOrder.ASC).get();
+ assertSearchResponse(searchResponse);
+ terms = searchResponse.getAggregations().get("test");
+ assertThat(terms.getBuckets().size(), equalTo(2));
+
+ searchResponse = client().prepareSearch("foos", "bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2");
+
+ logger.info("--> checking single non-filtering alias search");
+ searchResponse = client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking non-filtering alias and filtering alias search");
+ searchResponse = client().prepareSearch("alias1", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and filtering alias search");
+ searchResponse = client().prepareSearch("test", "foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+
+ logger.info("--> checking index and alias wildcard search");
+ searchResponse = client().prepareSearch("te*", "fo*").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4");
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesTwoIndices() throws Exception {
+ logger.info("--> creating index [test1]");
+ assertAcked(prepareCreate("test1").addMapping("type1", "name", "type=string"));
+ logger.info("--> creating index [test2]");
+ assertAcked(prepareCreate("test2").addMapping("type1", "name", "type=string"));
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termQuery("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termQuery("name", "bar")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termQuery("name", "foo")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for two indices");
+ SearchResponse searchResponse = client().prepareSearch("foos").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "5");
+ assertThat(client().prepareCount("foos").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(2L));
+
+ logger.info("--> checking filtering alias for one index");
+ searchResponse = client().prepareSearch("bars").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "2");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+
+ logger.info("--> checking filtering alias for two indices and one complete index");
+ searchResponse = client().prepareSearch("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for one index");
+ searchResponse = client().prepareSearch("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "1", "2", "3", "4", "5");
+ assertThat(client().prepareCount("foos", "aliasToTest1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(8L));
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+
+ logger.info("--> checking filtering alias for two indices and non-filtering alias for both indices");
+ searchResponse = client().prepareSearch("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get();
+ assertHits(searchResponse.getHits(), "4", "8");
+ assertThat(client().prepareCount("foos", "aliasToTests").setQuery(QueryBuilders.termQuery("name", "something")).get().getCount(), equalTo(2L));
+ }
+
+ @Test
+ public void testSearchingFilteringAliasesMultipleIndices() throws Exception {
+ logger.info("--> creating indices");
+ createIndex("test1", "test2", "test3");
+
+ client().admin().indices().preparePutMapping("test1", "test2", "test3")
+ .setType("type1")
+ .setSource("name", "type=string")
+ .get();
+ waitForConcreteMappingsOnAll("test1", "type1", "name");
+ waitForConcreteMappingsOnAll("test2", "type1", "name");
+ waitForConcreteMappingsOnAll("test3", "type1", "name");
+
+ ensureGreen();
+
+ logger.info("--> adding aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "alias12"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "alias12"));
+
+ logger.info("--> adding filtering aliases to indices");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter1", termQuery("name", "test1")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "filter23", termQuery("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter23", termQuery("name", "foo")));
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "filter13", termQuery("name", "baz")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test3", "filter13", termQuery("name", "baz")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("11").source(source("11", "foo test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("12").source(source("12", "bar test1"))).get();
+ client().index(indexRequest("test1").type("type1").id("13").source(source("13", "baz test1"))).get();
+
+ client().index(indexRequest("test2").type("type1").id("21").source(source("21", "foo test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("22").source(source("22", "bar test2"))).get();
+ client().index(indexRequest("test2").type("type1").id("23").source(source("23", "baz test2"))).get();
+
+ client().index(indexRequest("test3").type("type1").id("31").source(source("31", "foo test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("32").source(source("32", "bar test3"))).get();
+ client().index(indexRequest("test3").type("type1").id("33").source(source("33", "baz test3"))).get();
+
+ refresh();
+
+ logger.info("--> checking filtering alias for multiple indices");
+ SearchResponse searchResponse = client().prepareSearch("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "31", "11", "12", "13");
+ assertThat(client().prepareCount("filter23", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(5L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "33");
+ assertThat(client().prepareCount("filter13", "filter1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(4L));
+
+ searchResponse = client().prepareSearch("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "31", "33");
+ assertThat(client().prepareCount("filter13", "filter1", "filter23").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "21", "22", "23", "31", "13", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(6L));
+
+ searchResponse = client().prepareSearch("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get();
+ assertHits(searchResponse.getHits(), "11", "12", "13", "21", "22", "23", "31", "33");
+ assertThat(client().prepareCount("filter23", "filter13", "test1", "test2").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(8L));
+ }
+
+ @Test
+ public void testDeletingByQueryFilteringAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2");
+ assertAcked(prepareCreate("test1").addMapping("type1", "name", "type=string"));
+ assertAcked(prepareCreate("test2").addMapping("type1", "name", "type=string"));
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "foos", termQuery("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "bars", termQuery("name", "bar")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "tests", termQuery("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTests"));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "foos", termQuery("name", "foo")));
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "tests", termQuery("name", "test")));
+
+ logger.info("--> indexing against [test1]");
+ client().index(indexRequest("test1").type("type1").id("1").source(source("1", "foo test"))).get();
+ client().index(indexRequest("test1").type("type1").id("2").source(source("2", "bar test"))).get();
+ client().index(indexRequest("test1").type("type1").id("3").source(source("3", "baz test"))).get();
+ client().index(indexRequest("test1").type("type1").id("4").source(source("4", "something else"))).get();
+
+ logger.info("--> indexing against [test2]");
+ client().index(indexRequest("test2").type("type1").id("5").source(source("5", "foo test"))).get();
+ client().index(indexRequest("test2").type("type1").id("6").source(source("6", "bar test"))).get();
+ client().index(indexRequest("test2").type("type1").id("7").source(source("7", "baz test"))).get();
+ client().index(indexRequest("test2").type("type1").id("8").source(source("8", "something else"))).get();
+
+ refresh();
+
+ logger.info("--> checking counts before delete");
+ assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L));
+ }
+
+
+
+ @Test
+ public void testDeleteAliases() throws Exception {
+ logger.info("--> creating index [test1] and [test2]");
+ assertAcked(prepareCreate("test1").addMapping("type", "name", "type=string"));
+ assertAcked(prepareCreate("test2").addMapping("type", "name", "type=string"));
+ ensureGreen();
+
+ logger.info("--> adding filtering aliases to index [test1]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test1", "aliasToTest1")
+ .addAlias("test1", "aliasToTests")
+ .addAlias("test1", "foos", termQuery("name", "foo"))
+ .addAlias("test1", "bars", termQuery("name", "bar"))
+ .addAlias("test1", "tests", termQuery("name", "test")));
+
+ logger.info("--> adding filtering aliases to index [test2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("test2", "aliasToTest2")
+ .addAlias("test2", "aliasToTests")
+ .addAlias("test2", "foos", termQuery("name", "foo"))
+ .addAlias("test2", "tests", termQuery("name", "test")));
+
+ String[] indices = {"test1", "test2"};
+ String[] aliases = {"aliasToTest1", "foos", "bars", "tests", "aliasToTest2", "aliasToTests"};
+
+ admin().indices().prepareAliases().removeAlias(indices, aliases).get();
+
+ AliasesExistResponse response = admin().indices().prepareAliasesExist(aliases).get();
+ assertThat(response.exists(), equalTo(false));
+ }
+
+
+ @Test
+ public void testWaitForAliasCreationMultipleShards() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasCreationSingleShard() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(admin().indices().create(createIndexRequest("test").settings(settingsBuilder().put("index.numberOfReplicas", 0).put("index.numberOfShards", 1))).get());
+
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias" + i));
+ client().index(indexRequest("alias" + i).type("type1").id("1").source(source("1", "test"))).get();
+ }
+ }
+
+ @Test
+ public void testWaitForAliasSimultaneousUpdate() throws Exception {
+ final int aliasCount = 10;
+
+ logger.info("--> creating index [test]");
+ createIndex("test");
+
+ ensureGreen();
+
+ ExecutorService executor = Executors.newFixedThreadPool(aliasCount);
+ for (int i = 0; i < aliasCount; i++) {
+ final String aliasName = "alias" + i;
+ executor.submit(new Runnable() {
+ @Override
+ public void run() {
+ assertAcked(admin().indices().prepareAliases().addAlias("test", aliasName));
+ client().index(indexRequest(aliasName).type("type1").id("1").source(source("1", "test"))).actionGet();
+ }
+ });
+ }
+ executor.shutdown();
+ boolean done = executor.awaitTermination(10, TimeUnit.SECONDS);
+ assertThat(done, equalTo(true));
+ if (!done) {
+ executor.shutdownNow();
+ }
+ }
+
+
+ @Test
+ public void testSameAlias() throws Exception {
+ logger.info("--> creating index [test]");
+ assertAcked(prepareCreate("test").addMapping("type", "name", "type=string"));
+ ensureGreen();
+
+ logger.info("--> creating alias1 ");
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1")));
+ TimeValue timeout = TimeValue.timeValueSeconds(2);
+ logger.info("--> recreating alias1 ");
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> modifying alias1 to have a filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termQuery("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with the same filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termQuery("name", "foo")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> recreating alias1 with a different filter");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().addAlias("test", "alias1", termQuery("name", "bar")).setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+ logger.info("--> verify that filter was updated");
+ AliasMetaData aliasMetaData = internalCluster().clusterService().state().metaData().aliases().get("alias1").get("test");
+ assertThat(aliasMetaData.getFilter().toString(), equalTo("{\"term\":{\"name\":\"bar\"}}"));
+
+ logger.info("--> deleting alias1");
+ stopWatch.start();
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1").setTimeout(timeout)));
+ assertThat(stopWatch.stop().lastTaskTime().millis(), lessThan(timeout.millis()));
+
+
+ }
+
+ @Test(expected = AliasesMissingException.class)
+ public void testIndicesRemoveNonExistingAliasResponds404() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> deleting alias1 which does not exist");
+ assertAcked((admin().indices().prepareAliases().removeAlias("test", "alias1")));
+ }
+
+ @Test
+ public void testIndicesGetAliases() throws Exception {
+
+ logger.info("--> creating indices [foobar, test, test123, foobarbaz, bazbar]");
+ createIndex("foobar");
+ createIndex("test");
+ createIndex("test123");
+ createIndex("foobarbaz");
+ createIndex("bazbar");
+
+ client().admin().indices().preparePutMapping("foobar", "test", "test123", "foobarbaz", "bazbar")
+ .setType("type").setSource("field", "type=string").get();
+ waitForConcreteMappingsOnAll("foobar", "type", "field");
+ waitForConcreteMappingsOnAll("test", "type", "field");
+ waitForConcreteMappingsOnAll("test123", "type", "field");
+ waitForConcreteMappingsOnAll("foobarbaz", "type", "field");
+ waitForConcreteMappingsOnAll("bazbar", "type", "field");
+
+ ensureGreen();
+
+ logger.info("--> creating aliases [alias1, alias2]");
+ assertAcked(admin().indices().prepareAliases().addAlias("foobar", "alias1").addAlias("foobar", "alias2"));
+
+ logger.info("--> getting alias1");
+ GetAliasesResponse getResponse = admin().indices().prepareGetAliases("alias1").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ AliasesExistResponse existsResponse = admin().indices().prepareAliasesExist("alias1").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting all aliases that start with alias*");
+ getResponse = admin().indices().prepareGetAliases("alias*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("alias1"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).alias(), equalTo("alias2"));
+ assertThat(getResponse.getAliases().get("foobar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("alias*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+
+ logger.info("--> creating aliases [bar, baz, foo]");
+ assertAcked(admin().indices().prepareAliases()
+ .addAlias("bazbar", "bar")
+ .addAlias("bazbar", "bac", termQuery("field", "value"))
+ .addAlias("foobar", "foo"));
+
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(new AliasAction(AliasAction.Type.ADD, "foobar", "bac").routing("bla")));
+
+ logger.info("--> getting bar and baz for index bazbar");
+ getResponse = admin().indices().prepareGetAliases("bar", "bac").addIndices("bazbar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("bar", "bac")
+ .addIndices("bazbar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting *b* for index baz*");
+ getResponse = admin().indices().prepareGetAliases("*b*").addIndices("baz*").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("*b*")
+ .addIndices("baz*").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting *b* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("b*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("bazbar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("term"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("field"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getFilter().string(), containsString("value"));
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(0).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1), notNullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).alias(), equalTo("bar"));
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("bazbar").get(1).getSearchRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("bac"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), equalTo("bla"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), equalTo("bla"));
+ existsResponse = admin().indices().prepareAliasesExist("b*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting f* for index *bar");
+ getResponse = admin().indices().prepareGetAliases("f*").addIndices("*bar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("f*")
+ .addIndices("*bar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // alias at work
+ logger.info("--> getting f* for index *bac");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ logger.info("--> getting foo for index foobar");
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(1));
+ assertThat(getResponse.getAliases().get("foobar").get(0), notNullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).alias(), equalTo("foo"));
+ assertThat(getResponse.getAliases().get("foobar").get(0).getFilter(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getIndexRouting(), nullValue());
+ assertThat(getResponse.getAliases().get("foobar").get(0).getSearchRouting(), nullValue());
+ existsResponse = admin().indices().prepareAliasesExist("foo")
+ .addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ // alias at work again
+ logger.info("--> getting * for index *bac");
+ getResponse = admin().indices().prepareGetAliases("*").addIndices("*bac").get();
+ assertThat(getResponse, notNullValue());
+ assertThat(getResponse.getAliases().size(), equalTo(2));
+ assertThat(getResponse.getAliases().get("foobar").size(), equalTo(4));
+ assertThat(getResponse.getAliases().get("bazbar").size(), equalTo(2));
+ existsResponse = admin().indices().prepareAliasesExist("*")
+ .addIndices("*bac").get();
+ assertThat(existsResponse.exists(), equalTo(true));
+
+ assertAcked(admin().indices().prepareAliases()
+ .removeAlias("foobar", "foo"));
+
+ getResponse = admin().indices().prepareGetAliases("foo").addIndices("foobar").get();
+ assertThat(getResponse.getAliases().isEmpty(), equalTo(true));
+ existsResponse = admin().indices().prepareAliasesExist("foo").addIndices("foobar").get();
+ assertThat(existsResponse.exists(), equalTo(false));
+ }
+
+ // A null index name must be rejected eagerly (IllegalArgumentException from the
+ // request builder) even when the cluster holds no indices at all.
+ @Test
+ public void testAddAliasNullWithoutExistingIndices() {
+ try {
+ assertAcked(admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1")));
+ fail("create alias should have failed due to null index");
+ } catch (IllegalArgumentException e) {
+ assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"",
+ e.getMessage(), containsString("Alias action [add]: [index] may not be empty string"));
+ }
+ }
+
+ // Same null-index rejection as above, but with an existing index present, to
+ // show the validation does not depend on cluster state.
+ @Test
+ public void testAddAliasNullWithExistingIndices() throws Exception {
+ logger.info("--> creating index [test]");
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> aliasing index [null] with [empty-alias]");
+
+ try {
+ assertAcked(admin().indices().prepareAliases().addAlias((String) null, "empty-alias"));
+ fail("create alias should have failed due to null index");
+ } catch (IllegalArgumentException e) {
+ assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"",
+ e.getMessage(), containsString("Alias action [add]: [index] may not be empty string"));
+ }
+ }
+
+ // Empty index name fails request validation (surfaced on get()).
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "alias1")).get();
+ }
+
+ // Null alias name fails request validation.
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
+ }
+
+ // Empty alias name fails request validation.
+ @Test(expected = ActionRequestValidationException.class)
+ public void testAddAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
+ }
+
+ // Both index and alias null: validation must report exactly two errors.
+ @Test
+ public void testAddAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, null)).get();
+ // use fail(...) instead of assertTrue(msg, false), matching testRemoveAliasNullAliasNullIndex
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ // Both index and alias empty: validation must report exactly two errors.
+ @Test
+ public void testAddAliasEmptyAliasEmptyIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "")).get();
+ // use fail(...) instead of assertTrue(msg, false), matching testRemoveAliasEmptyAliasEmptyIndex
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ // Removing an alias with a null index fails request validation.
+ // (renamed: "tesRemove..." -> "testRemove..." typo)
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, "alias1")).get();
+ }
+
+ // Removing an alias with an empty index fails request validation.
+ // (renamed: "tesRemove..." -> "testRemove..." typo)
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyIndex() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "alias1")).get();
+ }
+
+ // Removing a null alias fails request validation.
+ // (renamed: "tesRemove..." -> "testRemove..." typo)
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasNullAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", null)).get();
+ }
+
+ // Removing an empty alias fails request validation.
+ // (renamed: "tesRemove..." -> "testRemove..." typo)
+ @Test(expected = ActionRequestValidationException.class)
+ public void testRemoveAliasEmptyAlias() {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", "")).get();
+ }
+
+ // Remove with both index and alias null: validation reports two errors.
+ @Test
+ public void testRemoveAliasNullAliasNullIndex() {
+ try {
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, null)).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ // Remove with both index and alias empty: validation reports two errors.
+ @Test
+ public void testRemoveAliasEmptyAliasEmptyIndex() {
+ try {
+ // was newAddAliasAction("", "") — copy/paste bug; this test is about the remove action
+ admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "")).get();
+ fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors(), notNullValue());
+ assertThat(e.validationErrors().size(), equalTo(2));
+ }
+ }
+
+ // A get-aliases request with no alias pattern must return aliases of all indices.
+ @Test
+ public void testGetAllAliasesWorks() {
+ createIndex("index1");
+ createIndex("index2");
+
+ ensureYellow();
+
+ assertAcked(admin().indices().prepareAliases().addAlias("index1", "alias1").addAlias("index2", "alias2"));
+
+ GetAliasesResponse response = admin().indices().prepareGetAliases().get();
+ assertThat(response.getAliases(), hasKey("index1"));
+ // was a duplicated assertion on "index1"; the second index must be checked too
+ assertThat(response.getAliases(), hasKey("index2"));
+ }
+
+ // Aliases (plain, filtered, routed) can be attached at index-creation time via
+ // the builder API; checkAliases() verifies all three end up in cluster state.
+ @Test
+ public void testCreateIndexWithAliases() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type", "field", "type=string")
+ .addAlias(new Alias("alias1"))
+ .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field")))
+ .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search")));
+
+ checkAliases();
+ }
+
+ // Same as testCreateIndexWithAliases, but aliases are declared inside the raw
+ // JSON source of the create-index request ("aliases" section).
+ @Test
+ public void testCreateIndexWithAliasesInSource() throws Exception {
+ assertAcked(prepareCreate("test").setSource("{\n" +
+ " \"aliases\" : {\n" +
+ " \"alias1\" : {},\n" +
+ " \"alias2\" : {\"filter\" : {\"match_all\": {}}},\n" +
+ " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" +
+ " }\n" +
+ "}"));
+
+ checkAliases();
+ }
+
+ // Same scenario again, with the aliases passed as a standalone JSON string via
+ // setAliases() instead of the full request source.
+ @Test
+ public void testCreateIndexWithAliasesSource() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type", "field", "type=string")
+ .setAliases("{\n" +
+ " \"alias1\" : {},\n" +
+ " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" +
+ " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" +
+ "}"));
+
+ checkAliases();
+ }
+
+ // Invalid alias filters must make index creation fail: first with broken JSON,
+ // then with well-formed JSON that is not a valid query.
+ @Test
+ public void testCreateIndexWithAliasesFilterNotValid() {
+ //non valid filter, invalid json
+ CreateIndexRequestBuilder createIndexRequestBuilder = prepareCreate("test").addAlias(new Alias("alias2").filter("f"));
+
+ try {
+ createIndexRequestBuilder.get();
+ fail("create index should have failed due to invalid alias filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias2]"));
+ }
+
+ //valid json but non valid filter
+ createIndexRequestBuilder = prepareCreate("test").addAlias(new Alias("alias2").filter("{ \"test\": {} }"));
+
+ try {
+ createIndexRequestBuilder.get();
+ fail("create index should have failed due to invalid alias filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias2]"));
+ }
+ }
+
+ // Filtered aliases referencing unmapped fields are rejected (the filter cannot
+ // be parsed against the index mapping); a field-free filter is still accepted.
+ @Test
+ public void testAddAliasWithFilterNoMapping() throws Exception {
+ assertAcked(prepareCreate("test"));
+
+ try {
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "a", QueryBuilders.termQuery("field1", "term"))
+ .get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getCause(), instanceOf(QueryParsingException.class));
+ }
+
+ try {
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "a", QueryBuilders.rangeQuery("field2").from(0).to(1))
+ .get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getCause(), instanceOf(QueryParsingException.class));
+ }
+
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "a", QueryBuilders.matchAllQuery()) // <-- no fail, b/c no field mentioned
+ .get();
+ }
+
+ // Alias filters using the "now" keyword on _timestamp must be re-evaluated per
+ // search (not frozen at alias-creation time): every indexed doc should remain
+ // visible through both filtered aliases.
+ @Test
+ public void testAliasFilterWithNowInRangeFilterAndQuery() throws Exception {
+ assertAcked(prepareCreate("my-index").addMapping("my-type", "_timestamp", "enabled=true"));
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeQuery("_timestamp").from("now-1d").to("now")));
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", rangeQuery("_timestamp").from("now-1d").to("now")));
+
+ final int numDocs = scaledRandomIntBetween(5, 52);
+ for (int i = 1; i <= numDocs; i++) {
+ client().prepareIndex("my-index", "my-type").setCreate(true).setSource("{}").get();
+ if (i % 2 == 0) {
+ // only check every other doc to keep the refresh/search cost down
+ refresh();
+ SearchResponse response = client().prepareSearch("filter1").get();
+ assertHitCount(response, i);
+
+ response = client().prepareSearch("filter2").get();
+ assertHitCount(response, i);
+ }
+ }
+ }
+
+ // On current index versions, has_child / has_parent alias filters need a search
+ // context and must be rejected at alias-creation time (contrast with the
+ // pre-2.0 test below, where they are accepted).
+ @Test
+ public void testAliasesFilterWithHasChildQuery() throws Exception {
+ assertAcked(prepareCreate("my-index")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ );
+ try {
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", hasChildQuery("child", matchAllQuery())));
+ // without this fail() the test silently passed when no exception was thrown
+ fail("Expected IllegalArgumentException for has_child alias filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getCause(), instanceOf(IllegalStateException.class));
+ assertThat(e.getCause().getMessage(), equalTo("Search context is required to be set"));
+ }
+ try {
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", hasParentQuery("child", matchAllQuery())));
+ fail("Expected IllegalArgumentException for has_parent alias filter");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getCause(), instanceOf(IllegalStateException.class));
+ assertThat(e.getCause().getMessage(), equalTo("Search context is required to be set"));
+ }
+ }
+
+ // Backwards compatibility: on an index created as version 1.6.0, has_child and
+ // has_parent alias filters are still accepted without error.
+ @Test
+ public void testAliasesFilterWithHasChildQueryPre2Dot0() throws Exception {
+ assertAcked(prepareCreate("my-index")
+ .setSettings(Settings.builder()
+ .put(indexSettings())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_6_0)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ );
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", hasChildQuery("child", matchAllQuery())));
+ assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", hasParentQuery("child", matchAllQuery())));
+ }
+
+ // Alias operations vs. index blocks: read/write blocks allow alias changes;
+ // read-only blocks reject add/remove but still allow reads; a metadata block
+ + // rejects everything including get/exists.
+ @Test
+ public void testAliasesWithBlocks() {
+ createIndex("test");
+ ensureGreen();
+
+ // read and write blocks do not affect alias (metadata) operations
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", block);
+
+ assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2"));
+ assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1"));
+ assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1));
+ assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+
+ // read-only: mutations blocked, reads still work
+ try {
+ enableIndexBlock("test", SETTING_READ_ONLY);
+
+ assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_READ_ONLY_BLOCK);
+ assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK);
+ assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1));
+ assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true));
+
+ } finally {
+ disableIndexBlock("test", SETTING_READ_ONLY);
+ }
+
+ // metadata block: both mutations and reads blocked
+ try {
+ enableIndexBlock("test", SETTING_BLOCKS_METADATA);
+
+ assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_METADATA_BLOCK);
+ assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_METADATA_BLOCK);
+ assertBlocked(admin().indices().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK);
+ assertBlocked(admin().indices().prepareAliasesExist("alias2"), INDEX_METADATA_BLOCK);
+
+ } finally {
+ disableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ }
+ }
+
+ // Shared assertions for the three testCreateIndexWithAliases* variants:
+ // alias1 = plain, alias2 = filtered, alias3 = routed.
+ private void checkAliases() {
+ GetAliasesResponse getAliasesResponse = admin().indices().prepareGetAliases("alias1").get();
+ assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1));
+ AliasMetaData aliasMetaData = getAliasesResponse.getAliases().get("test").get(0);
+ assertThat(aliasMetaData.alias(), equalTo("alias1"));
+ assertThat(aliasMetaData.filter(), nullValue());
+ assertThat(aliasMetaData.indexRouting(), nullValue());
+ assertThat(aliasMetaData.searchRouting(), nullValue());
+
+ getAliasesResponse = admin().indices().prepareGetAliases("alias2").get();
+ assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1));
+ aliasMetaData = getAliasesResponse.getAliases().get("test").get(0);
+ assertThat(aliasMetaData.alias(), equalTo("alias2"));
+ assertThat(aliasMetaData.filter(), notNullValue());
+ assertThat(aliasMetaData.indexRouting(), nullValue());
+ assertThat(aliasMetaData.searchRouting(), nullValue());
+
+ getAliasesResponse = admin().indices().prepareGetAliases("alias3").get();
+ assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1));
+ aliasMetaData = getAliasesResponse.getAliases().get("test").get(0);
+ assertThat(aliasMetaData.alias(), equalTo("alias3"));
+ assertThat(aliasMetaData.filter(), nullValue());
+ assertThat(aliasMetaData.indexRouting(), equalTo("index"));
+ assertThat(aliasMetaData.searchRouting(), equalTo("search"));
+ }
+
+ // Asserts that the search hits contain exactly the given doc ids, in any order.
+ private void assertHits(SearchHits hits, String... ids) {
+ assertThat(hits.totalHits(), equalTo((long) ids.length));
+ Set<String> hitIds = newHashSet();
+ for (SearchHit hit : hits.getHits()) {
+ hitIds.add(hit.id());
+ }
+ assertThat(hitIds, containsInAnyOrder(ids));
+ }
+
+ // Builds a minimal JSON doc body with "id" and "name" fields for indexing.
+ private String source(String id, String nameValue) {
+ return "{ \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" }";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
new file mode 100644
index 0000000000..a225024b1c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/aliases/AliasesBenchmark.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.aliases;
+
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ */
+public class AliasesBenchmark {
+
+ private final static String INDEX_NAME = "my-index";
+
+ // Measures cluster-state update latency when adding aliases on top of a large
+ // (BASE_ALIAS_COUNT) existing alias set. First normalizes the alias count to
+ // exactly BASE_ALIAS_COUNT, then times NUM_ADD_ALIAS_REQUEST single-alias adds.
+ public static void main(String[] args) throws IOException {
+ int NUM_ADDITIONAL_NODES = 0;
+ int BASE_ALIAS_COUNT = 100000;
+ int NUM_ADD_ALIAS_REQUEST = 1000;
+
+ Settings settings = Settings.settingsBuilder()
+ .put("node.master", false).build();
+ Node node1 = NodeBuilder.nodeBuilder().settings(
+ Settings.settingsBuilder().put(settings).put("node.master", true)
+ ).node();
+
+ Node[] otherNodes = new Node[NUM_ADDITIONAL_NODES];
+ for (int i = 0; i < otherNodes.length; i++) {
+ otherNodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Client client = node1.client();
+ try {
+ client.admin().indices().prepareCreate(INDEX_NAME).execute().actionGet();
+ } catch (IndexAlreadyExistsException e) {
+ // index persisted from a previous run; reuse it
+ }
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+ int numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ if (numberOfAliases < BASE_ALIAS_COUNT) {
+ int diff = BASE_ALIAS_COUNT - numberOfAliases;
+ System.out.println("Adding " + diff + " more aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
+ for (int i = 1; i <= diff; i++) {
+ builder.addAlias(INDEX_NAME, Strings.randomBase64UUID());
+ if (i % 1000 == 0) {
+ // flush in batches of 1000 actions to bound request size
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ } else if (numberOfAliases > BASE_ALIAS_COUNT) {
+ IndicesAliasesRequestBuilder builder = client.admin().indices().prepareAliases();
+ int diff = numberOfAliases - BASE_ALIAS_COUNT;
+ System.out.println("Removing " + diff + " aliases to get to the start amount of " + BASE_ALIAS_COUNT + " aliases");
+ List<AliasMetaData> aliases = client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet().getAliases().get(INDEX_NAME);
+ // remove exactly 'diff' aliases; the old '<=' bound removed one too many,
+ // leaving the cluster below BASE_ALIAS_COUNT
+ for (int i = 0; i < diff; i++) {
+ builder.removeAlias(INDEX_NAME, aliases.get(i).alias());
+ if (i > 0 && i % 1000 == 0) {
+ builder.execute().actionGet();
+ builder = client.admin().indices().prepareAliases();
+ }
+ }
+ if (!builder.request().getAliasActions().isEmpty()) {
+ builder.execute().actionGet();
+ }
+ }
+
+ numberOfAliases = countAliases(client);
+ System.out.println("Number of aliases: " + numberOfAliases);
+
+ long totalTime = 0;
+ int max = numberOfAliases + NUM_ADD_ALIAS_REQUEST;
+ for (int i = numberOfAliases; i <= max; i++) {
+ if (i != numberOfAliases && i % 100 == 0) {
+ // report a rolling average every 100 alias additions
+ long avgTime = totalTime / 100;
+ System.out.println("Added [" + (i - numberOfAliases) + "] aliases. Avg create time: " + avgTime + " ms");
+ totalTime = 0;
+ }
+
+ long time = System.currentTimeMillis();
+// String filter = termFilter("field" + i, "value" + i).toXContent(XContentFactory.jsonBuilder(), null).string();
+ client.admin().indices().prepareAliases().addAlias(INDEX_NAME, Strings.randomBase64UUID()/*, filter*/)
+ .execute().actionGet();
+ totalTime += System.currentTimeMillis() - time;
+ }
+ System.out.println("Number of aliases: " + countAliases(client));
+
+ client.close();
+ node1.close();
+ for (Node otherNode : otherNodes) {
+ otherNode.close();
+ }
+ }
+
+ // Returns how many aliases the benchmark index currently has (0 if none).
+ private static int countAliases(Client client) {
+ GetAliasesResponse response = client.admin().indices().prepareGetAliases("*")
+ .addIndices(INDEX_NAME)
+ .execute().actionGet();
+ if (response.getAliases().isEmpty()) {
+ return 0;
+ } else {
+ return response.getAliases().get(INDEX_NAME).size();
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java b/core/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
new file mode 100644
index 0000000000..15745fc931
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/bloom/BloomBench.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.bloom;
+
+import org.apache.lucene.codecs.bloom.FuzzySet;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.util.BloomFilter;
+
+import java.security.SecureRandom;
+
+/**
+ */
+public class BloomBench {
+
+ // Compares false-positive behavior of Elasticsearch's BloomFilter ("g") against
+ // Lucene's FuzzySet ("l") at roughly equal memory budgets: inserts ELEMENTS
+ // random UUIDs into both, then probes with fresh UUIDs and counts maybe-hits.
+ public static void main(String[] args) throws Exception {
+ SecureRandom random = new SecureRandom();
+ final int ELEMENTS = (int) SizeValue.parseSizeValue("1m").singles();
+ final double fpp = 0.01;
+ BloomFilter gFilter = BloomFilter.create(ELEMENTS, fpp);
+ System.out.println("G SIZE: " + new ByteSizeValue(gFilter.getSizeInBytes()));
+
+ // size the Lucene filter to the same byte budget for a fair comparison
+ FuzzySet lFilter = FuzzySet.createSetBasedOnMaxMemory((int) gFilter.getSizeInBytes());
+ //FuzzySet lFilter = FuzzySet.createSetBasedOnQuality(ELEMENTS, 0.97f);
+
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ gFilter.put(bytesRef);
+ lFilter.addValue(bytesRef);
+ }
+
+ // probe with values that were (almost surely) never inserted; any "maybe"
+ // answer is therefore a false positive
+ int lFalse = 0;
+ int gFalse = 0;
+ for (int i = 0; i < ELEMENTS; i++) {
+ BytesRef bytesRef = new BytesRef(Strings.randomBase64UUID(random));
+ if (gFilter.mightContain(bytesRef)) {
+ gFalse++;
+ }
+ if (lFilter.contains(bytesRef) == FuzzySet.ContainsResult.MAYBE) {
+ lFalse++;
+ }
+ }
+ System.out.println("Failed positives, g[" + gFalse + "], l[" + lFalse + "]");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/breaker/CircuitBreakerBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/breaker/CircuitBreakerBenchmark.java
new file mode 100644
index 0000000000..f6b0497b09
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/breaker/CircuitBreakerBenchmark.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.breaker;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static junit.framework.Assert.assertNotNull;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+
+/**
+ * Benchmarks for different implementations of the circuit breaker
+ */
+public class CircuitBreakerBenchmark {
+
+ private static final String INDEX = UUID.randomUUID().toString();
+ private static final int QUERIES = 100;
+ private static final int BULK_SIZE = 100;
+ private static final int NUM_DOCS = 2_000_000;
+ private static final int AGG_SIZE = 25;
+
+ private static void switchToNoop(Client client) {
+ Settings settings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, CircuitBreaker.Type.NOOP)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+ }
+
+ private static void switchToMemory(Client client) {
+ Settings settings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, CircuitBreaker.Type.MEMORY)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
+ }
+
+ private static void runSingleThreadedQueries(Client client) {
+ long totalTime = 0;
+ for (int i = 0; i < QUERIES; i++) {
+ if (i % 10 == 0) {
+ System.out.println("--> query #" + i);
+ }
+ SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
+ .addAggregation(
+ terms("myterms")
+ .size(AGG_SIZE)
+ .field("num")
+ ).setSize(0).get();
+ Terms terms = resp.getAggregations().get("myterms");
+ assertNotNull("term aggs were calculated", terms);
+ totalTime += resp.getTookInMillis();
+ }
+
+ System.out.println("--> single threaded average time: " + (totalTime / QUERIES) + "ms");
+ }
+
+ private static void runMultiThreadedQueries(final Client client) throws Exception {
+ final AtomicLong totalThreadedTime = new AtomicLong(0);
+ int THREADS = 10;
+ Thread threads[] = new Thread[THREADS];
+ for (int i = 0; i < THREADS; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ long tid = Thread.currentThread().getId();
+ for (int i = 0; i < QUERIES; i++) {
+ if (i % 30 == 0) {
+ System.out.println("--> [" + tid + "] query # "+ i);
+ }
+ SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
+ .addAggregation(
+ terms("myterms")
+ .size(AGG_SIZE)
+ .field("num")
+ ).setSize(0).get();
+ Terms terms = resp.getAggregations().get("myterms");
+ assertNotNull("term aggs were calculated", terms);
+ totalThreadedTime.addAndGet(resp.getTookInMillis());
+ }
+ }
+ });
+ }
+
+ System.out.println("--> starting " + THREADS + " threads for parallel aggregating");
+ for (Thread t : threads) {
+ t.start();
+ }
+
+ for (Thread t : threads) {
+ t.join();
+ }
+
+ System.out.println("--> threaded average time: " + (totalThreadedTime.get() / (THREADS * QUERIES)) + "ms");
+ }
+
+ public static void main(String args[]) throws Exception {
+ Node node = NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder()).node();
+ final Client client = node.client();
+ try {
+ try {
+ client.admin().indices().prepareDelete(INDEX).get();
+ } catch (Exception e) {
+ // Ignore
+ }
+ try {
+ client.admin().indices().prepareCreate(INDEX).setSettings(
+ settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0)).get();
+ } catch (IndexAlreadyExistsException e) {}
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+
+
+ System.out.println("--> indexing: " + NUM_DOCS + " documents...");
+ BulkRequestBuilder bulkBuilder = client.prepareBulk();
+ for (int i = 0; i < NUM_DOCS; i++) {
+ bulkBuilder.add(client.prepareIndex(INDEX, "doc").setSource("num", i));
+ if (i % BULK_SIZE == 0) {
+ // Send off bulk request
+ bulkBuilder.get();
+ // Create a new holder
+ bulkBuilder = client.prepareBulk();
+ }
+ }
+ bulkBuilder.get();
+ client.admin().indices().prepareRefresh(INDEX).get();
+ SearchResponse countResp = client.prepareSearch(INDEX).setQuery(matchAllQuery()).setSize(0).get();
+ assert countResp.getHits().getTotalHits() == NUM_DOCS : "all docs should be indexed";
+
+ final int warmupCount = 100;
+ for (int i = 0; i < warmupCount; i++) {
+ if (i % 15 == 0) {
+ System.out.println("--> warmup #" + i);
+ }
+ SearchResponse resp = client.prepareSearch(INDEX).setQuery(matchAllQuery())
+ .addAggregation(
+ terms("myterms")
+ .size(AGG_SIZE)
+ .field("num")
+ ).setSize(0).get();
+ Terms terms = resp.getAggregations().get("myterms");
+ assertNotNull("term aggs were calculated", terms);
+ }
+
+ System.out.println("--> running single-threaded tests");
+ runSingleThreadedQueries(client);
+ System.out.println("--> switching to NOOP breaker");
+ switchToNoop(client);
+ runSingleThreadedQueries(client);
+ switchToMemory(client);
+
+ System.out.println("--> running multi-threaded tests");
+ runMultiThreadedQueries(client);
+ System.out.println("--> switching to NOOP breaker");
+ switchToNoop(client);
+ runMultiThreadedQueries(client);
+ } finally {
+ client.close();
+ node.close();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
new file mode 100644
index 0000000000..660d042e5e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/checksum/ChecksumBenchmark.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.checksum;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.security.MessageDigest;
+import java.util.zip.Adler32;
+import java.util.zip.CRC32;
+
+/**
+ *
+ */
+public class ChecksumBenchmark {
+
+ public static final int BATCH_SIZE = 16 * 1024;
+
+ public static void main(String[] args) throws Exception {
+        System.out.println("Warming up");
+ long warmSize = ByteSizeValue.parseBytesSizeValue("1g", null).bytes();
+ crc(warmSize);
+ adler(warmSize);
+ md5(warmSize);
+
+ long dataSize = ByteSizeValue.parseBytesSizeValue("10g", null).bytes();
+ System.out.println("Running size: " + dataSize);
+ crc(dataSize);
+ adler(dataSize);
+ md5(dataSize);
+ }
+
+ private static void crc(long dataSize) {
+ long start = System.currentTimeMillis();
+ CRC32 crc = new CRC32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("CRC took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void adler(long dataSize) {
+ long start = System.currentTimeMillis();
+ Adler32 crc = new Adler32();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ for (long i = 0; i < iter; i++) {
+ crc.update(data);
+ }
+ crc.getValue();
+ System.out.println("Adler took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+
+ private static void md5(long dataSize) throws Exception {
+ long start = System.currentTimeMillis();
+ byte[] data = new byte[BATCH_SIZE];
+ long iter = dataSize / BATCH_SIZE;
+ MessageDigest digest = MessageDigest.getInstance("MD5");
+ for (long i = 0; i < iter; i++) {
+ digest.update(data);
+ }
+ digest.digest();
+ System.out.println("md5 took " + new TimeValue(System.currentTimeMillis() - start));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
new file mode 100644
index 0000000000..bcb2157317
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/cluster/ClusterAllocationRerouteBenchmark.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+
+public class ClusterAllocationRerouteBenchmark {
+
+ private static final ESLogger logger = Loggers.getLogger(ClusterAllocationRerouteBenchmark.class);
+
+ public static void main(String[] args) {
+ final int numberOfRuns = 1;
+ final int numIndices = 5 * 365; // five years
+ final int numShards = 6;
+ final int numReplicas = 2;
+ final int numberOfNodes = 30;
+ final int numberOfTags = 2;
+ AllocationService strategy = ElasticsearchAllocationTestCase.createAllocationService(Settings.builder()
+ .put("cluster.routing.allocation.awareness.attributes", "tag")
+ .build(), new Random(1));
+
+ MetaData.Builder mb = MetaData.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ mb.put(IndexMetaData.builder("test_" + i).numberOfShards(numShards).numberOfReplicas(numReplicas));
+ }
+ MetaData metaData = mb.build();
+ RoutingTable.Builder rb = RoutingTable.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ rb.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rb.build();
+ DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
+ for (int i = 1; i <= numberOfNodes; i++) {
+ nb.put(ElasticsearchAllocationTestCase.newNode("node" + i, numberOfTags == 0 ? ImmutableMap.<String, String>of() : ImmutableMap.of("tag", "tag_" + (i % numberOfTags))));
+ }
+ ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).nodes(nb).build();
+
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < numberOfRuns; i++) {
+ logger.info("[{}] starting... ", i);
+ long runStart = System.currentTimeMillis();
+ ClusterState clusterState = initialClusterState;
+ while (clusterState.readOnlyRoutingNodes().hasUnassignedShards()) {
+ logger.info("[{}] remaining unassigned {}", i, clusterState.readOnlyRoutingNodes().unassigned().size());
+ RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ result = strategy.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ }
+ logger.info("[{}] took {}", i, TimeValue.timeValueMillis(System.currentTimeMillis() - runStart));
+ }
+ long took = System.currentTimeMillis() - start;
+ logger.info("total took {}, AVG {}", TimeValue.timeValueMillis(took), TimeValue.timeValueMillis(took / numberOfRuns));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
new file mode 100644
index 0000000000..fe548b9ee4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/common/lucene/uidscan/LuceneUidScanBenchmark.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.common.lucene.uidscan;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.FSDirectory;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.nio.file.Paths;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ *
+ */
+public class LuceneUidScanBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ FSDirectory dir = FSDirectory.open(PathUtils.get("work/test"));
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ final int NUMBER_OF_THREADS = 2;
+ final long INDEX_COUNT = SizeValue.parseSizeValue("1m").singles();
+ final long SCAN_COUNT = SizeValue.parseSizeValue("100k").singles();
+ final long startUid = 1000000;
+
+ long LIMIT = startUid + INDEX_COUNT;
+ StopWatch watch = new StopWatch().start();
+ System.out.println("Indexing " + INDEX_COUNT + " docs...");
+ for (long i = startUid; i < LIMIT; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("_uid", Long.toString(i), Store.NO));
+ doc.add(new NumericDocValuesField("_version", i));
+ writer.addDocument(doc);
+ }
+ System.out.println("Done indexing, took " + watch.stop().lastTaskTime());
+
+ final IndexReader reader = DirectoryReader.open(writer, true);
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < SCAN_COUNT; i++) {
+ long id = startUid + (Math.abs(ThreadLocalRandom.current().nextInt()) % INDEX_COUNT);
+ final long version = Versions.loadVersion(reader, new Term("_uid", Long.toString(id)));
+ if (version != id) {
+ System.err.println("wrong id...");
+ break;
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+
+ watch = new StopWatch().start();
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].start();
+ }
+ latch.await();
+ watch.stop();
+ System.out.println("Scanned in " + watch.totalTime() + " TP Seconds " + ((SCAN_COUNT * NUMBER_OF_THREADS) / watch.totalTime().secondsFrac()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
new file mode 100644
index 0000000000..97113a6d1c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/common/recycler/RecyclerBenchmark.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.common.recycler;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.recycler.AbstractRecyclerC;
+import org.elasticsearch.common.recycler.Recycler;
+
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
+/** Benchmark that tries to measure the overhead of object recycling depending on concurrent access. */
+public class RecyclerBenchmark {
+
+ private static final long NUM_RECYCLES = 5000000L;
+ private static final Random RANDOM = new Random(0);
+
+ private static long bench(final Recycler<?> recycler, long numRecycles, int numThreads) throws InterruptedException {
+ final AtomicLong recycles = new AtomicLong(numRecycles);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final Thread[] threads = new Thread[numThreads];
+ for (int i = 0; i < numThreads; ++i){
+ // Thread ids happen to be generated sequentially, so we also generate random threads so that distribution of IDs
+ // is not perfect for the concurrent recycler
+ for (int j = RANDOM.nextInt(5); j >= 0; --j) {
+ new Thread();
+ }
+
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ return;
+ }
+ while (recycles.getAndDecrement() > 0) {
+ final Recycler.V<?> v = recycler.obtain();
+ v.close();
+ }
+ }
+ };
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ final long start = System.nanoTime();
+ latch.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ return System.nanoTime() - start;
+ }
+
+ public static void main(String[] args) throws InterruptedException {
+ final int limit = 100;
+ final Recycler.C<Object> c = new AbstractRecyclerC<Object>() {
+
+ @Override
+ public Object newInstance(int sizing) {
+ return new Object();
+ }
+
+ @Override
+ public void recycle(Object value) {
+ // do nothing
+ }
+ };
+
+ final ImmutableMap<String, Recycler<Object>> recyclers = ImmutableMap.<String, Recycler<Object>>builder()
+ .put("none", none(c))
+ .put("concurrent-queue", concurrentDeque(c, limit))
+ .put("locked", locked(deque(c, limit)))
+ .put("concurrent", concurrent(dequeFactory(c, limit), Runtime.getRuntime().availableProcessors())).build();
+
+ // warmup
+ final long start = System.nanoTime();
+ while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)) {
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, 2);
+ }
+ }
+
+ // run
+ for (int numThreads = 1; numThreads <= 4 * Runtime.getRuntime().availableProcessors(); numThreads *= 2) {
+ System.out.println("## " + numThreads + " threads\n");
+ System.gc();
+ Thread.sleep(1000);
+ for (Recycler<?> recycler : recyclers.values()) {
+ bench(recycler, NUM_RECYCLES, numThreads);
+ }
+ for (int i = 0; i < 5; ++i) {
+ for (Map.Entry<String, Recycler<Object>> entry : recyclers.entrySet()) {
+ System.out.println(entry.getKey() + "\t" + TimeUnit.NANOSECONDS.toMillis(bench(entry.getValue(), NUM_RECYCLES, numThreads)));
+ }
+ System.out.println();
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
new file mode 100644
index 0000000000..ea1e589f7d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/counter/SimpleCounterBenchmark.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.counter;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ *
+ */
+public class SimpleCounterBenchmark {
+
+ private static long NUMBER_OF_ITERATIONS = 10000000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ final AtomicLong counter = new AtomicLong();
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ counter.incrementAndGet();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
new file mode 100644
index 0000000000..06fc39deab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/fs/FsAppendBenchmark.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.fs;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.unit.ByteSizeValue;
+
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.util.Random;
+
+/**
+ *
+ */
+public class FsAppendBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Path path = PathUtils.get("work/test.log");
+ IOUtils.deleteFilesIgnoringExceptions(path);
+
+ int CHUNK = (int) ByteSizeValue.parseBytesSizeValue("1k", "CHUNK").bytes();
+ long DATA = ByteSizeValue.parseBytesSizeValue("10gb", "DATA").bytes();
+
+ byte[] data = new byte[CHUNK];
+ new Random().nextBytes(data);
+
+ StopWatch watch = new StopWatch().start("write");
+ try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
+ long position = 0;
+ while (position < DATA) {
+ channel.write(ByteBuffer.wrap(data), position);
+ position += data.length;
+ }
+ watch.stop().start("flush");
+ channel.force(true);
+ }
+ watch.stop();
+ System.out.println("Wrote [" + (new ByteSizeValue(DATA)) + "], chunk [" + (new ByteSizeValue(CHUNK)) + "], in " + watch);
+ }
+
+ private static final ByteBuffer fill = ByteBuffer.allocateDirect(1);
+
+// public static long padLogFile(long position, long currentSize, long preAllocSize) throws IOException {
+// if (position + 4096 >= currentSize) {
+// currentSize = currentSize + preAllocSize;
+// fill.position(0);
+// f.getChannel().write(fill, currentSize - fill.remaining());
+// }
+// return currentSize;
+// }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
new file mode 100644
index 0000000000..d78df7f6aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/get/SimpleGetActionBenchmark.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.get;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+// simple test for embedded / single remote lookup
+public class SimpleGetActionBenchmark {
+
+ public static void main(String[] args) {
+ long OPERATIONS = SizeValue.parseSizeValue("300k").singles();
+
+ Node node = NodeBuilder.nodeBuilder().node();
+
+ Client client;
+ if (false) {
+ client = NodeBuilder.nodeBuilder().client(true).node().client();
+ } else {
+ client = node.client();
+ }
+
+ client.prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (long i = 0; i < OPERATIONS; i++) {
+ client.prepareGet("test", "type1", "1").execute().actionGet();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran in " + stopWatch.totalTime() + ", per second: " + (((double) OPERATIONS) / stopWatch.totalTime().secondsFrac()));
+
+ node.close();
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
new file mode 100644
index 0000000000..e51ba31b6d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/hppc/StringMapAdjustOrPutBenchmark.java
@@ -0,0 +1,262 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.hppc;
+
+import com.carrotsearch.hppc.IntIntHashMap;
+import com.carrotsearch.hppc.IntObjectHashMap;
+import com.carrotsearch.hppc.ObjectIntHashMap;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+
+import java.util.HashMap;
+import java.util.IdentityHashMap;
+import java.util.concurrent.ThreadLocalRandom;
+
+// TODO: these benchmarks aren't too good and may be easily skewed by jit doing
+// escape analysis/ side-effects/ local
+// optimisations. Proper benchmarks with JMH (bulk ops, single-shot mode)
+// should be better here.
+// https://github.com/carrotsearch/hppc/blob/master/hppc-benchmarks/src/main/java/com/carrotsearch/hppc/benchmarks/B003_HashSet_Contains.java
+
+public class StringMapAdjustOrPutBenchmark {
+
+ public static void main(String[] args) {
+
+ int NUMBER_OF_KEYS = (int) SizeValue.parseSizeValue("20").singles();
+ int STRING_SIZE = 5;
+ long PUT_OPERATIONS = SizeValue.parseSizeValue("5m").singles();
+ long ITERATIONS = 10;
+ boolean REUSE = true;
+
+
+ String[] values = new String[NUMBER_OF_KEYS];
+ for (int i = 0; i < values.length; i++) {
+ values[i] = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), STRING_SIZE);
+ }
+
+ StopWatch stopWatch;
+
+ stopWatch = new StopWatch().start();
+ ObjectIntHashMap<String> map = new ObjectIntHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ map.clear();
+ } else {
+ map = new ObjectIntHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ map.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ map.clear();
+ map = null;
+
+ stopWatch.stop();
+ System.out.println("TObjectIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+// TObjectIntCustomHashMap<String> iMap = new TObjectIntCustomHashMap<String>(new StringIdentityHashingStrategy());
+ ObjectIntHashMap<String> iMap = new ObjectIntHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(StringIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+ stopWatch = new StopWatch().start();
+ iMap = new ObjectIntHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ iMap.clear();
+ } else {
+ iMap = new ObjectIntHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ iMap.addTo(values[(int) (i % NUMBER_OF_KEYS)], 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TObjectIntCustomHashMap(PureIdentity): " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ iMap.clear();
+ iMap = null;
+
+ // now test with THashMap
+ stopWatch = new StopWatch().start();
+ ObjectObjectHashMap<String, StringEntry> tMap = new ObjectObjectHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tMap.clear();
+ } else {
+ tMap = new ObjectObjectHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = tMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ tMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ tMap.clear();
+ tMap = null;
+
+ stopWatch.stop();
+ System.out.println("THashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ stopWatch = new StopWatch().start();
+ HashMap<String, StringEntry> hMap = new HashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ hMap.clear();
+ } else {
+ hMap = new HashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = hMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ hMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+
+ hMap.clear();
+ hMap = null;
+
+ stopWatch.stop();
+ System.out.println("HashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+
+ stopWatch = new StopWatch().start();
+ IdentityHashMap<String, StringEntry> ihMap = new IdentityHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ ihMap.clear();
+ } else {
+                ihMap = new IdentityHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ String key = values[(int) (i % NUMBER_OF_KEYS)];
+ StringEntry stringEntry = ihMap.get(key);
+ if (stringEntry == null) {
+ stringEntry = new StringEntry(key, 1);
+ ihMap.put(key, stringEntry);
+ } else {
+ stringEntry.counter++;
+ }
+ }
+ }
+ stopWatch.stop();
+ System.out.println("IdentityHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ ihMap.clear();
+ ihMap = null;
+
+ int[] iValues = new int[NUMBER_OF_KEYS];
+        for (int i = 0; i < iValues.length; i++) {
+ iValues[i] = ThreadLocalRandom.current().nextInt();
+ }
+
+ stopWatch = new StopWatch().start();
+ IntIntHashMap intMap = new IntIntHashMap();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ intMap.clear();
+ } else {
+ intMap = new IntIntHashMap();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ intMap.addTo(key, 1);
+ }
+ }
+ stopWatch.stop();
+ System.out.println("TIntIntHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+
+ intMap.clear();
+ intMap = null;
+
+ // now test with THashMap
+ stopWatch = new StopWatch().start();
+ IntObjectHashMap<IntEntry> tIntMap = new IntObjectHashMap<>();
+ for (long iter = 0; iter < ITERATIONS; iter++) {
+ if (REUSE) {
+ tIntMap.clear();
+ } else {
+ tIntMap = new IntObjectHashMap<>();
+ }
+ for (long i = 0; i < PUT_OPERATIONS; i++) {
+ int key = iValues[(int) (i % NUMBER_OF_KEYS)];
+ IntEntry intEntry = tIntMap.get(key);
+ if (intEntry == null) {
+ intEntry = new IntEntry(key, 1);
+ tIntMap.put(key, intEntry);
+ } else {
+ intEntry.counter++;
+ }
+ }
+ }
+
+ tIntMap.clear();
+ tIntMap = null;
+
+ stopWatch.stop();
+ System.out.println("TIntObjectHashMap: " + stopWatch.totalTime() + ", " + stopWatch.totalTime().millisFrac() / ITERATIONS + "ms");
+ }
+
+
+ static class StringEntry {
+ String key;
+ int counter;
+
+ StringEntry(String key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+
+ static class IntEntry {
+ int key;
+ int counter;
+
+ IntEntry(int key, int counter) {
+ this.key = key;
+ this.counter = counter;
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/mapping/ManyMappingsBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/mapping/ManyMappingsBenchmark.java
new file mode 100644
index 0000000000..b40d29a948
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/mapping/ManyMappingsBenchmark.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.mapping;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ */
+public class ManyMappingsBenchmark {
+
+ private static final String MAPPING = "{\n" +
+ " \"dynamic_templates\": [\n" +
+ " {\n" +
+ " \"t1\": {\n" +
+ " \"mapping\": {\n" +
+ " \"store\": false,\n" +
+ " \"norms\": {\n" +
+ " \"enabled\": false\n" +
+ " },\n" +
+ " \"type\": \"string\"\n" +
+ " },\n" +
+ " \"match\": \"*_ss\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
+ " \"t2\": {\n" +
+ " \"mapping\": {\n" +
+ " \"store\": false,\n" +
+ " \"type\": \"date\"\n" +
+ " },\n" +
+ " \"match\": \"*_dt\"\n" +
+ " }\n" +
+ " },\n" +
+ " {\n" +
+ " \"t3\": {\n" +
+ " \"mapping\": {\n" +
+ " \"store\": false,\n" +
+ " \"type\": \"integer\"\n" +
+ " },\n" +
+ " \"match\": \"*_i\"\n" +
+ " }\n" +
+ " }\n" +
+ " ],\n" +
+ " \"_source\": {\n" +
+ " \"enabled\": false\n" +
+ " },\n" +
+ " \"properties\": {}\n" +
+ " }";
+
+ private static final String INDEX_NAME = "index";
+ private static final String TYPE_NAME = "type";
+ private static final int FIELD_COUNT = 100000;
+ private static final int DOC_COUNT = 10000000;
+ private static final boolean TWO_NODES = true;
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+ Bootstrap.initializeNatives(true, false, false);
+ Settings settings = settingsBuilder()
+ .put("")
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ManyMappingsBenchmark.class.getSimpleName();
+ Node node = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings))
+ .node();
+ if (TWO_NODES) {
+ Node node2 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings))
+ .node();
+ }
+
+ Client client = node.client();
+
+ client.admin().indices().prepareDelete(INDEX_NAME)
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .get();
+ client.admin().indices().prepareCreate(INDEX_NAME)
+ .addMapping(TYPE_NAME, MAPPING)
+ .get();
+
+ BulkRequestBuilder builder = client.prepareBulk();
+ int fieldCount = 0;
+ long time = System.currentTimeMillis();
+ final int PRINT = 1000;
+ for (int i = 0; i < DOC_COUNT; i++) {
+ XContentBuilder sourceBuilder = jsonBuilder().startObject();
+ sourceBuilder.field(++fieldCount + "_ss", "xyz");
+ sourceBuilder.field(++fieldCount + "_dt", System.currentTimeMillis());
+ sourceBuilder.field(++fieldCount + "_i", i % 100);
+ sourceBuilder.endObject();
+
+ if (fieldCount >= FIELD_COUNT) {
+ fieldCount = 0;
+ System.out.println("dynamic fields rolled up");
+ }
+
+ builder.add(
+ client.prepareIndex(INDEX_NAME, TYPE_NAME, String.valueOf(i))
+ .setSource(sourceBuilder)
+ );
+
+ if (builder.numberOfActions() >= 1000) {
+ builder.get();
+ builder = client.prepareBulk();
+ }
+
+ if (i % PRINT == 0) {
+ long took = System.currentTimeMillis() - time;
+ time = System.currentTimeMillis();
+ System.out.println("Indexed " + i + " docs, in " + TimeValue.timeValueMillis(took));
+ }
+ }
+ if (builder.numberOfActions() > 0) {
+ builder.get();
+ }
+
+
+
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
new file mode 100644
index 0000000000..f5a126eeab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/percolator/PercolatorStressBenchmark.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.percolator;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.percolator.PercolatorService;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class PercolatorStressBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("cluster.routing.schedule", 200, TimeUnit.MILLISECONDS)
+ .put(SETTING_NUMBER_OF_SHARDS, 4)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node clientNode = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Client client = clientNode.client();
+
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth("test")
+ .setWaitForGreenStatus()
+ .execute().actionGet();
+ if (healthResponse.isTimedOut()) {
+            System.err.println("Quitting, because cluster health request timed out...");
+ return;
+ } else if (healthResponse.getStatus() != ClusterHealthStatus.GREEN) {
+            System.err.println("Quitting, because cluster state isn't green...");
+ return;
+ }
+
+ int COUNT = 200000;
+ int QUERIES = 100;
+ int TERM_QUERIES = QUERIES / 2;
+ int RANGE_QUERIES = QUERIES - TERM_QUERIES;
+
+ client.prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("numeric1", 1).endObject()).execute().actionGet();
+
+ // register queries
+ int i = 0;
+ for (; i < TERM_QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("name", "value"))
+ .endObject())
+ .execute().actionGet();
+ }
+
+ int[] numbers = new int[RANGE_QUERIES];
+ for (; i < QUERIES; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("numeric1").from(i).to(i))
+ .endObject())
+ .execute().actionGet();
+ numbers[i - TERM_QUERIES] = i;
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Percolating [" + COUNT + "] ...");
+ for (i = 1; i <= COUNT; i++) {
+ XContentBuilder source;
+ int expectedMatches;
+ if (i % 2 == 0) {
+ source = source(Integer.toString(i), "value");
+ expectedMatches = TERM_QUERIES;
+ } else {
+ int number = numbers[i % RANGE_QUERIES];
+ source = source(Integer.toString(i), number);
+ expectedMatches = 1;
+ }
+ PercolateResponse percolate = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(source)
+ .execute().actionGet();
+ if (percolate.getMatches().length != expectedMatches) {
+ System.err.println("No matching number of queries");
+ }
+
+ if ((i % 10000) == 0) {
+ System.out.println("Percolated " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Percolation took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ clientNode.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("name", nameValue)
+ .endObject().endObject();
+ }
+
+ private static XContentBuilder source(String id, int number) throws IOException {
+ return jsonBuilder().startObject().startObject("doc")
+ .field("id", id)
+ .field("numeric1", number)
+ .field("numeric2", number)
+ .field("numeric3", number)
+ .field("numeric4", number)
+ .field("numeric5", number)
+ .field("numeric6", number)
+ .field("numeric7", number)
+ .field("numeric8", number)
+ .field("numeric9", number)
+ .field("numeric10", number)
+ .endObject().endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java
new file mode 100644
index 0000000000..1ac7c43303
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/recovery/ReplicaRecoveryBenchmark.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.recovery;
+
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.BackgroundIndexer;
+import org.elasticsearch.transport.TransportModule;
+
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ReplicaRecoveryBenchmark {
+
+ private static final String INDEX_NAME = "index";
+ private static final String TYPE_NAME = "type";
+
+
+ static int DOC_COUNT = (int) SizeValue.parseSizeValue("40k").singles();
+ static int CONCURRENT_INDEXERS = 2;
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+ Bootstrap.initializeNatives(true, false, false);
+
+ Settings settings = settingsBuilder()
+ .put("gateway.type", "local")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, "false")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(TransportModule.TRANSPORT_TYPE_KEY, "local")
+ .build();
+
+ String clusterName = ReplicaRecoveryBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings))
+ .node();
+
+ final ESLogger logger = ESLoggerFactory.getLogger("benchmark");
+
+ final Client client1 = node1.client();
+ client1.admin().cluster().prepareUpdateSettings().setPersistentSettings("logger.indices.recovery: TRACE").get();
+ final BackgroundIndexer indexer = new BackgroundIndexer(INDEX_NAME, TYPE_NAME, client1, 0, CONCURRENT_INDEXERS, false, new Random());
+ indexer.setMinFieldSize(10);
+ indexer.setMaxFieldSize(150);
+ try {
+ client1.admin().indices().prepareDelete(INDEX_NAME).get();
+ } catch (IndexMissingException e) {
+ }
+ client1.admin().indices().prepareCreate(INDEX_NAME).get();
+ indexer.start(DOC_COUNT / 2);
+ while (indexer.totalIndexedDocs() < DOC_COUNT / 2) {
+ Thread.sleep(5000);
+ logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
+ }
+ client1.admin().indices().prepareFlush().get();
+ indexer.continueIndexing(DOC_COUNT / 2);
+ while (indexer.totalIndexedDocs() < DOC_COUNT) {
+ Thread.sleep(5000);
+ logger.info("--> indexed {} of {}", indexer.totalIndexedDocs(), DOC_COUNT);
+ }
+
+
+ logger.info("--> starting another node and allocating a shard on it");
+
+ Node node2 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings))
+ .node();
+
+ client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1").get();
+
+ final AtomicBoolean end = new AtomicBoolean(false);
+
+ final Thread backgroundLogger = new Thread(new Runnable() {
+
+ long lastTime = System.currentTimeMillis();
+ long lastDocs = indexer.totalIndexedDocs();
+ long lastBytes = 0;
+ long lastTranslogOps = 0;
+
+ @Override
+ public void run() {
+ while (true) {
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+
+ }
+ if (end.get()) {
+ return;
+ }
+ long currentTime = System.currentTimeMillis();
+ long currentDocs = indexer.totalIndexedDocs();
+ RecoveryResponse recoveryResponse = client1.admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).get();
+ List<ShardRecoveryResponse> indexRecoveries = recoveryResponse.shardResponses().get(INDEX_NAME);
+ long translogOps;
+ long bytes;
+ if (indexRecoveries.size() > 0) {
+ translogOps = indexRecoveries.get(0).recoveryState().getTranslog().recoveredOperations();
+ bytes = recoveryResponse.shardResponses().get(INDEX_NAME).get(0).recoveryState().getIndex().recoveredBytes();
+ } else {
+ bytes = lastBytes = 0;
+ translogOps = lastTranslogOps = 0;
+ }
+ float seconds = (currentTime - lastTime) / 1000.0F;
+ logger.info("--> indexed [{}];[{}] doc/s, recovered [{}] MB/s , translog ops [{}]/s ",
+ currentDocs, (currentDocs - lastDocs) / seconds,
+ (bytes - lastBytes) / 1024.0F / 1024F / seconds, (translogOps - lastTranslogOps) / seconds);
+ lastBytes = bytes;
+ lastTranslogOps = translogOps;
+ lastTime = currentTime;
+ lastDocs = currentDocs;
+ }
+ }
+ });
+
+ backgroundLogger.start();
+
+ client1.admin().cluster().prepareHealth().setWaitForGreenStatus().get();
+
+ logger.info("--> green. starting relocation cycles");
+
+ long startDocIndexed = indexer.totalIndexedDocs();
+ indexer.continueIndexing(DOC_COUNT * 50);
+
+ long totalRecoveryTime = 0;
+ long startTime = System.currentTimeMillis();
+ long[] recoveryTimes = new long[3];
+ for (int iteration = 0; iteration < 3; iteration++) {
+ logger.info("--> removing replicas");
+ client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 0").get();
+ logger.info("--> adding replica again");
+ long recoveryStart = System.currentTimeMillis();
+ client1.admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS + ": 1").get();
+ client1.admin().cluster().prepareHealth(INDEX_NAME).setWaitForGreenStatus().setTimeout("15m").get();
+ long recoveryTime = System.currentTimeMillis() - recoveryStart;
+ totalRecoveryTime += recoveryTime;
+ recoveryTimes[iteration] = recoveryTime;
+ logger.info("--> recovery done in [{}]", new TimeValue(recoveryTime));
+
+ // sleep some to let things clean up
+ Thread.sleep(10000);
+ }
+
+ long endDocIndexed = indexer.totalIndexedDocs();
+ long totalTime = System.currentTimeMillis() - startTime;
+ indexer.stop();
+
+ end.set(true);
+
+ backgroundLogger.interrupt();
+
+ backgroundLogger.join();
+
+ logger.info("average doc/s [{}], average relocation time [{}], taking [{}], [{}], [{}]", (endDocIndexed - startDocIndexed) * 1000.0 / totalTime, new TimeValue(totalRecoveryTime / 3),
+ TimeValue.timeValueMillis(recoveryTimes[0]), TimeValue.timeValueMillis(recoveryTimes[1]), TimeValue.timeValueMillis(recoveryTimes[2])
+ );
+
+ client1.close();
+ node1.close();
+ node2.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript1.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript1.java
new file mode 100644
index 0000000000..d4b7a0d671
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript1.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeScript1 extends AbstractSearchScript {
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeScript1();
+ }
+ }
+
+ public static final String NATIVE_SCRIPT_1 = "native_1";
+
+ @Override
+ public Object run() {
+ return docFieldLongs("x").getValue();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript2.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript2.java
new file mode 100644
index 0000000000..acb374bf68
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript2.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeScript2 extends AbstractSearchScript {
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeScript2();
+ }
+ }
+
+ public static final String NATIVE_SCRIPT_2 = "native_2";
+
+ @Override
+ public Object run() {
+ return docFieldLongs("x").getValue() + docFieldDoubles("y").getValue();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript3.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript3.java
new file mode 100644
index 0000000000..b57cde7cac
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript3.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeScript3 extends AbstractSearchScript {
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeScript3();
+ }
+ }
+
+ public static final String NATIVE_SCRIPT_3 = "native_3";
+
+ @Override
+ public Object run() {
+ return 1.2 * docFieldLongs("x").getValue() / docFieldDoubles("y").getValue();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript4.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript4.java
new file mode 100644
index 0000000000..d87d1deeaa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScript4.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeScript4 extends AbstractSearchScript {
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeScript4();
+ }
+ }
+
+ public static final String NATIVE_SCRIPT_4 = "native_4";
+
+ @Override
+ public Object run() {
+ return Math.sqrt(Math.abs(docFieldDoubles("z").getValue())) + Math.log(Math.abs(docFieldLongs("x").getValue() * docFieldDoubles("y").getValue()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScriptPlugin.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScriptPlugin.java
new file mode 100644
index 0000000000..c2e8bb9ff7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/NativeScriptPlugin.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptModule;
+
+public class NativeScriptPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "native-benchmark-scripts";
+ }
+
+ @Override
+ public String description() {
+ return "Native benchmark script";
+ }
+
+ public void onModule(ScriptModule module) {
+ module.registerScript(NativeScript1.NATIVE_SCRIPT_1, NativeScript1.Factory.class);
+ module.registerScript(NativeScript2.NATIVE_SCRIPT_2, NativeScript2.Factory.class);
+ module.registerScript(NativeScript3.NATIVE_SCRIPT_3, NativeScript3.Factory.class);
+ module.registerScript(NativeScript4.NATIVE_SCRIPT_4, NativeScript4.Factory.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java
new file mode 100644
index 0000000000..d0f534327a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/expression/ScriptComparisonBenchmark.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.expression;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.sort.ScriptSortBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.joda.time.PeriodType;
+
+import java.util.Random;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+public class ScriptComparisonBenchmark {
+
+ static final String clusterName = ScriptComparisonBenchmark.class.getSimpleName();
+ static final String indexName = "test";
+
+ static String[] langs = {
+ "expression",
+ "native",
+ "groovy"
+ };
+ static String[][] scripts = {
+ // the first value is the "reference" version (pure math)
+ {
+ "x",
+ "doc['x'].value",
+ NativeScript1.NATIVE_SCRIPT_1,
+ "doc['x'].value"
+ }, {
+ "x + y",
+ "doc['x'].value + doc['y'].value",
+ NativeScript2.NATIVE_SCRIPT_2,
+ "doc['x'].value + doc['y'].value",
+ }, {
+ "1.2 * x / y",
+ "1.2 * doc['x'].value / doc['y'].value",
+ NativeScript3.NATIVE_SCRIPT_3,
+ "1.2 * doc['x'].value / doc['y'].value",
+ }, {
+ "sqrt(abs(z)) + ln(abs(x * y))",
+ "sqrt(abs(doc['z'].value)) + ln(abs(doc['x'].value * doc['y'].value))",
+ NativeScript4.NATIVE_SCRIPT_4,
+ "sqrt(abs(doc['z'].value)) + log(abs(doc['x'].value * doc['y'].value))"
+ }
+ };
+
+ public static void main(String[] args) throws Exception {
+ int numDocs = 1000000;
+ int numQueries = 1000;
+ Client client = setupIndex();
+ indexDocs(client, numDocs);
+
+ for (int scriptNum = 0; scriptNum < scripts.length; ++scriptNum) {
+ runBenchmark(client, scriptNum, numQueries);
+ }
+ }
+
+ static void runBenchmark(Client client, int scriptNum, int numQueries) {
+ System.out.println("");
+ System.out.println("Script: " + scripts[scriptNum][0]);
+ System.out.println("--------------------------------");
+ for (int langNum = 0; langNum < langs.length; ++langNum) {
+ String lang = langs[langNum];
+ String script = scripts[scriptNum][langNum + 1];
+
+ timeQueries(client, lang, script, numQueries / 10); // warmup
+ TimeValue time = timeQueries(client, lang, script, numQueries);
+ printResults(lang, time, numQueries);
+ }
+ }
+
+ static Client setupIndex() throws Exception {
+ // create cluster
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptPlugin.class.getName())
+ .put("name", "node1")
+ .build();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settings).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ // delete the index, if it exists
+ try {
+ client.admin().indices().prepareDelete(indexName).execute().actionGet();
+ } catch (ElasticsearchException e) {
+ // ok if the index didn't exist
+ }
+
+ // create mappings
+ IndicesAdminClient admin = client.admin().indices();
+ admin.prepareCreate(indexName).addMapping("doc", "x", "type=long", "y", "type=double");
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ return client;
+ }
+
+ static void indexDocs(Client client, int numDocs) {
+ System.out.print("Indexing " + numDocs + " random docs...");
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ Random r = new Random(1);
+ for (int i = 0; i < numDocs; i++) {
+ bulkRequest.add(client.prepareIndex("test", "doc", Integer.toString(i))
+ .setSource("x", r.nextInt(), "y", r.nextDouble(), "z", r.nextDouble()));
+
+ if (i % 1000 == 0) {
+ bulkRequest.execute().actionGet();
+ bulkRequest = client.prepareBulk();
+ }
+ }
+ bulkRequest.execute().actionGet();
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ System.out.println("done");
+ }
+
+ static TimeValue timeQueries(Client client, String lang, String script, int numQueries) {
+ ScriptSortBuilder sort = SortBuilders.scriptSort(new Script(script, ScriptType.INLINE, lang, null), "number");
+ SearchRequestBuilder req = client.prepareSearch(indexName)
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(sort);
+
+ StopWatch timer = new StopWatch();
+ timer.start();
+ for (int i = 0; i < numQueries; ++i) {
+ req.get();
+ }
+ timer.stop();
+ return timer.totalTime();
+ }
+
+ static void printResults(String lang, TimeValue time, int numQueries) {
+ long avgReq = time.millis() / numQueries;
+ System.out.println(lang + ": " + time.format(PeriodType.seconds()) + " (" + avgReq + " msec per req)");
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
new file mode 100644
index 0000000000..7ef820c6ca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/BasicScriptBenchmark.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.joda.time.DateTime;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.security.SecureRandom;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+public class BasicScriptBenchmark {
+
+ public static class RequestInfo {
+ public RequestInfo(SearchRequest source, int i) {
+ request = source;
+ numTerms = i;
+ }
+
+ SearchRequest request;
+ int numTerms;
+ }
+
+ public static class Results {
+ public static final String TIME_PER_DOCIN_MILLIS = "timePerDocinMillis";
+ public static final String NUM_TERMS = "numTerms";
+ public static final String NUM_DOCS = "numDocs";
+ public static final String TIME_PER_QUERY_IN_SEC = "timePerQueryInSec";
+ public static final String TOTAL_TIME_IN_SEC = "totalTimeInSec";
+ Double[] resultSeconds;
+ Double[] resultMSPerQuery;
+ Long[] numDocs;
+ Integer[] numTerms;
+ Double[] timePerDoc;
+ String label;
+ String description;
+ public String lineStyle;
+ public String color;
+
+ void init(int numVariations, String label, String description, String color, String lineStyle) {
+ resultSeconds = new Double[numVariations];
+ resultMSPerQuery = new Double[numVariations];
+ numDocs = new Long[numVariations];
+ numTerms = new Integer[numVariations];
+ timePerDoc = new Double[numVariations];
+ this.label = label;
+ this.description = description;
+ this.color = color;
+ this.lineStyle = lineStyle;
+ }
+
+ void set(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter, int which, int numTerms) {
+ resultSeconds[which] = (double) ((double) stopWatch.lastTaskTime().getMillis() / (double) 1000);
+ resultMSPerQuery[which] = (double) ((double) stopWatch.lastTaskTime().secondsFrac() / (double) maxIter);
+ numDocs[which] = searchResponse.getHits().totalHits();
+ this.numTerms[which] = numTerms;
+ timePerDoc[which] = resultMSPerQuery[which] / numDocs[which];
+ }
+
+ public void printResults(BufferedWriter writer) throws IOException {
+ String comma = (writer == null) ? "" : ";";
+ String results = description + "\n" + Results.TOTAL_TIME_IN_SEC + " = " + getResultArray(resultSeconds) + comma + "\n"
+ + Results.TIME_PER_QUERY_IN_SEC + " = " + getResultArray(resultMSPerQuery) + comma + "\n" + Results.NUM_DOCS + " = "
+ + getResultArray(numDocs) + comma + "\n" + Results.NUM_TERMS + " = " + getResultArray(numTerms) + comma + "\n"
+ + Results.TIME_PER_DOCIN_MILLIS + " = " + getResultArray(timePerDoc) + comma + "\n";
+ if (writer != null) {
+ writer.write(results);
+ } else {
+ System.out.println(results);
+ }
+
+ }
+
+ private String getResultArray(Object[] resultArray) {
+ String result = "[";
+ for (int i = 0; i < resultArray.length; i++) {
+ result += resultArray[i].toString();
+ if (i != resultArray.length - 1) {
+ result += ",";
+ }
+ }
+ result += "]";
+ return result;
+ }
+ }
+
+ public BasicScriptBenchmark() {
+ }
+
+ static List<String> termsList = new ArrayList<>();
+
+ static void init(int numTerms) {
+ SecureRandom random = new SecureRandom();
+ random.setSeed(1);
+ termsList.clear();
+ for (int i = 0; i < numTerms; i++) {
+ String term = new BigInteger(512, random).toString(32);
+ termsList.add(term);
+ }
+
+ }
+
+ static String[] getTerms(int numTerms) {
+ String[] terms = new String[numTerms];
+ for (int i = 0; i < numTerms; i++) {
+ terms[i] = termsList.get(i);
+ }
+ return terms;
+ }
+
+ public static void writeHelperFunction() throws IOException {
+ try (BufferedWriter out = Files.newBufferedWriter(PathUtils.get("addToPlot.m"), StandardCharsets.UTF_8)) {
+ out.write("function handle = addToPlot(numTerms, perDoc, color, linestyle, linewidth)\n" + "handle = line(numTerms, perDoc);\n"
+ + "set(handle, 'color', color);\n" + "set(handle, 'linestyle',linestyle);\n" + "set(handle, 'LineWidth',linewidth);\n"
+ + "end\n");
+ }
+ }
+
+ public static void printOctaveScript(List<Results> allResults, String[] args) throws IOException {
+ if (args.length == 0) {
+ return;
+ }
+ try (BufferedWriter out = Files.newBufferedWriter(PathUtils.get(args[0]), StandardCharsets.UTF_8)) {
+ out.write("#! /usr/local/bin/octave -qf");
+ out.write("\n\n\n\n");
+ out.write("######################################\n");
+ out.write("# Octave script for plotting results\n");
+ String filename = "scriptScoreBenchmark" + new DateTime().toString();
+ out.write("#Call '" + args[0] + "' from the command line. The plot is then in " + filename + "\n\n");
+
+ out.write("handleArray = [];\n tagArray = [];\n plot([]);\n hold on;\n");
+ for (Results result : allResults) {
+ out.write("\n");
+ out.write("# " + result.description);
+ result.printResults(out);
+ out.write("handleArray = [handleArray, addToPlot(" + Results.NUM_TERMS + ", " + Results.TIME_PER_DOCIN_MILLIS + ", '"
+ + result.color + "','" + result.lineStyle + "',5)];\n");
+ out.write("tagArray = [tagArray; '" + result.label + "'];\n");
+ out.write("\n");
+ }
+
+ out.write("xlabel(\'number of query terms');");
+ out.write("ylabel(\'query time per document');");
+
+ out.write("legend(handleArray,tagArray);\n");
+
+ out.write("saveas(gcf,'" + filename + ".png','png')\n");
+ out.write("hold off;\n\n");
+ } catch (IOException e) {
+ System.err.println("Error: " + e.getMessage());
+ }
+ writeHelperFunction();
+ }
+
+ static void printResult(SearchResponse searchResponse, StopWatch stopWatch, String queryInfo) {
+ System.out.println("--> Searching with " + queryInfo + " took " + stopWatch.lastTaskTime() + ", per query "
+ + (stopWatch.lastTaskTime().secondsFrac() / 100) + " for " + searchResponse.getHits().totalHits() + " docs");
+ }
+
+ static void indexData(long numDocs, Client client, boolean randomizeTerms) throws IOException {
+ try {
+ client.admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Throwable t) {
+ // index might exist already, in this case we do nothing TODO: make
+ // saver in general
+ }
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").field("index_options", "offsets").field("analyzer", "payload_float")
+ .endObject().endObject().endObject().endObject();
+ client.admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping("type1", mapping)
+ .setSettings(
+ Settings.settingsBuilder().put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.number_of_replicas", 0).put("index.number_of_shards", 1)).execute().actionGet();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ Random random = new Random(1);
+ for (int i = 0; i < numDocs; i++) {
+
+ bulkRequest.add(client.prepareIndex().setType("type1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("text", randomText(random, randomizeTerms)).endObject()));
+ if (i % 1000 == 0) {
+ bulkRequest.execute().actionGet();
+ bulkRequest = client.prepareBulk();
+ }
+ }
+ bulkRequest.execute().actionGet();
+ client.admin().indices().prepareRefresh("test").execute().actionGet();
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ System.out.println("Done indexing " + numDocs + " documents");
+
+ }
+
+ private static String randomText(Random random, boolean randomizeTerms) {
+ String text = "";
+ for (int i = 0; i < termsList.size(); i++) {
+ if (random.nextInt(5) == 3 || !randomizeTerms) {
+ text = text + " " + termsList.get(i) + "|1";
+ }
+ }
+ return text;
+ }
+
+ static void printTimings(SearchResponse searchResponse, StopWatch stopWatch, String message, int maxIter) {
+ System.out.println(message);
+ System.out.println(stopWatch.lastTaskTime() + ", " + (stopWatch.lastTaskTime().secondsFrac() / maxIter) + ", "
+ + searchResponse.getHits().totalHits() + ", "
+ + (stopWatch.lastTaskTime().secondsFrac() / (maxIter + searchResponse.getHits().totalHits())));
+ }
+
+ static List<Entry<String, RequestInfo>> initTermQueries(int minTerms, int maxTerms) {
+ List<Entry<String, RequestInfo>> termSearchRequests = new ArrayList<>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+ Map<String, Object> params = new HashMap<>();
+ String[] terms = getTerms(nTerms + 1);
+ params.put("text", terms);
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0).query(QueryBuilders.termsQuery("text", terms)));
+ String infoString = "Results for term query with " + (nTerms + 1) + " terms:";
+ termSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return termSearchRequests;
+ }
+
+ static List<Entry<String, RequestInfo>> initNativeSearchRequests(int minTerms, int maxTerms, String script, boolean langNative) {
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<>();
+ for (int nTerms = minTerms; nTerms < maxTerms; nTerms++) {
+ Map<String, Object> params = new HashMap<>();
+ String[] terms = getTerms(nTerms + 1);
+ params.put("text", terms);
+ String infoString = "Results for native script with " + (nTerms + 1) + " terms:";
+ ScriptScoreFunctionBuilder scriptFunction = (langNative == true) ? scriptFunction(new Script(script, ScriptType.INLINE,
+ "native", params)) : scriptFunction(new Script(script, ScriptType.INLINE, null, params));
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .explain(false)
+ .size(0)
+ .query(functionScoreQuery(QueryBuilders.termsQuery("text", terms), scriptFunction).boostMode(
+ CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, nTerms + 1)));
+ }
+ return nativeSearchRequests;
+ }
+
+ static List<Entry<String, RequestInfo>> initScriptMatchAllSearchRequests(String script, boolean langNative) {
+ List<Entry<String, RequestInfo>> nativeSearchRequests = new ArrayList<>();
+ String infoString = "Results for constant score script:";
+ ScriptScoreFunctionBuilder scriptFunction = (langNative == true) ? scriptFunction(new Script(script, ScriptType.INLINE, "native",
+ null)) : scriptFunction(new Script(script));
+ SearchRequest request = searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).size(0)
+ .query(functionScoreQuery(QueryBuilders.matchAllQuery(), scriptFunction).boostMode(CombineFunction.REPLACE)));
+ nativeSearchRequests.add(new AbstractMap.SimpleEntry<>(infoString, new RequestInfo(request, 0)));
+
+ return nativeSearchRequests;
+ }
+
+ static void runBenchmark(Client client, int maxIter, Results results, List<Entry<String, RequestInfo>> nativeSearchRequests,
+ int minTerms, int warmerIter) throws IOException {
+ int counter = 0;
+ for (Entry<String, RequestInfo> entry : nativeSearchRequests) {
+ SearchResponse searchResponse = null;
+ // warm up
+ for (int i = 0; i < warmerIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ System.gc();
+ // run benchmark
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+ for (int i = 0; i < maxIter; i++) {
+ searchResponse = client.search(entry.getValue().request).actionGet();
+ }
+ stopWatch.stop();
+ results.set(searchResponse, stopWatch, entry.getKey(), maxIter, counter, entry.getValue().numTerms);
+ counter++;
+ }
+ results.printResults(null);
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
new file mode 100644
index 0000000000..23da127ba8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsConstantScoreBenchmark.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantForLoopScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativeConstantScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ScriptsConstantScoreBenchmark extends BasicScriptBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ int minTerms = 49;
+ int maxTerms = 50;
+ int maxIter = 1000;
+ int warmerIter = 1000;
+
+ init(maxTerms);
+ List<Results> allResults = new ArrayList<>();
+ Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+ String clusterName = ScriptsConstantScoreBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ Client client = node1.client();
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ indexData(10000, client, true);
+ client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+ Results results = new Results();
+
+ results.init(maxTerms - minTerms, "native const script score (log(2) 10X)",
+ "Results for native const script score with score = log(2) 10X:", "black", "-.");
+ // init script searches
+ List<Entry<String, RequestInfo>> searchRequests = initScriptMatchAllSearchRequests(
+ NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ // init native script searches
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (log(2) 10X)", "Results for mvel const score = log(2) 10X:", "red", "-.");
+ searchRequests = initScriptMatchAllSearchRequests("score = 0; for (int i=0; i<10;i++) {score = score + log(2);} return score",
+ false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "native const script score (2)", "Results for native const script score with score = 2:",
+ "black", ":");
+ // init native script searches
+ searchRequests = initScriptMatchAllSearchRequests(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, true);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ results = new Results();
+ results.init(maxTerms - minTerms, "mvel const (2)", "Results for mvel const score = 2:", "red", "--");
+ // init native script searches
+ searchRequests = initScriptMatchAllSearchRequests("2", false);
+ // run actual benchmark
+ runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+ allResults.add(results);
+
+ printOctaveScript(allResults, args);
+
+ client.close();
+ node1.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
new file mode 100644
index 0000000000..889a45c458
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScoreBenchmark.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativeNaiveTFIDFScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmark comparing native TFIDF script scoring against plain term queries
+ * and (optionally) equivalent MVEL scripts, on dense and sparse posting lists.
+ */
+public class ScriptsScoreBenchmark extends BasicScriptBenchmark {
+
+    /**
+     * Runs the scoring benchmarks against a freshly started local node and
+     * prints an Octave script with the collected timings.
+     *
+     * @param args forwarded to printOctaveScript
+     */
+    public static void main(String[] args) throws Exception {
+
+        int minTerms = 1;
+        int maxTerms = 50;
+        int maxIter = 100;
+        int warmerIter = 10;
+
+        // MVEL comparison runs are off by default; flip to true to include them.
+        boolean runMVEL = false;
+        init(maxTerms);
+        List<Results> allResults = new ArrayList<>();
+        Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+        String clusterName = ScriptsScoreBenchmark.class.getSimpleName();
+        Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+        Client client = node1.client();
+        client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+        // first pass: dense posting lists
+        indexData(10000, client, false);
+        client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+        Results results = new Results();
+        results.init(maxTerms - minTerms, "native tfidf script score dense posting list",
+                "Results for native script score with dense posting list:", "black", "--");
+        // init native script searches
+        List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+                NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        results = new Results();
+
+        results.init(maxTerms - minTerms, "term query dense posting list", "Results for term query with dense posting lists:", "green",
+                "--");
+        // init term queries
+        searchRequests = initTermQueries(minTerms, maxTerms);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        if (runMVEL) {
+
+            results = new Results();
+            results.init(maxTerms - minTerms, "mvel tfidf dense posting list", "Results for mvel score with dense posting list:", "red",
+                    "--");
+            // init native script searches
+            searchRequests = initNativeSearchRequests(
+                    minTerms,
+                    maxTerms,
+                    "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+                    false);
+            // run actual benchmark
+            runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+            allResults.add(results);
+        }
+
+        // second pass: re-index to get sparse posting lists
+        indexData(10000, client, true);
+        results = new Results();
+        results.init(maxTerms - minTerms, "native tfidf script score sparse posting list",
+                "Results for native script score with sparse posting list:", "black", "-.");
+        // init native script searches
+        searchRequests = initNativeSearchRequests(minTerms, maxTerms, NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, true);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        results = new Results();
+
+        results.init(maxTerms - minTerms, "term query sparse posting list", "Results for term query with sparse posting lists:", "green",
+                "-.");
+        // init term queries
+        searchRequests = initTermQueries(minTerms, maxTerms);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        if (runMVEL) {
+
+            results = new Results();
+            results.init(maxTerms - minTerms, "mvel tfidf sparse posting list", "Results for mvel score with sparse posting list:", "red",
+                    "-.");
+            // init native script searches
+            searchRequests = initNativeSearchRequests(
+                    minTerms,
+                    maxTerms,
+                    "score = 0.0; fi= _terminfo[\"text\"]; for(i=0; i<text.size(); i++){terminfo = fi[text.get(i)]; score = score + terminfo.tf()*fi.getDocCount()/terminfo.df();} return score;",
+                    false);
+            // run actual benchmark
+            runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+            allResults.add(results);
+        }
+        printOctaveScript(allResults, args);
+
+        client.close();
+        node1.close();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
new file mode 100644
index 0000000000..786f943b2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/ScriptsScorePayloadSumBenchmark.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score;
+
+import org.elasticsearch.benchmark.scripts.score.plugin.NativeScriptExamplesPlugin;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumNoRecordScoreScript;
+import org.elasticsearch.benchmark.scripts.score.script.NativePayloadSumScoreScript;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map.Entry;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmark comparing the payload-sum native script with its "no record"
+ * variant.
+ */
+public class ScriptsScorePayloadSumBenchmark extends BasicScriptBenchmark {
+
+    /**
+     * Benchmarks the two native payload-sum scripts (with and without
+     * position/payload recording) against a freshly started local node and
+     * prints an Octave script with the collected timings.
+     *
+     * @param args forwarded to printOctaveScript
+     */
+    public static void main(String[] args) throws Exception {
+
+        int minTerms = 1;
+        int maxTerms = 50;
+        int maxIter = 100;
+        int warmerIter = 10;
+
+        init(maxTerms);
+        List<Results> allResults = new ArrayList<>();
+        Settings settings = settingsBuilder().put("plugin.types", NativeScriptExamplesPlugin.class.getName()).build();
+
+        // use this class' own name for the cluster (the original copied
+        // ScriptsScoreBenchmark's name, which would collide with a concurrently
+        // running ScriptsScoreBenchmark cluster)
+        String clusterName = ScriptsScorePayloadSumBenchmark.class.getSimpleName();
+        Node node1 = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "node1")).node();
+        Client client = node1.client();
+        client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+        indexData(10000, client, false);
+        client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+
+        Results results = new Results();
+        // init script searches
+        results.init(maxTerms - minTerms, "native payload sum script score", "Results for native script score:", "green", ":");
+        List<Entry<String, RequestInfo>> searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+                NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, true);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        results = new Results();
+        // init script searches
+        results.init(maxTerms - minTerms, "native payload sum script score no record", "Results for native script score:", "black", ":");
+        searchRequests = initNativeSearchRequests(minTerms, maxTerms,
+                NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, true);
+        // run actual benchmark
+        runBenchmark(client, maxIter, results, searchRequests, minTerms, warmerIter);
+        allResults.add(results);
+
+        printOctaveScript(allResults, args);
+
+        client.close();
+        node1.close();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
new file mode 100644
index 0000000000..0d90d9fd6d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/plugin/NativeScriptExamplesPlugin.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.scripts.score.plugin;
+
+import org.elasticsearch.benchmark.scripts.score.script.*;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptModule;
+
+public class NativeScriptExamplesPlugin extends AbstractPlugin {
+
+
+    @Override
+    public String name() {
+        return "native-script-example";
+    }
+
+    @Override
+    public String description() {
+        return "Native script examples";
+    }
+
+    // Registers every native benchmark script under its public name constant so
+    // the benchmarks can request them by name.
+    public void onModule(ScriptModule module) {
+        module.registerScript(NativeNaiveTFIDFScoreScript.NATIVE_NAIVE_TFIDF_SCRIPT_SCORE, NativeNaiveTFIDFScoreScript.Factory.class);
+        module.registerScript(NativeConstantForLoopScoreScript.NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE, NativeConstantForLoopScoreScript.Factory.class);
+        module.registerScript(NativeConstantScoreScript.NATIVE_CONSTANT_SCRIPT_SCORE, NativeConstantScoreScript.Factory.class);
+        module.registerScript(NativePayloadSumScoreScript.NATIVE_PAYLOAD_SUM_SCRIPT_SCORE, NativePayloadSumScoreScript.Factory.class);
+        module.registerScript(NativePayloadSumNoRecordScoreScript.NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE, NativePayloadSumNoRecordScoreScript.Factory.class);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
new file mode 100644
index 0000000000..c61a40da88
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantForLoopScoreScript.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantForLoopScoreScript extends AbstractSearchScript {
+
+    // Script name used to register and look up this native script.
+    public static final String NATIVE_CONSTANT_FOR_LOOP_SCRIPT_SCORE = "native_constant_for_loop_script_score";
+
+    // Factory through which the script module instantiates the script.
+    public static class Factory implements NativeScriptFactory {
+
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new NativeConstantForLoopScoreScript(params);
+        }
+    }
+
+    // params are accepted for interface symmetry but intentionally unused
+    private NativeConstantForLoopScoreScript(Map<String, Object> params) {
+
+    }
+
+    @Override
+    public Object run() {
+        // Constant per-doc work: 10 iterations of log(2), mirroring the MVEL
+        // "score + log(2)" 10X script this benchmark compares against.
+        float score = 0;
+        for (int i = 0; i < 10; i++) {
+            score += Math.log(2);
+        }
+        return score;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
new file mode 100644
index 0000000000..6d072421dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeConstantScoreScript.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeConstantScoreScript extends AbstractSearchScript {
+
+    // Script name used to register and look up this native script.
+    public static final String NATIVE_CONSTANT_SCRIPT_SCORE = "native_constant_script_score";
+
+    // Factory through which the script module instantiates the script.
+    public static class Factory implements NativeScriptFactory {
+
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new NativeConstantScoreScript();
+        }
+    }
+
+    private NativeConstantScoreScript() {
+    }
+
+    @Override
+    public Object run() {
+        // constant score: baseline for measuring pure script-invocation overhead
+        return 2;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
new file mode 100644
index 0000000000..1f88e66351
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativeNaiveTFIDFScoreScript.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativeNaiveTFIDFScoreScript extends AbstractSearchScript {
+
+    public static final String NATIVE_NAIVE_TFIDF_SCRIPT_SCORE = "native_naive_tfidf_script_score";
+    // Field to score on and the query terms, both taken from the script params.
+    String field = null;
+    String[] terms = null;
+
+    // Factory through which the script module instantiates the script.
+    public static class Factory implements NativeScriptFactory {
+
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new NativeNaiveTFIDFScoreScript(params);
+        }
+    }
+
+    // Expects params with a single entry: field name -> ArrayList of terms.
+    // (Dropped the original's no-op params.entrySet() call and the terms array
+    // allocation that was immediately overwritten.)
+    private NativeNaiveTFIDFScoreScript(Map<String, Object> params) {
+        field = params.keySet().iterator().next();
+        ArrayList<String> arrayList = (ArrayList<String>) params.get(field);
+        terms = arrayList.toArray(new String[arrayList.size()]);
+    }
+
+    @Override
+    public Object run() {
+        float score = 0;
+        IndexField indexField = indexLookup().get(field);
+        for (int i = 0; i < terms.length; i++) {
+            IndexFieldTerm indexFieldTerm = indexField.get(terms[i]);
+            try {
+                // naive tf * (docCount / df) contribution per matching term
+                if (indexFieldTerm.tf() != 0) {
+                    score += indexFieldTerm.tf() * indexField.docCount() / indexFieldTerm.df();
+                }
+            } catch (IOException e) {
+                // keep the IOException as the cause instead of discarding it
+                throw new RuntimeException(e);
+            }
+        }
+        return score;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
new file mode 100644
index 0000000000..825b31e801
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumNoRecordScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumNoRecordScoreScript extends AbstractSearchScript {
+
+    public static final String NATIVE_PAYLOAD_SUM_NO_RECORD_SCRIPT_SCORE = "native_payload_sum_no_record_script_score";
+    // Field to score on and the query terms, both taken from the script params.
+    String field = null;
+    String[] terms = null;
+
+    // Factory through which the script module instantiates the script.
+    public static class Factory implements NativeScriptFactory {
+
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new NativePayloadSumNoRecordScoreScript(params);
+        }
+    }
+
+    // Expects params with a single entry: field name -> ArrayList of terms.
+    private NativePayloadSumNoRecordScoreScript(Map<String, Object> params) {
+        params.entrySet(); // NOTE(review): no-op call, result discarded
+        terms = new String[params.size()]; // overwritten below
+        field = params.keySet().iterator().next();
+        Object o = params.get(field);
+        ArrayList<String> arrayList = (ArrayList<String>) o;
+        terms = arrayList.toArray(new String[arrayList.size()]);
+
+    }
+
+    @Override
+    public Object run() {
+        // Sums the first payload of every position of every query term.
+        float score = 0;
+        IndexField indexField = indexLookup().get(field);
+        for (int i = 0; i < terms.length; i++) {
+            // FLAG_PAYLOADS only (no FLAG_CACHE) — the "no record" variant;
+            // contrast with NativePayloadSumScoreScript
+            IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS);
+            for (TermPosition pos : indexFieldTerm) {
+                score += pos.payloadAsFloat(0);
+            }
+        }
+        return score;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
new file mode 100644
index 0000000000..0172c561aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/scripts/score/script/NativePayloadSumScoreScript.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.scripts.score.script;
+
+import org.elasticsearch.search.lookup.IndexFieldTerm;
+import org.elasticsearch.search.lookup.IndexField;
+import org.elasticsearch.search.lookup.IndexLookup;
+import org.elasticsearch.search.lookup.TermPosition;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.AbstractSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.ArrayList;
+import java.util.Map;
+
+public class NativePayloadSumScoreScript extends AbstractSearchScript {
+
+    public static final String NATIVE_PAYLOAD_SUM_SCRIPT_SCORE = "native_payload_sum_script_score";
+    // Field to score on and the query terms, both taken from the script params.
+    String field = null;
+    String[] terms = null;
+
+    // Factory through which the script module instantiates the script.
+    public static class Factory implements NativeScriptFactory {
+
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new NativePayloadSumScoreScript(params);
+        }
+    }
+
+    // Expects params with a single entry: field name -> ArrayList of terms.
+    private NativePayloadSumScoreScript(Map<String, Object> params) {
+        params.entrySet(); // NOTE(review): no-op call, result discarded
+        terms = new String[params.size()]; // overwritten below
+        field = params.keySet().iterator().next();
+        Object o = params.get(field);
+        ArrayList<String> arrayList = (ArrayList<String>) o;
+        terms = arrayList.toArray(new String[arrayList.size()]);
+
+    }
+
+    @Override
+    public Object run() {
+        // Sums the first payload of every position of every query term.
+        float score = 0;
+        IndexField indexField = indexLookup().get(field);
+        for (int i = 0; i < terms.length; i++) {
+            // FLAG_PAYLOADS | FLAG_CACHE — presumably caches the term data (the
+            // "recording" variant measured against the no-record script); confirm
+            // against IndexLookup docs
+            IndexFieldTerm indexFieldTerm = indexField.get(terms[i], IndexLookup.FLAG_PAYLOADS | IndexLookup.FLAG_CACHE);
+            for (TermPosition pos : indexFieldTerm) {
+                score += pos.payloadAsFloat(0);
+            }
+        }
+        return score;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java b/core/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
new file mode 100644
index 0000000000..213a522c80
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/SuggestSearchBenchMark.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.SuggestBuilders;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmark measuring term-suggester latency on a single local node.
+ */
+public class SuggestSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ int SEARCH_ITERS = 200;
+
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Client client = nodes[0].client();
+ try {
+ client.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("10m").singles();
+ int BATCH = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ char character = 'a';
+ int idCounter = 0;
+ for (; i <= ITERS; i++) {
+ int termCounter = 0;
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(idCounter++)).source(source("prefix" + character + termCounter++)));
+ }
+ character++;
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime());
+
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+ }
+
+
+ System.out.println("Warming up...");
+ char startChar = 'a';
+ for (int i = 0; i <= 20; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(prefixQuery("field", term))
+ .addSuggestion(SuggestBuilders.termSuggestion("field").field("field").text(term).suggestMode("always"))
+ .execute().actionGet();
+ if (response.getHits().totalHits() == 0) {
+ System.err.println("No hits");
+ continue;
+ }
+ startChar++;
+ }
+
+
+ System.out.println("Starting benchmarking suggestions.");
+ startChar = 'a';
+ long timeTaken = 0;
+ for (int i = 0; i <= SEARCH_ITERS; i++) {
+ String term = "prefix" + startChar;
+ SearchResponse response = client.prepareSearch()
+ .setQuery(matchQuery("field", term))
+ .addSuggestion(SuggestBuilders.termSuggestion("field").text(term).field("field").suggestMode("always"))
+ .execute().actionGet();
+ timeTaken += response.getTookInMillis();
+ if (response.getSuggest() == null) {
+ System.err.println("No suggestions");
+ continue;
+ }
+ List<? extends Option> options = response.getSuggest().getSuggestion("field").getEntries().get(0).getOptions();
+ if (options == null || options.isEmpty()) {
+ System.err.println("No suggestions");
+ }
+ startChar++;
+ }
+
+ System.out.println("Avg time taken without filter " + (timeTaken / SEARCH_ITERS));
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String nameValue) throws IOException {
+ return jsonBuilder().startObject()
+ .field("field", nameValue)
+ .endObject();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/CardinalityAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/CardinalityAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..40e278159c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/CardinalityAggregationSearchBenchmark.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
+
+/**
+ * Standalone benchmark (run via {@link #main}, not a unit test) measuring the
+ * latency of the cardinality aggregation over low- and high-cardinality
+ * string and numeric fields. The string fields are mapped as multi_fields
+ * with a murmur3 "hash" sub-field so the aggregation can also be timed on
+ * precomputed hashes.
+ */
+public class CardinalityAggregationSearchBenchmark {
+
+ // Shared RNG for generating random field values during indexing.
+ private static final Random R = new Random();
+ private static final String CLUSTER_NAME = CardinalityAggregationSearchBenchmark.class.getSimpleName();
+ private static final int NUM_DOCS = 10000000;
+ // Upper bounds of the random values, i.e. the approximate cardinality of
+ // the "low_card_*" and "high_card_*" fields respectively.
+ private static final int LOW_CARD = 1000;
+ private static final int HIGH_CARD = 1000000;
+ // Documents per bulk request.
+ private static final int BATCH = 100;
+ // Unreported warmup rounds, reported measurement rounds, and searches
+ // averaged per timed measurement.
+ private static final int WARM = 5;
+ private static final int RUNS = 10;
+ private static final int ITERS = 5;
+
+ public static void main(String[] args) {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ // One data node plus a separate client-only node, all in this JVM.
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = clientNode.client();
+
+ try {
+ // Each *_str_value is a multi_field: the raw string plus a murmur3
+ // "hash" sub-field holding the value's hash computed at index time.
+ client.admin().indices().create(createIndexRequest("index").settings(settings).mapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("low_card_str_value")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("low_card_str_value")
+ .field("type", "string")
+ .endObject()
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("high_card_str_value")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("high_card_str_value")
+ .field("type", "string")
+ .endObject()
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("low_card_num_value")
+ .field("type", "long")
+ .endObject()
+ .startObject("high_card_num_value")
+ .field("type", "long")
+ .endObject()
+ .endObject().endObject().endObject())).actionGet();
+
+ System.out.println("Indexing " + NUM_DOCS + " documents");
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUM_DOCS; ) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
+ // Same random int feeds both the string and numeric variant
+ // of each field, so their cardinalities match.
+ final int lowCard = RandomInts.randomInt(R, LOW_CARD);
+ final int highCard = RandomInts.randomInt(R, HIGH_CARD);
+ request.add(client.prepareIndex("index", "type", Integer.toString(i)).setSource("low_card_str_value", "str" + lowCard, "high_card_str_value", "str" + highCard, "low_card_num_value", lowCard , "high_card_num_value", highCard));
+ ++i;
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ System.err.println(response.buildFailureMessage());
+ }
+ if ((i % 100000) == 0) {
+ System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+
+ client.admin().indices().prepareRefresh("index").execute().actionGet();
+ } catch (Exception e) {
+ // NOTE(review): ANY failure above (mapping error, bulk error, ...)
+ // lands here and is reported as an existing index — consider
+ // printing the exception before assuming the index exists.
+ System.out.println("Index already exists, skipping index creation");
+ }
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ // WARM silent rounds followed by RUNS reported rounds; each field's
+ // figure is the wall-clock average over ITERS searches.
+ for (int i = 0; i < WARM + RUNS; ++i) {
+ if (i >= WARM) {
+ System.out.println("RUN " + (i - WARM));
+ }
+ for (String field : new String[] {"low_card_str_value", "low_card_str_value.hash", "high_card_str_value", "high_card_str_value.hash", "low_card_num_value", "high_card_num_value"}) {
+ long start = System.nanoTime();
+ SearchResponse resp = null;
+ for (int j = 0; j < ITERS; ++j) {
+ resp = client.prepareSearch("index").setSize(0).addAggregation(cardinality("cardinality").field(field)).execute().actionGet();
+ }
+ long end = System.nanoTime();
+ // Cardinality reported from the last iteration's response only.
+ final long cardinality = ((Cardinality) resp.getAggregations().get("cardinality")).getValue();
+ if (i >= WARM) {
+ System.out.println(field + "\t" + new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS) + "\tcardinality=" + cardinality);
+ }
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/GlobalOrdinalsBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/GlobalOrdinalsBenchmark.java
new file mode 100644
index 0000000000..c415dbf9b2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/GlobalOrdinalsBenchmark.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.hppc.IntIntHashMap;
+import com.carrotsearch.hppc.ObjectHashSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.benchmark.search.aggregations.TermsAggregationSearchBenchmark.StatsResult;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.transport.TransportModule;
+
+import java.util.*;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+/**
+ * Standalone benchmark (run via {@link #main}) comparing terms-aggregation
+ * execution hints ("global_ordinals_low_cardinality" vs "ordinals") on string
+ * fields whose cardinality doubles from FIELD_START up to FIELD_LIMIT
+ * (field_1, field_2, field_4, ... field_{1&lt;&lt;22}). Results, including
+ * fielddata memory usage, are printed as a summary table at the end.
+ */
+public class GlobalOrdinalsBenchmark {
+
+ private static final String INDEX_NAME = "index";
+ private static final String TYPE_NAME = "type";
+ private static final int QUERY_WARMUP = 25;
+ private static final int QUERY_COUNT = 100;
+ // Field suffixes run through powers of two: FIELD_START, 2, 4, ... FIELD_LIMIT.
+ private static final int FIELD_START = 1;
+ private static final int FIELD_LIMIT = 1 << 22;
+ // When true, aggregations run on the ".doc_values" sub-field instead of
+ // the in-memory fielddata of the raw field.
+ private static final boolean USE_DOC_VALUES = false;
+
+ // Target document count; reset below to the actual count in the index.
+ static long COUNT = SizeValue.parseSizeValue("5m").singles();
+ static Node node;
+ static Client client;
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+ Bootstrap.initializeNatives(true, false, false);
+ Random random = new Random();
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(TransportModule.TRANSPORT_TYPE_KEY, "local")
+ .build();
+
+ String clusterName = GlobalOrdinalsBenchmark.class.getSimpleName();
+ node = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings))
+ .node();
+
+ client = node.client();
+
+ try {
+ // Dynamic template: every string field is indexed not_analyzed and
+ // additionally gets a "doc_values" sub-field backed by doc values,
+ // so both fielddata and doc-values variants are available.
+ client.admin().indices().prepareCreate(INDEX_NAME)
+ .addMapping(TYPE_NAME, jsonBuilder().startObject().startObject(TYPE_NAME)
+ .startArray("dynamic_templates")
+ .startObject()
+ .startObject("default")
+ .field("match", "*")
+ .field("match_mapping_type", "string")
+ .startObject("mapping")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fields")
+ .startObject("doc_values")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().endObject())
+ .get();
+ // Generate FIELD_LIMIT distinct random terms (retry on collisions).
+ ObjectHashSet<String> uniqueTerms = new ObjectHashSet<>();
+ for (int i = 0; i < FIELD_LIMIT; i++) {
+ boolean added;
+ do {
+ added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, 16));
+ } while (!added);
+ }
+ String[] sValues = uniqueTerms.toArray(String.class);
+ uniqueTerms = null;
+
+ BulkRequestBuilder builder = client.prepareBulk();
+ // Per field suffix, 'tracker' counts how many distinct terms have
+ // been handed out so far: the first 'fieldSuffix' docs receive
+ // sequential distinct terms (forcing cardinality == fieldSuffix),
+ // after which terms are drawn randomly from that same pool.
+ IntIntHashMap tracker = new IntIntHashMap();
+ for (int i = 0; i < COUNT; i++) {
+ Map<String, Object> fieldValues = new HashMap<>();
+ for (int fieldSuffix = 1; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
+ int index = tracker.putOrAdd(fieldSuffix, 0, 0);
+ if (index >= fieldSuffix) {
+ index = random.nextInt(fieldSuffix);
+ fieldValues.put("field_" + fieldSuffix, sValues[index]);
+ } else {
+ fieldValues.put("field_" + fieldSuffix, sValues[index]);
+ tracker.put(fieldSuffix, ++index);
+ }
+ }
+ builder.add(
+ client.prepareIndex(INDEX_NAME, TYPE_NAME, String.valueOf(i))
+ .setSource(fieldValues)
+ );
+
+ // Flush the bulk every 1000 actions.
+ if (builder.numberOfActions() >= 1000) {
+ builder.get();
+ builder = client.prepareBulk();
+ }
+ }
+ if (builder.numberOfActions() > 0) {
+ builder.get();
+ }
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+
+ // Enable ordinals debug logging so loading stats show up in the log.
+ client.admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put("logger.index.fielddata.ordinals", "DEBUG"))
+ .get();
+
+ client.admin().indices().prepareRefresh(INDEX_NAME).execute().actionGet();
+ // Re-read the real doc count (matters when reusing an existing index).
+ COUNT = client.prepareCount(INDEX_NAME).setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ List<StatsResult> stats = new ArrayList<>();
+ for (int fieldSuffix = FIELD_START; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
+ String fieldName = "field_" + fieldSuffix;
+ String name = "global_ordinals-" + fieldName;
+ if (USE_DOC_VALUES) {
+ fieldName = fieldName + ".doc_values";
+ name = name + "_doc_values"; // can't have . in agg name
+ }
+ stats.add(terms(name, fieldName, "global_ordinals_low_cardinality"));
+ }
+
+ for (int fieldSuffix = FIELD_START; fieldSuffix <= FIELD_LIMIT; fieldSuffix <<= 1) {
+ String fieldName = "field_" + fieldSuffix;
+ String name = "ordinals-" + fieldName;
+ if (USE_DOC_VALUES) {
+ fieldName = fieldName + ".doc_values";
+ name = name + "_doc_values"; // can't have . in agg name
+ }
+ stats.add(terms(name, fieldName, "ordinals"));
+ }
+
+ System.out.println("------------------ SUMMARY -----------------------------------------");
+ System.out.format(Locale.ENGLISH, "%30s%10s%10s%15s\n", "name", "took", "millis", "fieldata size");
+ for (StatsResult stat : stats) {
+ System.out.format(Locale.ENGLISH, "%30s%10s%10d%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed);
+ }
+ System.out.println("------------------ SUMMARY -----------------------------------------");
+
+ client.close();
+ node.close();
+ }
+
+ /**
+ * Times the terms aggregation on one field with the given execution hint.
+ * Clears the fielddata cache first, warms up with QUERY_WARMUP searches,
+ * then sums took-times over QUERY_COUNT measured searches and reports
+ * heap and fielddata memory usage.
+ *
+ * @param name aggregation (and result row) name
+ * @param field field to aggregate on
+ * @param executionHint terms aggregation execution hint to benchmark
+ * @return accumulated query time and fielddata size for the summary table
+ */
+ private static StatsResult terms(String name, String field, String executionHint) {
+ long totalQueryTime;// LM VALUE
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ System.gc();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(INDEX_NAME)
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addAggregation(AggregationBuilders.terms(name).field(field).executionHint(executionHint))
+ .get();
+ if (j == 0) {
+ // First query includes fielddata/ordinals loading cost.
+ System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(INDEX_NAME)
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addAggregation(AggregationBuilders.terms(name).field(field).executionHint(executionHint))
+ .get();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ String nodeId = node.injector().getInstance(Discovery.class).localNode().getId();
+ ClusterStatsResponse clusterStateResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeId).get();
+ System.out.println("--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
+ ByteSizeValue fieldDataMemoryUsed = clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
+ System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
+
+ return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..03fb38344d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/HistogramAggregationSearchBenchmark.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+
+import java.util.Date;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+
+/**
+ *
+ */
+/**
+ * Standalone benchmark (run via {@link #main}) for histogram and
+ * date_histogram aggregations over numeric fields of different widths
+ * (byte, short, int, long) and a date field, with and without a stats
+ * sub-aggregation.
+ */
+public class HistogramAggregationSearchBenchmark {
+
+ // Total docs to index, bulk size, warmup and measured query counts, and
+ // number of distinct values per numeric field.
+ static final long COUNT = SizeValue.parseSizeValue("20m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_WARMUP = 5;
+ static final int QUERY_COUNT = 20;
+ static final int NUMBER_OF_TERMS = 1000;
+
+ public static void main(String[] args) throws Exception {
+ // NOTE(review): sibling benchmarks use the "index.refresh_interval"
+ // key; confirm the un-prefixed "refresh_interval" takes effect here.
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = HistogramAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+
+ //Node clientNode = nodeBuilder().clusterName(clusterName).settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = node1.client();
+
+ // Candidate values 0..NUMBER_OF_TERMS-1; docs sample from this pool.
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = i;
+ }
+
+ Random r = new Random();
+ try {
+ // Same random value is stored at four numeric widths so the agg
+ // cost can be compared per field type.
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put(settings))
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("l_value")
+ .field("type", "long")
+ .endObject()
+ .startObject("i_value")
+ .field("type", "integer")
+ .endObject()
+ .startObject("s_value")
+ .field("type", "short")
+ .endObject()
+ .startObject("b_value")
+ .field("type", "byte")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long iters = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= iters; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ final long value = lValues[r.nextInt(lValues.length)];
+ XContentBuilder source = jsonBuilder().startObject()
+ .field("id", Integer.valueOf(counter))
+ .field("l_value", value)
+ .field("i_value", (int) value)
+ .field("s_value", (short) value)
+ .field("b_value", (byte) value)
+ .field("date", new Date())
+ .endObject();
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(source));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ // NOTE(review): any indexing failure is treated as "index exists";
+ // consider logging the exception.
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ // Hard-abort if the index does not contain exactly COUNT docs — the
+ // timings below would be meaningless otherwise.
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error();
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ System.out.println("--> Warmup...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("l_value").field("l_value").interval(4))
+ .addAggregation(histogram("i_value").field("i_value").interval(4))
+ .addAggregation(histogram("s_value").field("s_value").interval(4))
+ .addAggregation(histogram("b_value").field("b_value").interval(4))
+ .addAggregation(histogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup DONE");
+
+ // Per numeric field: plain histogram, then histogram with a stats
+ // sub-aggregation; each reported as the QUERY_COUNT average.
+ long totalQueryTime = 0;
+ for (String field : new String[] {"b_value", "s_value", "i_value", "l_value"}) {
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram(field).field(field).subAggregation(stats(field).field(field)).interval(4))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (" + field + "/" + field + ") " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ // Date histogram, plain and with a stats sub-aggregation on l_value.
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date").field("date").interval(1000).subAggregation(stats("stats").field("l_value")))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram Aggregation (date/l_value) " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ node1.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/IncludeExcludeAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/IncludeExcludeAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..1bf8a33c09
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/IncludeExcludeAggregationSearchBenchmark.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+
+/**
+ * Standalone benchmark (run via {@link #main}) timing a terms aggregation
+ * with an include regex ("s.*") combined with a prefix query, over randomly
+ * generated short strings.
+ */
+public class IncludeExcludeAggregationSearchBenchmark {
+
+ // Shared RNG for generating random string values.
+ private static final Random R = new Random();
+ private static final String CLUSTER_NAME = IncludeExcludeAggregationSearchBenchmark.class.getSimpleName();
+ private static final int NUM_DOCS = 10000000;
+ // Docs per bulk request; warmup rounds; reported rounds; searches
+ // averaged per timed measurement.
+ private static final int BATCH = 100;
+ private static final int WARM = 3;
+ private static final int RUNS = 10;
+ private static final int ITERS = 3;
+
+ public static void main(String[] args) {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ // One data node plus a separate client-only node, all in this JVM.
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = clientNode.client();
+
+ try {
+ client.admin().indices().create(createIndexRequest("index").settings(settings).mapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("str")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject().endObject().endObject())).actionGet();
+
+ System.out.println("Indexing " + NUM_DOCS + " documents");
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUM_DOCS; ) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
+ request.add(client.prepareIndex("index", "type", Integer.toString(i)).setSource("str", TestUtil.randomSimpleString(R)));
+ ++i;
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ System.err.println(response.buildFailureMessage());
+ }
+ if ((i % 100000) == 0) {
+ System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+
+ client.admin().indices().prepareRefresh("index").execute().actionGet();
+ } catch (Exception e) {
+ // NOTE(review): any failure above is silently treated as an
+ // existing index — consider logging the exception.
+ System.out.println("Index already exists, skipping index creation")
;
+ }
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ // WARM silent rounds followed by RUNS reported rounds; each figure is
+ // the average over ITERS searches combining a prefix query with a
+ // terms agg restricted by the include regex "s.*".
+ for (int i = 0; i < WARM + RUNS; ++i) {
+ if (i >= WARM) {
+ System.out.println("RUN " + (i - WARM));
+ }
+ long start = System.nanoTime();
+ SearchResponse resp = null;
+ for (int j = 0; j < ITERS; ++j) {
+ resp = client.prepareSearch("index").setQuery(QueryBuilders.prefixQuery("str", "sf")).setSize(0).addAggregation(terms("t").field("str").include("s.*")).execute().actionGet();
+ }
+ long end = System.nanoTime();
+ if (i >= WARM) {
+ System.out.println(new TimeValue((end - start) / ITERS, TimeUnit.NANOSECONDS));
+ }
+ }
+ // NOTE(review): unlike sibling benchmarks, client and nodes are not
+ // explicitly closed here; the process relies on JVM exit.
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/PercentilesAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/PercentilesAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..62bb62642a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/PercentilesAggregationSearchBenchmark.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
+
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.client.Requests.getRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
+
+public class PercentilesAggregationSearchBenchmark {
+
+ private static final int AMPLITUDE = 10000;
+ private static final int NUM_DOCS = (int) SizeValue.parseSizeValue("1m").singles();
+ private static final int BATCH = 100;
+ private static final String CLUSTER_NAME = PercentilesAggregationSearchBenchmark.class.getSimpleName();
+ private static final double[] PERCENTILES = new double[] { 0, 0.01, 0.1, 1, 10, 25, 50, 75, 90, 99, 99.9, 99.99, 100};
+ private static final int QUERY_WARMUP = 10;
+ private static final int QUERY_COUNT = 20;
+
+ private static Random R = new Random(0);
+
+ // we generate ints to not disadvantage qdigest which only works with integers
+ private enum Distribution {
+ UNIFORM {
+ @Override
+ int next() {
+ return (int) (R.nextDouble() * AMPLITUDE);
+ }
+ },
+ GAUSS {
+ @Override
+ int next() {
+ return (int) (R.nextDouble() * AMPLITUDE);
+ }
+ },
+ LOG_NORMAL {
+ @Override
+ int next() {
+ return (int) Math.exp(R.nextDouble() * Math.log(AMPLITUDE));
+ }
+ };
+ String indexName() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+ abstract int next();
+ }
+
+ private static double accuratePercentile(double percentile, int[] sortedValues) {
+ final double index = percentile / 100 * (sortedValues.length - 1);
+ final int intIndex = (int) index;
+ final double delta = index - intIndex;
+ if (delta == 0) {
+ return sortedValues[intIndex];
+ } else {
+ return sortedValues[intIndex] * (1 - delta) + sortedValues[intIndex + 1] * delta;
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 100) // to also test performance and accuracy of the reduce phase
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(CLUSTER_NAME)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client = clientNode.client();
+
+ for (Distribution d : Distribution.values()) {
+ try {
+// client.admin().indices().prepareDelete(d.indexName()).execute().actionGet();
+ client.admin().indices().create(createIndexRequest(d.indexName()).settings(settings)).actionGet();
+ } catch (Exception e) {
+ System.out.println("Index " + d.indexName() + " already exists, skipping index creation");
+ continue;
+ }
+
+ final int[] values = new int[NUM_DOCS];
+ for (int i = 0; i < NUM_DOCS; ++i) {
+ values[i] = d.next();
+ }
+ System.out.println("Indexing " + NUM_DOCS + " documents into " + d.indexName());
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUM_DOCS; ) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH && i < NUM_DOCS; ++j) {
+ request.add(client.prepareIndex(d.indexName(), "values", Integer.toString(i)).setSource("v", values[i]));
+ ++i;
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ System.err.println(response.buildFailureMessage());
+ }
+ if ((i % 100000) == 0) {
+ System.out.println("--> Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ Arrays.sort(values);
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ for (double percentile : PERCENTILES) {
+ builder.field(Double.toString(percentile), accuratePercentile(percentile, values));
+ }
+ client.prepareIndex(d.indexName(), "values", "percentiles").setSource(builder.endObject()).execute().actionGet();
+ client.admin().indices().prepareRefresh(d.indexName()).execute().actionGet();
+ }
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ System.out.println("## Precision");
+ for (Distribution d : Distribution.values()) {
+ System.out.println("#### " + d);
+ final long count = client.prepareCount(d.indexName()).setQuery(matchAllQuery()).execute().actionGet().getCount();
+ if (count != NUM_DOCS + 1) {
+ throw new Error("Expected " + NUM_DOCS + " documents, got " + (count - 1));
+ }
+ Map<String, Object> percentilesUnsorted = client.get(getRequest(d.indexName()).type("values").id("percentiles")).actionGet().getSourceAsMap();
+ SortedMap<Double, Double> percentiles = Maps.newTreeMap();
+ for (Map.Entry<String, Object> entry : percentilesUnsorted.entrySet()) {
+ percentiles.put(Double.parseDouble(entry.getKey()), (Double) entry.getValue());
+ }
+ System.out.println("Expected percentiles: " + percentiles);
+ System.out.println();
+ SearchResponse resp = client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
+ Percentiles pcts = resp.getAggregations().get("pcts");
+ Map<Double, Double> asMap = Maps.newLinkedHashMap();
+ double sumOfErrorSquares = 0;
+ for (Percentile percentile : pcts) {
+ asMap.put(percentile.getPercent(), percentile.getValue());
+ double error = percentile.getValue() - percentiles.get(percentile.getPercent());
+ sumOfErrorSquares += error * error;
+ }
+ System.out.println("Percentiles: " + asMap);
+ System.out.println("Sum of error squares: " + sumOfErrorSquares);
+ System.out.println();
+ }
+
+ System.out.println("## Performance");
+ for (int i = 0; i < 3; ++i) {
+ for (Distribution d : Distribution.values()) {
+ System.out.println("#### " + d);
+ for (int j = 0; j < QUERY_WARMUP; ++j) {
+ client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
+ }
+ long start = System.nanoTime();
+ for (int j = 0; j < QUERY_COUNT; ++j) {
+ client.prepareSearch(d.indexName()).setSize(0).addAggregation(percentiles("pcts").field("v").percentiles(PERCENTILES)).execute().actionGet();
+ }
+ System.out.println(new TimeValue((System.nanoTime() - start) / QUERY_COUNT, TimeUnit.NANOSECONDS));
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..8e7d24697b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/QueryFilterAggregationSearchBenchmark.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+public class QueryFilterAggregationSearchBenchmark {
+
+ static final long COUNT = SizeValue.parseSizeValue("5m").singles();
+ static final int BATCH = 1000;
+ static final int QUERY_COUNT = 200;
+ static final int NUMBER_OF_TERMS = 200;
+
+ static Client client;
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = QueryFilterAggregationSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1")).node();
+ client = node1.client();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ builder.field("l_value", lValues[ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS)]);
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 100000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ if (client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount() != COUNT) {
+ throw new Error();
+ }
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ final long anyValue = ((Number) client.prepareSearch().execute().actionGet().getHits().hits()[0].sourceAsMap().get("l_value")).longValue();
+
+ long totalQueryTime = 0;
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSize(0)
+ .setQuery(termQuery("l_value", anyValue))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Simple Query on first l_value " + totalQueryTime + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setSize(0)
+ .setQuery(termQuery("l_value", anyValue))
+ .addAggregation(AggregationBuilders.filter("filter").filter(QueryBuilders.termQuery("l_value", anyValue)))
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Filter agg first l_value " + totalQueryTime + "ms");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/SubAggregationSearchCollectModeBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/SubAggregationSearchCollectModeBenchmark.java
new file mode 100644
index 0000000000..bf13b774ed
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/SubAggregationSearchCollectModeBenchmark.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.hppc.ObjectScatterSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class SubAggregationSearchCollectModeBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("2m").singles();
+ static int BATCH = 1000;
+ static int QUERY_WARMUP = 10;
+ static int QUERY_COUNT = 100;
+ static int NUMBER_OF_TERMS = 200;
+ static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
+ static int STRING_TERM_SIZE = 5;
+
+ static Client client;
+ static Node[] nodes;
+
+ public static void main(String[] args) throws Exception {
+ Bootstrap.initializeNatives(true, false, false);
+ Random random = new Random();
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = SubAggregationSearchCollectModeBenchmark.class.getSimpleName();
+ nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ client = clientNode.client();
+
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("s_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("sm_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("l_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("lm_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())).actionGet();
+
+ long[] lValues = new long[NUMBER_OF_TERMS];
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ lValues[i] = ThreadLocalRandom.current().nextLong();
+ }
+ ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ boolean added;
+ do {
+ added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
+ } while (!added);
+ }
+ String[] sValues = uniqueTerms.toArray(String.class);
+ uniqueTerms = null;
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ final String sValue = sValues[ThreadLocalRandom.current().nextInt(sValues.length)];
+ final long lValue = lValues[ThreadLocalRandom.current().nextInt(lValues.length)];
+ builder.field("s_value", sValue);
+ builder.field("l_value", lValue);
+ builder.field("s_value_dv", sValue);
+ builder.field("l_value_dv", lValue);
+
+ for (String field : new String[] {"sm_value", "sm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ for (String field : new String[] {"lm_value", "lm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(lValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ COUNT = client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ List<StatsResult> stats = Lists.newArrayList();
+ stats.add(runTest("0000", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("0001", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("0010", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("0011", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("0100", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("0101", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("0110", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("0111", new SubAggCollectionMode[] {SubAggCollectionMode.DEPTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("1000", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("1001", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("1010", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("1011", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("1100", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("1101", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+ stats.add(runTest("1110", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.DEPTH_FIRST}));
+ stats.add(runTest("1111", new SubAggCollectionMode[] {SubAggCollectionMode.BREADTH_FIRST,SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST, SubAggCollectionMode.BREADTH_FIRST}));
+
+ System.out.println("------------------ SUMMARY ----------------------------------------------");
+ System.out.format(Locale.ENGLISH, "%35s%10s%10s%15s%15s\n", "name", "took", "millis", "fieldata size", "heap used");
+ for (StatsResult stat : stats) {
+ System.out.format(Locale.ENGLISH, "%35s%10s%10d%15s%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed, stat.heapUsed);
+ }
+ System.out.println("------------------ SUMMARY ----------------------------------------------");
+
+ clientNode.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ public static class StatsResult {
+ final String name;
+ final long took;
+ final ByteSizeValue fieldDataMemoryUsed;
+ final ByteSizeValue heapUsed;
+
+ public StatsResult(String name, long took, ByteSizeValue fieldDataMemoryUsed, ByteSizeValue heapUsed) {
+ this.name = name;
+ this.took = took;
+ this.fieldDataMemoryUsed = fieldDataMemoryUsed;
+ this.heapUsed = heapUsed;
+ }
+ }
+
+ private static StatsResult runTest(String name, SubAggCollectionMode[] collectionModes) {
+ long totalQueryTime;// LM VALUE
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ System.gc();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch("test")
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addAggregation(AggregationBuilders.terms(name + "s_value").field("s_value").collectMode(collectionModes[0])
+ .subAggregation(AggregationBuilders.terms(name + "l_value").field("l_value").collectMode(collectionModes[1])
+ .subAggregation(AggregationBuilders.terms(name + "s_value_dv").field("s_value_dv").collectMode(collectionModes[2])
+ .subAggregation(AggregationBuilders.terms(name + "l_value_dv").field("l_value_dv").collectMode(collectionModes[3])))))
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading : took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch("test")
+ .setSize(0)
+ .setQuery(matchAllQuery())
+ .addAggregation(AggregationBuilders.terms(name + "s_value").field("s_value").collectMode(collectionModes[0])
+ .subAggregation(AggregationBuilders.terms(name + "l_value").field("l_value").collectMode(collectionModes[1])
+ .subAggregation(AggregationBuilders.terms(name + "s_value_dv").field("s_value_dv").collectMode(collectionModes[2])
+ .subAggregation(AggregationBuilders.terms(name + "l_value_dv").field("l_value_dv").collectMode(collectionModes[3])))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ String[] nodeIds = new String[nodes.length];
+ for (int i = 0; i < nodeIds.length; i++) {
+ nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
+ }
+
+ ClusterStatsResponse clusterStateResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
+ ByteSizeValue heapUsed = clusterStateResponse.getNodesStats().getJvm().getHeapUsed();
+ System.out.println("--> Heap used: " + heapUsed);
+ ByteSizeValue fieldDataMemoryUsed = clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
+ System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
+
+ return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed, heapUsed);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchAndIndexingBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchAndIndexingBenchmark.java
new file mode 100644
index 0000000000..45f7dbf956
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchAndIndexingBenchmark.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.hppc.ObjectScatterSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.benchmark.search.aggregations.TermsAggregationSearchBenchmark.Method;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class TermsAggregationSearchAndIndexingBenchmark {
+
+ static String indexName = "test";
+ static String typeName = "type1";
+ static Random random = new Random();
+
+ static long COUNT = SizeValue.parseSizeValue("2m").singles();
+ static int BATCH = 1000;
+ static int NUMBER_OF_TERMS = (int) SizeValue.parseSizeValue("100k").singles();
+ static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
+ static int STRING_TERM_SIZE = 5;
+
+ static Node[] nodes;
+
+ public static void main(String[] args) throws Exception {
+ Bootstrap.initializeNatives(true, false, false);
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = TermsAggregationSearchAndIndexingBenchmark.class.getSimpleName();
+ nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node1"))
+ .clusterName(clusterName)
+ .node();
+ }
+ Client client = nodes[0].client();
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().prepareCreate(indexName)
+ .addMapping(typeName, generateMapping("eager", "lazy"))
+ .get();
+ Thread.sleep(5000);
+
+ long startTime = System.currentTimeMillis();
+ ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ boolean added;
+ do {
+ added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
+ } while (!added);
+ }
+ String[] sValues = uniqueTerms.toArray(String.class);
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ final String sValue = sValues[counter % sValues.length];
+ builder.field("s_value", sValue);
+ builder.field("s_value_dv", sValue);
+
+ for (String field : new String[] {"sm_value", "sm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ request.add(Requests.indexRequest(indexName).type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH));
+ }
+ }
+
+ System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().preparePutMapping(indexName)
+ .setType(typeName)
+ .setSource(generateMapping("lazy", "lazy"))
+ .get();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+
+ String[] nodeIds = new String[nodes.length];
+ for (int i = 0; i < nodeIds.length; i++) {
+ nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
+ }
+
+ List<TestRun> testRuns = new ArrayList<>();
+ testRuns.add(new TestRun("Regular field ordinals", "eager", "lazy", "s_value", "ordinals"));
+ testRuns.add(new TestRun("Docvalues field ordinals", "lazy", "eager", "s_value_dv", "ordinals"));
+ testRuns.add(new TestRun("Regular field global ordinals", "eager_global_ordinals", "lazy", "s_value", null));
+ testRuns.add(new TestRun("Docvalues field global", "lazy", "eager_global_ordinals", "s_value_dv", null));
+
+ List<TestResult> testResults = new ArrayList<>();
+ for (TestRun testRun : testRuns) {
+ client.admin().indices().preparePutMapping(indexName).setType(typeName)
+ .setSource(generateMapping(testRun.indexedFieldEagerLoading, testRun.docValuesEagerLoading)).get();
+ client.admin().indices().prepareClearCache(indexName).setFieldDataCache(true).get();
+ SearchThread searchThread = new SearchThread(client, testRun.termsAggsField, testRun.termsAggsExecutionHint);
+ RefreshThread refreshThread = new RefreshThread(client);
+ System.out.println("--> Running '" + testRun.name + "' round...");
+ new Thread(refreshThread).start();
+ new Thread(searchThread).start();
+ Thread.sleep(2 * 60 * 1000);
+ refreshThread.stop();
+ searchThread.stop();
+
+ System.out.println("--> Avg refresh time: " + refreshThread.avgRefreshTime + " ms");
+ System.out.println("--> Avg query time: " + searchThread.avgQueryTime + " ms");
+
+ ClusterStatsResponse clusterStateResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
+ System.out.println("--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
+ ByteSizeValue fieldDataMemoryUsed = clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
+ System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
+ testResults.add(new TestResult(testRun.name, refreshThread.avgRefreshTime, searchThread.avgQueryTime, fieldDataMemoryUsed));
+ }
+
+ System.out.println("----------------------------------------- SUMMARY ----------------------------------------------");
+ System.out.format(Locale.ENGLISH, "%30s%18s%15s%15s\n", "name", "avg refresh time", "avg query time", "fieldata size");
+ for (TestResult testResult : testResults) {
+ System.out.format(Locale.ENGLISH, "%30s%18s%15s%15s\n", testResult.name, testResult.avgRefreshTime, testResult.avgQueryTime, testResult.fieldDataSizeInMemory);
+ }
+ System.out.println("----------------------------------------- SUMMARY ----------------------------------------------");
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ static class RefreshThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+ private volatile boolean stopped = false;
+ private volatile long avgRefreshTime = 0;
+
+ RefreshThread(Client client) throws IOException {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ long totalRefreshTime = 0;
+ int numExecutedRefreshed = 0;
+ while (run) {
+ long docIdLimit = COUNT;
+ for (long docId = 1; run && docId < docIdLimit;) {
+ try {
+ for (int j = 0; j < 8; j++) {
+ GetResponse getResponse = client
+ .prepareGet(indexName, "type1", String.valueOf(++docId))
+ .get();
+ client.prepareIndex(indexName, "type1", getResponse.getId())
+ .setSource(getResponse.getSource())
+ .get();
+ }
+ long startTime = System.currentTimeMillis();
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ totalRefreshTime += System.currentTimeMillis() - startTime;
+ numExecutedRefreshed++;
+ Thread.sleep(500);
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ avgRefreshTime = totalRefreshTime / numExecutedRefreshed;
+ stopped = true;
+ }
+
+ public void stop() throws InterruptedException {
+ run = false;
+ while (!stopped) {
+ Thread.sleep(100);
+ }
+ }
+
+ }
+
+ private static class TestRun {
+
+ final String name;
+ final String indexedFieldEagerLoading;
+ final String docValuesEagerLoading;
+ final String termsAggsField;
+ final String termsAggsExecutionHint;
+
+ private TestRun(String name, String indexedFieldEagerLoading, String docValuesEagerLoading, String termsAggsField, String termsAggsExecutionHint) {
+ this.name = name;
+ this.indexedFieldEagerLoading = indexedFieldEagerLoading;
+ this.docValuesEagerLoading = docValuesEagerLoading;
+ this.termsAggsField = termsAggsField;
+ this.termsAggsExecutionHint = termsAggsExecutionHint;
+ }
+ }
+
+ private static class TestResult {
+
+ final String name;
+ final TimeValue avgRefreshTime;
+ final TimeValue avgQueryTime;
+ final ByteSizeValue fieldDataSizeInMemory;
+
+ private TestResult(String name, long avgRefreshTime, long avgQueryTime, ByteSizeValue fieldDataSizeInMemory) {
+ this.name = name;
+ this.avgRefreshTime = TimeValue.timeValueMillis(avgRefreshTime);
+ this.avgQueryTime = TimeValue.timeValueMillis(avgQueryTime);
+ this.fieldDataSizeInMemory = fieldDataSizeInMemory;
+ }
+ }
+
+ static class SearchThread implements Runnable {
+
+ private final Client client;
+ private final String field;
+ private final String executionHint;
+ private volatile boolean run = true;
+ private volatile boolean stopped = false;
+ private volatile long avgQueryTime = 0;
+
+ SearchThread(Client client, String field, String executionHint) {
+ this.client = client;
+ this.field = field;
+ this.executionHint = executionHint;
+ }
+
+ @Override
+ public void run() {
+ long totalQueryTime = 0;
+ int numExecutedQueries = 0;
+ while (run) {
+ try {
+ SearchResponse searchResponse = Method.AGGREGATION.addTermsAgg(client.prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery()), "test", field, executionHint)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ numExecutedQueries++;
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ avgQueryTime = totalQueryTime / numExecutedQueries;
+ stopped = true;
+ }
+
+ public void stop() throws InterruptedException {
+ run = false;
+ while (!stopped) {
+ Thread.sleep(100);
+ }
+ }
+
+ }
+
+ private static XContentBuilder generateMapping(String loading1, String loading2) throws IOException {
+ return jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("s_value")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("loading", loading1)
+ .endObject()
+ .endObject()
+ .startObject("s_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("loading", loading2)
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
new file mode 100644
index 0000000000..44107b0588
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TermsAggregationSearchBenchmark.java
@@ -0,0 +1,402 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.carrotsearch.hppc.ObjectScatterSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class TermsAggregationSearchBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("2m").singles();
+ static int BATCH = 1000;
+ static int QUERY_WARMUP = 10;
+ static int QUERY_COUNT = 100;
+ static int NUMBER_OF_TERMS = 200;
+ static int NUMBER_OF_MULTI_VALUE_TERMS = 10;
+ static int STRING_TERM_SIZE = 5;
+
+ static Client client;
+ static Node[] nodes;
+
+ public enum Method {
+ AGGREGATION {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
+ }
+ },
+ AGGREGATION_DEFERRED {
+ @Override
+ SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint) {
+ return builder.addAggregation(AggregationBuilders.terms(name).executionHint(executionHint).field(field).collectMode(SubAggCollectionMode.BREADTH_FIRST));
+ }
+
+ @Override
+ SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField) {
+ return builder.addAggregation(AggregationBuilders.terms(name).field(keyField).collectMode(SubAggCollectionMode.BREADTH_FIRST).subAggregation(AggregationBuilders.stats("stats").field(valueField)));
+ }
+ };
+ abstract SearchRequestBuilder addTermsAgg(SearchRequestBuilder builder, String name, String field, String executionHint);
+ abstract SearchRequestBuilder addTermsStatsAgg(SearchRequestBuilder builder, String name, String keyField, String valueField);
+ }
+
+ public static void main(String[] args) throws Exception {
+ Bootstrap.initializeNatives(true, false, false);
+ Random random = new Random();
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = TermsAggregationSearchBenchmark.class.getSimpleName();
+ nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Node clientNode = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ client = clientNode.client();
+
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test").mapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("s_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("sm_value_dv")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("l_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("lm_value_dv")
+ .field("type", "long")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())).actionGet();
+
+ ObjectScatterSet<String> uniqueTerms = new ObjectScatterSet<>();
+ for (int i = 0; i < NUMBER_OF_TERMS; i++) {
+ boolean added;
+ do {
+ added = uniqueTerms.add(RandomStrings.randomAsciiOfLength(random, STRING_TERM_SIZE));
+ } while (!added);
+ }
+ String[] sValues = uniqueTerms.toArray(String.class);
+ uniqueTerms = null;
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ final String sValue = sValues[ThreadLocalRandom.current().nextInt(sValues.length)];
+ final long lValue = ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS);
+ builder.field("s_value", sValue);
+ builder.field("l_value", lValue);
+ builder.field("s_value_dv", sValue);
+ builder.field("l_value_dv", lValue);
+
+ for (String field : new String[] {"sm_value", "sm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(sValues[ThreadLocalRandom.current().nextInt(sValues.length)]);
+ }
+ builder.endArray();
+ }
+
+ for (String field : new String[] {"lm_value", "lm_value_dv"}) {
+ builder.startArray(field);
+ for (int k = 0; k < NUMBER_OF_MULTI_VALUE_TERMS; k++) {
+ builder.value(ThreadLocalRandom.current().nextInt(NUMBER_OF_TERMS));
+ }
+ builder.endArray();
+ }
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ COUNT = client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+
+ List<StatsResult> stats = Lists.newArrayList();
+ stats.add(terms("terms_agg_s", Method.AGGREGATION, "s_value", null));
+ stats.add(terms("terms_agg_s_dv", Method.AGGREGATION, "s_value_dv", null));
+ stats.add(terms("terms_agg_map_s", Method.AGGREGATION, "s_value", "map"));
+ stats.add(terms("terms_agg_map_s_dv", Method.AGGREGATION, "s_value_dv", "map"));
+ stats.add(terms("terms_agg_def_s", Method.AGGREGATION_DEFERRED, "s_value", null));
+ stats.add(terms("terms_agg_def_s_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", null));
+ stats.add(terms("terms_agg_def_map_s", Method.AGGREGATION_DEFERRED, "s_value", "map"));
+ stats.add(terms("terms_agg_def_map_s_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "map"));
+ stats.add(terms("terms_agg_l", Method.AGGREGATION, "l_value", null));
+ stats.add(terms("terms_agg_l_dv", Method.AGGREGATION, "l_value_dv", null));
+ stats.add(terms("terms_agg_def_l", Method.AGGREGATION_DEFERRED, "l_value", null));
+ stats.add(terms("terms_agg_def_l_dv", Method.AGGREGATION_DEFERRED, "l_value_dv", null));
+ stats.add(terms("terms_agg_sm", Method.AGGREGATION, "sm_value", null));
+ stats.add(terms("terms_agg_sm_dv", Method.AGGREGATION, "sm_value_dv", null));
+ stats.add(terms("terms_agg_map_sm", Method.AGGREGATION, "sm_value", "map"));
+ stats.add(terms("terms_agg_map_sm_dv", Method.AGGREGATION, "sm_value_dv", "map"));
+ stats.add(terms("terms_agg_def_sm", Method.AGGREGATION_DEFERRED, "sm_value", null));
+ stats.add(terms("terms_agg_def_sm_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", null));
+ stats.add(terms("terms_agg_def_map_sm", Method.AGGREGATION_DEFERRED, "sm_value", "map"));
+ stats.add(terms("terms_agg_def_map_sm_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "map"));
+ stats.add(terms("terms_agg_lm", Method.AGGREGATION, "lm_value", null));
+ stats.add(terms("terms_agg_lm_dv", Method.AGGREGATION, "lm_value_dv", null));
+ stats.add(terms("terms_agg_def_lm", Method.AGGREGATION_DEFERRED, "lm_value", null));
+ stats.add(terms("terms_agg_def_lm_dv", Method.AGGREGATION_DEFERRED, "lm_value_dv", null));
+
+ stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_s_l", Method.AGGREGATION_DEFERRED, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_def_s_l_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_s_lm", Method.AGGREGATION_DEFERRED, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_def_s_lm_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_sm_l", Method.AGGREGATION_DEFERRED, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_def_sm_l_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "l_value_dv", null));
+
+ stats.add(termsStats("terms_stats_agg_s_l", Method.AGGREGATION, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_s_l_dv", Method.AGGREGATION, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_s_l", Method.AGGREGATION_DEFERRED, "s_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_def_s_l_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_s_lm", Method.AGGREGATION, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_s_lm_dv", Method.AGGREGATION, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_s_lm", Method.AGGREGATION_DEFERRED, "s_value", "lm_value", null));
+ stats.add(termsStats("terms_stats_agg_def_s_lm_dv", Method.AGGREGATION_DEFERRED, "s_value_dv", "lm_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_sm_l", Method.AGGREGATION, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_sm_l_dv", Method.AGGREGATION, "sm_value_dv", "l_value_dv", null));
+ stats.add(termsStats("terms_stats_agg_def_sm_l", Method.AGGREGATION_DEFERRED, "sm_value", "l_value", null));
+ stats.add(termsStats("terms_stats_agg_def_sm_l_dv", Method.AGGREGATION_DEFERRED, "sm_value_dv", "l_value_dv", null));
+
+ System.out.println("------------------ SUMMARY ----------------------------------------------");
+ System.out.format(Locale.ENGLISH, "%35s%10s%10s%15s\n", "name", "took", "millis", "fieldata size");
+ for (StatsResult stat : stats) {
+ System.out.format(Locale.ENGLISH, "%35s%10s%10d%15s\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT), stat.fieldDataMemoryUsed);
+ }
+ System.out.println("------------------ SUMMARY ----------------------------------------------");
+
+ clientNode.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ public static class StatsResult {
+ final String name;
+ final long took;
+ final ByteSizeValue fieldDataMemoryUsed;
+
+ public StatsResult(String name, long took, ByteSizeValue fieldDataMemoryUsed) {
+ this.name = name;
+ this.took = took;
+ this.fieldDataMemoryUsed = fieldDataMemoryUsed;
+ }
+ }
+
+ private static StatsResult terms(String name, Method method, String field, String executionHint) {
+ long totalQueryTime;// LM VALUE
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ System.gc();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch("test")
+ .setSize(0)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsAgg(client.prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery()), name, field, executionHint)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms Agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ String[] nodeIds = new String[nodes.length];
+ for (int i = 0; i < nodeIds.length; i++) {
+ nodeIds[i] = nodes[i].injector().getInstance(Discovery.class).localNode().getId();
+ }
+
+ ClusterStatsResponse clusterStateResponse = client.admin().cluster().prepareClusterStats().setNodesIds(nodeIds).get();
+ System.out.println("--> Heap used: " + clusterStateResponse.getNodesStats().getJvm().getHeapUsed());
+ ByteSizeValue fieldDataMemoryUsed = clusterStateResponse.getIndicesStats().getFieldData().getMemorySize();
+ System.out.println("--> Fielddata memory size: " + fieldDataMemoryUsed);
+
+ return new StatsResult(name, totalQueryTime, fieldDataMemoryUsed);
+ }
+
+ private static StatsResult termsStats(String name, Method method, String keyField, String valueField, String executionHint) {
+ long totalQueryTime;
+
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ System.gc();
+
+ System.out.println("--> Warmup (" + name + ")...");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Loading (" + name + "): took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = method.addTermsStatsAgg(client.prepareSearch()
+ .setSize(0)
+ .setQuery(matchAllQuery()), name, keyField, valueField)
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != COUNT) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Terms stats agg (" + name + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime, ByteSizeValue.parseBytesSizeValue("0b", "StatsResult"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java
new file mode 100644
index 0000000000..96203ccba1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.aggregations;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class TimeDataHistogramAggregationBenchmark {
+
+ static long COUNT = SizeValue.parseSizeValue("5m").singles();
+ static long TIME_PERIOD = 24 * 3600 * 1000;
+ static int BATCH = 100;
+ static int QUERY_WARMUP = 50;
+ static int QUERY_COUNT = 500;
+ static IndexFieldData.CommonSettings.MemoryStorageFormat MEMORY_FORMAT = IndexFieldData.CommonSettings.MemoryStorageFormat.PAGED;
+ static double ACCEPTABLE_OVERHEAD_RATIO = 0.5;
+ static float MATCH_PERCENTAGE = 0.1f;
+
+ static Client client;
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put("node.local", true)
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = TimeDataHistogramAggregationBenchmark.class.getSimpleName();
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ client = nodes[0].client();
+
+ Thread.sleep(10000);
+ try {
+ client.admin().indices().create(createIndexRequest("test")).actionGet();
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ long[] currentTimeInMillis1 = new long[]{System.currentTimeMillis()};
+ long[] currentTimeInMillis2 = new long[]{System.currentTimeMillis()};
+ long startTimeInMillis = currentTimeInMillis1[0];
+ long averageMillisChange = TIME_PERIOD / COUNT * 2;
+ long backwardSkew = Math.max(1, (long) (averageMillisChange * 0.1));
+ long bigOutOfOrder = 1;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+
+ XContentBuilder builder = jsonBuilder().startObject();
+ builder.field("id", Integer.toString(counter));
+ // move forward in time and sometimes a little bit back (delayed delivery)
+ long diff = ThreadLocalRandom.current().nextLong(2 * averageMillisChange + 2 * backwardSkew) - backwardSkew;
+ long[] currentTime = counter % 2 == 0 ? currentTimeInMillis1 : currentTimeInMillis2;
+ currentTime[0] += diff;
+ if (ThreadLocalRandom.current().nextLong(100) <= bigOutOfOrder) {
+ builder.field("l_value", currentTime[0] - 60000); // 1m delays
+ } else {
+ builder.field("l_value", currentTime[0]);
+ }
+
+ builder.endObject();
+
+ request.add(Requests.indexRequest("test").type("type1").id(Integer.toString(counter))
+ .source(builder));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (COUNT)) / stopWatch.totalTime().secondsFrac()));
+ System.out.println("Time range 1: " + (currentTimeInMillis1[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
+ System.out.println("Time range 2: " + (currentTimeInMillis2[0] - startTimeInMillis) / 1000.0 / 3600 + " hours");
+ System.out.println("--> optimizing index");
+ client.admin().indices().prepareOptimize().setMaxNumSegments(1).get();
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ COUNT = client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ System.out.println("--> Number of docs in index: " + COUNT);
+
+ // load with the reverse options to make sure jit doesn't optimize one away
+ setMapping(ACCEPTABLE_OVERHEAD_RATIO, MEMORY_FORMAT.equals(IndexFieldData.CommonSettings.MemoryStorageFormat.PACKED) ? IndexFieldData.CommonSettings.MemoryStorageFormat.PAGED : IndexFieldData.CommonSettings.MemoryStorageFormat.PACKED);
+ warmUp("hist_l", "l_value", MATCH_PERCENTAGE);
+
+ setMapping(ACCEPTABLE_OVERHEAD_RATIO, MEMORY_FORMAT);
+ warmUp("hist_l", "l_value", MATCH_PERCENTAGE);
+
+ List<StatsResult> stats = Lists.newArrayList();
+ stats.add(measureAgg("hist_l", "l_value", MATCH_PERCENTAGE));
+
+ NodesStatsResponse nodeStats = client.admin().cluster().prepareNodesStats(nodes[0].settings().get("name")).clear()
+ .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.FieldData)).get();
+
+
+ System.out.println("------------------ SUMMARY -------------------------------");
+
+ System.out.println("docs: " + COUNT);
+ System.out.println("match percentage: " + MATCH_PERCENTAGE);
+ System.out.println("memory format hint: " + MEMORY_FORMAT);
+ System.out.println("acceptable_overhead_ratio: " + ACCEPTABLE_OVERHEAD_RATIO);
+ System.out.println("field data: " + nodeStats.getNodes()[0].getIndices().getFieldData().getMemorySize());
+ System.out.format(Locale.ROOT, "%25s%10s%10s\n", "name", "took", "millis");
+ for (StatsResult stat : stats) {
+ System.out.format(Locale.ROOT, "%25s%10s%10d\n", stat.name, TimeValue.timeValueMillis(stat.took), (stat.took / QUERY_COUNT));
+ }
+ System.out.println("------------------ SUMMARY -------------------------------");
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ protected static void setMapping(double acceptableOverheadRatio, IndexFieldData.CommonSettings.MemoryStorageFormat fielddataStorageFormat) throws IOException {
+ XContentBuilder mapping = JsonXContent.contentBuilder();
+ mapping.startObject().startObject("type1").startObject("properties").startObject("l_value")
+ .field("type", "long")
+ .startObject("fielddata")
+ .field("acceptable_transient_overhead_ratio", acceptableOverheadRatio)
+ .field("acceptable_overhead_ratio", acceptableOverheadRatio)
+ .field(IndexFieldData.CommonSettings.SETTING_MEMORY_STORAGE_HINT, fielddataStorageFormat.name().toLowerCase(Locale.ROOT))
+ .endObject()
+ .endObject().endObject().endObject().endObject();
+ client.admin().indices().preparePutMapping("test").setType("type1").setSource(mapping).get();
+ }
+
+ static class StatsResult {
+ final String name;
+ final long took;
+
+ StatsResult(String name, long took) {
+ this.name = name;
+ this.took = took;
+ }
+ }
+
+ private static SearchResponse doTermsAggsSearch(String name, String field, float matchPercentage) {
+ Map<String, Object> params = new HashMap<>();
+ params.put("matchP", matchPercentage);
+ SearchResponse response = client.prepareSearch()
+ .setSize(0)
+ .setQuery(
+ QueryBuilders.constantScoreQuery(QueryBuilders.scriptQuery(new Script("random()<matchP", ScriptType.INLINE, null,
+ params))))
+ .addAggregation(AggregationBuilders.histogram(name).field(field).interval(3600 * 1000)).get();
+
+ if (response.getHits().totalHits() < COUNT * matchPercentage * 0.7) {
+ System.err.println("--> warning - big deviation from expected count: " + response.getHits().totalHits() + " expected: " + COUNT * matchPercentage);
+ }
+
+ return response;
+ }
+
+ private static StatsResult measureAgg(String name, String field, float matchPercentage) {
+ long totalQueryTime;// LM VALUE
+
+ System.out.println("--> Running (" + name + ")...");
+ totalQueryTime = 0;
+ long previousCount = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = doTermsAggsSearch(name, field, matchPercentage);
+ if (previousCount == 0) {
+ previousCount = searchResponse.getHits().getTotalHits();
+ } else if (searchResponse.getHits().totalHits() != previousCount) {
+ System.err.println("*** HIT COUNT CHANGE -> CACHE EXPIRED? ***");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Histogram aggregations (" + field + "): " + (totalQueryTime / QUERY_COUNT) + "ms");
+ return new StatsResult(name, totalQueryTime);
+ }
+
+ private static void warmUp(String name, String field, float matchPercentage) {
+ System.out.println("--> Warmup (" + name + ")...");
+ client.admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = doTermsAggsSearch(name, field, matchPercentage);
+ if (j == 0) {
+ System.out.println("--> Loading (" + field + "): took: " + searchResponse.getTook());
+ }
+ }
+ System.out.println("--> Warmup (" + name + ") DONE");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
new file mode 100644
index 0000000000..b6df252adc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchAndIndexingBenchmark.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+
+import java.util.Arrays;
+import java.util.Random;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ChildSearchAndIndexingBenchmark {
+
+ static int PARENT_COUNT = (int) SizeValue.parseSizeValue("1m").singles();
+ static int NUM_CHILDREN_PER_PARENT = 12;
+ static int QUERY_VALUE_RATIO_PER_PARENT = 3;
+ static int QUERY_COUNT = 50;
+ static String indexName = "test";
+ static Random random = new Random();
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchAndIndexingBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node1"))
+ .clusterName(clusterName)
+ .node();
+ Client client = node1.client();
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ long startTime = System.currentTimeMillis();
+ ParentChildIndexGenerator generator = new ParentChildIndexGenerator(client, PARENT_COUNT, NUM_CHILDREN_PER_PARENT, QUERY_VALUE_RATIO_PER_PARENT);
+ generator.index();
+ System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ SearchThread searchThread = new SearchThread(client);
+ new Thread(searchThread).start();
+ IndexThread indexThread = new IndexThread(client);
+ new Thread(indexThread).start();
+
+ System.in.read();
+
+ indexThread.stop();
+ searchThread.stop();
+ client.close();
+ node1.close();
+ }
+
+ static class IndexThread implements Runnable {
+
+ private final Client client;
+ private volatile boolean run = true;
+
+ IndexThread(Client client) {
+ this.client = client;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ int childIdLimit = PARENT_COUNT * NUM_CHILDREN_PER_PARENT;
+ for (int childId = 1; run && childId < childIdLimit;) {
+ try {
+ for (int j = 0; j < 8; j++) {
+ GetResponse getResponse = client
+ .prepareGet(indexName, "child", String.valueOf(++childId))
+ .setFields("_source", "_parent")
+ .setRouting("1") // Doesn't matter what value, since there is only one shard
+ .get();
+ client.prepareIndex(indexName, "child", Integer.toString(childId) + "_" + j)
+ .setParent(getResponse.getField("_parent").getValue().toString())
+ .setSource(getResponse.getSource())
+ .get();
+ }
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(1000);
+ if (childId % 500 == 0) {
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .clear().setIndices(true).execute().actionGet();
+ System.out.println("Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
+ }
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
+ static class SearchThread implements Runnable {
+
+ private final Client client;
+ private final int numValues;
+ private volatile boolean run = true;
+
+ SearchThread(Client client) {
+ this.client = client;
+ this.numValues = NUM_CHILDREN_PER_PARENT / NUM_CHILDREN_PER_PARENT;
+ }
+
+ @Override
+ public void run() {
+ while (run) {
+ try {
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery("child", termQuery("field2", "value" + random.nextInt(numValues)))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with term filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ Thread.sleep(1000);
+ } catch (Throwable e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void stop() {
+ run = false;
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
new file mode 100644
index 0000000000..19e5c2f0f5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchBenchmark.java
@@ -0,0 +1,347 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.children.Children;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class ChildSearchBenchmark {
+
+ /*
+ Run: MAVEN_OPTS=-Xmx4g mvn test-compile exec:java -Dexec.mainClass="org.elasticsearch.benchmark.search.child.ChildSearchBenchmark" -Dexec.classpathScope="test" -Dexec.args="bwc false"
+ */
+
+ public static void main(String[] args) throws Exception {
+ boolean bwcMode = false;
+ int numParents = (int) SizeValue.parseSizeValue("2m").singles();;
+
+ if (args.length % 2 != 0) {
+ throw new IllegalArgumentException("Uneven number of arguments");
+ }
+ for (int i = 0; i < args.length; i += 2) {
+ String value = args[i + 1];
+ if ("--bwc_mode".equals(args[i])) {
+ bwcMode = Boolean.valueOf(value);
+ } else if ("--num_parents".equals(args[i])) {
+ numParents = Integer.valueOf(value);
+ }
+ }
+
+
+ Settings.Builder settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0);
+
+ // enable bwc parent child mode:
+ if (bwcMode) {
+ settings.put("tests.mock.version", Version.V_1_6_0);
+ }
+
+ String clusterName = ChildSearchBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings.build()).put("name", "node1")).node();
+ Client client = node1.client();
+
+ int CHILD_COUNT = 15;
+ int QUERY_VALUE_RATIO = 3;
+ int QUERY_WARMUP = 10;
+ int QUERY_COUNT = 20;
+ String indexName = "test";
+
+ ParentChildIndexGenerator parentChildIndexGenerator = new ParentChildIndexGenerator(client, numParents, CHILD_COUNT, QUERY_VALUE_RATIO);
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+ long startTime = System.currentTimeMillis();
+ parentChildIndexGenerator.index();
+ System.out.println("--> Indexing took " + ((System.currentTimeMillis() - startTime) / 1000) + " seconds.");
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ }
+
+ long totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(termQuery("child.tag", "tag1")).execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Just Child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue()))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_child filter with match_all child query");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery("child", matchAllQuery())
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter with match_all child query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+
+ System.out.println("--> Running children agg");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(matchQuery("field1", parentChildIndexGenerator.getQueryValue()))
+ .addAggregation(
+ AggregationBuilders.children("to-child").childType("child")
+ )
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ Children children = searchResponse.getAggregations().get("to-child");
+ if (j % 10 == 0) {
+ System.out.println("--> children doc count [" + j + "], got [" + children.getDocCount() + "]");
+ }
+ }
+ System.out.println("--> children agg, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running children agg with match_all");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .addAggregation(
+ AggregationBuilders.children("to-child").childType("child")
+ )
+ .execute().actionGet();
+ totalQueryTime += searchResponse.getTookInMillis();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ Children children = searchResponse.getAggregations().get("to-child");
+ if (j % 10 == 0) {
+ System.out.println("--> children doc count [" + j + "], got [" + children.getDocCount() + "]");
+ }
+ }
+ System.out.println("--> children agg, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ // run parent child constant query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue()))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ }
+
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue()))
+ )
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_parent filter with match_all parent query ");
+ totalQueryTime = 0;
+ for (int j = 1; j <= QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(
+ matchAllQuery(),
+ hasParentQuery("parent", matchAllQuery())
+ ))
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent filter with match_all parent query, Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running has_child query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreType("max")).execute().actionGet();
+ }
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", termQuery("field2", parentChildIndexGenerator.getQueryValue())).scoreType("max")).execute().actionGet();
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasChildQuery("child", matchAllQuery()).scoreType("max")).execute().actionGet();
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.out.println("--> Running has_parent query with score type");
+ // run parent child score query
+ for (int j = 0; j < QUERY_WARMUP; j++) {
+ client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).scoreType("score")).execute().actionGet();
+ }
+
+ totalQueryTime = 0;
+ for (int j = 1; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", termQuery("field1", parentChildIndexGenerator.getQueryValue())).scoreType("score")).execute().actionGet();
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ totalQueryTime = 0;
+ for (int j = 1; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(hasParentQuery("parent", matchAllQuery()).scoreType("score")).execute().actionGet();
+ if (j % 10 == 0) {
+ System.out.println("--> hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "]");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_parent query with match_all Query Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
new file mode 100644
index 0000000000..966ca4fe06
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/child/ChildSearchShortCircuitBenchmark.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.search.child;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmarks has_child query/filter latency as the number of matching child documents grows.
+ */
+public class ChildSearchShortCircuitBenchmark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ String clusterName = ChildSearchShortCircuitBenchmark.class.getSimpleName();
+ Node node1 = nodeBuilder().clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ long PARENT_COUNT = SizeValue.parseSizeValue("10M").singles();
+ int BATCH = 100;
+ int QUERY_WARMUP = 5;
+ int QUERY_COUNT = 25;
+ String indexName = "test";
+
+ client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ try {
+ client.admin().indices().create(createIndexRequest(indexName)).actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType("child").setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + PARENT_COUNT + "] parent document and some child documents");
+ long ITERS = PARENT_COUNT / BATCH;
+ int i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest(indexName).type("parent").id(Integer.toString(counter))
+ .source(parentSource(counter)));
+
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * BATCH) + "parent docs; took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+
+ int id = 0;
+ for (i = 1; i <= PARENT_COUNT; i *= 2) {
+ int parentId = 1;
+ for (int j = 0; j < i; j++) {
+ client.prepareIndex(indexName, "child", Integer.toString(id++))
+ .setParent(Integer.toString(parentId++))
+ .setSource(childSource(i))
+ .execute().actionGet();
+ }
+ }
+
+ System.out.println("--> Indexing took " + stopWatch.totalTime());
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount(indexName).setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ System.out.println("--> Running just child query");
+ // run just the child query, warm up first
+ for (int i = 1; i <= 10000; i *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName).setQuery(matchQuery("child.field2", i)).execute().actionGet();
+ System.out.println("--> Warmup took["+ i +"]: " + searchResponse.getTook());
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ // run parent child constant query
+ for (int j = 1; j < QUERY_WARMUP; j *= 2) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(
+ hasChildQuery("child", matchQuery("field2", j))
+ )
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ System.err.println("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ if (searchResponse.getHits().totalHits() != j) {
+ System.err.println("--> mismatch on hits [" + j + "], got [" + searchResponse.getHits().totalHits() + "], expected [" + PARENT_COUNT + "]");
+ }
+ }
+
+ long totalQueryTime = 0;
+ for (int i = 1; i < PARENT_COUNT; i *= 2) {
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(filteredQuery(matchAllQuery(), hasChildQuery("child", matchQuery("field2", i))))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child filter " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ totalQueryTime = 0;
+ for (int i = 1; i < PARENT_COUNT; i *= 2) {
+ for (int j = 0; j < QUERY_COUNT; j++) {
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .setQuery(hasChildQuery("child", matchQuery("field2", i)).scoreType("max"))
+ .execute().actionGet();
+ if (searchResponse.getHits().totalHits() != i) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> has_child query " + i +" Avg: " + (totalQueryTime / QUERY_COUNT) + "ms");
+ }
+
+ System.gc();
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).setIndices(true).execute().actionGet();
+
+ System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ client.close();
+ node1.close();
+ }
+
+ private static XContentBuilder parentSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field1", Integer.toString(val)).endObject();
+ }
+
+ private static XContentBuilder childSource(int val) throws IOException {
+ return jsonBuilder().startObject().field("field2", Integer.toString(val)).endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/child/ParentChildIndexGenerator.java b/core/src/test/java/org/elasticsearch/benchmark/search/child/ParentChildIndexGenerator.java
new file mode 100644
index 0000000000..1d02a1f5f3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/child/ParentChildIndexGenerator.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.child;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import com.carrotsearch.hppc.ObjectHashSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+
+import java.util.Random;
+
+/**
+ * Generates a randomized parent/child test index via interleaved bulk requests. */
+public class ParentChildIndexGenerator {
+
+ private final static Random RANDOM = new Random();
+
+ private final Client client;
+ private final int numParents;
+ private final int numChildrenPerParent;
+ private final int queryValueRatio;
+
+ public ParentChildIndexGenerator(Client client, int numParents, int numChildrenPerParent, int queryValueRatio) {
+ this.client = client;
+ this.numParents = numParents;
+ this.numChildrenPerParent = numChildrenPerParent;
+ this.queryValueRatio = queryValueRatio;
+ }
+
+ public void index() {
+ // Memory intensive...
+ ObjectHashSet<String> usedParentIds = new ObjectHashSet<>(numParents, 0.5d);
+ ObjectArrayList<ParentDocument> parents = new ObjectArrayList<>(numParents);
+
+ for (int i = 0; i < numParents; i++) {
+ String parentId;
+ do {
+ parentId = RandomStrings.randomAsciiOfLength(RANDOM, 10);
+ } while (!usedParentIds.add(parentId));
+ String[] queryValues = new String[numChildrenPerParent];
+ for (int j = 0; j < numChildrenPerParent; j++) {
+ queryValues[j] = getQueryValue();
+ }
+ parents.add(new ParentDocument(parentId, queryValues));
+ }
+
+ int indexCounter = 0;
+ int childIdCounter = 0;
+ while (!parents.isEmpty()) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int i = 0; !parents.isEmpty() && i < 100; i++) {
+ int index = RANDOM.nextInt(parents.size());
+ ParentDocument parentDocument = parents.get(index);
+
+ if (parentDocument.indexCounter == -1) {
+ request.add(Requests.indexRequest("test").type("parent")
+ .id(parentDocument.parentId)
+ .source("field1", getQueryValue()));
+ } else {
+ request.add(Requests.indexRequest("test").type("child")
+ .parent(parentDocument.parentId)
+ .id(String.valueOf(++childIdCounter))
+ .source("field2", parentDocument.queryValues[parentDocument.indexCounter]));
+ }
+
+ if (++parentDocument.indexCounter == parentDocument.queryValues.length) {
+ parents.remove(index);
+ }
+ }
+
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+
+ indexCounter += response.getItems().length;
+ if (indexCounter % 100000 == 0) {
+ System.out.println("--> Indexed " + indexCounter + " documents");
+ }
+ }
+ }
+
+ public String getQueryValue() {
+ return "value" + RANDOM.nextInt(numChildrenPerParent / queryValueRatio);
+ }
+
+ class ParentDocument {
+
+ final String parentId;
+ final String[] queryValues;
+ int indexCounter;
+
+ ParentDocument(String parentId, String[] queryValues) {
+ this.parentId = parentId;
+ this.queryValues = queryValues;
+ this.indexCounter = -1;
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
new file mode 100644
index 0000000000..f43d581b50
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/geo/GeoDistanceSearchBenchmark.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.geo;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ * Benchmarks geo_distance queries (ARC, SLOPPY_ARC, PLANE) with different optimize_bbox modes. */
+public class GeoDistanceSearchBenchmark {
+
+ public static void main(String[] args) throws Exception {
+
+ Node node = NodeBuilder.nodeBuilder().clusterName(GeoDistanceSearchBenchmark.class.getSimpleName()).node();
+ Client client = node.client();
+
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("Failed to wait for green status, bailing");
+ System.exit(1);
+ }
+
+ final long NUM_DOCS = SizeValue.parseSizeValue("1m").singles();
+ final long NUM_WARM = 50;
+ final long NUM_RUNS = 100;
+
+ if (client.admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ System.out.println("Found an index, count: " + client.prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount());
+ } else {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+ client.admin().indices().prepareCreate("test")
+ .setSettings(Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .addMapping("type1", mapping)
+ .execute().actionGet();
+
+ System.err.println("--> Indexing [" + NUM_DOCS + "]");
+ for (long i = 0; i < NUM_DOCS; ) {
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client.prepareIndex("test", "type1", Long.toString(i++)).setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ if ((i % 10000) == 0) {
+ System.err.println("--> indexed " + i);
+ }
+ }
+ System.err.println("Done indexed");
+ client.admin().indices().prepareFlush("test").execute().actionGet();
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ }
+
+ System.err.println("--> Warming up (ARC) - optimize_bbox");
+ long start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ long totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (memory) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (memory)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "indexed");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - optimize_bbox (indexed) " + (totalTime / NUM_RUNS) + "ms");
+
+
+ System.err.println("--> Warming up (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (ARC) - no optimize_bbox " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (ARC) - no optimize_bbox");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.ARC, "none");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (ARC) - no optimize_bbox " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (SLOPPY_ARC) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (SLOPPY_ARC)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.SLOPPY_ARC, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (SLOPPY_ARC) " + (totalTime / NUM_RUNS) + "ms");
+
+ System.err.println("--> Warming up (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_WARM; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Warmup (PLANE) " + (totalTime / NUM_WARM) + "ms");
+
+ System.err.println("--> Perf (PLANE)");
+ start = System.currentTimeMillis();
+ for (int i = 0; i < NUM_RUNS; i++) {
+ run(client, GeoDistance.PLANE, "memory");
+ }
+ totalTime = System.currentTimeMillis() - start;
+ System.err.println("--> Perf (PLANE) " + (totalTime / NUM_RUNS) + "ms");
+
+ node.close();
+ }
+
+ public static void run(Client client, GeoDistance geoDistance, String optimizeBbox) {
+ client.prepareSearch() // from NY
+ .setSize(0)
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location")
+ .distance("2km")
+ .optimizeBbox(optimizeBbox)
+ .geoDistance(geoDistance)
+ .point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java b/core/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
new file mode 100644
index 0000000000..c01ee21e78
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/nested/NestedSearchBenchMark.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.nested;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Benchmarks match_all searches sorted on a nested field (field2.field3, avg mode). */
+public class NestedSearchBenchMark {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "-1")
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node node1 = nodeBuilder()
+ .settings(settingsBuilder().put(settings).put("name", "node1"))
+ .node();
+ Client client = node1.client();
+
+ int count = (int) SizeValue.parseSizeValue("1m").singles();
+ int nestedCount = 10;
+ int rootDocs = count / nestedCount;
+ int batch = 100;
+ int queryWarmup = 5;
+ int queryCount = 500;
+ String indexName = "test";
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
+ .setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ try {
+ client.admin().indices().prepareCreate(indexName)
+ .addMapping("type", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "integer")
+ .endObject()
+ .startObject("field2")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field3")
+ .field("type", "integer")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+
+ System.out.println("--> Indexing [" + rootDocs + "] root documents and [" + (rootDocs * nestedCount) + "] nested objects");
+ long ITERS = rootDocs / batch;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client.prepareBulk();
+ for (int j = 0; j < batch; j++) {
+ counter++;
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("field1", counter)
+ .startArray("field2");
+ for (int k = 0; k < nestedCount; k++) {
+ doc = doc.startObject()
+ .field("field3", k)
+ .endObject();
+ }
+ doc = doc.endArray();
+ request.add(
+ Requests.indexRequest(indexName).type("type").id(Integer.toString(counter)).source(doc)
+ );
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("--> failures...");
+ }
+ if (((i * batch) % 10000) == 0) {
+ System.out.println("--> Indexed " + (i * batch) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("--> Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) (count * (1 + nestedCount))) / stopWatch.totalTime().secondsFrac()));
+ } catch (Exception e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+ client.admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("--> Number of docs in index: " + client.prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ NodesStatsResponse statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+
+ System.out.println("--> Running match_all with sorting on nested field");
+        // warm up the match_all query with nested-field sorting first
+ for (int j = 0; j < queryWarmup; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ if (j == 0) {
+ System.out.println("--> Warmup took: " + searchResponse.getTook());
+ }
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ }
+
+ long totalQueryTime = 0;
+ for (int j = 0; j < queryCount; j++) {
+ SearchResponse searchResponse = client.prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("field2.field3")
+ .setNestedPath("field2")
+ .sortMode("avg")
+ .order(j % 2 == 0 ? SortOrder.ASC : SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ if (searchResponse.getHits().totalHits() != rootDocs) {
+ System.err.println("--> mismatch on hits");
+ }
+ totalQueryTime += searchResponse.getTookInMillis();
+ }
+ System.out.println("--> Sorting by nested fields took: " + (totalQueryTime / queryCount) + "ms");
+
+ statsResponse = client.admin().cluster().prepareNodesStats()
+ .setJvm(true).execute().actionGet();
+ System.out.println("--> Committed heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapCommitted());
+ System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/search/scroll/ScrollSearchBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/search/scroll/ScrollSearchBenchmark.java
new file mode 100644
index 0000000000..a6909a36df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/search/scroll/ScrollSearchBenchmark.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.search.scroll;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.monitor.jvm.JvmStats;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+
+import java.util.Locale;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ */
+public class ScrollSearchBenchmark {
+
+ // Run with: -Xms1G -Xms1G
+ public static void main(String[] args) {
+ String indexName = "test";
+ String typeName = "type";
+ String clusterName = ScrollSearchBenchmark.class.getSimpleName();
+ long numDocs = SizeValue.parseSizeValue("300k").singles();
+ int requestSize = 50;
+
+ Settings settings = settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 3)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .build();
+
+ Node[] nodes = new Node[3];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder()
+ .clusterName(clusterName)
+ .settings(settingsBuilder().put(settings).put("name", "node" + i))
+ .node();
+ }
+
+ Client client = nodes[0].client();
+
+ try {
+ client.admin().indices().prepareCreate(indexName).get();
+ for (int counter = 1; counter <= numDocs;) {
+ BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
+ for (int bulkCounter = 0; bulkCounter < 100; bulkCounter++) {
+ if (counter > numDocs) {
+ break;
+ }
+ bulkRequestBuilder.add(
+ client.prepareIndex(indexName, typeName, String.valueOf(counter))
+ .setSource("field1", counter++)
+ );
+ }
+ int indexedDocs = counter - 1;
+ if (indexedDocs % 100000 == 0) {
+ System.out.printf(Locale.ENGLISH, "--> Indexed %d so far\n", indexedDocs);
+ }
+ bulkRequestBuilder.get();
+ }
+ } catch (IndexAlreadyExistsException e) {
+ System.out.println("--> Index already exists, ignoring indexing phase, waiting for green");
+ ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth(indexName).setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> Timed out waiting for cluster health");
+ }
+ }
+
+ client.admin().indices().prepareRefresh(indexName).get();
+ System.out.printf(Locale.ENGLISH, "--> Number of docs in index: %d\n", client.prepareCount().get().getCount());
+
+ Long counter = numDocs;
+ SearchResponse searchResponse = client.prepareSearch(indexName)
+ .addSort("field1", SortOrder.DESC)
+ .setSize(requestSize)
+ .setScroll("10m").get();
+
+ if (searchResponse.getHits().getTotalHits() != numDocs) {
+ System.err.printf(Locale.ENGLISH, "Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
+ }
+
+ if (searchResponse.getHits().hits().length != requestSize) {
+ System.err.printf(Locale.ENGLISH, "Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
+ }
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!hit.sortValues()[0].equals(counter--)) {
+ System.err.printf(Locale.ENGLISH, "Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
+ }
+ }
+ String scrollId = searchResponse.getScrollId();
+ int scrollRequestCounter = 0;
+ long sumTimeSpent = 0;
+ while (true) {
+ long timeSpent = System.currentTimeMillis();
+ searchResponse = client.prepareSearchScroll(scrollId).setScroll("10m").get();
+ sumTimeSpent += (System.currentTimeMillis() - timeSpent);
+ scrollRequestCounter++;
+ if (searchResponse.getHits().getTotalHits() != numDocs) {
+ System.err.printf(Locale.ENGLISH, "Expected total hits [%d] but got [%d]\n", numDocs, searchResponse.getHits().getTotalHits());
+ }
+ if (scrollRequestCounter % 20 == 0) {
+ long avgTimeSpent = sumTimeSpent / 20;
+ JvmStats.Mem mem = JvmStats.jvmStats().getMem();
+ System.out.printf(Locale.ENGLISH, "Cursor location=%d, avg time spent=%d ms\n", (requestSize * scrollRequestCounter), (avgTimeSpent));
+ System.out.printf(Locale.ENGLISH, "heap max=%s, used=%s, percentage=%d\n", mem.getHeapMax(), mem.getHeapUsed(), mem.getHeapUsedPercent());
+ sumTimeSpent = 0;
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ if (searchResponse.getHits().hits().length != requestSize) {
+ System.err.printf(Locale.ENGLISH, "Expected hits length [%d] but got [%d]\n", requestSize, searchResponse.getHits().hits().length);
+ }
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!hit.sortValues()[0].equals(counter--)) {
+ System.err.printf(Locale.ENGLISH, "Expected sort value [%d] but got [%s]\n", counter + 1, hit.sortValues()[0]);
+ }
+ }
+ scrollId = searchResponse.getScrollId();
+ }
+ if (counter != 0) {
+ System.err.printf(Locale.ENGLISH, "Counter should be 0 because scroll has been consumed\n");
+ }
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java b/core/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
new file mode 100644
index 0000000000..1f53234599
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/stress/NodesStressTest.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+
+/**
+ *
+ */
+public class NodesStressTest {
+
+ private Node[] nodes;
+
+ private int numberOfNodes = 2;
+
+ private Client[] clients;
+
+ private AtomicLong idGenerator = new AtomicLong();
+
+ private int fieldNumLimit = 50;
+
+ private long searcherIterations = 10;
+ private Searcher[] searcherThreads = new Searcher[1];
+
+ private long indexIterations = 10;
+ private Indexer[] indexThreads = new Indexer[1];
+
+ private TimeValue sleepAfterDone = TimeValue.timeValueMillis(0);
+ private TimeValue sleepBeforeClose = TimeValue.timeValueMillis(0);
+
+ private CountDownLatch latch;
+ private CyclicBarrier barrier1;
+ private CyclicBarrier barrier2;
+
+ public NodesStressTest() {
+ }
+
+ public NodesStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public NodesStressTest fieldNumLimit(int fieldNumLimit) {
+ this.fieldNumLimit = fieldNumLimit;
+ return this;
+ }
+
+ public NodesStressTest searchIterations(int searchIterations) {
+ this.searcherIterations = searchIterations;
+ return this;
+ }
+
+ public NodesStressTest searcherThreads(int numberOfSearcherThreads) {
+ searcherThreads = new Searcher[numberOfSearcherThreads];
+ return this;
+ }
+
+ public NodesStressTest indexIterations(long indexIterations) {
+ this.indexIterations = indexIterations;
+ return this;
+ }
+
+ public NodesStressTest indexThreads(int numberOfWriterThreads) {
+ indexThreads = new Indexer[numberOfWriterThreads];
+ return this;
+ }
+
+ public NodesStressTest sleepAfterDone(TimeValue time) {
+ this.sleepAfterDone = time;
+ return this;
+ }
+
+ public NodesStressTest sleepBeforeClose(TimeValue time) {
+ this.sleepBeforeClose = time;
+ return this;
+ }
+
+ public NodesStressTest build(Settings settings) throws Exception {
+ settings = settingsBuilder()
+// .put("index.refresh_interval", 1, TimeUnit.SECONDS)
+ .put(SETTING_NUMBER_OF_SHARDS, 5)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(settings)
+ .build();
+
+ nodes = new Node[numberOfNodes];
+ clients = new Client[numberOfNodes];
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ clients[i] = nodes[i].client();
+ }
+
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher(i);
+ }
+ for (int i = 0; i < indexThreads.length; i++) {
+ indexThreads[i] = new Indexer(i);
+ }
+
+ latch = new CountDownLatch(1);
+ barrier1 = new CyclicBarrier(2);
+ barrier2 = new CyclicBarrier(2);
+ // warmup
+ StopWatch stopWatch = new StopWatch().start();
+ Indexer warmup = new Indexer(-1).max(10000);
+ warmup.start();
+ barrier1.await();
+ barrier2.await();
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Done Warmup, took [" + stopWatch.totalTime() + "]");
+
+ latch = new CountDownLatch(searcherThreads.length + indexThreads.length);
+ barrier1 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+ barrier2 = new CyclicBarrier(searcherThreads.length + indexThreads.length + 1);
+
+ return this;
+ }
+
+ public void start() throws Exception {
+ for (Thread t : searcherThreads) {
+ t.start();
+ }
+ for (Thread t : indexThreads) {
+ t.start();
+ }
+ barrier1.await();
+
+ StopWatch stopWatch = new StopWatch();
+ stopWatch.start();
+
+ barrier2.await();
+
+ latch.await();
+ stopWatch.stop();
+
+ System.out.println("Done, took [" + stopWatch.totalTime() + "]");
+ System.out.println("Sleeping before close: " + sleepBeforeClose);
+ Thread.sleep(sleepBeforeClose.millis());
+
+ for (Client client : clients) {
+ client.close();
+ }
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("Sleeping before exit: " + sleepBeforeClose);
+ Thread.sleep(sleepAfterDone.millis());
+ }
+
+ class Searcher extends Thread {
+ final int id;
+ long counter = 0;
+ long max = searcherIterations;
+
+ Searcher(int id) {
+ super("Searcher" + id);
+ this.id = id;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ QueryBuilder query = termQuery("num", counter % fieldNumLimit);
+ query = constantScoreQuery(query);
+
+ SearchResponse search = client.search(searchRequest()
+ .source(searchSource().query(query)))
+ .actionGet();
+// System.out.println("Got search response, hits [" + search.hits().totalHits() + "]");
+ }
+ } catch (Exception e) {
+ System.err.println("Failed to search:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ class Indexer extends Thread {
+
+ final int id;
+ long counter = 0;
+ long max = indexIterations;
+
+ Indexer(int id) {
+ super("Indexer" + id);
+ this.id = id;
+ }
+
+ Indexer max(int max) {
+ this.max = max;
+ return this;
+ }
+
+ @Override
+ public void run() {
+ try {
+ barrier1.await();
+ barrier2.await();
+ for (; counter < max; counter++) {
+ Client client = client(counter);
+ long id = idGenerator.incrementAndGet();
+ client.index(Requests.indexRequest().index("test").type("type1").id(Long.toString(id))
+ .source(XContentFactory.jsonBuilder().startObject()
+ .field("num", id % fieldNumLimit)
+ .endObject()))
+ .actionGet();
+ }
+ System.out.println("Indexer [" + id + "]: Done");
+ } catch (Exception e) {
+ System.err.println("Failed to index:");
+ e.printStackTrace();
+ } finally {
+ latch.countDown();
+ }
+ }
+ }
+
+ private Client client(long i) {
+ return clients[((int) (i % clients.length))];
+ }
+
+ public static void main(String[] args) throws Exception {
+ NodesStressTest test = new NodesStressTest()
+ .numberOfNodes(2)
+ .indexThreads(5)
+ .indexIterations(10 * 1000)
+ .searcherThreads(5)
+ .searchIterations(10 * 1000)
+ .sleepBeforeClose(TimeValue.timeValueMinutes(10))
+ .sleepAfterDone(TimeValue.timeValueMinutes(10))
+ .build(EMPTY_SETTINGS);
+
+ test.start();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java b/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
new file mode 100644
index 0000000000..32c35cc085
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadBulkStress.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class SingleThreadBulkStress {
+
+ public static void main(String[] args) throws Exception {
+ Random random = new Random();
+
+ int shardsCount = Integer.parseInt(System.getProperty("es.shards", "1"));
+ int replicaCount = Integer.parseInt(System.getProperty("es.replica", "1"));
+ boolean autoGenerateId = true;
+
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put(SETTING_NUMBER_OF_SHARDS, shardsCount)
+ .put(SETTING_NUMBER_OF_REPLICAS, replicaCount)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ //Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+ Node client = nodes[0];
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
+ client1.admin().indices().prepareCreate("test").setSettings(settings).addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("_all").field("enabled", false).endObject()
+ .startObject("_type").field("index", "no").endObject()
+ .startObject("_id").field("index", "no").endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", true).endObject()
+// .startObject("field").field("index", "analyzed").field("omit_norms", false).endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ long COUNT = SizeValue.parseSizeValue("2m").singles();
+ int BATCH = 500;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ long ITERS = COUNT / BATCH;
+ long i = 1;
+ int counter = 0;
+ for (; i <= ITERS; i++) {
+ BulkRequestBuilder request = client1.prepareBulk();
+ for (int j = 0; j < BATCH; j++) {
+ counter++;
+ request.add(Requests.indexRequest("test").type("type1").id(autoGenerateId ? null : Integer.toString(counter)).source(source(Integer.toString(counter), "test" + counter)));
+ }
+ BulkResponse response = request.execute().actionGet();
+ if (response.hasFailures()) {
+ System.err.println("failures...");
+ }
+ if (((i * BATCH) % 10000) == 0) {
+ System.out.println("Indexed " + (i * BATCH) + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ return jsonBuilder().startObject().field("field", nameValue).endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java b/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
new file mode 100644
index 0000000000..610745c51d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/stress/SingleThreadIndexingStress.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.stress;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.node.Node;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ *
+ */
+public class SingleThreadIndexingStress {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = settingsBuilder()
+ .put("index.refresh_interval", "1s")
+ .put("index.merge.async", true)
+ .put("index.translog.flush_threshold_ops", 5000)
+ .put(SETTING_NUMBER_OF_SHARDS, 2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)
+ .build();
+
+ Node[] nodes = new Node[1];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "node" + i)).node();
+ }
+
+ Node client = nodeBuilder().settings(settingsBuilder().put(settings).put("name", "client")).client(true).node();
+
+ Client client1 = client.client();
+
+ Thread.sleep(1000);
+ client1.admin().indices().create(createIndexRequest("test")).actionGet();
+ Thread.sleep(5000);
+
+ StopWatch stopWatch = new StopWatch().start();
+ int COUNT = 200000;
+ int ID_RANGE = 100;
+ System.out.println("Indexing [" + COUNT + "] ...");
+ int i = 1;
+ for (; i <= COUNT; i++) {
+// client1.admin().cluster().preparePingSingle("test", "type1", Integer.toString(i)).execute().actionGet();
+ client1.prepareIndex("test", "type1").setId(Integer.toString(i % ID_RANGE)).setSource(source(Integer.toString(i), "test" + i))
+ .setCreate(false).execute().actionGet();
+ if ((i % 10000) == 0) {
+ System.out.println("Indexed " + i + " took " + stopWatch.stop().lastTaskTime());
+ stopWatch.start();
+ }
+ }
+ System.out.println("Indexing took " + stopWatch.totalTime() + ", TPS " + (((double) COUNT) / stopWatch.totalTime().secondsFrac()));
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ System.out.println("Count: " + client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount());
+
+ client.close();
+
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private static XContentBuilder source(String id, String nameValue) throws IOException {
+ long time = System.currentTimeMillis();
+ return jsonBuilder().startObject()
+ .field("id", id)
+// .field("numeric1", time)
+// .field("numeric2", time)
+// .field("numeric3", time)
+// .field("numeric4", time)
+// .field("numeric5", time)
+// .field("numeric6", time)
+// .field("numeric7", time)
+// .field("numeric8", time)
+// .field("numeric9", time)
+// .field("numeric10", time)
+ .field("name", nameValue)
+ .endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
new file mode 100644
index 0000000000..37b20bce57
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/time/SimpleTimeBenchmark.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.time;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ *
+ */
+public class SimpleTimeBenchmark {
+
+ private static boolean USE_NANO_TIME = false;
+ private static long NUMBER_OF_ITERATIONS = 1000000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ System.out.println("Took " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Running using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ if (USE_NANO_TIME) {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.nanoTime();
+ }
+ } else {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ System.currentTimeMillis();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Took " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
new file mode 100644
index 0000000000..2978c5c417
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageRequest.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportRequest;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class BenchmarkMessageRequest extends TransportRequest {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageRequest(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageRequest() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
new file mode 100644
index 0000000000..7a7e3d9ab9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkMessageResponse.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class BenchmarkMessageResponse extends TransportResponse {
+
+ long id;
+ byte[] payload;
+
+ public BenchmarkMessageResponse(BenchmarkMessageRequest request) {
+ this.id = request.id;
+ this.payload = request.payload;
+ }
+
+ public BenchmarkMessageResponse(long id, byte[] payload) {
+ this.id = id;
+ this.payload = payload;
+ }
+
+ public BenchmarkMessageResponse() {
+ }
+
+ public long id() {
+ return id;
+ }
+
+ public byte[] payload() {
+ return payload;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ id = in.readLong();
+ payload = new byte[in.readVInt()];
+ in.readFully(payload);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeLong(id);
+ out.writeVInt(payload.length);
+ out.writeBytes(payload);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
new file mode 100644
index 0000000000..fcc755b413
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/transport/BenchmarkNettyLargeMessages.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.settings.DynamicSettings;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+
+/**
+ *
+ */
+public class BenchmarkNettyLargeMessages {
+
+ public static void main(String[] args) throws InterruptedException {
+ final ByteSizeValue payloadSize = new ByteSizeValue(10, ByteSizeUnit.MB);
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final int NUMBER_OF_CLIENTS = 5;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+
+ Settings settings = Settings.settingsBuilder()
+ .build();
+
+ NetworkService networkService = new NetworkService(settings);
+ NodeSettingsService settingsService = new NodeSettingsService(settings);
+ DynamicSettings dynamicSettings = new DynamicSettings();
+
+
+ final ThreadPool threadPool = new ThreadPool("BenchmarkNettyLargeMessages");
+ final TransportService transportServiceServer = new TransportService(
+ new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT), threadPool
+ ).start();
+ final TransportService transportServiceClient = new TransportService(
+ new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT), threadPool
+ ).start();
+
+ final DiscoveryNode bigNode = new DiscoveryNode("big", new InetSocketTransportAddress("localhost", 9300), Version.CURRENT);
+// final DiscoveryNode smallNode = new DiscoveryNode("small", new InetSocketTransportAddress("localhost", 9300));
+ final DiscoveryNode smallNode = bigNode;
+
+ transportServiceClient.connectToNode(bigNode);
+ transportServiceClient.connectToNode(smallNode);
+
+ transportServiceServer.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ transportServiceClient.submitRequest(bigNode, "benchmark", message, options().withType(TransportRequestOptions.Type.BULK), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+ latch.countDown();
+ }
+ }).start();
+ }
+
+ new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(2, BytesRef.EMPTY_BYTES);
+ long start = System.currentTimeMillis();
+ transportServiceClient.submitRequest(smallNode, "benchmark", message, options().withType(TransportRequestOptions.Type.STATE), new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ long took = System.currentTimeMillis() - start;
+ System.out.println("Took " + took + "ms");
+ }
+ }
+ }).start();
+
+ latch.await();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
new file mode 100644
index 0000000000..ff5c9c6226
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.StopWatch;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ *
+ */
+public class TransportBenchmark {
+
+ static enum Type {
+ LOCAL {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new LocalTransport(settings, threadPool, Version.CURRENT);
+ }
+ },
+ NETTY {
+ @Override
+ public Transport newTransport(Settings settings, ThreadPool threadPool) {
+ return new NettyTransport(settings, threadPool, new NetworkService(Settings.EMPTY), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT);
+ }
+ };
+
+ public abstract Transport newTransport(Settings settings, ThreadPool threadPool);
+ }
+
+ public static void main(String[] args) {
+ final String executor = ThreadPool.Names.GENERIC;
+ final boolean waitForRequest = true;
+ final ByteSizeValue payloadSize = new ByteSizeValue(100, ByteSizeUnit.BYTES);
+ final int NUMBER_OF_CLIENTS = 10;
+ final int NUMBER_OF_ITERATIONS = 100000;
+ final byte[] payload = new byte[(int) payloadSize.bytes()];
+ final AtomicLong idGenerator = new AtomicLong();
+ final Type type = Type.NETTY;
+
+
+ Settings settings = Settings.settingsBuilder()
+ .build();
+
+ final ThreadPool serverThreadPool = new ThreadPool("server");
+ final TransportService serverTransportService = new TransportService(type.newTransport(settings, serverThreadPool), serverThreadPool).start();
+
+ final ThreadPool clientThreadPool = new ThreadPool("client");
+ final TransportService clientTransportService = new TransportService(type.newTransport(settings, clientThreadPool), clientThreadPool).start();
+
+ final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT);
+
+ serverTransportService.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, executor, new TransportRequestHandler<BenchmarkMessageRequest>() {
+ @Override
+ public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new BenchmarkMessageResponse(request));
+ }
+ });
+
+ clientTransportService.connectToNode(node);
+
+ for (int i = 0; i < 10000; i++) {
+ BenchmarkMessageRequest message = new BenchmarkMessageRequest(1, payload);
+ clientTransportService.submitRequest(node, "benchmark", message, new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ }
+ }).txGet();
+ }
+
+
+ Thread[] clients = new Thread[NUMBER_OF_CLIENTS];
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS);
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int j = 0; j < NUMBER_OF_ITERATIONS; j++) {
+ final long id = idGenerator.incrementAndGet();
+ BenchmarkMessageRequest request = new BenchmarkMessageRequest(id, payload);
+ BaseTransportResponseHandler<BenchmarkMessageResponse> handler = new BaseTransportResponseHandler<BenchmarkMessageResponse>() {
+ @Override
+ public BenchmarkMessageResponse newInstance() {
+ return new BenchmarkMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return executor;
+ }
+
+ @Override
+ public void handleResponse(BenchmarkMessageResponse response) {
+ if (response.id() != id) {
+ System.out.println("NO ID MATCH [" + response.id() + "] and [" + id + "]");
+ }
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ latch.countDown();
+ }
+ };
+
+ if (waitForRequest) {
+ clientTransportService.submitRequest(node, "benchmark", request, handler).txGet();
+ } else {
+ clientTransportService.sendRequest(node, "benchmark", request, handler);
+ }
+ }
+ }
+ });
+ }
+
+ StopWatch stopWatch = new StopWatch().start();
+ for (int i = 0; i < NUMBER_OF_CLIENTS; i++) {
+ clients[i].start();
+ }
+
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ stopWatch.stop();
+
+ System.out.println("Ran [" + NUMBER_OF_CLIENTS + "], each with [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + stopWatch.totalTime() + "], TPS: " + (NUMBER_OF_CLIENTS * NUMBER_OF_ITERATIONS) / stopWatch.totalTime().secondsFrac());
+
+ clientTransportService.close();
+ clientThreadPool.shutdownNow();
+
+ serverTransportService.close();
+ serverThreadPool.shutdownNow();
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
new file mode 100644
index 0000000000..61686ebb7d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/transport/netty/NettyEchoBenchmark.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.transport.netty;
+
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+
+public class NettyEchoBenchmark {
+
+ public static void main(String[] args) {
+ final int payloadSize = 100;
+ int CYCLE_SIZE = 50000;
+ final long NUMBER_OF_ITERATIONS = 500000;
+
+ ChannelBuffer message = ChannelBuffers.buffer(100);
+ for (int i = 0; i < message.capacity(); i++) {
+ message.writeByte((byte) i);
+ }
+
+ // Configure the server.
+ ServerBootstrap serverBootstrap = new ServerBootstrap(
+ new NioServerSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(new EchoServerHandler());
+ }
+ });
+
+ // Bind and start to accept incoming connections.
+ serverBootstrap.bind(new InetSocketAddress(9000));
+
+ ClientBootstrap clientBootstrap = new ClientBootstrap(
+ new NioClientSocketChannelFactory(
+ Executors.newCachedThreadPool(),
+ Executors.newCachedThreadPool()));
+
+// ClientBootstrap clientBootstrap = new ClientBootstrap(
+// new OioClientSocketChannelFactory(Executors.newCachedThreadPool()));
+
+ // Set up the pipeline factory.
+ final EchoClientHandler clientHandler = new EchoClientHandler();
+ clientBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ return Channels.pipeline(clientHandler);
+ }
+ });
+
+ // Start the connection attempt.
+ ChannelFuture future = clientBootstrap.connect(new InetSocketAddress("localhost", 9000));
+ future.awaitUninterruptibly();
+ Channel clientChannel = future.getChannel();
+
+ System.out.println("Warming up...");
+ for (long i = 0; i < 10000; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ System.out.println("Warmed up");
+
+
+ long start = System.currentTimeMillis();
+ long cycleStart = System.currentTimeMillis();
+ for (long i = 1; i < NUMBER_OF_ITERATIONS; i++) {
+ clientHandler.latch = new CountDownLatch(1);
+ clientChannel.write(message);
+ try {
+ clientHandler.latch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ if ((i % CYCLE_SIZE) == 0) {
+ long cycleEnd = System.currentTimeMillis();
+ System.out.println("Ran 50000, TPS " + (CYCLE_SIZE / ((double) (cycleEnd - cycleStart) / 1000)));
+ cycleStart = cycleEnd;
+ }
+ }
+ long end = System.currentTimeMillis();
+ long seconds = (end - start) / 1000;
+ System.out.println("Ran [" + NUMBER_OF_ITERATIONS + "] iterations, payload [" + payloadSize + "]: took [" + seconds + "], TPS: " + ((double) NUMBER_OF_ITERATIONS) / seconds);
+
+ clientChannel.close().awaitUninterruptibly();
+ clientBootstrap.releaseExternalResources();
+ serverBootstrap.releaseExternalResources();
+ }
+
+ public static class EchoClientHandler extends SimpleChannelUpstreamHandler {
+
+ public volatile CountDownLatch latch;
+
+ public EchoClientHandler() {
+ }
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ latch.countDown();
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+
+
+ public static class EchoServerHandler extends SimpleChannelUpstreamHandler {
+
+ @Override
+ public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
+ e.getChannel().write(e.getMessage());
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+ // Close the connection when an exception is raised.
+ e.getCause().printStackTrace();
+ e.getChannel().close();
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java b/core/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
new file mode 100644
index 0000000000..d9995e1a20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/benchmark/uuid/SimpleUuidBenchmark.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.benchmark.uuid;
+
+import org.elasticsearch.common.StopWatch;
+
+import java.util.UUID;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ *
+ */
+public class SimpleUuidBenchmark {
+
+ private static long NUMBER_OF_ITERATIONS = 10000;
+ private static int NUMBER_OF_THREADS = 100;
+
+ public static void main(String[] args) throws Exception {
+ StopWatch stopWatch = new StopWatch().start();
+ System.out.println("Running " + NUMBER_OF_ITERATIONS);
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ System.out.println("Generated in " + stopWatch.stop().totalTime() + " TP Millis " + (NUMBER_OF_ITERATIONS / stopWatch.totalTime().millisFrac()));
+
+ System.out.println("Generating using " + NUMBER_OF_THREADS + " threads with " + NUMBER_OF_ITERATIONS + " iterations");
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ UUID.randomUUID().toString();
+ }
+ latch.countDown();
+ }
+ });
+ }
+ stopWatch = new StopWatch().start();
+ for (Thread thread : threads) {
+ thread.start();
+ }
+ latch.await();
+ stopWatch.stop();
+ System.out.println("Generate in " + stopWatch.totalTime() + " TP Millis " + ((NUMBER_OF_ITERATIONS * NUMBER_OF_THREADS) / stopWatch.totalTime().millisFrac()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java b/core/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
new file mode 100644
index 0000000000..b70ba62541
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.blocks;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class SimpleBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void verifyIndexAndClusterReadOnly() throws Exception {
+ // cluster.read_only = null: write and metadata not blocked
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ setIndexReadOnly("test1", "false");
+ canIndexExists("test1");
+
+ // cluster.read_only = true: block write and metadata
+ setClusterReadOnly(true);
+ canNotCreateIndex("test2");
+ // even if index has index.read_only = false
+ canNotIndexDocument("test1");
+ canIndexExists("test1");
+
+ // cluster.read_only = false: removes the block
+ setClusterReadOnly(false);
+ canCreateIndex("test2");
+ canIndexDocument("test2");
+ canIndexDocument("test1");
+ canIndexExists("test1");
+
+
+        // a newly created index has no blocks
+ canCreateIndex("ro");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+
+ // adds index write and metadata block
+ setIndexReadOnly( "ro", "true");
+ canNotIndexDocument("ro");
+ canIndexExists("ro");
+
+ // other indices not blocked
+ canCreateIndex("rw");
+ canIndexDocument("rw");
+ canIndexExists("rw");
+
+ // blocks can be removed
+ setIndexReadOnly("ro", "false");
+ canIndexDocument("ro");
+ canIndexExists("ro");
+ }
+
+ @Test
+ public void testIndexReadWriteMetaDataBlocks() {
+ canCreateIndex("test1");
+ canIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, true))
+ .execute().actionGet();
+ canNotIndexDocument("test1");
+ client().admin().indices().prepareUpdateSettings("test1")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_BLOCKS_WRITE, false))
+ .execute().actionGet();
+ canIndexDocument("test1");
+ }
+
+ private void canCreateIndex(String index) {
+ try {
+ CreateIndexResponse r = client().admin().indices().prepareCreate(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotCreateIndex(String index) {
+ try {
+ client().admin().indices().prepareCreate(index).execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ IndexResponse r = builder.execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexDocument(String index) {
+ try {
+ IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
+ builder.setSource("foo", "bar");
+ builder.execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void canIndexExists(String index) {
+ try {
+ IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
+ assertThat(r, notNullValue());
+ } catch (ClusterBlockException e) {
+ fail();
+ }
+ }
+
+ private void canNotIndexExists(String index) {
+ try {
+ IndicesExistsResponse r = client().admin().indices().prepareExists(index).execute().actionGet();
+ fail();
+ } catch (ClusterBlockException e) {
+ // all is well
+ }
+ }
+
+ private void setIndexReadOnly(String index, Object value) {
+ HashMap<String, Object> newSettings = new HashMap<>();
+ newSettings.put(IndexMetaData.SETTING_READ_ONLY, value);
+
+ UpdateSettingsRequestBuilder settingsRequest = client().admin().indices().prepareUpdateSettings(index);
+ settingsRequest.setSettings(newSettings);
+ UpdateSettingsResponse settingsResponse = settingsRequest.execute().actionGet();
+ assertThat(settingsResponse, notNullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
new file mode 100644
index 0000000000..3548a48a4c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.apache.lucene.util.TestSecurityManager;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.bootstrap.ESPolicy;
+import org.elasticsearch.bootstrap.Security;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+
+import java.io.FilePermission;
+import java.nio.file.Path;
+import java.security.Permissions;
+import java.security.Policy;
+import java.util.Objects;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+
+/**
+ * Initializes natives and installs test security manager
+ * (init'd early by base classes to ensure it happens regardless of which
+ * test case happens to be first, test ordering, etc).
+ * <p>
+ * The idea is to mimic as much as possible what happens with ES in production
+ * mode (e.g. assign permissions and install security manager the same way)
+ */
+public class BootstrapForTesting {
+
+ // TODO: can we share more code with the non-test side here
+ // without making things complex???
+
+ static {
+ // just like bootstrap, initialize natives, then SM
+ Bootstrap.initializeNatives(true, true, true);
+
+ // make sure java.io.tmpdir exists always (in case code uses it in a static initializer)
+ Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
+ "please set ${java.io.tmpdir} in pom.xml"));
+ try {
+ Security.ensureDirectoryExists(javaTmpDir);
+ } catch (Exception e) {
+ throw new RuntimeException("unable to create test temp directory", e);
+ }
+
+ // install security manager if requested
+ if (systemPropertyAsBoolean("tests.security.manager", false)) {
+ try {
+ // initialize paths the same exact way as bootstrap.
+ Permissions perms = new Permissions();
+ Path basedir = PathUtils.get(Objects.requireNonNull(System.getProperty("project.basedir"),
+ "please set ${project.basedir} in pom.xml"));
+ // target/classes, target/test-classes
+ Security.addPath(perms, basedir.resolve("target").resolve("classes"), "read,readlink");
+ Security.addPath(perms, basedir.resolve("target").resolve("test-classes"), "read,readlink");
+ // lib/sigar
+ Security.addPath(perms, basedir.resolve("lib").resolve("sigar"), "read,readlink");
+ // .m2/repository
+ Path m2repoDir = PathUtils.get(Objects.requireNonNull(System.getProperty("m2.repository"),
+ "please set ${m2.repository} in pom.xml"));
+ Security.addPath(perms, m2repoDir, "read,readlink");
+ // java.io.tmpdir
+ Security.addPath(perms, javaTmpDir, "read,readlink,write,delete");
+ // custom test config file
+ if (Strings.hasLength(System.getProperty("tests.config"))) {
+ perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink"));
+ }
+ Policy.setPolicy(new ESPolicy(perms));
+ System.setSecurityManager(new TestSecurityManager());
+ Security.selfTest();
+ } catch (Exception e) {
+ throw new RuntimeException("unable to install test security manager", e);
+ }
+ }
+ }
+
+ // does nothing, just easy way to make sure the class is loaded.
+ public static void ensureInitialized() {}
+}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JNANativesTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JNANativesTests.java
new file mode 100644
index 0000000000..ead01b38cd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bootstrap/JNANativesTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class JNANativesTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMlockall() {
+ if (Constants.MAC_OS_X) {
+ assertFalse("Memory locking is not available on OS X platforms", JNANatives.LOCAL_MLOCKALL);
+ }
+ }
+
+ @Test
+ public void testConsoleCtrlHandler() {
+ if (Constants.WINDOWS) {
+ assertNotNull(JNAKernel32Library.getInstance());
+ assertThat(JNAKernel32Library.getInstance().getCallbacks().size(), equalTo(1));
+ } else {
+ assertNotNull(JNAKernel32Library.getInstance());
+ assertThat(JNAKernel32Library.getInstance().getCallbacks().size(), equalTo(0));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java
new file mode 100644
index 0000000000..e939638114
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.FilePermission;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.Permissions;
+
+public class SecurityTests extends ElasticsearchTestCase {
+
+ /** test generated permissions */
+ public void testGeneratedPermissions() throws Exception {
+ Path path = createTempDir();
+ // make a fake ES home and ensure we only grant permissions to that.
+ Path esHome = path.resolve("esHome");
+ Settings.Builder settingsBuilder = Settings.builder();
+ settingsBuilder.put("path.home", esHome.toString());
+ Settings settings = settingsBuilder.build();
+
+ Path fakeTmpDir = createTempDir();
+ String realTmpDir = System.getProperty("java.io.tmpdir");
+ Permissions permissions;
+ try {
+ System.setProperty("java.io.tmpdir", fakeTmpDir.toString());
+ Environment environment = new Environment(settings);
+ permissions = Security.createPermissions(environment);
+ } finally {
+ System.setProperty("java.io.tmpdir", realTmpDir);
+ }
+
+ // the fake es home
+ assertTrue(permissions.implies(new FilePermission(esHome.toString(), "read")));
+ // its parent
+ assertFalse(permissions.implies(new FilePermission(path.toString(), "read")));
+ // some other sibling
+ assertFalse(permissions.implies(new FilePermission(path.resolve("other").toString(), "read")));
+ // double check we overwrote java.io.tmpdir correctly for the test
+ assertFalse(permissions.implies(new FilePermission(realTmpDir.toString(), "read")));
+ }
+
+ /** test generated permissions for all configured paths */
+ public void testEnvironmentPaths() throws Exception {
+ Path path = createTempDir();
+
+ Settings.Builder settingsBuilder = Settings.builder();
+ settingsBuilder.put("path.home", path.resolve("home").toString());
+ settingsBuilder.put("path.conf", path.resolve("conf").toString());
+ settingsBuilder.put("path.plugins", path.resolve("plugins").toString());
+ settingsBuilder.putArray("path.data", path.resolve("data1").toString(), path.resolve("data2").toString());
+ settingsBuilder.put("path.logs", path.resolve("logs").toString());
+ settingsBuilder.put("pidfile", path.resolve("test.pid").toString());
+ Settings settings = settingsBuilder.build();
+
+ Path fakeTmpDir = createTempDir();
+ String realTmpDir = System.getProperty("java.io.tmpdir");
+ Permissions permissions;
+ Environment environment;
+ try {
+ System.setProperty("java.io.tmpdir", fakeTmpDir.toString());
+ environment = new Environment(settings);
+ permissions = Security.createPermissions(environment);
+ } finally {
+ System.setProperty("java.io.tmpdir", realTmpDir);
+ }
+
+ // check that all directories got permissions:
+ // homefile: this is needed unless we break out rules for "lib" dir.
+ // TODO: make read-only
+ assertTrue(permissions.implies(new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete")));
+ // config file
+ // TODO: make read-only
+ assertTrue(permissions.implies(new FilePermission(environment.configFile().toString(), "read,readlink,write,delete")));
+ // plugins: r/w, TODO: can this be minimized?
+ assertTrue(permissions.implies(new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete")));
+ // data paths: r/w
+ for (Path dataPath : environment.dataFiles()) {
+ assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete")));
+ }
+ for (Path dataPath : environment.dataWithClusterFiles()) {
+ assertTrue(permissions.implies(new FilePermission(dataPath.toString(), "read,readlink,write,delete")));
+ }
+ // logs: r/w
+ assertTrue(permissions.implies(new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete")));
+ // temp dir: r/w
+ assertTrue(permissions.implies(new FilePermission(fakeTmpDir.toString(), "read,readlink,write,delete")));
+ // double check we overwrote java.io.tmpdir correctly for the test
+ assertFalse(permissions.implies(new FilePermission(realTmpDir.toString(), "read")));
+ // PID file: r/w
+ assertTrue(permissions.implies(new FilePermission(environment.pidFile().toString(), "read,readlink,write,delete")));
+ }
+
+ public void testEnsureExists() throws IOException {
+ Path p = createTempDir();
+
+ // directory exists
+ Path exists = p.resolve("exists");
+ Files.createDirectory(exists);
+ Security.ensureDirectoryExists(exists);
+ Files.createTempFile(exists, null, null);
+ }
+
+ public void testEnsureNotExists() throws IOException {
+ Path p = createTempDir();
+
+ // directory does not exist: create it
+ Path notExists = p.resolve("notexists");
+ Security.ensureDirectoryExists(notExists);
+ Files.createTempFile(notExists, null, null);
+ }
+
+ public void testEnsureRegularFile() throws IOException {
+ Path p = createTempDir();
+
+ // regular file
+ Path regularFile = p.resolve("regular");
+ Files.createFile(regularFile);
+ try {
+ Security.ensureDirectoryExists(regularFile);
+ fail("didn't get expected exception");
+ } catch (IOException expected) {}
+ }
+
+ public void testEnsureSymlink() throws IOException {
+ Path p = createTempDir();
+
+ Path exists = p.resolve("exists");
+ Files.createDirectory(exists);
+
+ // symlink
+ Path linkExists = p.resolve("linkExists");
+ try {
+ Files.createSymbolicLink(linkExists, exists);
+ } catch (UnsupportedOperationException | IOException e) {
+ assumeNoException("test requires filesystem that supports symbolic links", e);
+ } catch (SecurityException e) {
+ assumeNoException("test cannot create symbolic links with security manager enabled", e);
+ }
+ Security.ensureDirectoryExists(linkExists);
+ Files.createTempFile(linkExists, null, null);
+ }
+
+ public void testEnsureBrokenSymlink() throws IOException {
+ Path p = createTempDir();
+
+ // broken symlink
+ Path brokenLink = p.resolve("brokenLink");
+ try {
+ Files.createSymbolicLink(brokenLink, p.resolve("nonexistent"));
+ } catch (UnsupportedOperationException | IOException e) {
+ assumeNoException("test requires filesystem that supports symbolic links", e);
+ } catch (SecurityException e) {
+ assumeNoException("test cannot create symbolic links with security manager enabled", e);
+ }
+ try {
+ Security.ensureDirectoryExists(brokenLink);
+ fail("didn't get expected exception");
+ } catch (IOException expected) {}
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java b/core/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
new file mode 100644
index 0000000000..baaa5045be
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/broadcast/BroadcastActionsTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.broadcast;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.countRequest;
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class BroadcastActionsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testBroadcastOperations() throws IOException {
+ assertAcked(prepareCreate("test", 1).execute().actionGet(5000));
+
+ NumShards numShards = getNumShards("test");
+
+ logger.info("Running Cluster Health");
+ ensureYellow();
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ flush();
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ refresh();
+
+ logger.info("Count");
+ // check count
+ for (int i = 0; i < 5; i++) {
+ // test successful
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(termQuery("_type", "type1"))
+ .get();
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+
+ for (int i = 0; i < 5; i++) {
+ // test failed (simply query that can't be parsed)
+ try {
+ client().count(countRequest("test").source("{ term : { _type : \"type1 } }".getBytes(Charsets.UTF_8))).actionGet();
+ } catch(SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, equalTo(numShards.numPrimaries));
+ }
+ }
+ }
+
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java
new file mode 100644
index 0000000000..aac1e3c73d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ExecutionException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ElasticsearchIntegrationTest.ClusterScope(numDataNodes = 0, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
+public class BasicAnalysisBackwardCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ // This pattern match characters with Line_Break = Complex_Content.
+ final static Pattern complexUnicodeChars = Pattern.compile("[\u17B4\u17B5\u17D3\u17CB-\u17D1\u17DD\u1036\u17C6\u1A74\u1038\u17C7\u0E4E\u0E47-\u0E4D\u0EC8-\u0ECD\uAABF\uAAC1\u1037\u17C8-\u17CA\u1A75-\u1A7C\u1AA8-\u1AAB\uAADE\uAADF\u1AA0-\u1AA6\u1AAC\u1AAD\u109E\u109F\uAA77-\uAA79\u0E46\u0EC6\u17D7\u1AA7\uA9E6\uAA70\uAADD\u19DA\u0E01-\u0E3A\u0E40-\u0E45\u0EDE\u0E81\u0E82\u0E84\u0E87\u0E88\u0EAA\u0E8A\u0EDF\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAB\u0EDC\u0EDD\u0EAD-\u0EB9\u0EBB-\u0EBD\u0EC0-\u0EC4\uAA80-\uAABE\uAAC0\uAAC2\uAADB\uAADC\u1000\u1075\u1001\u1076\u1002\u1077\uAA60\uA9E9\u1003\uA9E0\uA9EA\u1004\u105A\u1005\u1078\uAA61\u1006\uA9E1\uAA62\uAA7E\u1007\uAA63\uA9EB\u1079\uAA72\u1008\u105B\uA9E2\uAA64\uA9EC\u1061\uAA7F\u1009\u107A\uAA65\uA9E7\u100A\u100B\uAA66\u100C\uAA67\u100D\uAA68\uA9ED\u100E\uAA69\uA9EE\u100F\u106E\uA9E3\uA9EF\u1010-\u1012\u107B\uA9FB\u1013\uAA6A\uA9FC\u1014\u107C\uAA6B\u105E\u1015\u1016\u107D\u107E\uAA6F\u108E\uA9E8\u1017\u107F\uA9FD\u1018\uA9E4\uA9FE\u1019\u105F\u101A\u103B\u101B\uAA73\uAA7A\u103C\u101C\u1060\u101D\u103D\u1082\u1080\u1050\u1051\u1065\u101E\u103F\uAA6C\u101F\u1081\uAA6D\u103E\uAA6E\uAA71\u1020\uA9FA\u105C\u105D\u106F\u1070\u1066\u1021-\u1026\u1052-\u1055\u1027-\u102A\u102C\u102B\u1083\u1072\u109C\u102D\u1071\u102E\u1033\u102F\u1073\u1074\u1030\u1056-\u1059\u1031\u1084\u1035\u1085\u1032\u109D\u1034\u1062\u1067\u1068\uA9E5\u1086\u1039\u103A\u1063\u1064\u1069-\u106D\u1087\u108B\u1088\u108C\u108D\u1089\u108A\u108F\u109A\u109B\uAA7B-\uAA7D\uAA74-\uAA76\u1780-\u17A2\u17DC\u17A3-\u17B3\u17B6-\u17C5\u17D2\u1950-\u196D\u1970-\u1974\u1980-\u199C\u19DE\u19DF\u199D-\u19AB\u19B0-\u19C9\u1A20-\u1A26\u1A58\u1A59\u1A27-\u1A3B\u1A5A\u1A5B\u1A3C-\u1A46\u1A54\u1A47-\u1A4C\u1A53\u1A6B\u1A55-\u1A57\u1A5C-\u1A5E\u1A4D-\u1A52\u1A61\u1A6C\u1A62-\u1A6A\u1A6E\u1A6F\u1A73\u1A70-\u1A72\u1A6D\u1A60]");
+
+ /**
+ * Simple upgrade test for analyzers to make sure they analyze to the same tokens after upgrade
+ * TODO we need this for random tokenizers / tokenfilters as well
+ */
+ @Test
+ public void testAnalyzerTokensAfterUpgrade() throws IOException, ExecutionException, InterruptedException {
+ int numFields = randomIntBetween(PreBuiltAnalyzers.values().length, PreBuiltAnalyzers.values().length * 10);
+ StringBuilder builder = new StringBuilder();
+ String[] fields = new String[numFields * 2];
+ int fieldId = 0;
+ for (int i = 0; i < fields.length; i++) {
+ fields[i++] = "field_" + fieldId++;
+ String analyzer = randomAnalyzer();
+ fields[i] = "type=string,analyzer=" + analyzer;
+ }
+ assertAcked(prepareCreate("test")
+ .addMapping("type", fields)
+ .setSettings(indexSettings()));
+ ensureYellow();
+ InputOutput[] inout = new InputOutput[numFields];
+ for (int i = 0; i < numFields; i++) {
+ String input;
+ Matcher matcher;
+ do {
+ // In Lucene 4.10, a bug was fixed in StandardTokenizer which was causing breaks on complex characters.
+ // The bug was fixed without backcompat Version handling, so testing between >=4.10 vs <= 4.9 can
+ // cause differences when the random string generated contains these complex characters. To mitigate
+ // the problem, we skip any strings containing these characters.
+ // TODO: only skip strings containing complex chars when comparing against ES <= 1.3.x
+ input = TestUtil.randomAnalysisString(getRandom(), 100, false);
+ matcher = complexUnicodeChars.matcher(input);
+ } while (matcher.find());
+
+ AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", input).setField("field_" + i).get();
+ inout[i] = new InputOutput(test, input, "field_" + i);
+ }
+
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ } while (upgraded);
+
+ for (int i = 0; i < inout.length; i++) {
+ InputOutput inputOutput = inout[i];
+ AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", inputOutput.input).setField(inputOutput.field).get();
+ List<AnalyzeResponse.AnalyzeToken> tokens = test.getTokens();
+ List<AnalyzeResponse.AnalyzeToken> expectedTokens = inputOutput.response.getTokens();
+ assertThat("size mismatch field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + BaseTokenStreamTestCase.escape(inputOutput.input), expectedTokens.size(), equalTo(tokens.size()));
+ for (int j = 0; j < tokens.size(); j++) {
+ String msg = "failed for term: " + expectedTokens.get(j).getTerm() + " field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + BaseTokenStreamTestCase.escape(inputOutput.input);
+ assertThat(msg, BaseTokenStreamTestCase.escape(expectedTokens.get(j).getTerm()), equalTo(BaseTokenStreamTestCase.escape(tokens.get(j).getTerm())));
+ assertThat(msg, expectedTokens.get(j).getPosition(), equalTo(tokens.get(j).getPosition()));
+ assertThat(msg, expectedTokens.get(j).getStartOffset(), equalTo(tokens.get(j).getStartOffset()));
+ assertThat(msg, expectedTokens.get(j).getEndOffset(), equalTo(tokens.get(j).getEndOffset()));
+ assertThat(msg, expectedTokens.get(j).getType(), equalTo(tokens.get(j).getType()));
+ }
+ }
+ }
+
+ private String randomAnalyzer() {
+ while(true) {
+ PreBuiltAnalyzers preBuiltAnalyzers = RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values());
+ if (preBuiltAnalyzers == PreBuiltAnalyzers.SORANI && compatibilityVersion().before(Version.V_1_3_0)) {
+ continue; // SORANI was added in 1.3.0
+ }
+ return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT);
+ }
+
+ }
+
+ private static final class InputOutput {
+ final AnalyzeResponse response;
+ final String input;
+ final String field;
+
+ public InputOutput(AnalyzeResponse response, String input, String field) {
+ this.response = response;
+ this.input = input;
+ this.field = field;
+ }
+
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java
new file mode 100644
index 0000000000..77360425f2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java
@@ -0,0 +1,720 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.util.English;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.termvectors.TermVectorsResponse;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ /**
+ * Basic test using Index & Realtime Get with external versioning. This test ensures routing works correctly across versions.
+ */
+ @Test
+ public void testExternalVersion() throws Exception {
+ createIndex("test");
+ final boolean routing = randomBoolean();
+ int numDocs = randomIntBetween(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
+ final long version = randomIntBetween(0, Integer.MAX_VALUE);
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(version).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
+ GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(version).get();
+ assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(version));
+ final long nextVersion = version + randomIntBetween(0, Integer.MAX_VALUE);
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(nextVersion).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
+ get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(nextVersion).get();
+ assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(nextVersion));
+ }
+ }
+
+ /**
+ * Basic test using Index & Realtime Get with internal versioning. This test ensures routing works correctly across versions.
+ */
+ @Test
+ public void testInternalVersion() throws Exception {
+ createIndex("test");
+ final boolean routing = randomBoolean();
+ int numDocs = randomIntBetween(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
+ String id = Integer.toString(i);
+ assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
+ GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get();
+ assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(1l));
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
+ get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get();
+ assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(2l));
+ }
+
+ assertVersionCreated(compatibilityVersion(), "test");
+ }
+
+ /**
+ * Very basic bw compat test with a mixed version cluster random indexing and lookup by ID via term query
+ */
+ @Test
+ public void testIndexAndSearch() throws Exception {
+ createIndex("test");
+ int numDocs = randomIntBetween(10, 20);
+ List<IndexRequestBuilder> builder = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
+ }
+ indexRandom(true, builder);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
+ }
+ assertVersionCreated(compatibilityVersion(), "test");
+ }
+
+ @Test
+ public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
+ if (backwardsCluster().numNewDataNodes() == 0) {
+ backwardsCluster().startNewNode();
+ }
+ assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ ensureYellow();
+ assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
+ int numDocs = randomIntBetween(100, 150);
+ ArrayList<String> ids = new ArrayList<>();
+ logger.info(" --> indexing [{}] docs", numDocs);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i);
+ ids.add(id);
+ docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+
+ if (randomBoolean()) {
+ logger.info(" --> moving index to new nodes");
+ backwardsCluster().allowOnlyNewNodes("test");
+ } else {
+ logger.info(" --> allow index to on all nodes");
+ backwardsCluster().allowOnAllNodes("test");
+ }
+
+ logger.info(" --> indexing [{}] more docs", numDocs);
+ // sometimes index while relocating
+ if (randomBoolean()) {
+ for (int i = 0; i < numDocs; i++) {
+ String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i);
+ ids.add(id);
+ docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(numDocs + i));
+ }
+ indexRandom(true, docs);
+ if (compatibilityVersion().before(Version.V_1_3_0)) {
+ // issue another refresh through a new node to side step issue #6545
+ assertNoFailures(backwardsCluster().internalCluster().dataNodeClient().admin().indices().prepareRefresh().setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+ }
+ numDocs *= 2;
+ }
+
+ logger.info(" --> waiting for relocation to complete", numDocs);
+ ensureYellow("test");// move all shards to the new node (it waits on relocation)
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ assertSearchHits(client().prepareSearch().setSize(ids.size()).get(), ids.toArray(new String[ids.size()]));
+ }
+ assertVersionCreated(compatibilityVersion(), "test");
+ }
+
+ /**
+ * Test that ensures that we will never recover from a newer to an older version (we are not forward compatible)
+ */
+ @Test
+ public void testNoRecoveryFromNewNodes() throws ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().backwardsNodePattern()).put(indexSettings())));
+ if (backwardsCluster().numNewDataNodes() == 0) {
+ backwardsCluster().startNewNode();
+ }
+ ensureYellow();
+ assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
+ if (randomBoolean()) {
+ backwardsCluster().allowOnAllNodes("test");
+ }
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
+ }
+ indexRandom(true, docs);
+ backwardsCluster().allowOnAllNodes("test");
+ while (ensureYellow() != ClusterHealthStatus.GREEN) {
+ backwardsCluster().startNewNode();
+ }
+ assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ assertSimpleSort("num_double", "num_int");
+ }
+ assertVersionCreated(compatibilityVersion(), "test");
+ }
+
+
+ public void assertSimpleSort(String... numericFields) {
+ for (String field : numericFields) {
+ SearchResponse searchResponse = client().prepareSearch().addSort(field, SortOrder.ASC).get();
+ SearchHit[] hits = searchResponse.getHits().getHits();
+ assertThat(hits.length, greaterThan(0));
+ Number previous = null;
+ for (SearchHit hit : hits) {
+ assertNotNull(hit.getSource().get(field));
+ if (previous != null) {
+ assertThat(previous.doubleValue(), lessThanOrEqualTo(((Number) hit.getSource().get(field)).doubleValue()));
+ }
+ previous = (Number) hit.getSource().get(field);
+ }
+ }
+ }
+
+ @Override
+ public void assertAllShardsOnNodes(String index, String pattern) {
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Upgrades a single node to the current version
+ */
+ @Test
+ public void testIndexUpgradeSingleNode() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ ensureYellow();
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
+ }
+
+ indexRandom(true, docs);
+ assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes("test");
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ if (randomBoolean()) {
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
+ }
+ indexRandom(true, docs);
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ ensureYellow();
+ final int numIters = randomIntBetween(1, 20);
+ for (int i = 0; i < numIters; i++) {
+ assertHitCount(client().prepareCount().get(), numDocs);
+ assertSimpleSort("num_double", "num_int");
+ }
+ assertVersionCreated(compatibilityVersion(), "test");
+ }
+
+ /**
+ * Test that allocates an index on one or more old nodes and then does a rolling upgrade:
+ * one node after another is shut down and restarted with a newer version, and we verify
+ * that all documents are still around after each node's upgrade.
+ */
+ @Test
+ public void testIndexRollingUpgrade() throws Exception {
+ String[] indices = new String[randomIntBetween(1, 3)];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = "test" + i;
+ assertAcked(prepareCreate(indices[i]).setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ }
+
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ String[] indexForDoc = new String[docs.length];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex(indexForDoc[i] = RandomPicks.randomFrom(getRandom(), indices), "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
+ }
+ indexRandom(true, docs);
+ for (String index : indices) {
+ assertAllShardsOnNodes(index, backwardsCluster().backwardsNodePattern());
+ }
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes(indices);
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ assertSimpleSort("num_double", "num_int");
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex(indexForDoc[i], "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i), "num_int", randomInt(), "num_double", randomDouble());
+ }
+ indexRandom(true, docs);
+ } while (upgraded);
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ ensureYellow();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ assertSimpleSort("num_double", "num_int");
+
+ String[] newIndices = new String[randomIntBetween(1, 3)];
+
+ for (int i = 0; i < newIndices.length; i++) {
+ newIndices[i] = "new_index" + i;
+ createIndex(newIndices[i]);
+ }
+ assertVersionCreated(Version.CURRENT, newIndices); // new indices are all created with the new version
+ assertVersionCreated(compatibilityVersion(), indices);
+ }
+
+ public void assertVersionCreated(Version version, String... indices) {
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indices).get();
+ ImmutableOpenMap<String, Settings> indexToSettings = getSettingsResponse.getIndexToSettings();
+ for (String index : indices) {
+ Settings settings = indexToSettings.get(index);
+ assertThat(settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null), notNullValue());
+ assertThat(settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null), equalTo(version));
+ }
+ }
+
+
+ @Test
+ public void testUnsupportedFeatures() throws IOException {
+ XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent)
+ .startObject()
+ .startObject("type")
+ .startObject(FieldNamesFieldMapper.NAME)
+ // by randomly setting "index" to "no" we also test the pre-1.3 behavior
+ .field("index", randomFrom("no", "not_analyzed"))
+ .field("store", randomFrom("no", "yes"))
+ .endObject()
+ .endObject()
+ .endObject();
+
+ try {
+ assertAcked(prepareCreate("test").
+ setSettings(Settings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings()))
+ .addMapping("type", mapping));
+ } catch (MapperParsingException ex) {
+ assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
+ assertThat(ExceptionsHelper.detailedMessage(ex).contains("type=_field_names is not supported on indices created before version 1.3.0"), equalTo(true));
+ }
+
+ }
+
+ /**
+ * This filter had a major upgrade in 1.3 where we started to index the field names. Let's see if they still work as expected...
+ * this test is basically copied from SimpleQueryTests...
+ */
+ @Test
+ public void testExistsFilter() throws IOException, ExecutionException, InterruptedException {
+ int indexId = 0;
+ String indexName;
+
+ for (; ; ) {
+ indexName = "test_"+indexId++;
+ createIndex(indexName);
+ ensureYellow();
+ indexRandom(true,
+ client().prepareIndex(indexName, "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex(indexName, "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex(indexName, "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex(indexName, "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
+
+ CountResponse countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsQuery("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(existsQuery("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryStringQuery("_exists_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsQuery("field2"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsQuery("field3"))).get();
+ assertHitCount(countResponse, 1l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsQuery("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), existsQuery("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingQuery("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingQuery("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(constantScoreQuery(missingQuery("field1"))).get();
+ assertHitCount(countResponse, 2l);
+
+ countResponse = client().prepareCount().setQuery(queryStringQuery("_missing_:field1")).get();
+ assertHitCount(countResponse, 2l);
+
+ // wildcard check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingQuery("x*"))).get();
+ assertHitCount(countResponse, 2l);
+
+ // object check
+ countResponse = client().prepareCount().setQuery(filteredQuery(matchAllQuery(), missingQuery("obj1"))).get();
+ assertHitCount(countResponse, 2l);
+ if (!backwardsCluster().upgradeOneNode()) {
+ break;
+ }
+ ensureYellow();
+ assertVersionCreated(compatibilityVersion(), indexName); // we had an old node in the cluster so we have to be on the compat version
+ assertAcked(client().admin().indices().prepareDelete(indexName));
+ }
+
+ assertVersionCreated(Version.CURRENT, indexName); // after upgrade we have current version
+ }
+
+
+ public Version getMasterVersion() {
+ return client().admin().cluster().prepareState().get().getState().nodes().masterNode().getVersion();
+ }
+
+ @Test
+ public void testDeleteRoutingRequired() throws ExecutionException, InterruptedException, IOException {
+ createIndexWithAlias();
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource(
+ XContentFactory.jsonBuilder().startObject().startObject("test").startObject("_routing").field("required", true).endObject().endObject().endObject()));
+ ensureYellow("test");
+
+ int numDocs = iterations(10, 50);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs - 2; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "test", Integer.toString(i))
+ .setRouting(randomAsciiOfLength(randomIntBetween(1, 10))).setSource("field", "value");
+ }
+ String firstDocId = Integer.toString(numDocs - 2);
+ indexRequestBuilders[numDocs - 2] = client().prepareIndex("test", "test", firstDocId)
+ .setRouting("routing").setSource("field", "value");
+ String secondDocId = Integer.toString(numDocs - 1);
+ String secondRouting = randomAsciiOfLength(randomIntBetween(1, 10));
+ indexRequestBuilders[numDocs - 1] = client().prepareIndex("test", "test", secondDocId)
+ .setRouting(secondRouting).setSource("field", "value");
+
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse searchResponse = client().prepareSearch("test").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs));
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "test", firstDocId).setRouting("routing").get();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ GetResponse getResponse = client().prepareGet("test", "test", firstDocId).setRouting("routing").get();
+ assertThat(getResponse.isExists(), equalTo(false));
+ refresh();
+ searchResponse = client().prepareSearch("test").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs - 1));
+ }
+
+ @Test
+ public void testIndexGetAndDelete() throws ExecutionException, InterruptedException {
+ createIndexWithAlias();
+ ensureYellow("test");
+
+ int numDocs = iterations(10, 50);
+ for (int i = 0; i < numDocs; i++) {
+ IndexResponse indexResponse = client().prepareIndex(indexOrAlias(), "type", Integer.toString(i)).setSource("field", "value-" + i).get();
+ assertThat(indexResponse.isCreated(), equalTo(true));
+ assertThat(indexResponse.getIndex(), equalTo("test"));
+ assertThat(indexResponse.getType(), equalTo("type"));
+ assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
+ }
+ refresh();
+
+ String docId = Integer.toString(randomIntBetween(0, numDocs - 1));
+ GetResponse getResponse = client().prepareGet(indexOrAlias(), "type", docId).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getIndex(), equalTo("test"));
+ assertThat(getResponse.getType(), equalTo("type"));
+ assertThat(getResponse.getId(), equalTo(docId));
+
+ DeleteResponse deleteResponse = client().prepareDelete(indexOrAlias(), "type", docId).get();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getIndex(), equalTo("test"));
+ assertThat(deleteResponse.getType(), equalTo("type"));
+ assertThat(deleteResponse.getId(), equalTo(docId));
+
+ getResponse = client().prepareGet(indexOrAlias(), "type", docId).get();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch(indexOrAlias()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs - 1));
+ }
+
+ @Test
+ public void testUpdate() {
+ createIndexWithAlias();
+ ensureYellow("test");
+
+ UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert("field1", "value1").setDoc("field2", "value2");
+
+ UpdateResponse updateResponse = updateRequestBuilder.get();
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getType(), equalTo("type1"));
+ assertThat(updateResponse.getId(), equalTo("1"));
+ assertThat(updateResponse.isCreated(), equalTo(true));
+
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().containsKey("field1"), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().containsKey("field2"), equalTo(false));
+
+ updateResponse = updateRequestBuilder.get();
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getType(), equalTo("type1"));
+ assertThat(updateResponse.getId(), equalTo("1"));
+ assertThat(updateResponse.isCreated(), equalTo(false));
+
+ getResponse = client().prepareGet("test", "type1", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().containsKey("field1"), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().containsKey("field2"), equalTo(true));
+ }
+
+ @Test
+ public void testAnalyze() {
+ createIndexWithAlias();
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,analyzer=keyword"));
+ ensureYellow("test");
+ AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("this is a test").setIndex(indexOrAlias()).setField("field").get();
+ assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+ assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
+ }
+
+ @Test
+ public void testExplain() {
+ createIndexWithAlias();
+ ensureYellow("test");
+
+ client().prepareIndex(indexOrAlias(), "test", "1").setSource("field", "value1").get();
+ refresh();
+
+ ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.termQuery("field", "value1")).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.isMatch(), equalTo(true));
+ assertThat(response.getExplanation(), notNullValue());
+ assertThat(response.getExplanation().isMatch(), equalTo(true));
+ assertThat(response.getExplanation().getDetails().length, equalTo(1));
+ }
+
+ @Test
+ public void testGetTermVector() throws IOException {
+ createIndexWithAlias();
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource("field", "type=string,term_vector=with_positions_offsets_payloads").get());
+ ensureYellow("test");
+
+ client().prepareIndex(indexOrAlias(), "type1", "1")
+ .setSource("field", "the quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").get();
+ assertThat(termVectorsResponse.getIndex(), equalTo("test"));
+ assertThat(termVectorsResponse.isExists(), equalTo(true));
+ Fields fields = termVectorsResponse.getFields();
+ assertThat(fields.size(), equalTo(1));
+ assertThat(fields.terms("field").size(), equalTo(8l));
+ }
+
+ @Test
+ public void testIndicesStats() {
+ createIndex("test");
+ ensureYellow("test");
+
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().all().get();
+ assertThat(indicesStatsResponse.getIndices().size(), equalTo(1));
+ assertThat(indicesStatsResponse.getIndices().containsKey("test"), equalTo(true));
+ }
+
+ @Test
+ public void testMultiGet() throws ExecutionException, InterruptedException {
+ createIndexWithAlias();
+ ensureYellow("test");
+
+ int numDocs = iterations(10, 50);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + Integer.toString(i));
+ }
+ indexRandom(false, indexRequestBuilders);
+
+ int iterations = iterations(1, numDocs);
+ MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
+ for (int i = 0; i < iterations; i++) {
+ multiGetRequestBuilder.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(randomInt(numDocs - 1))));
+ }
+ MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(iterations));
+ for (int i = 0; i < multiGetResponse.getResponses().length; i++) {
+ MultiGetItemResponse multiGetItemResponse = multiGetResponse.getResponses()[i];
+ assertThat(multiGetItemResponse.isFailed(), equalTo(false));
+ assertThat(multiGetItemResponse.getIndex(), equalTo("test"));
+ assertThat(multiGetItemResponse.getType(), equalTo("type"));
+ assertThat(multiGetItemResponse.getId(), equalTo(multiGetRequestBuilder.request().getItems().get(i).id()));
+ assertThat(multiGetItemResponse.getResponse().isExists(), equalTo(true));
+ assertThat(multiGetItemResponse.getResponse().getIndex(), equalTo("test"));
+ assertThat(multiGetItemResponse.getResponse().getType(), equalTo("type"));
+ assertThat(multiGetItemResponse.getResponse().getId(), equalTo(multiGetRequestBuilder.request().getItems().get(i).id()));
+ }
+
+ }
+
+ @Test
+ public void testScroll() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ ensureYellow("test");
+
+ int numDocs = iterations(10, 100);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + Integer.toString(i));
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ int size = randomIntBetween(1, 10);
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setScroll("1m").setSize(size);
+ boolean scan = randomBoolean();
+ if (scan) {
+ searchRequestBuilder.setSearchType(SearchType.SCAN);
+ }
+
+ SearchResponse searchResponse = searchRequestBuilder.get();
+ assertThat(searchResponse.getScrollId(), notNullValue());
+ assertHitCount(searchResponse, numDocs);
+ int hits = 0;
+ if (scan) {
+ assertThat(searchResponse.getHits().getHits().length, equalTo(0));
+ } else {
+ assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
+ hits += searchResponse.getHits().getHits().length;
+ }
+
+ try {
+ do {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("1m").get();
+ assertThat(searchResponse.getScrollId(), notNullValue());
+ assertHitCount(searchResponse, numDocs);
+ hits += searchResponse.getHits().getHits().length;
+ } while (searchResponse.getHits().getHits().length > 0);
+ assertThat(hits, equalTo(numDocs));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+ private void createIndexWithAlias() {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java
new file mode 100644
index 0000000000..ba208eb739
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @Test
+ public void testClusterState() throws Exception {
+ createIndex("test");
+
+ // connect to each node with a custom TransportClient, issue a ClusterStateRequest to test serialization
+ for (NodeInfo n : clusterNodes()) {
+ try (TransportClient tc = newTransportClient()) {
+ tc.addTransportAddress(n.getNode().address());
+ ClusterStateResponse response = tc.admin().cluster().prepareState().execute().actionGet();
+
+ assertThat(response.getState().status(), equalTo(ClusterState.ClusterStateStatus.UNKNOWN));
+ assertNotNull(response.getClusterName());
+ assertTrue(response.getState().getMetaData().hasIndex("test"));
+ }
+ }
+ }
+
+ @Test
+ public void testClusterStateWithBlocks() {
+ createIndex("test-blocks");
+
+ Map<String, ClusterBlock> blocks = new HashMap<>();
+ blocks.put(SETTING_BLOCKS_READ, IndexMetaData.INDEX_READ_BLOCK);
+ blocks.put(SETTING_BLOCKS_WRITE, IndexMetaData.INDEX_WRITE_BLOCK);
+ blocks.put(SETTING_BLOCKS_METADATA, IndexMetaData.INDEX_METADATA_BLOCK);
+
+ for (Map.Entry<String, ClusterBlock> block : blocks.entrySet()) {
+ try {
+ enableIndexBlock("test-blocks", block.getKey());
+
+ for (NodeInfo n : clusterNodes()) {
+ try (TransportClient tc = newTransportClient()) {
+ tc.addTransportAddress(n.getNode().address());
+
+ ClusterStateResponse response = tc.admin().cluster().prepareState().setIndices("test-blocks")
+ .setBlocks(true).setNodes(false).execute().actionGet();
+
+ ClusterBlocks clusterBlocks = response.getState().blocks();
+ assertNotNull(clusterBlocks);
+ assertTrue(clusterBlocks.hasIndexBlock("test-blocks", block.getValue()));
+
+ for (ClusterBlockLevel level : block.getValue().levels()) {
+ assertTrue(clusterBlocks.indexBlocked(level, "test-blocks"));
+ }
+
+ IndexMetaData indexMetaData = response.getState().getMetaData().getIndices().get("test-blocks");
+ assertNotNull(indexMetaData);
+ assertTrue(indexMetaData.settings().getAsBoolean(block.getKey(), null));
+ }
+ }
+ } finally {
+ disableIndexBlock("test-blocks", block.getKey());
+ }
+ }
+ }
+
+ private NodesInfoResponse clusterNodes() {
+ return client().admin().cluster().prepareNodesInfo().execute().actionGet();
+ }
+
+ private TransportClient newTransportClient() {
+ Settings settings = Settings.settingsBuilder().put("client.transport.ignore_cluster_name", true)
+ .put("node.name", "transport_client_" + getTestName()).build();
+ return TransportClient.builder().settings(settings).build();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityTests.java b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityTests.java
new file mode 100644
index 0000000000..5da2c1ff96
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
+import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class GetIndexBackwardsCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @Test
+ public void testGetAliases() throws Exception {
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addAlias(new Alias("testAlias")).execute().actionGet();
+ assertAcked(createIndexResponse);
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.ALIASES)
+ .execute().actionGet();
+ ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesMap = getIndexResponse.aliases();
+ assertThat(aliasesMap, notNullValue());
+ assertThat(aliasesMap.size(), equalTo(1));
+ ImmutableList<AliasMetaData> aliasesList = aliasesMap.get("test");
+ assertThat(aliasesList, notNullValue());
+ assertThat(aliasesList.size(), equalTo(1));
+ AliasMetaData alias = aliasesList.get(0);
+ assertThat(alias, notNullValue());
+ assertThat(alias.alias(), equalTo("testAlias"));
+ }
+
+ @Test
+ public void testGetMappings() throws Exception {
+ CreateIndexResponse createIndexResponse = prepareCreate("test").addMapping("type1", "{\"type1\":{}}").execute().actionGet();
+ assertAcked(createIndexResponse);
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.MAPPINGS)
+ .execute().actionGet();
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = getIndexResponse.mappings();
+ assertThat(mappings, notNullValue());
+ assertThat(mappings.size(), equalTo(1));
+ ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.get("test");
+ assertThat(indexMappings, notNullValue());
+ assertThat(indexMappings.size(), anyOf(equalTo(1), equalTo(2)));
+ if (indexMappings.size() == 2) {
+ MappingMetaData mapping = indexMappings.get("_default_");
+ assertThat(mapping, notNullValue());
+ }
+ MappingMetaData mapping = indexMappings.get("type1");
+ assertThat(mapping, notNullValue());
+ assertThat(mapping.type(), equalTo("type1"));
+ }
+
+ @Test
+ public void testGetSettings() throws Exception {
+ CreateIndexResponse createIndexResponse = prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)).execute().actionGet();
+ assertAcked(createIndexResponse);
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.SETTINGS)
+ .execute().actionGet();
+ ImmutableOpenMap<String, Settings> settingsMap = getIndexResponse.settings();
+ assertThat(settingsMap, notNullValue());
+ assertThat(settingsMap.size(), equalTo(1));
+ Settings settings = settingsMap.get("test");
+ assertThat(settings, notNullValue());
+ assertThat(settings.get("index.number_of_shards"), equalTo("1"));
+ }
+
+ @Test
+ public void testGetWarmers() throws Exception {
+ createIndex("test");
+ ensureSearchable("test");
+ assertAcked(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch("test")).get());
+ ensureSearchable("test");
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS)
+ .execute().actionGet();
+ ImmutableOpenMap<String, ImmutableList<Entry>> warmersMap = getIndexResponse.warmers();
+ assertThat(warmersMap, notNullValue());
+ assertThat(warmersMap.size(), equalTo(1));
+ ImmutableList<Entry> warmersList = warmersMap.get("test");
+ assertThat(warmersList, notNullValue());
+ assertThat(warmersList.size(), equalTo(1));
+ Entry warmer = warmersList.get(0);
+ assertThat(warmer, notNullValue());
+ assertThat(warmer.name(), equalTo("warmer1"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java
new file mode 100644
index 0000000000..5220b6c7cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/NodesStatsBasicBackwardsCompatTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.lang.reflect.Method;
+
+
+@ElasticsearchIntegrationTest.ClusterScope(scope= ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0)
+public class NodesStatsBasicBackwardsCompatTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @Test
+ public void testNodeStatsSetIndices() throws Exception {
+ createIndex("test");
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet();
+
+ Settings settings = Settings.settingsBuilder()
+ .put("client.transport.ignore_cluster_name", true)
+ .put("node.name", "transport_client_" + getTestName()).build();
+
+ // We explicitly connect to each node with a custom TransportClient
+ for (NodeInfo n : nodesInfo.getNodes()) {
+ TransportClient tc = TransportClient.builder().settings(settings).build().addTransportAddress(n.getNode().address());
+ // Just verify that the node stats request can be sent and serialized/deserialized between nodes with basic indices
+ NodesStatsResponse ns = tc.admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ tc.close();
+ }
+ }
+
+ @Test
+ public void testNodeStatsSetRandom() throws Exception {
+ createIndex("test");
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet();
+
+ Settings settings = Settings.settingsBuilder()
+ .put("node.name", "transport_client_" + getTestName())
+ .put("client.transport.ignore_cluster_name", true).build();
+
+ // We explicitly connect to each node with a custom TransportClient
+ for (NodeInfo n : nodesInfo.getNodes()) {
+ TransportClient tc = TransportClient.builder().settings(settings).build().addTransportAddress(n.getNode().address());
+
+ // randomize the combination of flags set
+ // Uses reflection to find methods in an attempt to future-proof this test against newly added flags
+ NodesStatsRequestBuilder nsBuilder = tc.admin().cluster().prepareNodesStats();
+
+ Class c = nsBuilder.getClass();
+ for (Method method : c.getDeclaredMethods()) {
+ if (method.getName().startsWith("set")) {
+ if (method.getParameterTypes().length == 1 && method.getParameterTypes()[0] == boolean.class) {
+ method.invoke(nsBuilder, randomBoolean());
+ }
+ } else if ((method.getName().equals("all") || method.getName().equals("clear")) && randomBoolean()) {
+ method.invoke(nsBuilder);
+ }
+ }
+ NodesStatsResponse ns = nsBuilder.execute().actionGet();
+ tc.close();
+
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java
new file mode 100644
index 0000000000..ac59615f90
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import com.google.common.base.Predicate;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.MultiDataPathUpgrader;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.IndexException;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.rest.action.admin.indices.upgrade.UpgradeTest;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
+import org.hamcrest.Matchers;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.*;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.*;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+// needs at least 2 nodes since it bumps replicas to 1
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+@LuceneTestCase.Slow
+public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegrationTest {
+ // TODO: test for proper exception on unsupported indexes (maybe via separate test?)
+ // We have a 0.20.6.zip etc for this.
+
+ List<String> indexes;
+ List<String> unsupportedIndexes;
+ static Path singleDataPath;
+ static Path[] multiDataPath;
+
+ @Before
+ public void initIndexesList() throws Exception {
+ indexes = loadIndexesList("index");
+ unsupportedIndexes = loadIndexesList("unsupported");
+ }
+
+ private List<String> loadIndexesList(String prefix) throws IOException {
+ List<String> indexes = new ArrayList<>();
+ Path dir = getDataPath(".");
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, prefix + "-*.zip")) {
+ for (Path path : stream) {
+ indexes.add(path.getFileName().toString());
+ }
+ }
+ Collections.sort(indexes);
+ return indexes;
+ }
+
+ @AfterClass
+ public static void tearDownStatics() {
+ singleDataPath = null;
+ multiDataPath = null;
+ }
+
+ @Override
+ public Settings nodeSettings(int ord) {
+ return Settings.builder()
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class) // disable merging so no segments will be upgraded
+ .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files
+ .build();
+ }
+
+ void setupCluster() throws Exception {
+ ListenableFuture<List<String>> replicas = internalCluster().startNodesAsync(1); // for replicas
+
+ Path baseTempDir = createTempDir();
+ // start single data path node
+ Settings.Builder nodeSettings = Settings.builder()
+ .put("path.data", baseTempDir.resolve("single-path").toAbsolutePath())
+ .put("node.master", false); // workaround for dangling index loading issue when node is master
+ ListenableFuture<String> singleDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+
+ // start multi data path node
+ nodeSettings = Settings.builder()
+ .put("path.data", baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir.resolve("multi-path2").toAbsolutePath())
+ .put("node.master", false); // workaround for dangling index loading issue when node is master
+ ListenableFuture<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
+
+ // find single data path dir
+ Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNode.get()).nodeDataPaths();
+ assertEquals(1, nodePaths.length);
+ singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
+ assertFalse(Files.exists(singleDataPath));
+ Files.createDirectories(singleDataPath);
+ logger.info("--> Single data path: " + singleDataPath.toString());
+
+ // find multi data path dirs
+ nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths();
+ assertEquals(2, nodePaths.length);
+ multiDataPath = new Path[] {nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
+ nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)};
+ assertFalse(Files.exists(multiDataPath[0]));
+ assertFalse(Files.exists(multiDataPath[1]));
+ Files.createDirectories(multiDataPath[0]);
+ Files.createDirectories(multiDataPath[1]);
+ logger.info("--> Multi data paths: " + multiDataPath[0].toString() + ", " + multiDataPath[1].toString());
+
+ replicas.get(); // wait for replicas
+ }
+
+ String loadIndex(String indexFile) throws Exception {
+ Path unzipDir = createTempDir();
+ Path unzipDataDir = unzipDir.resolve("data");
+ String indexName = indexFile.replace(".zip", "").toLowerCase(Locale.ROOT).replace("unsupported-", "index-");
+
+ // decompress the index
+ Path backwardsIndex = getDataPath(indexFile);
+ try (InputStream stream = Files.newInputStream(backwardsIndex)) {
+ TestUtil.unzip(stream, unzipDir);
+ }
+
+ // check that the unzipped data contains exactly one cluster directory
+ assertTrue(Files.exists(unzipDataDir));
+ Path[] list = FileSystemUtils.files(unzipDataDir);
+ if (list.length != 1) {
+ throw new IllegalStateException("Backwards index must contain exactly one cluster");
+ }
+
+ // the bwc script packs the indices under this path
+ Path src = list[0].resolve("nodes/0/indices/" + indexName);
+ assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
+
+ if (randomBoolean()) {
+ logger.info("--> injecting index [{}] into single data path", indexName);
+ copyIndex(logger, src, indexName, singleDataPath);
+ } else {
+ logger.info("--> injecting index [{}] into multi data path", indexName);
+ copyIndex(logger, src, indexName, multiDataPath);
+ }
+ return indexName;
+ }
+
+ void importIndex(String indexName) throws IOException {
+ final Iterable<NodeEnvironment> instances = internalCluster().getInstances(NodeEnvironment.class);
+ for (NodeEnvironment nodeEnv : instances) { // upgrade multidata path
+ MultiDataPathUpgrader.upgradeMultiDataPath(nodeEnv, logger);
+ }
+ // force reloading dangling indices with a cluster state republish
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen(indexName);
+ }
+
+ // randomly distribute the files from src over dests paths
+ public static void copyIndex(final ESLogger logger, final Path src, final String indexName, final Path... dests) throws IOException {
+ for (Path dest : dests) {
+ Path indexDir = dest.resolve(indexName);
+ assertFalse(Files.exists(indexDir));
+ Files.createDirectories(indexDir);
+ }
+ Files.walkFileTree(src, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
+ Path relativeDir = src.relativize(dir);
+ for (Path dest : dests) {
+ Path destDir = dest.resolve(indexName).resolve(relativeDir);
+ Files.createDirectories(destDir);
+ }
+ return FileVisitResult.CONTINUE;
+ }
+
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) {
+ // skip lock file, we don't need it
+ logger.trace("Skipping lock file: " + file.toString());
+ return FileVisitResult.CONTINUE;
+ }
+
+ Path relativeFile = src.relativize(file);
+ Path destFile = dests[randomInt(dests.length - 1)].resolve(indexName).resolve(relativeFile);
+ logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString());
+ Files.move(file, destFile);
+ assertFalse(Files.exists(file));
+ assertTrue(Files.exists(destFile));
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+
+ void unloadIndex(String indexName) throws Exception {
+ assertAcked(client().admin().indices().prepareDelete(indexName).get());
+ }
+
+ public void testAllVersionsTested() throws Exception {
+ SortedSet<String> expectedVersions = new TreeSet<>();
+ for (Version v : VersionUtils.allVersions()) {
+ if (v.snapshot()) continue; // snapshots are unreleased, so there is no backcompat yet
+ if (v.onOrBefore(Version.V_0_20_6)) continue; // we can only test back one major lucene version
+ if (v.equals(Version.CURRENT)) continue; // the current version is always compatible with itself
+ expectedVersions.add("index-" + v.toString() + ".zip");
+ }
+
+ for (String index : indexes) {
+ if (expectedVersions.remove(index) == false) {
+ logger.warn("Old indexes tests contain extra index: " + index);
+ }
+ }
+ if (expectedVersions.isEmpty() == false) {
+ StringBuilder msg = new StringBuilder("Old index tests are missing indexes:");
+ for (String expected : expectedVersions) {
+ msg.append("\n" + expected);
+ }
+ fail(msg.toString());
+ }
+ }
+
+ public void testOldIndexes() throws Exception {
+ setupCluster();
+
+ Collections.shuffle(indexes, getRandom());
+ for (String index : indexes) {
+ long startTime = System.currentTimeMillis();
+ logger.info("--> Testing old index " + index);
+ assertOldIndexWorks(index);
+ logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
+ }
+ }
+
+ @Test
+ public void testHandlingOfUnsupportedDanglingIndexes() throws Exception {
+ setupCluster();
+ Collections.shuffle(unsupportedIndexes, getRandom());
+ for (String index : unsupportedIndexes) {
+ assertUnsupportedIndexHandling(index);
+ }
+ }
+
+ /**
+ * Waits for the index to show up in the cluster state in closed state
+ */
+ void ensureClosed(final String index) throws InterruptedException {
+ assertTrue(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ return state.metaData().hasIndex(index) && state.metaData().index(index).getState() == IndexMetaData.State.CLOSE;
+ }
+ }));
+ }
+
+ /**
+ * Checks that the given index cannot be opened due to incompatible version
+ */
+ void assertUnsupportedIndexHandling(String index) throws Exception {
+ long startTime = System.currentTimeMillis();
+ logger.info("--> Testing old index " + index);
+ String indexName = loadIndex(index);
+ // force reloading dangling indices with a cluster state republish
+ client().admin().cluster().prepareReroute().get();
+ ensureClosed(indexName);
+ try {
+ client().admin().indices().prepareOpen(indexName).get();
+ fail("Shouldn't be able to open an old index");
+ } catch (IndexException ex) {
+ assertThat(ex.getMessage(), containsString("cannot open the index due to upgrade failure"));
+ }
+ unloadIndex(indexName);
+ logger.info("--> Done testing " + index + ", took " + ((System.currentTimeMillis() - startTime) / 1000.0) + " seconds");
+ }
+
+ void assertOldIndexWorks(String index) throws Exception {
+ Version version = extractVersion(index);
+ String indexName = loadIndex(index);
+ importIndex(indexName);
+ assertIndexSanity(indexName);
+ assertBasicSearchWorks(indexName);
+ assertBasicAggregationWorks(indexName);
+ assertRealtimeGetWorks(indexName);
+ assertNewReplicasWork(indexName);
+ assertUpgradeWorks(indexName, isLatestLuceneVersion(version));
+ assertDeleteByQueryWorked(indexName, version);
+ unloadIndex(indexName);
+ }
+
+ Version extractVersion(String index) {
+ return Version.fromString(index.substring(index.indexOf('-') + 1, index.lastIndexOf('.')));
+ }
+
+ boolean isLatestLuceneVersion(Version version) {
+ return version.luceneVersion.major == Version.CURRENT.luceneVersion.major &&
+ version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor;
+ }
+
+ void assertIndexSanity(String indexName) {
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(indexName).get();
+ assertEquals(1, getIndexResponse.indices().length);
+ assertEquals(indexName, getIndexResponse.indices()[0]);
+ ensureYellow(indexName);
+ SearchResponse test = client().prepareSearch(indexName).get();
+ assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
+ }
+
+ void assertBasicSearchWorks(String indexName) {
+ logger.info("--> testing basic search");
+ SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery());
+ SearchResponse searchRsp = searchReq.get();
+ ElasticsearchAssertions.assertNoFailures(searchRsp);
+ long numDocs = searchRsp.getHits().getTotalHits();
+ logger.info("Found " + numDocs + " in old index");
+
+ logger.info("--> testing basic search with sort");
+ searchReq.addSort("long_sort", SortOrder.ASC);
+ ElasticsearchAssertions.assertNoFailures(searchReq.get());
+
+ logger.info("--> testing exists filter");
+ searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), QueryBuilders.existsQuery("string")));
+ searchRsp = searchReq.get();
+ ElasticsearchAssertions.assertNoFailures(searchRsp);
+ assertEquals(numDocs, searchRsp.getHits().getTotalHits());
+
+ logger.info("--> testing missing filter");
+ // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache
+ searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), QueryBuilders.missingQuery("long_sort")));
+ searchRsp = searchReq.get();
+ ElasticsearchAssertions.assertNoFailures(searchRsp);
+ assertEquals(0, searchRsp.getHits().getTotalHits());
+ }
+
+ void assertBasicAggregationWorks(String indexName) {
+ // histogram on a long
+ SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field("long_sort").interval(10)).get();
+ ElasticsearchAssertions.assertSearchResponse(searchRsp);
+ Histogram histo = searchRsp.getAggregations().get("histo");
+ assertNotNull(histo);
+ long totalCount = 0;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ totalCount += bucket.getDocCount();
+ }
+ assertEquals(totalCount, searchRsp.getHits().getTotalHits());
+
+ // terms on a boolean
+ searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.terms("bool_terms").field("bool")).get();
+ Terms terms = searchRsp.getAggregations().get("bool_terms");
+ totalCount = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ totalCount += bucket.getDocCount();
+ }
+ assertEquals(totalCount, searchRsp.getHits().getTotalHits());
+ }
+
+ void assertRealtimeGetWorks(String indexName) {
+ assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+ .put("refresh_interval", -1)
+ .build()));
+ SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.matchAllQuery());
+ SearchHit hit = searchReq.get().getHits().getAt(0);
+ String docId = hit.getId();
+ // "foo" is a new field; it does not exist in the generated index
+ client().prepareUpdate(indexName, "doc", docId).setDoc("foo", "bar").get();
+ GetResponse getRsp = client().prepareGet(indexName, "doc", docId).get();
+ Map<String, Object> source = getRsp.getSourceAsMap();
+ assertThat(source, Matchers.hasKey("foo"));
+
+ assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+ .put("refresh_interval", EngineConfig.DEFAULT_REFRESH_INTERVAL)
+ .build()));
+ }
+
+ void assertNewReplicasWork(String indexName) throws Exception {
+ final int numReplicas = 1;
+ final long startTime = System.currentTimeMillis();
+ logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, indexName);
+ assertAcked(client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder()
+ .put("number_of_replicas", numReplicas)
+ ).execute().actionGet());
+ ensureGreen(TimeValue.timeValueMinutes(2), indexName);
+ logger.debug("--> index [{}] is green, took [{}]", indexName, TimeValue.timeValueMillis(System.currentTimeMillis() - startTime));
+ logger.debug("--> recovery status:\n{}", XContentHelper.toString(client().admin().indices().prepareRecoveries(indexName).get()));
+
+ // TODO: do something with the replicas! query? index?
+ }
+
+ // #10067: create-bwc-index.py deleted any doc with long_sort:[10-20]
+ void assertDeleteByQueryWorked(String indexName, Version version) throws Exception {
+ if (version.onOrBefore(Version.V_1_0_0_Beta2)) {
+ // TODO: remove this once #10262 is fixed
+ return;
+ }
+ // these documents are supposed to be deleted by a delete by query operation in the translog
+ SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.queryStringQuery("long_sort:[10 TO 20]"));
+ assertEquals(0, searchReq.get().getHits().getTotalHits());
+ }
+
+ void assertUpgradeWorks(String indexName, boolean alreadyLatest) throws Exception {
+ if (alreadyLatest == false) {
+ UpgradeTest.assertNotUpgraded(client(), indexName);
+ }
+ assertNoFailures(client().admin().indices().prepareUpgrade(indexName).get());
+ UpgradeTest.assertUpgraded(client(), indexName);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesTests.java b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesTests.java
new file mode 100644
index 0000000000..d27b361b16
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.containsString;
+
+public class RecoveryWithUnsupportedIndicesTests extends StaticIndexBackwardCompatibilityTest {
+
+ @Test
+ public void testUpgradeStartClusterOn_0_20_6() throws Exception {
+ String indexName = "unsupported-0.20.6";
+
+ logger.info("Checking static index " + indexName);
+ Settings nodeSettings = prepareBackwardsDataDir(getDataPath(indexName + ".zip"), Node.HTTP_ENABLED, true);
+ try {
+ internalCluster().startNode(nodeSettings);
+ fail();
+ } catch (Exception ex) {
+ assertThat(ex.getMessage(), containsString(" was created before v0.90.0 and wasn't upgraded"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java
new file mode 100644
index 0000000000..3c41dd77c4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatTests.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.snapshots.AbstractSnapshotTests;
+import org.elasticsearch.snapshots.RestoreInfo;
+import org.elasticsearch.snapshots.SnapshotRestoreException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Modifier;
+import java.net.URI;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Locale;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@Slow
+@ClusterScope(scope = Scope.TEST)
+public class RestoreBackwardsCompatTests extends AbstractSnapshotTests {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.repo", reposRoot())
+ .build();
+ }
+
+ @Test
+ public void restoreOldSnapshots() throws Exception {
+ String repo = "test_repo";
+ String snapshot = "test_1";
+ List<String> repoVersions = repoVersions();
+ assertThat(repoVersions.size(), greaterThan(0));
+ for (String version : repoVersions) {
+ createRepo("repo", version, repo);
+ testOldSnapshot(version, repo, snapshot);
+ }
+
+ SortedSet<String> expectedVersions = new TreeSet<>();
+ for (java.lang.reflect.Field field : Version.class.getDeclaredFields()) {
+ if (Modifier.isStatic(field.getModifiers()) && field.getType() == Version.class) {
+ Version v = (Version) field.get(Version.class);
+ if (v.snapshot()) continue;
+ if (v.onOrBefore(Version.V_1_0_0_Beta1)) continue;
+ if (v.equals(Version.CURRENT)) continue;
+
+ expectedVersions.add(v.toString());
+ }
+ }
+
+ for (String repoVersion : repoVersions) {
+ if (expectedVersions.remove(repoVersion) == false) {
+ logger.warn("Old repositories tests contain extra repo: " + repoVersion);
+ }
+ }
+ if (expectedVersions.isEmpty() == false) {
+ StringBuilder msg = new StringBuilder("Old repositories tests are missing versions:");
+ for (String expected : expectedVersions) {
+ msg.append("\n" + expected);
+ }
+ fail(msg.toString());
+ }
+ }
+
+ @Test
+ public void testRestoreUnsupportedSnapshots() throws Exception {
+ String repo = "test_repo";
+ String snapshot = "test_1";
+ List<String> repoVersions = unsupportedRepoVersions();
+ assertThat(repoVersions.size(), greaterThan(0));
+ for (String version : repoVersions) {
+ createRepo("unsupportedrepo", version, repo);
+ assertUnsupportedIndexFailsToRestore(repo, snapshot);
+ }
+ }
+
+ private Path reposRoot() {
+ return getDataPath(".");
+ }
+
+ private List<String> repoVersions() throws Exception {
+ return listRepoVersions("repo");
+ }
+
+ private List<String> unsupportedRepoVersions() throws Exception {
+ return listRepoVersions("unsupportedrepo");
+ }
+
+ private List<String> listRepoVersions(String prefix) throws Exception {
+ List<String> repoVersions = newArrayList();
+ Path repoFiles = reposRoot();
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(repoFiles, prefix + "-*.zip")) {
+ for (Path entry : stream) {
+ String fileName = entry.getFileName().toString();
+ String version = fileName.substring(prefix.length() + 1);
+ version = version.substring(0, version.length() - ".zip".length());
+ repoVersions.add(version);
+ }
+ }
+ return repoVersions;
+ }
+
+ private void createRepo(String prefix, String version, String repo) throws Exception {
+ String repoFile = prefix + "-" + version + ".zip";
+ URI repoFileUri = getClass().getResource(repoFile).toURI();
+ URI repoJarUri = new URI("jar:" + repoFileUri.toString() + "!/repo/");
+ logger.info("--> creating repository [{}] for version [{}]", repo, version);
+ assertAcked(client().admin().cluster().preparePutRepository(repo)
+ .setType("url").setSettings(settingsBuilder()
+ .put("url", repoJarUri.toString())));
+ }
+
+ private void testOldSnapshot(String version, String repo, String snapshot) throws IOException {
+ logger.info("--> restoring snapshot");
+ RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
+ assertThat(response.status(), equalTo(RestStatus.OK));
+ RestoreInfo restoreInfo = response.getRestoreInfo();
+ assertThat(restoreInfo.successfulShards(), greaterThan(0));
+ assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));
+ assertThat(restoreInfo.failedShards(), equalTo(0));
+ String index = restoreInfo.indices().get(0);
+
+ logger.info("--> check search");
+ SearchResponse searchResponse = client().prepareSearch(index).get();
+ assertThat(searchResponse.getHits().totalHits(), greaterThan(1L));
+
+ logger.info("--> check settings");
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "version_attr"), equalTo(version));
+
+ logger.info("--> check templates");
+ IndexTemplateMetaData template = clusterState.getMetaData().templates().get("template_" + version.toLowerCase(Locale.ROOT));
+ assertThat(template, notNullValue());
+ assertThat(template.template(), equalTo("te*"));
+ assertThat(template.settings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), equalTo(1));
+ assertThat(template.mappings().size(), equalTo(1));
+ assertThat(template.mappings().get("type1").string(), equalTo("{\"type1\":{\"_source\":{\"enabled\":false}}}"));
+ if (Version.fromString(version).onOrAfter(Version.V_1_1_0)) {
+ // Support for aliases in templates was added in v1.1.0
+ assertThat(template.aliases().size(), equalTo(3));
+ assertThat(template.aliases().get("alias1"), notNullValue());
+ assertThat(template.aliases().get("alias2").filter().string(), containsString(version));
+ assertThat(template.aliases().get("alias2").indexRouting(), equalTo("kimchy"));
+ assertThat(template.aliases().get("{index}-alias"), notNullValue());
+ }
+
+ logger.info("--> cleanup");
+ cluster().wipeIndices(restoreInfo.indices().toArray(new String[restoreInfo.indices().size()]));
+ cluster().wipeTemplates();
+
+ }
+
+ private void assertUnsupportedIndexFailsToRestore(String repo, String snapshot) throws IOException {
+ logger.info("--> restoring unsupported snapshot");
+ try {
+ client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
+ fail("should have failed to restore");
+ } catch (SnapshotRestoreException ex) {
+ assertThat(ex.getMessage(), containsString("cannot restore index"));
+ assertThat(ex.getMessage(), containsString("because it cannot be upgraded"));
+ }
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityTest.java b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityTest.java
new file mode 100644
index 0000000000..2fcd9572a6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/StaticIndexBackwardCompatibilityTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ * These tests are against static indexes, built from versions of ES that cannot be upgraded without
+ * a full cluster restart (ie no wire format compatibility).
+ */
+@LuceneTestCase.SuppressCodecs("*")
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0)
+public class StaticIndexBackwardCompatibilityTest extends ElasticsearchIntegrationTest {
+
+ public void loadIndex(String index, Object... settings) throws Exception {
+ logger.info("Checking static index " + index);
+ Settings nodeSettings = prepareBackwardsDataDir(getDataPath(index + ".zip"), settings);
+ internalCluster().startNode(nodeSettings);
+ ensureGreen(index);
+ assertIndexSanity(index);
+ }
+
+ private void assertIndexSanity(String index) {
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get();
+ assertEquals(1, getIndexResponse.indices().length);
+ assertEquals(index, getIndexResponse.indices()[0]);
+ ensureYellow(index);
+ SearchResponse test = client().prepareSearch(index).get();
+ assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java
new file mode 100644
index 0000000000..11e01b2c1e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/TransportClientBackwardsCompatibilityTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.CompositeTestCluster;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class TransportClientBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @Test
+ public void testSniffMode() throws ExecutionException, InterruptedException {
+
+ Settings settings = Settings.builder().put(requiredSettings()).put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_sniff_mode").put(ClusterName.SETTING, cluster().getClusterName())
+ .put("client.transport.sniff", true).build();
+
+ CompositeTestCluster compositeTestCluster = backwardsCluster();
+ TransportAddress transportAddress = compositeTestCluster.externalTransportAddress();
+
+ try(TransportClient client = TransportClient.builder().settings(settings).build()) {
+ client.addTransportAddress(transportAddress);
+
+ assertAcked(client.admin().indices().prepareCreate("test"));
+ ensureYellow("test");
+
+ int numDocs = iterations(10, 100);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ String id = "id" + i;
+ indexRequestBuilders[i] = client.prepareIndex("test", "test", id).setSource("field", "value" + i);
+ }
+ indexRandom(false, indexRequestBuilders);
+
+ String randomId = "id" + randomInt(numDocs-1);
+ GetResponse getResponse = client.prepareGet("test", "test", randomId).get();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ refresh();
+
+ SearchResponse searchResponse = client.prepareSearch("test").get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)numDocs));
+
+ int randomDocId = randomInt(numDocs-1);
+ String fieldValue = "value" + randomDocId;
+ String id = "id" + randomDocId;
+ searchResponse = client.prepareSearch("test").setQuery(QueryBuilders.termQuery("field", fieldValue)).get();
+ assertSearchHits(searchResponse, id);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/UnicastBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/bwcompat/UnicastBackwardsCompatibilityTest.java
new file mode 100644
index 0000000000..d8019a64c6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/bwcompat/UnicastBackwardsCompatibilityTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bwcompat;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class UnicastBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("transport.tcp.port", 9380 + nodeOrdinal)
+ .put("discovery.zen.ping.multicast.enabled", false)
+ .put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
+ .build();
+ }
+
+ @Override
+ protected Settings externalNodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.externalNodeSettings(nodeOrdinal))
+ .put("transport.tcp.port", 9390 + nodeOrdinal)
+ .put("discovery.zen.ping.multicast.enabled", false)
+ .put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
+ .build();
+ }
+
+ @Test
+ public void testUnicastDiscovery() {
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().get();
+ assertThat(healthResponse.getNumberOfDataNodes(), equalTo(cluster().numDataNodes()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java
new file mode 100644
index 0000000000..c75e0e6248
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.google.common.base.Throwables;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushAction;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.delete.DeleteAction;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptAction;
+import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptResponse;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportMessage;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase {
+
+ protected static final Settings HEADER_SETTINGS = Settings.builder()
+ .put(Headers.PREFIX + ".key1", "val1")
+ .put(Headers.PREFIX + ".key2", "val 2")
+ .build();
+
+ @SuppressWarnings("unchecked")
+ private static final GenericAction[] ACTIONS = new GenericAction[] {
+ // client actions
+ GetAction.INSTANCE, SearchAction.INSTANCE, DeleteAction.INSTANCE, DeleteIndexedScriptAction.INSTANCE,
+ IndexAction.INSTANCE,
+
+ // cluster admin actions
+ ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, ClusterRerouteAction.INSTANCE,
+
+ // indices admin actions
+ CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE
+ };
+
+ protected ThreadPool threadPool;
+ private Client client;
+
+ @Before
+ public void initClient() {
+ Settings settings = Settings.builder()
+ .put(HEADER_SETTINGS)
+ .put("path.home", createTempDir().toString())
+ .build();
+ threadPool = new ThreadPool("test-" + getTestName());
+ client = buildClient(settings, ACTIONS);
+ }
+
+ @After
+ public void cleanupClient() throws Exception {
+ client.close();
+ terminate(threadPool);
+ }
+
+ protected abstract Client buildClient(Settings headersSettings, GenericAction[] testedActions);
+
+
+ @Test
+ public void testActions() {
+
+ // TODO this is a really shitty way to test it, we need to figure out a way to test all the client methods
+ // without specifying each one (reflection doesn't as each action needs its own special settings, without
+ // them, request validation will fail before the test is executed. (one option is to enable disabling the
+ // validation in the settings??? - ugly and conceptually wrong)
+
+ // choosing arbitrary top level actions to test
+ client.prepareGet("idx", "type", "id").execute().addListener(new AssertingActionListener<GetResponse>(GetAction.NAME));
+ client.prepareSearch().execute().addListener(new AssertingActionListener<SearchResponse>(SearchAction.NAME));
+ client.prepareDelete("idx", "type", "id").execute().addListener(new AssertingActionListener<DeleteResponse>(DeleteAction.NAME));
+ client.prepareDeleteIndexedScript("lang", "id").execute().addListener(new AssertingActionListener<DeleteIndexedScriptResponse>(DeleteIndexedScriptAction.NAME));
+ client.prepareIndex("idx", "type", "id").setSource("source").execute().addListener(new AssertingActionListener<IndexResponse>(IndexAction.NAME));
+
+ // choosing arbitrary cluster admin actions to test
+ client.admin().cluster().prepareClusterStats().execute().addListener(new AssertingActionListener<ClusterStatsResponse>(ClusterStatsAction.NAME));
+ client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute().addListener(new AssertingActionListener<CreateSnapshotResponse>(CreateSnapshotAction.NAME));
+ client.admin().cluster().prepareReroute().execute().addListener(new AssertingActionListener<ClusterRerouteResponse>(ClusterRerouteAction.NAME));
+
+ // choosing arbitrary indices admin actions to test
+ client.admin().indices().prepareCreate("idx").execute().addListener(new AssertingActionListener<CreateIndexResponse>(CreateIndexAction.NAME));
+ client.admin().indices().prepareStats().execute().addListener(new AssertingActionListener<IndicesStatsResponse>(IndicesStatsAction.NAME));
+ client.admin().indices().prepareClearCache("idx1", "idx2").execute().addListener(new AssertingActionListener<ClearIndicesCacheResponse>(ClearIndicesCacheAction.NAME));
+ client.admin().indices().prepareFlush().execute().addListener(new AssertingActionListener<FlushResponse>(FlushAction.NAME));
+ }
+
+ @Test
+ public void testOverideHeader() throws Exception {
+ String key1Val = randomAsciiOfLength(5);
+ Map<String, Object> expected = ImmutableMap.<String, Object>builder()
+ .put("key1", key1Val)
+ .put("key2", "val 2")
+ .build();
+
+ client.prepareGet("idx", "type", "id")
+ .putHeader("key1", key1Val)
+ .execute().addListener(new AssertingActionListener<GetResponse>(GetAction.NAME, expected));
+
+ client.admin().cluster().prepareClusterStats()
+ .putHeader("key1", key1Val)
+ .execute().addListener(new AssertingActionListener<ClusterStatsResponse>(ClusterStatsAction.NAME, expected));
+
+ client.admin().indices().prepareCreate("idx")
+ .putHeader("key1", key1Val)
+ .execute().addListener(new AssertingActionListener<CreateIndexResponse>(CreateIndexAction.NAME, expected));
+ }
+
+ protected static void assertHeaders(Map<String, Object> headers, Map<String, Object> expected) {
+ assertThat(headers, notNullValue());
+ assertThat(headers.size(), is(expected.size()));
+ for (Map.Entry<String, Object> expectedEntry : expected.entrySet()) {
+ assertThat(headers.get(expectedEntry.getKey()), equalTo(expectedEntry.getValue()));
+ }
+ }
+
+ protected static void assertHeaders(TransportMessage<?> message) {
+ assertHeaders(message, HEADER_SETTINGS.getAsSettings(Headers.PREFIX).getAsStructuredMap());
+ }
+
+ protected static void assertHeaders(TransportMessage<?> message, Map<String, Object> expected) {
+ assertThat(message.getHeaders(), notNullValue());
+ assertThat(message.getHeaders().size(), is(expected.size()));
+ for (Map.Entry<String, Object> expectedEntry : expected.entrySet()) {
+ assertThat(message.getHeader(expectedEntry.getKey()), equalTo(expectedEntry.getValue()));
+ }
+ }
+
+ protected static class InternalException extends Exception {
+
+ private final String action;
+ private final Map<String, Object> headers;
+
+ public InternalException(String action, TransportMessage<?> message) {
+ this.action = action;
+ this.headers = new HashMap<>();
+ for (String key : message.getHeaders()) {
+ headers.put(key, message.getHeader(key));
+ }
+ }
+ }
+
+ protected static class AssertingActionListener<T> implements ActionListener<T> {
+
+ private final String action;
+ private final Map<String, Object> expectedHeaders;
+
+ public AssertingActionListener(String action) {
+ this(action, HEADER_SETTINGS.getAsSettings(Headers.PREFIX).getAsStructuredMap());
+ }
+
+ public AssertingActionListener(String action, Map<String, Object> expectedHeaders) {
+ this.action = action;
+ this.expectedHeaders = expectedHeaders;
+ }
+
+ @Override
+ public void onResponse(T t) {
+ fail("an internal exception was expected for action [" + action + "]");
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ Throwable e = unwrap(t, InternalException.class);
+ assertThat("expected action [" + action + "] to throw an internal exception", e, notNullValue());
+ assertThat(action, equalTo(((InternalException) e).action));
+ Map<String, Object> headers = ((InternalException) e).headers;
+ assertHeaders(headers, expectedHeaders);
+ }
+
+ public Throwable unwrap(Throwable t, Class<? extends Throwable> exceptionType) {
+ int counter = 0;
+ Throwable result = t;
+ while (!exceptionType.isInstance(result)) {
+ if (result.getCause() == null) {
+ return null;
+ }
+ if (result.getCause() == result) {
+ return null;
+ }
+ if (counter++ > 10) {
+ // dear god, if we got more than 10 levels down, WTF? just bail
+ fail("Exception cause unwrapping ran for 10 levels: " + Throwables.getStackTraceAsString(t));
+ return null;
+ }
+ result = result.getCause();
+ }
+ return result;
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java
new file mode 100644
index 0000000000..be2df27197
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.node;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableSet;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.client.AbstractClientHeadersTests;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+
+/**
+ *
+ */
+public class NodeClientHeadersTests extends AbstractClientHeadersTests {
+
+ private static final ActionFilters EMPTY_FILTERS = new ActionFilters(ImmutableSet.of());
+
+ @Override
+ protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) {
+ Settings settings = HEADER_SETTINGS;
+ Headers headers = new Headers(settings);
+ Actions actions = new Actions(settings, threadPool, testedActions);
+ return new NodeClient(settings, threadPool, headers, actions);
+ }
+
+ private static class Actions extends HashMap<GenericAction, TransportAction> {
+
+ private Actions(Settings settings, ThreadPool threadPool, GenericAction[] actions) {
+ for (GenericAction action : actions) {
+ put(action, new InternalTransportAction(settings, action.name(), threadPool));
+ }
+ }
+ }
+
+ private static class InternalTransportAction extends TransportAction {
+
+ private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) {
+ super(settings, actionName, threadPool, EMPTY_FILTERS);
+ }
+
+ @Override
+ protected void doExecute(ActionRequest request, ActionListener listener) {
+ listener.onFailure(new InternalException(actionName, request));
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/client/node/NodeClientTests.java b/core/src/test/java/org/elasticsearch/client/node/NodeClientTests.java
new file mode 100644
index 0000000000..265b9184a2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/node/NodeClientTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.node;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class NodeClientTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put(Client.CLIENT_TYPE_SETTING, "anything").build();
+ }
+
+ @Test
+ public void testThatClientTypeSettingCannotBeChanged() {
+ for (Settings settings : internalCluster().getInstances(Settings.class)) {
+ assertThat(settings.get(Client.CLIENT_TYPE_SETTING), is("node"));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java
new file mode 100644
index 0000000000..fcf64e0518
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.atomic.AtomicInteger;
+
+abstract class FailAndRetryMockTransport<Response extends TransportResponse> implements Transport {
+
+ private final Random random;
+
+ private boolean connectMode = true;
+
+ private TransportServiceAdapter transportServiceAdapter;
+
+ private final AtomicInteger connectTransportExceptions = new AtomicInteger();
+ private final AtomicInteger failures = new AtomicInteger();
+ private final AtomicInteger successes = new AtomicInteger();
+ private final Set<DiscoveryNode> triedNodes = new CopyOnWriteArraySet<>();
+
+ FailAndRetryMockTransport(Random random) {
+ this.random = new Random(random.nextLong());
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+
+ //we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info
+ if (connectMode) {
+ TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId);
+ transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.DEFAULT, node));
+ return;
+ }
+
+ //once nodes are connected we'll just return errors for each sendRequest call
+ triedNodes.add(node);
+
+ if (RandomInts.randomInt(random, 100) > 10) {
+ connectTransportExceptions.incrementAndGet();
+ throw new ConnectTransportException(node, "node not available");
+ } else {
+ if (random.nextBoolean()) {
+ failures.incrementAndGet();
+ //throw whatever exception that is not a subclass of ConnectTransportException
+ throw new IllegalStateException();
+ } else {
+ TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId);
+ if (random.nextBoolean()) {
+ successes.incrementAndGet();
+ transportResponseHandler.handleResponse(newResponse());
+ } else {
+ failures.incrementAndGet();
+ transportResponseHandler.handleException(new TransportException("transport exception"));
+ }
+ }
+ }
+ }
+
+ protected abstract Response newResponse();
+
+ public void endConnectMode() {
+ this.connectMode = false;
+ }
+
+ public int connectTransportExceptions() {
+ return connectTransportExceptions.get();
+ }
+
+ public int failures() {
+ return failures.get();
+ }
+
+ public int successes() {
+ return successes.get();
+ }
+
+ public Set<DiscoveryNode> triedNodes() {
+ return triedNodes;
+ }
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter transportServiceAdapter) {
+ this.transportServiceAdapter = transportServiceAdapter;
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return null;
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address) throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return false;
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+
+ }
+
+ @Override
+ public long serverOpen() {
+ return 0;
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return null;
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Transport start() {
+ return null;
+ }
+
+ @Override
+ public Transport stop() {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ @Override
+ public Map<String, BoundTransportAddress> profileBoundAddresses() {
+ return Collections.EMPTY_MAP;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java
new file mode 100644
index 0000000000..d513f5d4a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.GenericAction;
+import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
+import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.AbstractClientHeadersTests;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class TransportClientHeadersTests extends AbstractClientHeadersTests {
+
+ private static final LocalTransportAddress address = new LocalTransportAddress("test");
+
+ @Override
+ protected Client buildClient(Settings headersSettings, GenericAction[] testedActions) {
+ TransportClient client = TransportClient.builder().settings(Settings.builder()
+ .put("client.transport.sniff", false)
+ .put("node.name", "transport_client_" + this.getTestName())
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, InternalTransportService.class.getName())
+ .put(headersSettings)
+ .build()).build();
+
+ client.addTransportAddress(address);
+ return client;
+ }
+
+ @Test
+ public void testWithSniffing() throws Exception {
+ TransportClient client = TransportClient.builder().settings(Settings.builder()
+ .put("client.transport.sniff", true)
+ .put("cluster.name", "cluster1")
+ .put("node.name", "transport_client_" + this.getTestName() + "_1")
+ .put("client.transport.nodes_sampler_interval", "1s")
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, InternalTransportService.class.getName())
+ .put(HEADER_SETTINGS)
+ .put("path.home", createTempDir().toString())
+ .build()).build();
+ try {
+ client.addTransportAddress(address);
+
+ InternalTransportService service = (InternalTransportService) client.injector.getInstance(TransportService.class);
+
+ if (!service.clusterStateLatch.await(5, TimeUnit.SECONDS)) {
+ fail("takes way too long to get the cluster state");
+ }
+
+ assertThat(client.connectedNodes().size(), is(1));
+ assertThat(client.connectedNodes().get(0).getAddress(), is((TransportAddress) address));
+ } finally {
+ client.close();
+ }
+
+ }
+
+ public static class InternalTransportService extends TransportService {
+
+ CountDownLatch clusterStateLatch = new CountDownLatch(1);
+
+ @Inject
+ public InternalTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ super(settings, transport, threadPool);
+ }
+
+ @Override @SuppressWarnings("unchecked")
+ public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request, TransportRequestOptions options, TransportResponseHandler<T> handler) {
+ if (TransportLivenessAction.NAME.equals(action)) {
+ assertHeaders(request);
+ ((TransportResponseHandler<LivenessResponse>) handler).handleResponse(new LivenessResponse(ClusterName.DEFAULT, node));
+ return;
+ }
+ if (ClusterStateAction.NAME.equals(action)) {
+ assertHeaders(request);
+ ClusterName cluster1 = new ClusterName("cluster1");
+ ((TransportResponseHandler<ClusterStateResponse>) handler).handleResponse(new ClusterStateResponse(cluster1, state(cluster1)));
+ clusterStateLatch.countDown();
+ return;
+ }
+
+ handler.handleException(new TransportException("", new InternalException(action, request)));
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ assertThat((LocalTransportAddress) node.getAddress(), equalTo(address));
+ return true;
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ assertThat((LocalTransportAddress) node.getAddress(), equalTo(address));
+ }
+ }
+
+ private static ClusterState state(ClusterName clusterName) {
+ ClusterState.Builder builder = ClusterState.builder(clusterName);
+ builder.nodes(DiscoveryNodes.builder().put(new DiscoveryNode("node_id", address, Version.CURRENT)));
+ return builder.build();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
new file mode 100644
index 0000000000..bbf372b6df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.Closeable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class TransportClientNodesServiceTests extends ElasticsearchTestCase {
+
+ private static class TestIteration implements Closeable {
+ private final ThreadPool threadPool;
+ private final FailAndRetryMockTransport<TestResponse> transport;
+ private final TransportService transportService;
+ private final TransportClientNodesService transportClientNodesService;
+ private final int nodesCount;
+
+ TestIteration() {
+ threadPool = new ThreadPool("transport-client-nodes-service-tests");
+ transport = new FailAndRetryMockTransport<TestResponse>(getRandom()) {
+ @Override
+ protected TestResponse newResponse() {
+ return new TestResponse();
+ }
+ };
+ transportService = new TransportService(Settings.EMPTY, transport, threadPool);
+ transportService.start();
+ transportClientNodesService = new TransportClientNodesService(Settings.EMPTY, ClusterName.DEFAULT, transportService, threadPool, Headers.EMPTY, Version.CURRENT);
+
+ nodesCount = randomIntBetween(1, 10);
+ for (int i = 0; i < nodesCount; i++) {
+ transportClientNodesService.addTransportAddresses(new LocalTransportAddress("node" + i));
+ }
+ transport.endConnectMode();
+ }
+
+ @Override
+ public void close() {
+
+ transportService.stop();
+ transportClientNodesService.close();
+ try {
+ terminate(threadPool);
+ } catch (InterruptedException e) {
+ throw new AssertionError(e);
+ }
+ }
+ }
+
+ @Test
+ public void testListenerFailures() throws InterruptedException {
+
+ int iters = iterations(10, 100);
+ for (int i = 0; i <iters; i++) {
+ try(final TestIteration iteration = new TestIteration()) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicInteger finalFailures = new AtomicInteger();
+ final AtomicReference<Throwable> finalFailure = new AtomicReference<>();
+ final AtomicReference<TestResponse> response = new AtomicReference<>();
+ ActionListener<TestResponse> actionListener = new ActionListener<TestResponse>() {
+ @Override
+ public void onResponse(TestResponse testResponse) {
+ response.set(testResponse);
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ finalFailures.incrementAndGet();
+ finalFailure.set(e);
+ latch.countDown();
+ }
+ };
+
+ final AtomicInteger preSendFailures = new AtomicInteger();
+
+ iteration.transportClientNodesService.execute(new TransportClientNodesService.NodeListenerCallback<TestResponse>() {
+ @Override
+ public void doWithNode(DiscoveryNode node, final ActionListener<TestResponse> retryListener) {
+ if (rarely()) {
+ preSendFailures.incrementAndGet();
+ //throw an exception that is not a subclass of ConnectTransportException
+ throw new IllegalArgumentException();
+ }
+
+ iteration.transportService.sendRequest(node, "action", new TestRequest(), new TransportRequestOptions().withTimeout(50), new BaseTransportResponseHandler<TestResponse>() {
+ @Override
+ public TestResponse newInstance() {
+ return new TestResponse();
+ }
+
+ @Override
+ public void handleResponse(TestResponse response) {
+ retryListener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ retryListener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC;
+ }
+ });
+ }
+ }, actionListener);
+
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ //at most one attempt happens: either a single failure that fails the request straight away, or a single success
+ assertThat(preSendFailures.get() + iteration.transport.failures() + iteration.transport.successes(), lessThanOrEqualTo(1));
+
+ if (iteration.transport.successes() == 1) {
+ assertThat(finalFailures.get(), equalTo(0));
+ assertThat(finalFailure.get(), nullValue());
+ assertThat(response.get(), notNullValue());
+ } else {
+ assertThat(finalFailures.get(), equalTo(1));
+ assertThat(finalFailure.get(), notNullValue());
+ assertThat(response.get(), nullValue());
+ if (preSendFailures.get() == 0 && iteration.transport.failures() == 0) {
+ assertThat(finalFailure.get(), instanceOf(NoNodeAvailableException.class));
+ }
+ }
+
+ assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.nodesCount));
+ assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() + iteration.transport.failures() + iteration.transport.successes()));
+ }
+ }
+ }
+
+ private static class TestRequest extends TransportRequest {
+
+ }
+
+ private static class TestResponse extends TransportResponse {
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java
new file mode 100644
index 0000000000..dec47e5800
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientRetryTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.support.PlainListenableActionFuture;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+@ClusterScope(scope = Scope.TEST, numClientNodes = 0)
+public class TransportClientRetryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRetry() throws IOException, ExecutionException, InterruptedException {
+
+ Iterable<TransportService> instances = internalCluster().getInstances(TransportService.class);
+ TransportAddress[] addresses = new TransportAddress[internalCluster().size()];
+ int i = 0;
+ for (TransportService instance : instances) {
+ addresses[i++] = instance.boundAddress().publishAddress();
+ }
+
+ Settings.Builder builder = settingsBuilder().put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_retry_test")
+ .put("node.mode", InternalTestCluster.nodeMode())
+ .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false)
+ .put(ClusterName.SETTING, internalCluster().getClusterName())
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put("path.home", createTempDir());
+
+ try (TransportClient transportClient = TransportClient.builder().settings(builder.build()).build()) {
+ transportClient.addTransportAddresses(addresses);
+ assertThat(transportClient.connectedNodes().size(), equalTo(internalCluster().size()));
+
+ int size = cluster().size();
+ //kill all nodes one by one, leaving a single master/data node at the end of the loop
+ for (int j = 1; j < size; j++) {
+ internalCluster().stopRandomNode(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return true;
+ }
+ });
+
+ ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest().local(true);
+ ClusterState clusterState;
+ //use both variants of execute method: with and without listener
+ if (randomBoolean()) {
+ clusterState = transportClient.admin().cluster().state(clusterStateRequest).get().getState();
+ } else {
+ PlainListenableActionFuture<ClusterStateResponse> future = new PlainListenableActionFuture<>(transportClient.threadPool());
+ transportClient.admin().cluster().state(clusterStateRequest, future);
+ clusterState = future.get().getState();
+ }
+ assertThat(clusterState.nodes().size(), greaterThanOrEqualTo(size - j));
+ assertThat(transportClient.connectedNodes().size(), greaterThanOrEqualTo(size - j));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
new file mode 100644
index 0000000000..70f96f2ede
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.startsWith;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 1.0)
+public class TransportClientTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testPickingUpChangesInDiscoveryNode() {
+ String nodeName = internalCluster().startNode(Settings.builder().put("node.data", false));
+
+ TransportClient client = (TransportClient) internalCluster().client(nodeName);
+ assertThat(client.connectedNodes().get(0).dataNode(), equalTo(false));
+
+ }
+
+ @Test
+ public void testNodeVersionIsUpdated() {
+ TransportClient client = (TransportClient) internalCluster().client();
+ TransportClientNodesService nodeService = client.nodeService();
+ Node node = nodeBuilder().data(false).settings(Settings.builder()
+ .put(internalCluster().getDefaultSettings())
+ .put("path.home", createTempDir())
+ .put("node.name", "testNodeVersionIsUpdated")
+ .put("http.enabled", false)
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // make sure we get what we set :)
+ .build()).clusterName("foobar").build();
+ node.start();
+ try {
+ TransportAddress transportAddress = node.injector().getInstance(TransportService.class).boundAddress().publishAddress();
+ client.addTransportAddress(transportAddress);
+ assertThat(nodeService.connectedNodes().size(), greaterThanOrEqualTo(1)); // since we force transport clients there has to be one node started that we connect to.
+ for (DiscoveryNode discoveryNode : nodeService.connectedNodes()) { // connected nodes have updated version
+ assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT));
+ }
+
+ for (DiscoveryNode discoveryNode : nodeService.listedNodes()) {
+ assertThat(discoveryNode.id(), startsWith("#transport#-"));
+ assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion()));
+ }
+
+ assertThat(nodeService.filteredNodes().size(), equalTo(1));
+ for (DiscoveryNode discoveryNode : nodeService.filteredNodes()) {
+ assertThat(discoveryNode.getVersion(), equalTo(Version.CURRENT.minimumCompatibilityVersion()));
+ }
+ } finally {
+ node.close();
+ }
+ }
+
+ @Test
+ public void testThatTransportClientSettingIsSet() {
+ TransportClient client = (TransportClient) internalCluster().client();
+ Settings settings = client.injector.getInstance(Settings.class);
+ assertThat(settings.get(Client.CLIENT_TYPE_SETTING), is("transport"));
+ }
+
+ @Test
+ public void testThatTransportClientSettingCannotBeChanged() {
+ Settings baseSettings = settingsBuilder().put(Client.CLIENT_TYPE_SETTING, "anything").put("path.home", createTempDir()).build();
+ try (TransportClient client = TransportClient.builder().settings(baseSettings).build()) {
+ Settings settings = client.injector.getInstance(Settings.class);
+ assertThat(settings.get(Client.CLIENT_TYPE_SETTING), is("transport"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
new file mode 100644
index 0000000000..76e06b6862
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthResponsesTests.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth;
+import org.elasticsearch.action.admin.cluster.health.ClusterShardHealth;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.empty;
+
+public class ClusterHealthResponsesTests extends ElasticsearchTestCase {
+
+
+ private void assertIndexHealth(ClusterIndexHealth indexHealth, ShardCounter counter, IndexMetaData indexMetaData) {
+ assertThat(indexHealth.getStatus(), equalTo(counter.status()));
+ assertThat(indexHealth.getNumberOfShards(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getNumberOfReplicas(), equalTo(indexMetaData.getNumberOfReplicas()));
+ assertThat(indexHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(indexHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(indexHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(indexHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(indexHealth.getShards().size(), equalTo(indexMetaData.getNumberOfShards()));
+ assertThat(indexHealth.getValidationFailures(), empty());
+ int totalShards = 0;
+ for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) {
+ totalShards += shardHealth.getActiveShards() + shardHealth.getInitializingShards() + shardHealth.getUnassignedShards();
+ }
+
+ assertThat(totalShards, equalTo(indexMetaData.getNumberOfShards() * (1 + indexMetaData.getNumberOfReplicas())));
+ }
+
+ protected class ShardCounter {
+ public int active;
+ public int relocating;
+ public int initializing;
+ public int unassigned;
+ public int primaryActive;
+ public int primaryInactive;
+
+ public ClusterHealthStatus status() {
+ if (primaryInactive > 0) {
+ return ClusterHealthStatus.RED;
+ }
+ if (unassigned > 0 || initializing > 0) {
+ return ClusterHealthStatus.YELLOW;
+ }
+ return ClusterHealthStatus.GREEN;
+ }
+
+ public void update(ShardRouting shardRouting) {
+ if (shardRouting.active()) {
+ active++;
+ if (shardRouting.primary()) {
+ primaryActive++;
+ }
+ if (shardRouting.relocating()) {
+ relocating++;
+ }
+ return;
+ }
+
+ if (shardRouting.primary()) {
+ primaryInactive++;
+ }
+ if (shardRouting.initializing()) {
+ initializing++;
+ } else {
+ unassigned++;
+ }
+ }
+ }
+
+ static int node_id = 1;
+
+ private ImmutableShardRouting genShardRouting(String index, int shardId, boolean primary) {
+
+ ShardRoutingState state;
+
+ int i = randomInt(40);
+ if (i > 5) {
+ state = ShardRoutingState.STARTED;
+ } else if (i > 3) {
+ state = ShardRoutingState.RELOCATING;
+ } else if (i > 1) {
+ state = ShardRoutingState.INITIALIZING;
+ } else {
+ state = ShardRoutingState.UNASSIGNED;
+ }
+
+ switch (state) {
+ case UNASSIGNED:
+ return new MutableShardRouting(index, shardId, null, primary, ShardRoutingState.UNASSIGNED, 1);
+ case STARTED:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.STARTED, 1);
+ case INITIALIZING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), primary, ShardRoutingState.INITIALIZING, 1);
+ case RELOCATING:
+ return new MutableShardRouting(index, shardId, "node_" + Integer.toString(node_id++), "node_" + Integer.toString(node_id++), primary, ShardRoutingState.RELOCATING, 1);
+ default:
+ throw new ElasticsearchException("Unknown state: " + state.name());
+ }
+
+ }
+
+ private IndexShardRoutingTable genShardRoutingTable(String index, int shardId, int replicas, ShardCounter counter) {
+ IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(new ShardId(index, shardId), true);
+ ImmutableShardRouting shardRouting = genShardRouting(index, shardId, true);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ for (; replicas > 0; replicas--) {
+ shardRouting = genShardRouting(index, shardId, false);
+ counter.update(shardRouting);
+ builder.addShard(shardRouting);
+ }
+
+ return builder.build();
+ }
+
+ IndexRoutingTable genIndexRoutingTable(IndexMetaData indexMetaData, ShardCounter counter) {
+ IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.index());
+ for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
+ builder.addIndexShard(genShardRoutingTable(indexMetaData.index(), shard, indexMetaData.getNumberOfReplicas(), counter));
+ }
+ return builder.build();
+ }
+
+ @Test
+ public void testClusterIndexHealth() {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ logger.info("index status: {}, expected {}", indexHealth.getStatus(), counter.status());
+ assertIndexHealth(indexHealth, counter, indexMetaData);
+ }
+
+ private void assertClusterHealth(ClusterHealthResponse clusterHealth, ShardCounter counter) {
+ assertThat(clusterHealth.getStatus(), equalTo(counter.status()));
+ assertThat(clusterHealth.getActiveShards(), equalTo(counter.active));
+ assertThat(clusterHealth.getActivePrimaryShards(), equalTo(counter.primaryActive));
+ assertThat(clusterHealth.getInitializingShards(), equalTo(counter.initializing));
+ assertThat(clusterHealth.getRelocatingShards(), equalTo(counter.relocating));
+ assertThat(clusterHealth.getUnassignedShards(), equalTo(counter.unassigned));
+ assertThat(clusterHealth.getValidationFailures(), empty());
+ }
+
+ @Test
+ public void testClusterHealth() throws IOException {
+ ShardCounter counter = new ShardCounter();
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ for (int i = randomInt(4); i >= 0; i--) {
+ int numberOfShards = randomInt(3) + 1;
+ int numberOfReplicas = randomInt(4);
+ IndexMetaData indexMetaData = IndexMetaData.builder("test_" + Integer.toString(i)).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas).build();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ }
+ ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ int pendingTasks = randomIntBetween(0, 200);
+ int inFlight = randomIntBetween(0, 200);
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(IndicesOptions.strictExpand(), (String[]) null), clusterState, pendingTasks, inFlight);
+ logger.info("cluster status: {}, expected {}", clusterHealth.getStatus(), counter.status());
+ clusterHealth = maybeSerialize(clusterHealth);
+ assertClusterHealth(clusterHealth, counter);
+ assertThat(clusterHealth.getNumberOfPendingTasks(), Matchers.equalTo(pendingTasks));
+ assertThat(clusterHealth.getNumberOfInFlightFetch(), Matchers.equalTo(inFlight));
+ }
+
+ ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws IOException {
+ if (randomBoolean()) {
+ BytesStreamOutput out = new BytesStreamOutput();
+ clusterHealth.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ clusterHealth = ClusterHealthResponse.readResponseFrom(in);
+ }
+ return clusterHealth;
+ }
+
+ @Test
+ public void testValidations() throws IOException {
+ IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(2).build();
+ ShardCounter counter = new ShardCounter();
+ IndexRoutingTable indexRoutingTable = genIndexRoutingTable(indexMetaData, counter);
+ indexMetaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(3).build();
+
+ ClusterIndexHealth indexHealth = new ClusterIndexHealth(indexMetaData, indexRoutingTable);
+ assertThat(indexHealth.getValidationFailures(), Matchers.hasSize(2));
+
+ RoutingTable.Builder routingTable = RoutingTable.builder();
+ MetaData.Builder metaData = MetaData.builder();
+ metaData.put(indexMetaData, true);
+ routingTable.add(indexRoutingTable);
+ ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", clusterState.metaData().concreteIndices(IndicesOptions.strictExpand(), (String[]) null), clusterState, 0, 0);
+ clusterHealth = maybeSerialize(clusterHealth);
+ // currently we have no cluster level validation failures as index validation issues are reported per index.
+ assertThat(clusterHealth.getValidationFailures(), Matchers.hasSize(0));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
new file mode 100644
index 0000000000..658da8bde3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterHealthTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterHealthTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void simpleLocalHealthTest() {
+ createIndex("test");
+        ensureGreen(); // master should think it's green now.
+
+ for (String node : internalCluster().getNodeNames()) {
+ // a very high time out, which should never fire due to the local flag
+ ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s");
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(health.isTimedOut(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testHealth() {
+ logger.info("--> running cluster health on an index that does not exists");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> running cluster wide health");
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().isEmpty(), equalTo(true));
+
+ logger.info("--> Creating index test1 with zero replicas");
+ createIndex("test1");
+
+ logger.info("--> running cluster health on an index that does exists");
+ healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("--> running cluster health on an index that does exists and an index that doesn't exists");
+ healthResponse = client().admin().cluster().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(true));
+ assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED));
+ assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(healthResponse.getIndices().size(), equalTo(1));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceTests.java
new file mode 100644
index 0000000000..87cadc4331
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceTests.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionModule;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.support.ActionFilter;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ * Integration tests for the ClusterInfoService collecting information
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class ClusterInfoServiceTests extends ElasticsearchIntegrationTest {
+
+ public static class Plugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "ClusterInfoServiceTests";
+ }
+
+ @Override
+ public String description() {
+ return "ClusterInfoServiceTests";
+ }
+
+ public void onModule(ActionModule module) {
+ module.registerFilter(BlockingActionFilter.class);
+ }
+ }
+
+ public static class BlockingActionFilter extends org.elasticsearch.action.support.ActionFilter.Simple {
+
+ ImmutableSet<String> blockedActions = ImmutableSet.of();
+
+ @Inject
+ public BlockingActionFilter(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ protected boolean apply(String action, ActionRequest request, ActionListener listener) {
+ if (blockedActions.contains(action)) {
+ throw new ElasticsearchException("force exception on [" + action + "]");
+ }
+ return true;
+ }
+
+ @Override
+ protected boolean apply(String action, ActionResponse response, ActionListener listener) {
+ return true;
+ }
+
+ @Override
+ public int order() {
+ return 0;
+ }
+
+ public void blockActions(String... actions) {
+ blockedActions = ImmutableSet.copyOf(actions);
+ }
+ }
+
+ static class InfoListener implements ClusterInfoService.Listener {
+ final AtomicReference<CountDownLatch> collected = new AtomicReference<>(new CountDownLatch(1));
+ volatile ClusterInfo lastInfo = null;
+
+ @Override
+ public void onNewInfo(ClusterInfo info) {
+ lastInfo = info;
+ CountDownLatch latch = collected.get();
+ latch.countDown();
+ }
+
+ public void reset() {
+ lastInfo = null;
+ collected.set(new CountDownLatch(1));
+ }
+
+ public ClusterInfo get() throws InterruptedException {
+ CountDownLatch latch = collected.get();
+ if (!latch.await(10, TimeUnit.SECONDS)) {
+ fail("failed to get a new cluster info");
+ }
+ return lastInfo;
+ }
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ // manual collection or upon cluster forming.
+ .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, "1s")
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .put("plugin.types", Plugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testClusterInfoServiceCollectsInformation() throws Exception {
+ internalCluster().startNodesAsync(2,
+ Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "200ms").build())
+ .get();
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0).build()));
+ ensureGreen("test");
+ InternalTestCluster internalTestCluster = internalCluster();
+ // Get the cluster info service on the master node
+ final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName());
+ InfoListener listener = new InfoListener();
+ infoService.addListener(listener);
+ ClusterInfo info = listener.get();
+ assertNotNull("info should not be null", info);
+ Map<String, DiskUsage> usages = info.getNodeDiskUsages();
+ Map<String, Long> shardSizes = info.getShardSizes();
+ assertNotNull(usages);
+ assertNotNull(shardSizes);
+ assertThat("some usages are populated", usages.values().size(), Matchers.equalTo(2));
+ assertThat("some shard sizes are populated", shardSizes.values().size(), greaterThan(0));
+ for (DiskUsage usage : usages.values()) {
+ logger.info("--> usage: {}", usage);
+ assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L));
+ }
+ for (Long size : shardSizes.values()) {
+ logger.info("--> shard size: {}", size);
+ assertThat("shard size is greater than 0", size, greaterThan(0L));
+ }
+ }
+
+ @Test
+ public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException {
+ internalCluster().startNodesAsync(2,
+ // manually control publishing
+ Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "60m").build())
+ .get();
+ prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
+ ensureGreen("test");
+ InternalTestCluster internalTestCluster = internalCluster();
+ InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName());
+ InfoListener listener = new InfoListener();
+ infoService.addListener(listener);
+
+ // get one healthy sample
+ infoService.updateOnce();
+ ClusterInfo info = listener.get();
+ assertNotNull("failed to collect info", info);
+ assertThat("some usages are populated", info.getNodeDiskUsages().size(), Matchers.equalTo(2));
+ assertThat("some shard sizes are populated", info.getShardSizes().size(), greaterThan(0));
+
+
+ MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, internalTestCluster.getMasterName());
+
+ final AtomicBoolean timeout = new AtomicBoolean(false);
+ final Set<String> blockedActions = ImmutableSet.of(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[s]");
+ // drop all outgoing stats requests to force a timeout.
+ for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) {
+ mockTransportService.addDelegate(node, new MockTransportService.DelegateTransport(mockTransportService.original()) {
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request,
+ TransportRequestOptions options) throws IOException, TransportException {
+ if (blockedActions.contains(action)) {
+ if (timeout.get()) {
+ logger.info("dropping [{}] to [{}]", action, node);
+ return;
+ }
+ }
+ super.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ // timeouts shouldn't clear the info
+ timeout.set(true);
+ listener.reset();
+ infoService.updateOnce();
+ info = listener.get();
+ assertNotNull("info should not be null", info);
+        // node info will time out both on the request level and on the count down latch. this means
+        // it is likely to update the node disk usage based on the one response that came back from the
+        // local node.
+ assertThat(info.getNodeDiskUsages().size(), greaterThanOrEqualTo(1));
+ // indices is guaranteed to time out on the latch, not updating anything.
+ assertThat(info.getShardSizes().size(), greaterThan(1));
+
+ // now we cause an exception
+ timeout.set(false);
+ ActionFilters actionFilters = internalTestCluster.getInstance(ActionFilters.class, internalTestCluster.getMasterName());
+ BlockingActionFilter blockingActionFilter = null;
+ for (ActionFilter filter : actionFilters.filters()) {
+ if (filter instanceof BlockingActionFilter) {
+ blockingActionFilter = (BlockingActionFilter) filter;
+ break;
+ }
+ }
+
+ assertNotNull("failed to find BlockingActionFilter", blockingActionFilter);
+ blockingActionFilter.blockActions(blockedActions.toArray(Strings.EMPTY_ARRAY));
+ listener.reset();
+ infoService.updateOnce();
+ info = listener.get();
+ assertNotNull("info should not be null", info);
+ assertThat(info.getNodeDiskUsages().size(), equalTo(0));
+ assertThat(info.getShardSizes().size(), equalTo(0));
+
+ // check we recover
+ blockingActionFilter.blockActions();
+ listener.reset();
+ infoService.updateOnce();
+ info = listener.get();
+ assertNotNull("info should not be null", info);
+ assertThat(info.getNodeDiskUsages().size(), equalTo(2));
+ assertThat(info.getShardSizes().size(), greaterThan(0));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
new file mode 100644
index 0000000000..c60aaeffb6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java
@@ -0,0 +1,1049 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+import com.google.common.util.concurrent.ListenableFuture;
+
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.InternalClusterService;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Singleton;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.MockLogAppender;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class ClusterServiceTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testTimeoutUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ internalCluster().startNode(settings);
+ ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
+ final CountDownLatch block = new CountDownLatch(1);
+ clusterService1.submitStateUpdateTask("test1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ try {
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ clusterService1.submitStateUpdateTask("test2", new TimeoutClusterStateUpdateTask() {
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueMillis(2);
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ timedOut.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ executeCalled.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+ });
+
+ assertThat(timedOut.await(500, TimeUnit.MILLISECONDS), equalTo(true));
+ block.countDown();
+ Thread.sleep(100); // sleep a bit to double check that execute on the timed out update task is not called...
+ assertThat(executeCalled.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ internalCluster().startNode(settings);
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
+ @Override
+ protected Void newResponse(boolean acknowledged) {
+ return null;
+ }
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return true;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ ensureGreen();
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testAckedUpdateTaskSameClusterState() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ internalCluster().startNode(settings);
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
+ @Override
+ protected Void newResponse(boolean acknowledged) {
+ return null;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ ensureGreen();
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testMasterAwareExecution() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+
+ ListenableFuture<String> master = internalCluster().startNodeAsync(settings);
+ ListenableFuture<String> nonMaster = internalCluster().startNodeAsync(settingsBuilder().put(settings).put("node.master", false).build());
+ master.get();
+ ensureGreen(); // make sure we have a cluster
+
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nonMaster.get());
+
+ final boolean[] taskFailed = {false};
+ final CountDownLatch latch1 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ latch1.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ taskFailed[0] = true;
+ latch1.countDown();
+ }
+ });
+
+ latch1.await();
+ assertTrue("cluster state update task was executed on a non-master", taskFailed[0]);
+
+ taskFailed[0] = true;
+ final CountDownLatch latch2 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new ClusterStateNonMasterUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ taskFailed[0] = false;
+ latch2.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ taskFailed[0] = true;
+ latch2.countDown();
+ }
+ });
+ latch2.await();
+ assertFalse("non-master cluster state update task was not executed", taskFailed[0]);
+ }
+
+ @Test
+ public void testAckedUpdateTaskNoAckExpected() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ internalCluster().startNode(settings);
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
+ @Override
+ protected Void newResponse(boolean acknowledged) {
+ return null;
+ }
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ ensureGreen();
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(true));
+ assertThat(ackTimeout.get(), equalTo(false));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+ }
+
+ @Test
+ public void testAckedUpdateTaskTimeoutZero() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ internalCluster().startNode(settings);
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+
+ final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
+ final AtomicBoolean ackTimeout = new AtomicBoolean(false);
+ final AtomicBoolean onFailure = new AtomicBoolean(false);
+ final AtomicBoolean executed = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch processedLatch = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
+ @Override
+ protected Void newResponse(boolean acknowledged) {
+ return null;
+ }
+
+ @Override
+ public boolean mustAck(DiscoveryNode discoveryNode) {
+ return false;
+ }
+
+ @Override
+ public void onAllNodesAcked(@Nullable Throwable t) {
+ allNodesAcked.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public void onAckTimeout() {
+ ackTimeout.set(true);
+ latch.countDown();
+ }
+
+ @Override
+ public TimeValue ackTimeout() {
+ return TimeValue.timeValueSeconds(0);
+ }
+
+ @Override
+ public TimeValue timeout() {
+ return TimeValue.timeValueSeconds(10);
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ processedLatch.countDown();
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ executed.set(true);
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("failed to execute callback in test {}", t, source);
+ onFailure.set(true);
+ latch.countDown();
+ }
+ });
+
+ ensureGreen();
+ assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+
+ assertThat(allNodesAcked.get(), equalTo(false));
+ assertThat(ackTimeout.get(), equalTo(true));
+ assertThat(executed.get(), equalTo(true));
+ assertThat(onFailure.get(), equalTo(false));
+
+ assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testPendingUpdateTask() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "local")
+ .build();
+ String node_0 = internalCluster().startNode(settings);
+ internalCluster().startNodeClient(settings);
+
+ final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node_0);
+ final CountDownLatch block1 = new CountDownLatch(1);
+ final CountDownLatch invoked1 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked1.countDown();
+ try {
+ block1.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked1.countDown();
+ fail();
+ }
+ });
+ invoked1.await();
+ final CountDownLatch invoked2 = new CountDownLatch(9);
+ for (int i = 2; i <= 10; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ invoked2.countDown();
+ }
+ });
+ }
+
+ // there might be other tasks in this node, make sure to only take the ones we add into account in this test
+
+ // The tasks can be re-ordered, so we need to check out-of-order
+ Set<String> controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10));
+ assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1"));
+ assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ controlSources.remove(task.getSource().string());
+ }
+ assertTrue(controlSources.isEmpty());
+
+ controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
+ PendingClusterTasksResponse response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+ assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10));
+ assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1"));
+ assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true));
+ for (PendingClusterTask task : response) {
+ controlSources.remove(task.getSource().string());
+ }
+ assertTrue(controlSources.isEmpty());
+ block1.countDown();
+ invoked2.await();
+
+ // whenever we test for no tasks, we need to awaitBusy since this is a live node
+ assertTrue(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return clusterService.pendingTasks().isEmpty();
+ }
+ }));
+ waitNoPendingTasksOnAll();
+
+ final CountDownLatch block2 = new CountDownLatch(1);
+ final CountDownLatch invoked3 = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("1", new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ invoked3.countDown();
+ try {
+ block2.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ invoked3.countDown();
+ fail();
+ }
+ });
+ invoked3.await();
+
+ for (int i = 2; i <= 5; i++) {
+ clusterService.submitStateUpdateTask(Integer.toString(i), new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+ });
+ }
+ Thread.sleep(100);
+
+ pendingClusterTasks = clusterService.pendingTasks();
+ assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5));
+ controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
+ for (PendingClusterTask task : pendingClusterTasks) {
+ controlSources.remove(task.getSource().string());
+ }
+ assertTrue(controlSources.isEmpty());
+
+ response = internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get();
+ assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5));
+ controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
+ for (PendingClusterTask task : response) {
+ if (controlSources.remove(task.getSource().string())) {
+ assertThat(task.getTimeInQueueInMillis(), greaterThan(0l));
+ }
+ }
+ assertTrue(controlSources.isEmpty());
+ block2.countDown();
+ }
+
+    @Test @Slow
+    public void testLocalNodeMasterListenerCallbacks() throws Exception {
+        // Verifies that LocalNodeMasterListener#onMaster/#offMaster fire as mastership moves
+        // between nodes, observed through MasterAwareService (registered via TestPlugin).
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "zen")
+                .put("discovery.zen.minimum_master_nodes", 1)
+                .put("discovery.zen.ping_timeout", "400ms")
+                .put("discovery.initial_state_timeout", "500ms")
+                .put("plugin.types", TestPlugin.class.getName())
+                .build();
+
+        String node_0 = internalCluster().startNode(settings);
+        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+        MasterAwareService testService = internalCluster().getInstance(MasterAwareService.class);
+
+        ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+        // the first node should be a master as the minimum required is 1
+        assertThat(clusterService.state().nodes().masterNode(), notNullValue());
+        assertThat(clusterService.state().nodes().localNodeMaster(), is(true));
+        assertThat(testService.master(), is(true));
+
+        String node_1 = internalCluster().startNode(settings);
+        final ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class, node_1);
+        MasterAwareService testService1 = internalCluster().getInstance(MasterAwareService.class, node_1);
+
+        clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+        // the second node should not be the master as node1 is already the master.
+        assertThat(clusterService1.state().nodes().localNodeMaster(), is(false));
+        assertThat(testService1.master(), is(false));
+
+        internalCluster().stopCurrentMasterNode();
+        clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("1").get();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+        // now that node0 is closed, node1 should be elected as master
+        assertThat(clusterService1.state().nodes().localNodeMaster(), is(true));
+        assertThat(testService1.master(), is(true));
+
+        // start another node and set min_master_node
+        internalCluster().startNode(Settings.builder().put(settings));
+        assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
+
+        Settings transientSettings = settingsBuilder()
+                .put("discovery.zen.minimum_master_nodes", 2)
+                .build();
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(transientSettings).get();
+
+        // and shutdown the second node
+        internalCluster().stopRandomNonMasterNode();
+
+        // there should not be any master as the minimum number of required eligible masters is not met
+        // FIX: the awaitBusy result was previously ignored, so the assertion below could race
+        // against the cluster state still being applied; assert that the wait actually succeeded.
+        assertTrue(awaitBusy(new Predicate<Object>() {
+            @Override
+            public boolean apply(Object obj) {
+                return clusterService1.state().nodes().masterNode() == null && clusterService1.state().status() == ClusterState.ClusterStateStatus.APPLIED;
+            }
+        }));
+        assertThat(testService1.master(), is(false));
+
+        // bring the node back up
+        String node_2 = internalCluster().startNode(Settings.builder().put(settings).put(transientSettings));
+        ClusterService clusterService2 = internalCluster().getInstance(ClusterService.class, node_2);
+        MasterAwareService testService2 = internalCluster().getInstance(MasterAwareService.class, node_2);
+
+        // make sure both nodes see each other, otherwise the master node below could be null if
+        // node 2 is master and node 1 didn't receive the updated cluster state yet...
+        assertThat(internalCluster().client(node_1).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").get().isTimedOut(), is(false));
+        assertThat(internalCluster().client(node_2).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).setWaitForNodes("2").get().isTimedOut(), is(false));
+
+        // now that we started node1 again, a new master should be elected
+        assertThat(clusterService2.state().nodes().masterNode(), is(notNullValue()));
+        if (node_2.equals(clusterService2.state().nodes().masterNode().name())) {
+            assertThat(testService1.master(), is(false));
+            assertThat(testService2.master(), is(true));
+        } else {
+            assertThat(testService1.master(), is(true));
+            assertThat(testService2.master(), is(false));
+        }
+    }
+
+    /**
+     * Note, this test can only work as long as we have a single thread executor executing the state update tasks!
+     */
+    @Test
+    public void testPrioritizedTasks() throws Exception {
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "local")
+                .build();
+        internalCluster().startNode(settings);
+        ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+        // Block the single update thread so the randomized tasks queue up and are ordered by priority.
+        BlockingTask block = new BlockingTask();
+        clusterService.submitStateUpdateTask("test", Priority.IMMEDIATE, block);
+        int taskCount = randomIntBetween(5, 20);
+        Priority[] priorities = Priority.values();
+
+        // will hold all the tasks in the order in which they were executed
+        List<PrioritiezedTask> tasks = new ArrayList<>(taskCount);
+        CountDownLatch latch = new CountDownLatch(taskCount);
+        for (int i = 0; i < taskCount; i++) {
+            Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];
+            clusterService.submitStateUpdateTask("test", priority, new PrioritiezedTask(priority, latch, tasks));
+        }
+
+        block.release();
+        latch.await();
+
+        // Verify the execution order is non-decreasing in priority.
+        // FIX: prevPriority was previously assigned only on the first iteration and never
+        // advanced, so every later task was compared against the FIRST task's priority
+        // instead of its immediate predecessor's.
+        Priority prevPriority = null;
+        for (PrioritiezedTask task : tasks) {
+            if (prevPriority != null) {
+                assertThat(task.priority.sameOrAfter(prevPriority), is(true));
+            }
+            prevPriority = task.priority;
+        }
+    }
+
+    @Test
+    @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
+    public void testClusterStateUpdateLogging() throws Exception {
+        // Verifies the log messages the cluster service emits for three kinds of update:
+        // a no-op update (test1, DEBUG "no change"), a failing update (test2, TRACE "failed to
+        // execute"), and a state-changing update (test3, DEBUG "done applying"). The messages
+        // are captured by a mock appender attached to the root logger.
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "local")
+                .build();
+        internalCluster().startNode(settings);
+        ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
+        MockLogAppender mockAppender = new MockLogAppender();
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)"));
+
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.addAppender(mockAppender);
+        try {
+            final CountDownLatch latch = new CountDownLatch(4);
+            // test1: returns the same state instance -> logged as "no change in cluster_state"
+            clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            // test2: throws from execute() -> logged as "failed to execute cluster state update"
+            clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    fail();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    latch.countDown();
+                }
+            });
+            // test3: produces a new (version-incremented) state -> logged as "done applying"
+            clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return ClusterState.builder(currentState).incrementVersion().build();
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            // Additional update task to make sure all previous logging made it to the logger
+            // We don't check logging for this one since there is no guarantee that it will occur before our check
+            clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
+        } finally {
+            // Always detach the appender, even on assertion failure.
+            rootLogger.removeAppender(mockAppender);
+        }
+        mockAppender.assertAllExpectationsMatched();
+    }
+
+    @Test
+    @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level
+    public void testLongClusterStateUpdateLogging() throws Exception {
+        // Verifies the slow-task WARN logging: with a 10s threshold a 100ms task (test1) must
+        // NOT warn; after dynamically lowering the threshold to 10ms, the 100ms tasks
+        // (test2..test4) must each warn — whether they fail, change state, or are no-ops.
+        Settings settings = settingsBuilder()
+                .put("discovery.type", "local")
+                .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s")
+                .build();
+        internalCluster().startNode(settings);
+        ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class);
+        MockLogAppender mockAppender = new MockLogAppender();
+        mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state update task [test3] took * above the warn threshold of 10ms"));
+        mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms"));
+
+        Logger rootLogger = Logger.getRootLogger();
+        rootLogger.addAppender(mockAppender);
+        try {
+            final CountDownLatch latch = new CountDownLatch(5);
+            final CountDownLatch processedFirstTask = new CountDownLatch(1);
+            clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                    processedFirstTask.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+
+            // FIX: the boolean result of await() was previously ignored; a timeout would have
+            // silently lowered the threshold while test1 was still running, breaking the
+            // "test1 must not warn" expectation. Fail fast instead.
+            assertTrue(processedFirstTask.await(1, TimeUnit.SECONDS));
+            assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+                    .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms")));
+
+            clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    fail();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    latch.countDown();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return ClusterState.builder(currentState).incrementVersion().build();
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) throws Exception {
+                    Thread.sleep(100);
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            // Additional update task to make sure all previous logging made it to the logger
+            // We don't check logging for this one since there is no guarantee that it will occur before our check
+            clusterService1.submitStateUpdateTask("test5", new ProcessedClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    return currentState;
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    latch.countDown();
+                }
+
+                @Override
+                public void onFailure(String source, Throwable t) {
+                    fail();
+                }
+            });
+            assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
+        } finally {
+            // Always detach the appender, even on assertion failure.
+            rootLogger.removeAppender(mockAppender);
+        }
+        mockAppender.assertAllExpectationsMatched();
+    }
+
+    /**
+     * Cluster state task that parks the (single) update thread until {@link #release()}
+     * is invoked, letting tests queue further tasks behind it.
+     */
+    private static class BlockingTask extends ClusterStateUpdateTask {
+        private final CountDownLatch barrier = new CountDownLatch(1);
+
+        @Override
+        public ClusterState execute(ClusterState currentState) throws Exception {
+            // Hold the update thread here; the state itself is left untouched.
+            barrier.await();
+            return currentState;
+        }
+
+        @Override
+        public void onFailure(String source, Throwable t) {
+            // Intentionally a no-op: failures are irrelevant for this blocking fixture.
+        }
+
+        public void release() {
+            barrier.countDown();
+        }
+    }
+
+    /**
+     * No-op cluster state task that records itself into a shared list when executed,
+     * so a test can inspect the execution order by priority.
+     * (The misspelled class name is kept as-is; it is referenced elsewhere in this file.)
+     */
+    private static class PrioritiezedTask extends ClusterStateUpdateTask {
+
+        private final Priority priority;
+        private final CountDownLatch latch;
+        private final List<PrioritiezedTask> tasks;
+
+        private PrioritiezedTask(Priority taskPriority, CountDownLatch doneLatch, List<PrioritiezedTask> executionOrder) {
+            this.priority = taskPriority;
+            this.latch = doneLatch;
+            this.tasks = executionOrder;
+        }
+
+        @Override
+        public ClusterState execute(ClusterState currentState) throws Exception {
+            // Record execution order, signal completion, leave the state unchanged.
+            tasks.add(this);
+            latch.countDown();
+            return currentState;
+        }
+
+        @Override
+        public void onFailure(String source, Throwable t) {
+            // Still count down so the test's await() cannot hang on a failed task.
+            latch.countDown();
+        }
+    }
+
+    /** Plugin that registers {@link MasterAwareService} as a node lifecycle service. */
+    public static class TestPlugin extends AbstractPlugin {
+
+        @Override
+        public String name() {
+            return "test plugin";
+        }
+
+        @Override
+        public String description() {
+            return "test plugin";
+        }
+
+        @Override
+        public Collection<Class<? extends LifecycleComponent>> services() {
+            // Only one service: the node will instantiate MasterAwareService on startup.
+            List<Class<? extends LifecycleComponent>> serviceClasses = new ArrayList<>(1);
+            serviceClasses.add(MasterAwareService.class);
+            return serviceClasses;
+        }
+    }
+
+    /**
+     * Lifecycle service that registers itself as a {@link LocalNodeMasterListener} and
+     * exposes whether the local node currently believes it is the master.
+     */
+    @Singleton
+    public static class MasterAwareService extends AbstractLifecycleComponent<MasterAwareService> implements LocalNodeMasterListener {
+
+        private final ClusterService clusterService;
+        // Flipped by onMaster()/offMaster(); volatile because the callbacks run on cluster
+        // service threads while tests read the flag from the test thread.
+        private volatile boolean master;
+
+        @Inject
+        public MasterAwareService(Settings settings, ClusterService clusterService) {
+            super(settings);
+            // Register for master change notifications before anything else.
+            clusterService.add(this);
+            this.clusterService = clusterService;
+            logger.info("initialized test service");
+        }
+
+        @Override
+        public void onMaster() {
+            logger.info("on master [" + clusterService.localNode() + "]");
+            master = true;
+        }
+
+        @Override
+        public void offMaster() {
+            logger.info("off master [" + clusterService.localNode() + "]");
+            master = false;
+        }
+
+        // True while the local node is the elected master.
+        public boolean master() {
+            return master;
+        }
+
+        @Override
+        protected void doStart() {
+        }
+
+        @Override
+        protected void doStop() {
+        }
+
+        @Override
+        protected void doClose() {
+        }
+
+        @Override
+        public String executorName() {
+            // Run master callbacks on the calling thread; no thread hop needed for a flag flip.
+            return ThreadPool.Names.SAME;
+        }
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java
new file mode 100644
index 0000000000..b66c235d83
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java
@@ -0,0 +1,624 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.DiscoveryNodesProvider;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportConnectionListener;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.Matchers.*;
+
+public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase {
+
+ protected ThreadPool threadPool;
+ protected Map<String, MockNode> nodes = newHashMap();
+
+    /** Bundles everything a simulated node needs: identity, transport, publish action and node view. */
+    public static class MockNode {
+        public final DiscoveryNode discoveryNode;
+        public final MockTransportService service;
+        public final PublishClusterStateAction action;
+        public final MockDiscoveryNodesProvider nodesProvider;
+
+        public MockNode(DiscoveryNode node, MockTransportService transportService, PublishClusterStateAction publishAction, MockDiscoveryNodesProvider provider) {
+            this.discoveryNode = node;
+            this.service = transportService;
+            this.action = publishAction;
+            this.nodesProvider = provider;
+        }
+
+        public void connectTo(DiscoveryNode other) {
+            // Open the transport connection first, then make the node visible in our provider.
+            service.connectToNode(other);
+            nodesProvider.addNode(other);
+        }
+    }
+
+ public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception {
+ return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid());
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+ });
+ }
+
+    /**
+     * Creates a new simulated node, fully cross-connects it with every node created so far,
+     * waits for all connections to be established, and registers it under {@code name}.
+     */
+    public MockNode createMockNode(String name, Settings settings, Version version, PublishClusterStateAction.NewClusterStateListener listener) throws Exception {
+        // NOTE(review): this uses the key/value varargs form of Settings.Builder#put to set the
+        // node name plus transport trace-log include/exclude in one call — confirm against the
+        // Settings.Builder API of this branch.
+        MockTransportService service = buildTransportService(
+                Settings.builder().put(settings).put("name", name, TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(),
+                version
+        );
+        DiscoveryNode discoveryNode = new DiscoveryNode(name, name, service.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version);
+        MockDiscoveryNodesProvider nodesProvider = new MockDiscoveryNodesProvider(discoveryNode);
+        PublishClusterStateAction action = buildPublishClusterStateAction(settings, service, nodesProvider, listener);
+        MockNode node = new MockNode(discoveryNode, service, action, nodesProvider);
+        nodesProvider.addNode(discoveryNode);
+        // Each existing node connects to the new node and vice versa (2 events per existing
+        // node), plus the new node's self-connection: hence nodes.size() * 2 + 1.
+        final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1);
+        TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+            @Override
+            public void onNodeConnected(DiscoveryNode node) {
+                latch.countDown();
+            }
+
+            @Override
+            public void onNodeDisconnected(DiscoveryNode node) {
+                fail("disconnect should not be called " + node);
+            }
+        };
+        node.service.addConnectionListener(waitForConnection);
+        for (MockNode curNode : nodes.values()) {
+            curNode.service.addConnectionListener(waitForConnection);
+            curNode.connectTo(node.discoveryNode);
+            node.connectTo(curNode.discoveryNode);
+        }
+        node.connectTo(node.discoveryNode);
+        assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+        // Listeners were only needed for the latch; detach them everywhere.
+        for (MockNode curNode : nodes.values()) {
+            curNode.service.removeConnectionListener(waitForConnection);
+        }
+        node.service.removeConnectionListener(waitForConnection);
+        if (nodes.put(name, node) != null) {
+            fail("Node with the name " + name + " already exist");
+        }
+        return node;
+    }
+
+    /** Transport service of the node registered under {@code name}, or {@code null} if unknown. */
+    public MockTransportService service(String name) {
+        MockNode node = nodes.get(name);
+        return node == null ? null : node.service;
+    }
+
+    /** Publish action of the node registered under {@code name}, or {@code null} if unknown. */
+    public PublishClusterStateAction action(String name) {
+        MockNode node = nodes.get(name);
+        return node == null ? null : node.action;
+    }
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        // Fresh thread pool per test; it is terminated again in tearDown().
+        threadPool = new ThreadPool(getClass().getName());
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        // NOTE(review): super.tearDown() runs before the resource cleanup below; if it throws,
+        // the mock nodes and thread pool leak. Consider a try/finally — confirm intent.
+        for (MockNode curNode : nodes.values()) {
+            curNode.action.close();
+            curNode.service.close();
+        }
+        terminate(threadPool);
+    }
+
+    /** Builds and starts a mock transport service on top of an in-JVM local transport. */
+    protected MockTransportService buildTransportService(Settings settings, Version version) {
+        LocalTransport localTransport = new LocalTransport(settings, threadPool, version);
+        MockTransportService transportService = new MockTransportService(settings, localTransport, threadPool);
+        transportService.start();
+        return transportService;
+    }
+
+    /** Wires a publish action together from the node's transport, node view and listener. */
+    protected PublishClusterStateAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, MockDiscoveryNodesProvider nodesProvider,
+                                                                       PublishClusterStateAction.NewClusterStateListener listener) {
+        // Discovery settings are derived straight from the node settings; no dynamic updates here.
+        NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);
+        DiscoverySettings discoverySettings = new DiscoverySettings(settings, nodeSettingsService);
+        return new PublishClusterStateAction(settings, transportService, nodesProvider, listener, discoverySettings);
+    }
+
+
+    /** Minimal {@link DiscoveryNodesProvider}: a mutable view of known nodes, no node service. */
+    static class MockDiscoveryNodesProvider implements DiscoveryNodesProvider {
+
+        // Immutable snapshot, rebuilt each time a node is added.
+        private DiscoveryNodes discoveryNodes = DiscoveryNodes.EMPTY_NODES;
+
+        public MockDiscoveryNodesProvider(DiscoveryNode localNode) {
+            this.discoveryNodes = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build();
+        }
+
+        public void addNode(DiscoveryNode node) {
+            this.discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(node).build();
+        }
+
+        @Override
+        public DiscoveryNodes nodes() {
+            return discoveryNodes;
+        }
+
+        @Override
+        public NodeService nodeService() {
+            // The publish path under test must never ask for a node service.
+            assert false;
+            throw new UnsupportedOperationException("Shouldn't be here");
+        }
+    }
+
+
+ @Test
+ @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+ public void testSimpleClusterStatePublishing() throws Exception {
+ MockNewClusterStateListener mockListenerA = new MockNewClusterStateListener();
+ MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT, mockListenerA);
+
+ MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener();
+ MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT, mockListenerB);
+
+ // Initial cluster state
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build();
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+
+ // cluster state update - add nodeB
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build();
+ ClusterState previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - add block
+ previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ assertThat(clusterState.blocks().global().size(), equalTo(1));
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - remove block
+ previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ assertThat(clusterState.blocks().global().size(), equalTo(0));
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // Adding new node - this node should get full cluster state while nodeB should still be getting diffs
+
+ MockNewClusterStateListener mockListenerC = new MockNewClusterStateListener();
+ MockNode nodeC = createMockNode("nodeC", Settings.EMPTY, Version.CURRENT, mockListenerC);
+
+ // cluster state update 3 - register node C
+ previousClusterState = clusterState;
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build();
+ clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ assertThat(clusterState.blocks().global().size(), equalTo(0));
+ }
+ });
+ mockListenerC.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ // First state
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update 4 - update settings
+ previousClusterState = clusterState;
+ MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(Settings.settingsBuilder().put("foo", "bar").build()).build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build();
+ NewClusterStateExpectation expectation = new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ assertThat(clusterState.blocks().global().size(), equalTo(0));
+ }
+ };
+ mockListenerB.add(expectation);
+ mockListenerC.add(expectation);
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - skipping one version change - should request full cluster state
+ previousClusterState = ClusterState.builder(clusterState).incrementVersion().build();
+ clusterState = ClusterState.builder(clusterState).incrementVersion().build();
+ expectation = new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ };
+ mockListenerB.add(expectation);
+ mockListenerC.add(expectation);
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - skipping one version change - should request full cluster state
+ previousClusterState = ClusterState.builder(clusterState).incrementVersion().build();
+ clusterState = ClusterState.builder(clusterState).incrementVersion().build();
+ expectation = new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ };
+ mockListenerB.add(expectation);
+ mockListenerC.add(expectation);
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // node B becomes the master and sends a cluster state whose version goes backwards
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes)
+ .put(nodeA.discoveryNode)
+ .put(nodeB.discoveryNode)
+ .put(nodeC.discoveryNode)
+ .build();
+ previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+ clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
+ expectation = new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ };
+ mockListenerA.add(expectation);
+ mockListenerC.add(expectation);
+ publishStateDiffAndWait(nodeB.action, clusterState, previousClusterState);
+ }
+
+ @Test
+ @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+ public void testUnexpectedDiffPublishing() throws Exception {
+
+ MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ fail("Shouldn't send cluster state to myself");
+ }
+ });
+
+ MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener();
+ MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT, mockListenerB);
+
+ // Initial cluster state with both nodes - the second node still shouldn't get a diff even though it's present in the previous cluster state
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build();
+ ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+ ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - add block
+ previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+ }
+
+ @Test
+ @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+ public void testDisablingDiffPublishing() throws Exception {
+ Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build();
+
+ MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ fail("Shouldn't send cluster state to myself");
+ }
+ });
+
+ MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff());
+ assertFalse(clusterState.wasReadFromDiff());
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+ });
+
+ // Initial cluster state
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build();
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+
+ // cluster state update - add nodeB
+ discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build();
+ ClusterState previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build();
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - add block
+ previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+ }
+
+
+ @Test
+ @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+ public void testSimultaneousClusterStatePublishing() throws Exception {
+ int numberOfNodes = randomIntBetween(2, 10);
+ int numberOfIterations = randomIntBetween(50, 200);
+ Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "100ms").put(DiscoverySettings.PUBLISH_DIFF_ENABLE, true).build();
+ MockNode[] nodes = new MockNode[numberOfNodes];
+ DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < nodes.length; i++) {
+ final String name = "node" + i;
+ nodes[i] = createMockNode(name, settings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public synchronized void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ assertProperMetaDataForVersion(clusterState.metaData(), clusterState.version());
+ if (randomInt(10) < 2) {
+ // Cause timeouts from time to time
+ try {
+ Thread.sleep(randomInt(110));
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+ });
+ discoveryNodesBuilder.put(nodes[i].discoveryNode);
+ }
+
+ AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations];
+ DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
+ MetaData metaData = MetaData.EMPTY_META_DATA;
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(metaData).build();
+ ClusterState previousState;
+ for (int i = 0; i < numberOfIterations; i++) {
+ previousState = clusterState;
+ metaData = buildMetaDataForVersion(metaData, i + 1);
+ clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build();
+ listeners[i] = publishStateDiff(nodes[0].action, clusterState, previousState);
+ }
+
+ for (int i = 0; i < numberOfIterations; i++) {
+ listeners[i].await(1, TimeUnit.SECONDS);
+ }
+ }
+
+ @Test
+ @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+ public void testSerializationFailureDuringDiffPublishing() throws Exception {
+
+ MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ fail("Shouldn't send cluster state to myself");
+ }
+ });
+
+ MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener();
+ MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT, mockListenerB);
+
+ // Initial cluster state with both nodes - the second node still shouldn't get a diff even though it's present in the previous cluster state
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build();
+ ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+ ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertFalse(clusterState.wasReadFromDiff());
+ }
+ });
+ publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+ // cluster state update - add block
+ previousClusterState = clusterState;
+ clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
+ mockListenerB.add(new NewClusterStateExpectation() {
+ @Override
+ public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+ assertTrue(clusterState.wasReadFromDiff());
+ }
+ });
+
+ ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) {
+ @Override
+ public Diff<ClusterState> diff(ClusterState previousState) {
+ return new Diff<ClusterState>() {
+ @Override
+ public ClusterState apply(ClusterState part) {
+ fail("this diff shouldn't be applied");
+ return part;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ throw new IOException("Simulated failure of diff serialization");
+ }
+ };
+ }
+ };
+ List<Tuple<DiscoveryNode, Throwable>> errors = publishStateDiff(nodeA.action, unserializableClusterState, previousClusterState).awaitErrors(1, TimeUnit.SECONDS);
+ assertThat(errors.size(), equalTo(1));
+ assertThat(errors.get(0).v2().getMessage(), containsString("Simulated failure of diff serialization"));
+ }
+
+ private MetaData buildMetaDataForVersion(MetaData metaData, long version) {
+ ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.builder(metaData.indices());
+ indices.put("test" + version, IndexMetaData.builder("test" + version).settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+ .numberOfShards((int) version).numberOfReplicas(0).build());
+ return MetaData.builder(metaData)
+ .transientSettings(Settings.builder().put("test", version).build())
+ .indices(indices.build())
+ .build();
+ }
+
+ private void assertProperMetaDataForVersion(MetaData metaData, long version) {
+ for (long i = 1; i <= version; i++) {
+ assertThat(metaData.index("test" + i), notNullValue());
+ assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i));
+ }
+ assertThat(metaData.index("test" + (version + 1)), nullValue());
+ assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version)));
+ }
+
+ public void publishStateDiffAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
+ publishStateDiff(action, state, previousState).await(1, TimeUnit.SECONDS);
+ }
+
+ public AssertingAckListener publishStateDiff(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
+ AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1);
+ ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState);
+ action.publish(changedEvent, assertingAckListener);
+ return assertingAckListener;
+ }
+
+ public static class AssertingAckListener implements Discovery.AckListener {
+ private final List<Tuple<DiscoveryNode, Throwable>> errors = new CopyOnWriteArrayList<>();
+ private final AtomicBoolean timeoutOccured = new AtomicBoolean();
+ private final CountDownLatch countDown;
+
+ public AssertingAckListener(int nodeCount) {
+ countDown = new CountDownLatch(nodeCount);
+ }
+
+ @Override
+ public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
+ if (t != null) {
+ errors.add(new Tuple<>(node, t));
+ }
+ countDown.countDown();
+ }
+
+ @Override
+ public void onTimeout() {
+ timeoutOccured.set(true);
+ // Fast forward the counter - no reason to wait here
+ long currentCount = countDown.getCount();
+ for (long i = 0; i < currentCount; i++) {
+ countDown.countDown();
+ }
+ }
+
+ public void await(long timeout, TimeUnit unit) throws InterruptedException {
+ assertThat(awaitErrors(timeout, unit), emptyIterable());
+ }
+
+ public List<Tuple<DiscoveryNode, Throwable>> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException {
+ countDown.await(timeout, unit);
+ assertFalse(timeoutOccured.get());
+ return errors;
+ }
+
+ }
+
+ public interface NewClusterStateExpectation {
+ void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed);
+ }
+
+ public static class MockNewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener {
+ CopyOnWriteArrayList<NewClusterStateExpectation> expectations = new CopyOnWriteArrayList<>();
+
+ @Override
+ public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+ final NewClusterStateExpectation expectation;
+ try {
+ expectation = expectations.remove(0);
+ } catch (ArrayIndexOutOfBoundsException ex) {
+ fail("Unexpected cluster state update " + clusterState.prettyPrint());
+ return;
+ }
+ expectation.check(clusterState, newStateProcessed);
+ newStateProcessed.onNewClusterStateProcessed();
+ }
+
+ public void add(NewClusterStateExpectation expectation) {
+ expectations.add(expectation);
+ }
+ }
+
+ public static class DelegatingClusterState extends ClusterState {
+
+ public DelegatingClusterState(ClusterState clusterState) {
+ super(clusterState.version(), clusterState.uuid(), clusterState);
+ }
+
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java
new file mode 100644
index 0000000000..5b73c82c2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java
@@ -0,0 +1,533 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.*;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.gateway.GatewayService;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;
+import static org.elasticsearch.test.XContentTestUtils.convertToMap;
+import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.equalTo;
+
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, numClientNodes = 0)
+public class ClusterStateDiffTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testClusterStateDiffSerialization() throws Exception {
+ DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), Version.CURRENT);
+ DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), Version.CURRENT);
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build();
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+ ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode);
+
+ int iterationCount = randomIntBetween(10, 300);
+ for (int iteration = 0; iteration < iterationCount; iteration++) {
+ ClusterState previousClusterState = clusterState;
+ ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs;
+ int changesCount = randomIntBetween(1, 4);
+ ClusterState.Builder builder = null;
+ for (int i = 0; i < changesCount; i++) {
+ if (i > 0) {
+ clusterState = builder.build();
+ }
+ switch (randomInt(4)) {
+ case 0:
+ builder = randomNodes(clusterState);
+ break;
+ case 1:
+ builder = randomRoutingTable(clusterState);
+ break;
+ case 2:
+ builder = randomBlocks(clusterState);
+ break;
+ case 3:
+ case 4:
+ builder = randomMetaDataChanges(clusterState);
+ break;
+ default:
+ throw new IllegalArgumentException("Shouldn't be here");
+ }
+ }
+ clusterState = builder.incrementVersion().build();
+
+ if (randomIntBetween(0, 10) < 1) {
+ // Update cluster state via full serialization from time to time
+ clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().localNode());
+ } else {
+ // Update cluster states using diffs
+ Diff<ClusterState> diffBeforeSerialization = clusterState.diff(previousClusterState);
+ BytesStreamOutput os = new BytesStreamOutput();
+ diffBeforeSerialization.writeTo(os);
+ byte[] diffBytes = os.bytes().toBytes();
+ Diff<ClusterState> diff;
+ try (StreamInput input = StreamInput.wrap(diffBytes)) {
+ diff = previousClusterStateFromDiffs.readDiffFrom(input);
+ clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs);
+ }
+ }
+
+
+ try {
+ // Check non-diffable elements
+ assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version()));
+ assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid()));
+
+ // Check nodes
+ assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes()));
+ assertThat(clusterStateFromDiffs.nodes().localNodeId(), equalTo(previousClusterStateFromDiffs.nodes().localNodeId()));
+ assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes()));
+ for (ObjectCursor<String> node : clusterStateFromDiffs.nodes().nodes().keys()) {
+ DiscoveryNode node1 = clusterState.nodes().get(node.value);
+ DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value);
+ assertThat(node1.version(), equalTo(node2.version()));
+ assertThat(node1.address(), equalTo(node2.address()));
+ assertThat(node1.attributes(), equalTo(node2.attributes()));
+ }
+
+ // Check routing table
+ assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version()));
+ assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting()));
+
+ // Check cluster blocks
+ assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterState.blocks().global()));
+ assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterState.blocks().indices()));
+ assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterState.blocks().disableStatePersistence()));
+
+ // Check metadata
+ assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version()));
+ assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid()));
+ assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings()));
+ assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings()));
+ assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices()));
+ assertThat(clusterStateFromDiffs.metaData().templates(), equalTo(clusterState.metaData().templates()));
+ assertThat(clusterStateFromDiffs.metaData().customs(), equalTo(clusterState.metaData().customs()));
+ assertThat(clusterStateFromDiffs.metaData().aliases(), equalTo(clusterState.metaData().aliases()));
+
+ // JSON Serialization test - make sure that both states produce similar JSON
+ assertThat(mapsEqualIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)), equalTo(true));
+
+ // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order
+ // however, serialized size should remain the same
+ assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length));
+ } catch (AssertionError error) {
+ logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString());
+ throw error;
+ }
+ }
+
+ logger.info("Final cluster state:[{}]", clusterState.toString());
+
+ }
+
+ private ClusterState.Builder randomNodes(ClusterState clusterState) {
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ List<String> nodeIds = randomSubsetOf(randomInt(clusterState.nodes().nodes().size() - 1), clusterState.nodes().nodes().keys().toArray(String.class));
+ for (String nodeId : nodeIds) {
+ if (nodeId.startsWith("node-")) {
+ if (randomBoolean()) {
+ nodes.remove(nodeId);
+ } else {
+ nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random())));
+ }
+ }
+ }
+ int additionalNodeCount = randomIntBetween(1, 20);
+ for (int i = 0; i < additionalNodeCount; i++) {
+ nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random())));
+ }
+ return ClusterState.builder(clusterState).nodes(nodes);
+ }
+
+ private ClusterState.Builder randomRoutingTable(ClusterState clusterState) {
+ RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable());
+ int numberOfIndices = clusterState.routingTable().indicesRouting().size();
+ if (numberOfIndices > 0) {
+ List<String> randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keySet().toArray(new String[numberOfIndices]));
+ for (String index : randomIndices) {
+ if (randomBoolean()) {
+ builder.remove(index);
+ } else {
+ builder.add(randomIndexRoutingTable(index, clusterState.nodes().nodes().keys().toArray(String.class)));
+ }
+ }
+ }
+ int additionalIndexCount = randomIntBetween(1, 20);
+ for (int i = 0; i < additionalIndexCount; i++) {
+ builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().nodes().keys().toArray(String.class)));
+ }
+ return ClusterState.builder(clusterState).routingTable(builder.build());
+ }
+
+ private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds) {
+ IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index);
+ int shardCount = randomInt(10);
+
+ for (int i = 0; i < shardCount; i++) {
+ IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, i), randomBoolean());
+ int replicaCount = randomIntBetween(1, 10);
+ for (int j = 0; j < replicaCount; j++) {
+ indexShard.addShard(
+ new MutableShardRouting(index, i, randomFrom(nodeIds), j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(1, 4)), 1));
+ }
+ builder.addIndexShard(indexShard.build());
+ }
+ return builder.build();
+ }
+
+ private ClusterState.Builder randomBlocks(ClusterState clusterState) {
+ ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks());
+ int globalBlocksCount = clusterState.blocks().global().size();
+ if (globalBlocksCount > 0) {
+ List<ClusterBlock> blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount]));
+ for (ClusterBlock block : blocks) {
+ builder.removeGlobalBlock(block);
+ }
+ }
+ int additionalGlobalBlocksCount = randomIntBetween(1, 3);
+ for (int i = 0; i < additionalGlobalBlocksCount; i++) {
+ builder.addGlobalBlock(randomGlobalBlock());
+ }
+ return ClusterState.builder(clusterState).blocks(builder);
+ }
+
+ private ClusterBlock randomGlobalBlock() {
+ switch (randomInt(2)) {
+ case 0:
+ return DiscoverySettings.NO_MASTER_BLOCK_ALL;
+ case 1:
+ return DiscoverySettings.NO_MASTER_BLOCK_WRITES;
+ default:
+ return GatewayService.STATE_NOT_RECOVERED_BLOCK;
+ }
+ }
+
+ private ClusterState.Builder randomMetaDataChanges(ClusterState clusterState) {
+ MetaData metaData = clusterState.metaData();
+ int changesCount = randomIntBetween(1, 10);
+ for (int i = 0; i < changesCount; i++) {
+ switch (randomInt(3)) {
+ case 0:
+ metaData = randomMetaDataSettings(metaData);
+ break;
+ case 1:
+ metaData = randomIndices(metaData);
+ break;
+ case 2:
+ metaData = randomTemplates(metaData);
+ break;
+ case 3:
+ metaData = randomMetaDataCustoms(metaData);
+ break;
+ default:
+ throw new IllegalArgumentException("Shouldn't be here");
+ }
+ }
+ return ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).version(metaData.version() + 1).build());
+ }
+
+ private Settings randomSettings(Settings settings) {
+ Settings.Builder builder = Settings.builder();
+ if (randomBoolean()) {
+ builder.put(settings);
+ }
+ int settingsCount = randomInt(10);
+ for (int i = 0; i < settingsCount; i++) {
+ builder.put(randomAsciiOfLength(10), randomAsciiOfLength(10));
+ }
+ return builder.build();
+
+ }
+
+ private MetaData randomMetaDataSettings(MetaData metaData) {
+ if (randomBoolean()) {
+ return MetaData.builder(metaData).persistentSettings(randomSettings(metaData.persistentSettings())).build();
+ } else {
+ return MetaData.builder(metaData).transientSettings(randomSettings(metaData.transientSettings())).build();
+ }
+ }
+
+ private interface RandomPart<T> {
+ /**
+ * Returns list of parts from metadata
+ */
+ ImmutableOpenMap<String, T> parts(MetaData metaData);
+
+ /**
+ * Puts the part back into metadata
+ */
+ MetaData.Builder put(MetaData.Builder builder, T part);
+
+ /**
+ * Remove the part from metadata
+ */
+ MetaData.Builder remove(MetaData.Builder builder, String name);
+
+ /**
+ * Returns a random part with the specified name
+ */
+ T randomCreate(String name);
+
+ /**
+ * Makes random modifications to the part
+ */
+ T randomChange(T part);
+
+ }
+
+ private <T> MetaData randomParts(MetaData metaData, String prefix, RandomPart<T> randomPart) {
+ MetaData.Builder builder = MetaData.builder(metaData);
+ ImmutableOpenMap<String, T> parts = randomPart.parts(metaData);
+ int partCount = parts.size();
+ if (partCount > 0) {
+ List<String> randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class));
+ for (String part : randomParts) {
+ if (randomBoolean()) {
+ randomPart.remove(builder, part);
+ } else {
+ randomPart.put(builder, randomPart.randomChange(parts.get(part)));
+ }
+ }
+ }
+ int additionalPartCount = randomIntBetween(1, 20);
+ for (int i = 0; i < additionalPartCount; i++) {
+ String name = randomName(prefix);
+ randomPart.put(builder, randomPart.randomCreate(name));
+ }
+ return builder.build();
+ }
+
+ private MetaData randomIndices(MetaData metaData) {
+ return randomParts(metaData, "index", new RandomPart<IndexMetaData>() {
+
+ @Override
+ public ImmutableOpenMap<String, IndexMetaData> parts(MetaData metaData) {
+ return metaData.indices();
+ }
+
+ @Override
+ public MetaData.Builder put(MetaData.Builder builder, IndexMetaData part) {
+ return builder.put(part, true);
+ }
+
+ @Override
+ public MetaData.Builder remove(MetaData.Builder builder, String name) {
+ return builder.remove(name);
+ }
+
+ @Override
+ public IndexMetaData randomCreate(String name) {
+ IndexMetaData.Builder builder = IndexMetaData.builder(name);
+ Settings.Builder settingsBuilder = Settings.builder();
+ setRandomSettings(getRandom(), settingsBuilder);
+ settingsBuilder.put(randomSettings(Settings.EMPTY)).put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion(random()));
+ builder.settings(settingsBuilder);
+ builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10));
+ int aliasCount = randomInt(10);
+ if (randomBoolean()) {
+ builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+ }
+ for (int i = 0; i < aliasCount; i++) {
+ builder.putAlias(randomAlias());
+ }
+ return builder.build();
+ }
+
+ @Override
+ public IndexMetaData randomChange(IndexMetaData part) {
+ IndexMetaData.Builder builder = IndexMetaData.builder(part);
+ switch (randomIntBetween(0, 3)) {
+ case 0:
+ builder.settings(Settings.builder().put(part.settings()).put(randomSettings(Settings.EMPTY)));
+ break;
+ case 1:
+ if (randomBoolean() && part.aliases().isEmpty() == false) {
+ builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class)));
+ } else {
+ builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10)));
+ }
+ break;
+ case 2:
+ builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID()));
+ break;
+ case 3:
+ builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+ break;
+ default:
+ throw new IllegalArgumentException("Shouldn't be here");
+ }
+ return builder.build();
+ }
+ });
+ }
+
+ private IndexWarmersMetaData randomWarmers() {
+ if (randomBoolean()) {
+ return new IndexWarmersMetaData(
+ new IndexWarmersMetaData.Entry(
+ randomName("warm"),
+ new String[]{randomName("type")},
+ randomBoolean(),
+ new BytesArray(randomAsciiOfLength(1000)))
+ );
+ } else {
+ return new IndexWarmersMetaData();
+ }
+ }
+
+ private MetaData randomTemplates(MetaData metaData) {
+ return randomParts(metaData, "template", new RandomPart<IndexTemplateMetaData>() {
+ @Override
+ public ImmutableOpenMap<String, IndexTemplateMetaData> parts(MetaData metaData) {
+ return metaData.templates();
+ }
+
+ @Override
+ public MetaData.Builder put(MetaData.Builder builder, IndexTemplateMetaData part) {
+ return builder.put(part);
+ }
+
+ @Override
+ public MetaData.Builder remove(MetaData.Builder builder, String name) {
+ return builder.removeTemplate(name);
+ }
+
+ @Override
+ public IndexTemplateMetaData randomCreate(String name) {
+ IndexTemplateMetaData.Builder builder = IndexTemplateMetaData.builder(name);
+ builder.order(randomInt(1000))
+ .template(randomName("temp"))
+ .settings(randomSettings(Settings.EMPTY));
+ int aliasCount = randomIntBetween(0, 10);
+ for (int i = 0; i < aliasCount; i++) {
+ builder.putAlias(randomAlias());
+ }
+ if (randomBoolean()) {
+ builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+ }
+ return builder.build();
+ }
+
+ @Override
+ public IndexTemplateMetaData randomChange(IndexTemplateMetaData part) {
+ IndexTemplateMetaData.Builder builder = new IndexTemplateMetaData.Builder(part);
+ builder.order(randomInt(1000));
+ return builder.build();
+ }
+ });
+ }
+
+ private AliasMetaData randomAlias() {
+ AliasMetaData.Builder builder = newAliasMetaDataBuilder(randomName("alias"));
+ if (randomBoolean()) {
+ builder.filter(QueryBuilders.termQuery("test", randomRealisticUnicodeOfCodepointLength(10)).toString());
+ }
+ if (randomBoolean()) {
+ builder.routing(randomAsciiOfLength(10));
+ }
+ return builder.build();
+ }
+
+ private MetaData randomMetaDataCustoms(final MetaData metaData) {
+ return randomParts(metaData, "custom", new RandomPart<MetaData.Custom>() {
+
+ @Override
+ public ImmutableOpenMap<String, MetaData.Custom> parts(MetaData metaData) {
+ return metaData.customs();
+ }
+
+ @Override
+ public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) {
+ if (part instanceof SnapshotMetaData) {
+ return builder.putCustom(SnapshotMetaData.TYPE, part);
+ } else if (part instanceof RepositoriesMetaData) {
+ return builder.putCustom(RepositoriesMetaData.TYPE, part);
+ } else if (part instanceof RestoreMetaData) {
+ return builder.putCustom(RestoreMetaData.TYPE, part);
+ }
+ throw new IllegalArgumentException("Unknown custom part " + part);
+ }
+
+ @Override
+ public MetaData.Builder remove(MetaData.Builder builder, String name) {
+ return builder.removeCustom(name);
+ }
+
+ @Override
+ public MetaData.Custom randomCreate(String name) {
+ switch (randomIntBetween(0, 2)) {
+ case 0:
+ return new SnapshotMetaData(new SnapshotMetaData.Entry(
+ new SnapshotId(randomName("repo"), randomName("snap")),
+ randomBoolean(),
+ SnapshotMetaData.State.fromValue((byte) randomIntBetween(0, 6)),
+ ImmutableList.<String>of(),
+ Math.abs(randomLong()),
+ ImmutableMap.<ShardId, SnapshotMetaData.ShardSnapshotStatus>of()));
+ case 1:
+ return new RepositoriesMetaData();
+ case 2:
+ return new RestoreMetaData(new RestoreMetaData.Entry(
+ new SnapshotId(randomName("repo"), randomName("snap")),
+ RestoreMetaData.State.fromValue((byte) randomIntBetween(0, 3)),
+ ImmutableList.<String>of(),
+ ImmutableMap.<ShardId, RestoreMetaData.ShardRestoreStatus>of()));
+ default:
+ throw new IllegalArgumentException("Shouldn't be here");
+ }
+ }
+
+ @Override
+ public MetaData.Custom randomChange(MetaData.Custom part) {
+ return part;
+ }
+ });
+ }
+
+ private String randomName(String prefix) {
+ return prefix + Strings.randomBase64UUID(getRandom());
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
new file mode 100644
index 0000000000..c6e5ce2fb5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class DiskUsageTests extends ElasticsearchTestCase {
+
+ @Test
+ public void diskUsageCalcTest() {
+ DiskUsage du = new DiskUsage("node1", "n1", 100, 40);
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(40.0));
+ assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - 40.0));
+ assertThat(du.getFreeBytes(), equalTo(40L));
+ assertThat(du.getUsedBytes(), equalTo(60L));
+ assertThat(du.getTotalBytes(), equalTo(100L));
+
+ // Test that DiskUsage handles invalid numbers, as reported by some
+ // filesystems (ZFS & NTFS)
+ DiskUsage du2 = new DiskUsage("node1", "n1", 100, 101);
+ assertThat(du2.getFreeDiskAsPercentage(), equalTo(101.0));
+ assertThat(du2.getFreeBytes(), equalTo(101L));
+ assertThat(du2.getUsedBytes(), equalTo(-1L));
+ assertThat(du2.getTotalBytes(), equalTo(100L));
+
+ DiskUsage du3 = new DiskUsage("node1", "n1", -1, -1);
+ assertThat(du3.getFreeDiskAsPercentage(), equalTo(100.0));
+ assertThat(du3.getFreeBytes(), equalTo(-1L));
+ assertThat(du3.getUsedBytes(), equalTo(0L));
+ assertThat(du3.getTotalBytes(), equalTo(-1L));
+
+ DiskUsage du4 = new DiskUsage("node1", "n1", 0, 0);
+ assertThat(du4.getFreeDiskAsPercentage(), equalTo(100.0));
+ assertThat(du4.getFreeBytes(), equalTo(0L));
+ assertThat(du4.getUsedBytes(), equalTo(0L));
+ assertThat(du4.getTotalBytes(), equalTo(0L));
+ }
+
+ @Test
+ public void randomDiskUsageTest() {
+ int iters = scaledRandomIntBetween(1000, 10000);
+ for (int i = 1; i < iters; i++) {
+ long total = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ long free = between(Integer.MIN_VALUE, Integer.MAX_VALUE);
+ DiskUsage du = new DiskUsage("random", "random", total, free);
+ if (total == 0) {
+ assertThat(du.getFreeBytes(), equalTo(free));
+ assertThat(du.getTotalBytes(), equalTo(0L));
+ assertThat(du.getUsedBytes(), equalTo(-free));
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0));
+ assertThat(du.getUsedDiskAsPercentage(), equalTo(0.0));
+ } else {
+ assertThat(du.getFreeBytes(), equalTo(free));
+ assertThat(du.getTotalBytes(), equalTo(total));
+ assertThat(du.getUsedBytes(), equalTo(total - free));
+ assertThat(du.getFreeDiskAsPercentage(), equalTo(100.0 * ((double) free / total)));
+ assertThat(du.getUsedDiskAsPercentage(), equalTo(100.0 - (100.0 * ((double) free / total))));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
new file mode 100644
index 0000000000..2034dc41b8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesTests.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class MinimumMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMinimumMasterNodes() throws Exception {
+
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .build();
+
+ logger.info("--> start first node");
+ internalCluster().startNode(settings);
+
+ logger.info("--> should be blocked, no master...");
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
+ assertThat(state.nodes().size(), equalTo(1)); // verify that we still see the local node in the cluster state
+
+ logger.info("--> start second node, cluster should be formed");
+ internalCluster().startNode(settings);
+
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(false));
+
+ createIndex("test");
+ NumShards numShards = getNumShards("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), equalTo(numShards.totalNumShards));
+ // flush for simpler debugging
+ flushAndRefresh();
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+
+ internalCluster().stopCurrentMasterNode();
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object obj) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ return state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID);
+ }
+ });
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
+ assertThat(state.nodes().size(), equalTo(1)); // verify that we still see the local node in the cluster state
+
+ logger.info("--> starting the previous master node again...");
+ internalCluster().startNode(settings);
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ ensureGreen();
+
+ logger.info("--> verify we the data back after cluster reform");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
+ }
+
+ internalCluster().stopRandomNonMasterNode();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
+ }
+ });
+
+ logger.info("--> starting the previous master node again...");
+ internalCluster().startNode(settings);
+
+ ensureGreen();
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+ state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(2));
+ assertThat(state.metaData().indices().containsKey("test"), equalTo(true));
+
+ logger.info("Running Cluster Health");
+ ensureGreen();
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
+ }
+ }
+
+ @Test @Slow
+ @TestLogging("cluster.routing.allocation.allocator:TRACE")
+ public void multipleNodesShutdownNonMasterNodes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.minimum_master_nodes", 3)
+ .put("discovery.zen.ping_timeout", "1s")
+ .put("discovery.initial_state_timeout", "500ms")
+ .build();
+
+ logger.info("--> start first 2 nodes");
+ internalCluster().startNodesAsync(2, settings).get();
+
+ ClusterState state;
+
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ for (Client client : clients()) {
+ ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
+ }
+ }
+ });
+
+ logger.info("--> start two more nodes");
+ internalCluster().startNodesAsync(2, settings).get();
+
+ ensureGreen();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+
+ createIndex("test");
+ NumShards numShards = getNumShards("test");
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ ensureGreen();
+ // make sure that all shards recovered before trying to flush
+ assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false));
+ // flush for simpler debugging
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ refresh();
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
+ }
+
+ internalCluster().stopRandomNonMasterNode();
+ internalCluster().stopRandomNonMasterNode();
+
+ logger.info("--> verify that there is no master anymore on remaining nodes");
+ // spin here to wait till the state is set
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> start back the 2 nodes ");
+ String[] newNodes = internalCluster().startNodesAsync(2, settings).get().toArray(Strings.EMPTY_ARRAY);
+
+ ensureGreen();
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(4));
+ // we prefer to elect up and running nodes
+ assertThat(state.nodes().masterNodeId(), not(isOneOf(newNodes)));
+
+ logger.info("--> verify we the data back");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
+ }
+ }
+
+ @Test
+ public void dynamicUpdateMinimumMasterNodes() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.ping_timeout", "400ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .build();
+
+ logger.info("--> start 2 nodes");
+ internalCluster().startNodesAsync(2, settings).get();
+
+ // wait until second node join the cluster
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> setting minimum master node to 2");
+ setMinimumMasterNodes(2);
+
+ // make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
+ for (Client client : internalCluster()) {
+ assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
+ equalTo(false));
+ }
+
+ logger.info("--> stopping a node");
+ internalCluster().stopRandomDataNode();
+ logger.info("--> verifying min master node has effect");
+ assertNoMasterBlockOnAllNodes();
+
+ logger.info("--> bringing another node up");
+ internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build());
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ }
+
+ private void assertNoMasterBlockOnAllNodes() throws InterruptedException {
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object obj) {
+ boolean success = true;
+ for (Client client : internalCluster()) {
+ ClusterState state = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ success &= state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID);
+ if (logger.isDebugEnabled()) {
+ logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID));
+ }
+ }
+ return success;
+ }
+ }, 20, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ public void testCanNotBringClusterDown() throws ExecutionException, InterruptedException {
+ int nodeCount = scaledRandomIntBetween(1, 5);
+ Settings.Builder settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms");
+
+ // set an initial value which is at least quorum to avoid split brains during initial startup
+ int initialMinMasterNodes = randomIntBetween(nodeCount / 2 + 1, nodeCount);
+ settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, initialMinMasterNodes);
+
+
+ logger.info("--> starting [{}] nodes. min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes);
+ internalCluster().startNodesAsync(nodeCount, settings.build()).get();
+
+ logger.info("--> waiting for nodes to join");
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());
+
+ int updateCount = randomIntBetween(1, nodeCount);
+
+ logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount);
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount)));
+
+ logger.info("--> verifying no node left and master is up");
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());
+
+ updateCount = nodeCount + randomIntBetween(1, 2000);
+ logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount);
+ assertThat(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount))
+ .get().getPersistentSettings().getAsMap().keySet(),
+ empty());
+
+ logger.info("--> verifying no node left and master is up");
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
new file mode 100644
index 0000000000..21e8682bbc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeTests.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import com.google.common.base.Predicate;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class NoMasterNodeTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testNoMasterActions() throws Exception {
+ // note, sometimes, we want to check with the fact that an index gets created, sometimes not...
+ boolean autoCreateIndex = randomBoolean();
+ logger.info("auto_create_index set to {}", autoCreateIndex);
+
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("action.auto_create_index", autoCreateIndex)
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put(DiscoverySettings.NO_MASTER_BLOCK, "all")
+ .build();
+
+ TimeValue timeout = TimeValue.timeValueMillis(200);
+
+ internalCluster().startNode(settings);
+ // start a second node, create an index, and then shut it down so we have no master block
+ internalCluster().startNode(settings);
+ createIndex("test");
+ client().admin().cluster().prepareHealth("test").setWaitForGreenStatus().execute().actionGet();
+ internalCluster().stopRandomDataNode();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertTrue(state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID));
+ }
+ });
+
+ assertThrows(client().prepareGet("test", "type1", "1"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().prepareGet("no_index", "type1", "1"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().prepareMultiGet().add("test", "type1", "1"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().prepareMultiGet().add("no_index", "type1", "1"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ PercolateSourceBuilder percolateSource = new PercolateSourceBuilder();
+ percolateSource.setDoc(docBuilder().setDoc(new HashMap()));
+ assertThrows(client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateSource),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ percolateSource = new PercolateSourceBuilder();
+ percolateSource.setDoc(docBuilder().setDoc(new HashMap()));
+ assertThrows(client().preparePercolate()
+ .setIndices("no_index").setDocumentType("type1")
+ .setSource(percolateSource),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+
+ assertThrows(client().admin().indices().prepareAnalyze("test", "this is a test"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().admin().indices().prepareAnalyze("no_index", "this is a test"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().prepareCount("test"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ assertThrows(client().prepareCount("no_index"),
+ ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE
+ );
+
+ checkWriteAction(
+ false, timeout,
+ client().prepareUpdate("test", "type1", "1")
+ .setScript(new Script("test script", ScriptService.ScriptType.INLINE, null, null)).setTimeout(timeout));
+
+ checkWriteAction(
+ autoCreateIndex, timeout,
+ client().prepareUpdate("no_index", "type1", "1")
+ .setScript(new Script("test script", ScriptService.ScriptType.INLINE, null, null)).setTimeout(timeout));
+
+
+ checkWriteAction(false, timeout,
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout));
+
+ checkWriteAction(autoCreateIndex, timeout,
+ client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout));
+
+ BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
+ bulkRequestBuilder.add(client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()));
+ bulkRequestBuilder.add(client().prepareIndex("test", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject()));
+ checkBulkAction(false, bulkRequestBuilder);
+
+ bulkRequestBuilder = client().prepareBulk();
+ bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()));
+ bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject()));
+ checkBulkAction(autoCreateIndex, bulkRequestBuilder);
+
+ internalCluster().startNode(settings);
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ }
+
+ void checkWriteAction(boolean autoCreateIndex, TimeValue timeout, ActionRequestBuilder<?, ?, ?> builder) {
+ // we clean the metadata when loosing a master, therefore all operations on indices will auto create it, if allowed
+ long now = System.currentTimeMillis();
+ try {
+ builder.get();
+ fail("expected ClusterBlockException or MasterNotDiscoveredException");
+ } catch (ClusterBlockException | MasterNotDiscoveredException e) {
+ if (e instanceof MasterNotDiscoveredException) {
+ assertTrue(autoCreateIndex);
+ } else {
+ assertFalse(autoCreateIndex);
+ }
+ // verify we waited before giving up...
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ }
+ }
+
+ void checkBulkAction(boolean indexShouldBeAutoCreated, BulkRequestBuilder builder) {
+ // bulk operation do not throw MasterNotDiscoveredException exceptions. The only test that auto create kicked in and failed is
+ // via the timeout, as bulk operation do not wait on blocks.
+ TimeValue timeout;
+ if (indexShouldBeAutoCreated) {
+ // we expect the bulk to fail because it will try to go to the master. Use small timeout and detect it has passed
+ timeout = new TimeValue(200);
+ } else {
+ // the request should fail very quickly - use a large timeout and make sure it didn't pass...
+ timeout = new TimeValue(5000);
+ }
+ builder.setTimeout(timeout);
+ long now = System.currentTimeMillis();
+ try {
+ builder.get();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ if (indexShouldBeAutoCreated) {
+ // timeout is 200
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ } else {
+ // timeout is 5000
+ assertThat(System.currentTimeMillis() - now, lessThan(timeout.millis() - 50));
+ }
+ }
+ }
+
+ @Test
+ public void testNoMasterActions_writeMasterBlock() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("action.auto_create_index", false)
+ .put("discovery.zen.minimum_master_nodes", 2)
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .put(DiscoverySettings.NO_MASTER_BLOCK, "write")
+ .build();
+
+ internalCluster().startNode(settings);
+ // start a second node, create an index, and then shut it down so we have no master block
+ internalCluster().startNode(settings);
+ prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).get();
+ prepareCreate("test2").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
+ client().admin().cluster().prepareHealth("_all").setWaitForGreenStatus().get();
+ client().prepareIndex("test1", "type1", "1").setSource("field", "value1").get();
+ client().prepareIndex("test2", "type1", "1").setSource("field", "value1").get();
+ refresh();
+
+ ensureSearchable("test1", "test2");
+
+ ClusterStateResponse clusterState = client().admin().cluster().prepareState().get();
+ logger.info("Cluster state:\n" + clusterState.getState().prettyPrint());
+
+ internalCluster().stopRandomDataNode();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ ClusterState state = client().admin().cluster().prepareState().setLocal(true).get().getState();
+ return state.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID);
+ }
+ }), equalTo(true));
+
+
+ GetResponse getResponse = client().prepareGet("test1", "type1", "1").get();
+ assertExists(getResponse);
+
+ CountResponse countResponse = client().prepareCount("test1").get();
+ assertHitCount(countResponse, 1l);
+
+ SearchResponse searchResponse = client().prepareSearch("test1").get();
+ assertHitCount(searchResponse, 1l);
+
+ countResponse = client().prepareCount("test2").get();
+ assertThat(countResponse.getTotalShards(), equalTo(2));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(1));
+
+ TimeValue timeout = TimeValue.timeValueMillis(200);
+ long now = System.currentTimeMillis();
+ try {
+ client().prepareUpdate("test1", "type1", "1").setDoc("field", "value2").setTimeout(timeout).get();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ now = System.currentTimeMillis();
+ try {
+ client().prepareIndex("test1", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).get();
+ fail("Expected ClusterBlockException");
+ } catch (ClusterBlockException e) {
+ assertThat(System.currentTimeMillis() - now, greaterThan(timeout.millis() - 50));
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+
+ internalCluster().startNode(settings);
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
new file mode 100644
index 0000000000..c102251862
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/SimpleClusterStateTests.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.CollectionAssertions;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Checking simple filtering capabilites of the cluster state
+ *
+ */
+public class SimpleClusterStateTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void indexData() throws Exception {
+ index("foo", "bar", "1", XContentFactory.jsonBuilder().startObject().field("foo", "foo").endObject());
+ index("fuu", "buu", "1", XContentFactory.jsonBuilder().startObject().field("fuu", "fuu").endObject());
+ index("baz", "baz", "1", XContentFactory.jsonBuilder().startObject().field("baz", "baz").endObject());
+ refresh();
+ }
+
+ @Test
+ public void testRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setRoutingTable(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("baz"), is(true));
+ assertThat(clusterStateResponseUnfiltered.getState().routingTable().hasIndex("non-existent"), is(false));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("foo"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("fuu"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("baz"), is(false));
+ assertThat(clusterStateResponse.getState().routingTable().hasIndex("non-existent"), is(false));
+ }
+
+ @Test
+ public void testNodes() throws Exception {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setNodes(true).get();
+ assertThat(clusterStateResponse.getState().nodes().nodes().size(), is(cluster().size()));
+
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponseFiltered.getState().nodes().nodes().size(), is(0));
+ }
+
+ @Test
+ public void testMetadata() throws Exception {
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().clear().setMetaData(true).get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().indices().size(), is(3));
+
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(0));
+ }
+
+ @Test
+ public void testIndexTemplates() throws Exception {
+ client().admin().indices().preparePutTemplate("foo_template")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("fuu_template")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponseUnfiltered.getState().metaData().templates().size(), is(greaterThanOrEqualTo(2)));
+
+ GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates("foo_template").get();
+ assertIndexTemplateExists(getIndexTemplatesResponse, "foo_template");
+ }
+
+ @Test
+ public void testThatFilteringByIndexWorksForMetadataAndRoutingTable() throws Exception {
+ ClusterStateResponse clusterStateResponseFiltered = client().admin().cluster().prepareState().clear()
+ .setMetaData(true).setRoutingTable(true).setIndices("foo", "fuu", "non-existent").get();
+
+ // metadata
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices().size(), is(2));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("foo"));
+ assertThat(clusterStateResponseFiltered.getState().metaData().indices(), CollectionAssertions.hasKey("fuu"));
+
+ // routing table
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("foo"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("fuu"), is(true));
+ assertThat(clusterStateResponseFiltered.getState().routingTable().hasIndex("baz"), is(false));
+ }
+
+ @Test
+ public void testLargeClusterStatePublishing() throws Exception {
+ int estimatedBytesSize = scaledRandomIntBetween(ByteSizeValue.parseBytesSizeValue("10k", "estimatedBytesSize").bytesAsInt(),
+ ByteSizeValue.parseBytesSizeValue("256k", "estimatedBytesSize").bytesAsInt());
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties");
+ int counter = 0;
+ int numberOfFields = 0;
+ while (true) {
+ mapping.startObject(Strings.randomBase64UUID()).field("type", "string").endObject();
+ counter += 10; // each field is about 10 bytes, assuming compression in place
+ numberOfFields++;
+ if (counter > estimatedBytesSize) {
+ break;
+ }
+ }
+ logger.info("number of fields [{}], estimated bytes [{}]", numberOfFields, estimatedBytesSize);
+ mapping.endObject().endObject().endObject();
+
+ int numberOfShards = scaledRandomIntBetween(1, cluster().numDataNodes());
+ // if the create index is ack'ed, then all nodes have successfully processed the cluster state
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("type", mapping)
+ .setTimeout("60s").get());
+ ensureGreen(); // wait for green state, so its both green, and there are no more pending events
+ MappingMetaData masterMappingMetaData = client().admin().indices().prepareGetMappings("test").setTypes("type").get().getMappings().get("test").get("type");
+ for (Client client : clients()) {
+ MappingMetaData mappingMetadata = client.admin().indices().prepareGetMappings("test").setTypes("type").setLocal(true).get().getMappings().get("test").get("type");
+ assertThat(mappingMetadata.source().string(), equalTo(masterMappingMetaData.source().string()));
+ assertThat(mappingMetadata, equalTo(masterMappingMetaData));
+ }
+ }
+
+ @Test
+ public void testIndicesOptions() throws Exception {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*")
+ .get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(2));
+
+ // close one index
+ client().admin().indices().close(Requests.closeIndexRequest("fuu")).get();
+ clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*").get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(1));
+ assertThat(clusterStateResponse.getState().metaData().index("foo").state(), equalTo(IndexMetaData.State.OPEN));
+
+ // expand_wildcards_closed should toggle return only closed index fuu
+ IndicesOptions expandCloseOptions = IndicesOptions.fromOptions(false, true, false, true);
+ clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*")
+ .setIndicesOptions(expandCloseOptions).get();
+ assertThat(clusterStateResponse.getState().metaData().indices().size(), is(1));
+ assertThat(clusterStateResponse.getState().metaData().index("fuu").state(), equalTo(IndexMetaData.State.CLOSE));
+
+ // ignore_unavailable set to true should not raise exception on fzzbzz
+ IndicesOptions ignoreUnavailabe = IndicesOptions.fromOptions(true, true, true, false);
+ clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("fzzbzz")
+ .setIndicesOptions(ignoreUnavailabe).get();
+ assertThat(clusterStateResponse.getState().metaData().indices().isEmpty(), is(true));
+
+ // empty wildcard expansion result should work when allowNoIndices is
+ // turned on
+ IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false);
+ clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("a*")
+ .setIndicesOptions(allowNoIndices).get();
+ assertThat(clusterStateResponse.getState().metaData().indices().isEmpty(), is(true));
+ }
+
+ @Test(expected=IndexMissingException.class)
+ public void testIndicesOptionsOnAllowNoIndicesFalse() throws Exception {
+ // empty wildcard expansion throws exception when allowNoIndices is turned off
+ IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, false, true, false);
+ client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("a*").setIndicesOptions(allowNoIndices).get();
+ }
+
+ @Test(expected=IndexMissingException.class)
+ public void testIndicesIgnoreUnavailableFalse() throws Exception {
+ // ignore_unavailable set to false throws exception when allowNoIndices is turned off
+ IndicesOptions allowNoIndices = IndicesOptions.fromOptions(false, true, true, false);
+ client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("fzzbzz").setIndicesOptions(allowNoIndices).get();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
new file mode 100644
index 0000000000..d02bf477fe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/SimpleDataNodesTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope= Scope.TEST, numDataNodes =0)
+public class SimpleDataNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDataNodes() throws Exception {
+ internalCluster().startNode(settingsBuilder().put("node.data", false).build());
+ client().admin().indices().create(createIndexRequest("test")).actionGet();
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ internalCluster().startNode(settingsBuilder().put("node.data", false).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ // still no shard should be allocated
+ try {
+ client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")).timeout(timeValueSeconds(1))).actionGet();
+ fail("no allocation should happen");
+ } catch (UnavailableShardsException e) {
+ // all is well
+ }
+
+ // now, start a node data, and see that it gets with shards
+ internalCluster().startNode(settingsBuilder().put("node.data", true).build());
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));
+
+ IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
new file mode 100644
index 0000000000..54bb8ffafc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@Slow
+public class SpecificMasterNodesTests extends ElasticsearchIntegrationTest {
+
+ protected final Settings.Builder settingsBuilder() {
+ return Settings.builder().put("discovery.type", "zen");
+ }
+
+ @Test
+ public void simpleOnlyMasterNodeElection() throws IOException {
+ logger.info("--> start data node / non master node");
+ internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node");
+ final String masterNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> stop master node");
+ internalCluster().stopCurrentMasterNode();
+
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+
+ logger.info("--> start master node");
+ final String nextMasterEligibleNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligibleNodeName));
+ }
+
+ @Test
+ public void electOnlyBetweenMasterNodes() throws IOException {
+ logger.info("--> start data node / non master node");
+ internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false).put("discovery.initial_state_timeout", "1s"));
+ try {
+ assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().masterNodeId(), nullValue());
+ fail("should not be able to find master");
+ } catch (MasterNotDiscoveredException e) {
+ // all is well, no master elected
+ }
+ logger.info("--> start master node (1)");
+ final String masterNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> start master node (2)");
+ final String nextMasterEligableNodeName = internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+ assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(masterNodeName));
+
+ logger.info("--> closing master node (1)");
+ internalCluster().stopCurrentMasterNode();
+ assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
+ assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().masterNode().name(), equalTo(nextMasterEligableNodeName));
+ }
+
+ /**
+ * Tests that putting custom default mapping and then putting a type mapping will have the default mapping merged
+ * to the type mapping.
+ */
+ @Test
+ public void testCustomDefaultMapping() throws Exception {
+ logger.info("--> start master node / non data");
+ internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ createIndex("test");
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ MappingMetaData defaultMapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("_default_");
+ assertThat(defaultMapping.getSourceAsMap().get("_timestamp"), notNullValue());
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("_default_").setSource("_timestamp", "enabled=true"));
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource("foo", "enabled=true"));
+ MappingMetaData type1Mapping = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test").getMappings().get("type1");
+ assertThat(type1Mapping.getSourceAsMap().get("_timestamp"), notNullValue());
+ }
+
+ @Test
+ public void testAliasFilterValidation() throws Exception {
+ logger.info("--> start master node / non data");
+ internalCluster().startNode(settingsBuilder().put("node.data", false).put("node.master", true));
+
+ logger.info("--> start data node / non master node");
+ internalCluster().startNode(settingsBuilder().put("node.data", true).put("node.master", false));
+
+ assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", \"properties\" : {\"field_a\" : { \"type\" : \"string\" },\"field_b\" :{ \"type\" : \"string\" }}}}}}"));
+ client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"))).get();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java b/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
new file mode 100644
index 0000000000..416bc90bd2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope= Scope.TEST, numDataNodes =0)
+public class UpdateSettingsValidationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testUpdateSettingsValidation() throws Exception {
+ List<String> nodes = internalCluster().startNodesAsync(
+ settingsBuilder().put("node.data", false).build(),
+ settingsBuilder().put("node.master", false).build(),
+ settingsBuilder().put("node.master", false).build()
+ ).get();
+ String master = nodes.get(0);
+ String node_1 = nodes.get(1);
+ String node_2 = nodes.get(2);
+
+ createIndex("test");
+ NumShards test = getNumShards("test");
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards));
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).execute().actionGet();
+ healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries));
+
+ try {
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.refresh_interval", "")).execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException ex) {
+ logger.info("Error message: [{}]", ex.getMessage());
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
new file mode 100644
index 0000000000..f226959dae
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = TEST, minNumDataNodes = 2)
+public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ //make sure that enough concurrent reroutes can happen at the same time
+ //we have a minimum of 2 nodes, and a maximum of 10 shards, thus 5 should be enough
+ .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5)
+ .build();
+ }
+
+ @Override
+ protected int minimumNumberOfShards() {
+ return cluster().numDataNodes();
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+
+ private void removePublishTimeout() {
+ //to test that the acknowledgement mechanism is working we better disable the wait for publish
+ //otherwise the operation is most likely acknowledged even if it doesn't support ack
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0")));
+ }
+
+ @Test
+ public void testClusterUpdateSettingsAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ // now that the cluster is stable, remove timeout
+ removePublishTimeout();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+ excludedNodeId = nodeInfo.getNode().id();
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertAcked(clusterUpdateSettingsResponse);
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ assertThat(clusterState.routingNodes().metaData().transientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assert clusterState.nodes() != null;
+ if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).id().equals(excludedNodeId)) {
+ //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged
+ //reroute happens as part of the update settings and we made sure no throttling comes into the picture via settings
+ assertThat(shardRouting.relocating(), equalTo(true));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testClusterUpdateSettingsNoAcknowledgement() {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("number_of_shards", between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
+ .put("number_of_replicas", 0)).get();
+ ensureGreen();
+
+ // now that the cluster is stable, remove timeout
+ removePublishTimeout();
+
+ NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().get();
+ String excludedNodeId = null;
+ for (NodeInfo nodeInfo : nodesInfo) {
+ if (nodeInfo.getNode().isDataNode()) {
+ excludedNodeId = nodeInfo.getNode().id();
+ break;
+ }
+ }
+ assertNotNull(excludedNodeId);
+
+ ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTimeout("0s")
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get();
+ assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(false));
+ assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId));
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+
+ @Test
+ public void testOpenIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+ removePublishTimeout();
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").setTimeout("0s").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(false));
+ ensureGreen("test"); // make sure that recovery from disk has completed, so that check index doesn't fail.
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
new file mode 100644
index 0000000000..ce444a1f3d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckTests.java
@@ -0,0 +1,409 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.ack;
+
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(minNumDataNodes = 2)
+public class AckTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ //to test that the acknowledgement mechanism is working we better disable the wait for publish
+ //otherwise the operation is most likely acknowledged even if it doesn't support ack
+ return Settings.builder().put(super.nodeSettings(nodeOrdinal))
+ .put(DiscoverySettings.PUBLISH_TIMEOUT, 0).build();
+ }
+
+ @Test
+ public void testUpdateSettingsAcknowledgement() {
+ createIndex("test");
+
+ assertAcked(client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put("refresh_interval", 9999, TimeUnit.MILLISECONDS)));
+
+ for (Client client : clients()) {
+ String refreshInterval = getLocalClusterState(client).metaData().index("test").settings().get("index.refresh_interval");
+ assertThat(refreshInterval, equalTo("9999ms"));
+ }
+ }
+
+ @Test
+ public void testUpdateSettingsNoAcknowledgement() {
+ createIndex("test");
+ UpdateSettingsResponse updateSettingsResponse = client().admin().indices().prepareUpdateSettings("test").setTimeout("0s")
+ .setSettings(Settings.builder().put("refresh_interval", 9999, TimeUnit.MILLISECONDS)).get();
+ assertThat(updateSettingsResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmerAcknowledgement() {
+ createIndex("test");
+ // make sure one shard is started so the search during put warmer will not fail
+ index("test", "type", "1", "f", 1);
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.get(0).name(), equalTo("custom_warmer"));
+ }
+ }
+
+ @Test
+ public void testPutWarmerNoAcknowledgement() throws InterruptedException {
+ createIndex("test");
+ // make sure one shard is started so the search during put warmer will not fail
+ index("test", "type", "1", "f", 1);
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer").setTimeout("0s")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(false));
+ /* Since we don't wait for the ack here we have to wait until the search request has been executed from the master
+ * otherwise the test infra might have already deleted the index and the search request fails on all shards causing
+ * the test to fail too. We simply wait until the the warmer has been installed and also clean it up afterwards.*/
+ assertTrue(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ if (getWarmersResponse.warmers().size() != 1) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }));
+ assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
+ }
+
+ @Test
+ public void testDeleteWarmerAcknowledgement() {
+ createIndex("test");
+ index("test", "type", "1", "f", 1);
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer"));
+
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testDeleteWarmerNoAcknowledgement() throws InterruptedException {
+ createIndex("test");
+ index("test", "type", "1", "f", 1);
+
+ assertAcked(client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery())));
+
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").setTimeout("0s").get();
+ assertFalse(deleteWarmerResponse.isAcknowledged());
+ assertTrue(awaitBusy(new Predicate<Object>() { // wait until they are all deleted
+ @Override
+ public boolean apply(Object input) {
+ for (Client client : clients()) {
+ GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get();
+ if (getWarmersResponse.warmers().size() > 0) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgement() throws InterruptedException {
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ ));
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().add(moveAllocationCommand));
+
+ for (Client client : clients()) {
+ ClusterState clusterState = getLocalClusterState(client);
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+ //if the shard that we wanted to move is still on the same node, it must be relocating
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.relocating(), equalTo(true));
+ }
+
+ }
+
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterState.routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.state(), anyOf(equalTo(ShardRoutingState.INITIALIZING), equalTo(ShardRoutingState.STARTED)));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+ }
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgement() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get();
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testClusterRerouteAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand));
+
+ //testing only on master with the latest cluster state as we didn't make any change thus we cannot guarantee that
+ //all nodes hold the same cluster state version. We only know there was no need to change anything, thus no need for ack on this update.
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ boolean found = false;
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.fromNode())) {
+ //the shard that we wanted to move is still on the same node, as we had dryRun flag
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ assertThat(mutableShardRouting.started(), equalTo(true));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
+ for (MutableShardRouting mutableShardRouting : clusterStateResponse.getState().routingNodes().routingNodeIter(moveAllocationCommand.toNode())) {
+ if (mutableShardRouting.shardId().equals(moveAllocationCommand.shardId())) {
+ fail("shard [" + mutableShardRouting + "] shouldn't be on node [" + moveAllocationCommand.toString() + "]");
+ }
+ }
+ }
+
+ @Test
+ public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedException {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, between(cluster().numDataNodes(), DEFAULT_MAX_NUM_SHARDS))
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)).get();
+ ensureGreen();
+
+ MoveAllocationCommand moveAllocationCommand = getAllocationCommand();
+
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get();
+ //acknowledged anyway as no changes were made
+ assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true));
+ }
+
+ private MoveAllocationCommand getAllocationCommand() {
+ String fromNodeId = null;
+ String toNodeId = null;
+ MutableShardRouting shardToBeMoved = null;
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ for (RoutingNode routingNode : clusterStateResponse.getState().routingNodes()) {
+ if (routingNode.node().isDataNode()) {
+ if (fromNodeId == null && routingNode.numberOfOwningShards() > 0) {
+ fromNodeId = routingNode.nodeId();
+ shardToBeMoved = routingNode.get(randomInt(routingNode.size() - 1));
+ } else {
+ toNodeId = routingNode.nodeId();
+ }
+
+ if (toNodeId != null && fromNodeId != null) {
+ break;
+ }
+ }
+ }
+
+ assertNotNull(fromNodeId);
+ assertNotNull(toNodeId);
+ assertNotNull(shardToBeMoved);
+
+ logger.info("==> going to move shard [{}] from [{}] to [{}]", shardToBeMoved, fromNodeId, toNodeId);
+ return new MoveAllocationCommand(shardToBeMoved.shardId(), fromNodeId, toNodeId);
+ }
+
+ @Test
+ public void testIndicesAliasesAcknowledgement() {
+ createIndex("test");
+
+ //testing acknowledgement when trying to submit an existing alias too
+ //in that case it would not make any change, but we are sure about the cluster state
+ //as the previous operation was acknowledged
+ for (int i = 0; i < 2; i++) {
+ assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias"));
+
+ for (Client client : clients()) {
+ AliasMetaData aliasMetaData = getLocalClusterState(client).metaData().aliases().get("alias").get("test");
+ assertThat(aliasMetaData.alias(), equalTo("alias"));
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesAliasesNoAcknowledgement() {
+ createIndex("test");
+
+ IndicesAliasesResponse indicesAliasesResponse = client().admin().indices().prepareAliases().addAlias("test", "alias").setTimeout("0s").get();
+ assertThat(indicesAliasesResponse.isAcknowledged(), equalTo(false));
+ }
+
+ public void testCloseIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(State.CLOSE));
+ }
+ }
+
+ @Test
+ public void testCloseIndexNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").setTimeout("0s").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testOpenIndexAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertAcked(client().admin().indices().prepareOpen("test"));
+
+ for (Client client : clients()) {
+ IndexMetaData indexMetaData = getLocalClusterState(client).metaData().indices().get("test");
+ assertThat(indexMetaData.getState(), equalTo(State.OPEN));
+ }
+ }
+
+ @Test
+ public void testPutMappingAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed"));
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
+ }
+ }
+
+ @Test
+ public void testPutMappingNoAcknowledgement() {
+ createIndex("test");
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=string,index=not_analyzed").setTimeout("0s").get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
+ }
+
+ @Test
+ public void testCreateIndexAcknowledgement() {
+ createIndex("test");
+
+ for (Client client : clients()) {
+ assertThat(getLocalClusterState(client).metaData().indices().containsKey("test"), equalTo(true));
+ }
+
+ //let's wait for green, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ //but we do want to check that the new index is on all nodes cluster state even before green
+ ensureGreen();
+ }
+
+ @Test
+ public void testCreateIndexNoAcknowledgement() {
+ CreateIndexResponse createIndexResponse = client().admin().indices().prepareCreate("test").setTimeout("0s").get();
+ assertThat(createIndexResponse.isAcknowledged(), equalTo(false));
+
+ //let's wait for green, otherwise there can be issues with after test checks (mock directory wrapper etc.)
+ ensureGreen();
+ }
+
+ private static ClusterState getLocalClusterState(Client client) {
+ return client.admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000000..19a8ac48fc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import com.carrotsearch.hppc.ObjectIntHashMap;
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes =0, minNumDataNodes = 2)
+public class AwarenessAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Override
+ protected int numberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testSimpleAwareness() throws Exception {
+ Settings commonSettings = Settings.settingsBuilder()
+ .put("cluster.routing.schedule", "10ms")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build();
+
+
+ logger.info("--> starting 2 nodes on the same rack");
+ internalCluster().startNodesAsync(2, Settings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_1").build()).get();
+
+ createIndex("test1");
+ createIndex("test2");
+
+ NumShards test1 = getNumShards("test1");
+ NumShards test2 = getNumShards("test2");
+ //no replicas will be allocated as both indices end up on a single node
+ final int totalPrimaries = test1.numPrimaries + test2.numPrimaries;
+
+ ensureGreen();
+
+ logger.info("--> starting 1 node on a different rack");
+ final String node3 = internalCluster().startNode(Settings.settingsBuilder().put(commonSettings).put("node.rack_id", "rack_2").build());
+
+ // On slow machines the initial relocation might be delayed
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+
+ logger.info("--> waiting for no relocation");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForRelocatingShards(0).get();
+ if (clusterHealth.isTimedOut()) {
+ return false;
+ }
+
+ logger.info("--> checking current state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ // verify that we have all the primaries on node3
+ ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ return counts.get(node3) == totalPrimaries;
+ }
+ }, 10, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZones() throws Exception {
+ Settings commonSettings = Settings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+ logger.info("--> starting 4 nodes on different zones");
+ List<String> nodes = internalCluster().startNodesAsync(
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "a").put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3).build(),
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "b").put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3).build(),
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "b").put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3).build(),
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "a").put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3).build()
+ ).get();
+ String A_0 = nodes.get(0);
+ String B_0 = nodes.get(1);
+ String B_1 = nodes.get(2);
+ String A_1 = nodes.get(3);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_1), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(A_0), anyOf(equalTo(2),equalTo(3)));
+ assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
+ }
+
+ @Test
+ @Slow
+ public void testAwarenessZonesIncrementalNodes() throws Exception {
+ Settings commonSettings = Settings.settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .build();
+
+ logger.info("--> starting 2 nodes on zones 'a' & 'b'");
+ List<String> nodes = internalCluster().startNodesAsync(
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "a").build(),
+ Settings.settingsBuilder().put(commonSettings).put("node.zone", "b").build()
+ ).get();
+ String A_0 = nodes.get(0);
+ String B_0 = nodes.get(1);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 5)
+ .put("index.number_of_replicas", 1)).execute().actionGet();
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(5));
+ logger.info("--> starting another node in zone 'b'");
+
+ String B_1 = internalCluster().startNode(Settings.settingsBuilder().put(commonSettings).put("node.zone", "b").build());
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntHashMap<>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+
+ String noZoneNode = internalCluster().startNode();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+ client().admin().cluster().prepareReroute().get();
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntHashMap<>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(5));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.containsKey(noZoneNode), equalTo(false));
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
+
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForRelocatingShards(0).execute().actionGet();
+
+ assertThat(health.isTimedOut(), equalTo(false));
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ counts = new ObjectIntHashMap<>();
+
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).name(), 1);
+ }
+ }
+ }
+
+ assertThat(counts.get(A_0), equalTo(3));
+ assertThat(counts.get(B_0), equalTo(3));
+ assertThat(counts.get(B_1), equalTo(2));
+ assertThat(counts.get(noZoneNode), equalTo(2));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
new file mode 100644
index 0000000000..b5181b6488
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
+import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * Integration tests for the cluster reroute API (allocation commands, dry runs, explain, blocks). */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class ClusterRerouteTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRerouteTests.class);
+
+ @Test
+ public void rerouteWithCommands_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithCommands_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build();
+ rerouteWithCommands(commonSettings);
+ }
+
+ private void rerouteWithCommands(Settings commonSettings) throws Exception {
+ List<String> nodesIds = internalCluster().startNodesAsync(2, commonSettings).get();
+ final String node_1 = nodesIds.get(0);
+ final String node_2 = nodesIds.get(1);
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, *under dry_run*");
+ state = client().admin().cluster().prepareReroute()
+ .setExplain(randomBoolean())
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .setDryRun(true)
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("--> get the state, verify nothing changed because of the dry run");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .setExplain(randomBoolean())
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ logger.info("--> move shard 1 primary from node1 to node2");
+ state = client().admin().cluster().prepareReroute()
+ .setExplain(randomBoolean())
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet().getState();
+
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_2).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
+ @Test
+ public void rerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception {
+ Settings commonSettings = settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build();
+ rerouteWithAllocateLocalGateway(commonSettings);
+ }
+
+ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
+ logger.info("--> starting 2 nodes");
+ String node_1 = internalCluster().startNode(commonSettings);
+ internalCluster().startNode(commonSettings);
+ assertThat(cluster().size(), equalTo(2));
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .execute().actionGet();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(2));
+
+ logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
+ state = client().admin().cluster().prepareReroute()
+ .setExplain(randomBoolean())
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
+
+ logger.info("--> closing all nodes");
+ Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId("test", 0));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
+ internalCluster().closeNonSharedNodes(false); // don't wipe data directories the index needs to be there!
+
+ logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
+ assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // verify again after cluster was shut down
+ IOUtils.rm(shardLocation);
+
+ logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
+ node_1 = internalCluster().startNode(commonSettings);
+ internalCluster().startNode(commonSettings);
+ // wait a bit for the cluster to realize that the shard is not there...
+ // TODO can we get around this? the cluster is RED, so what do we wait for?
+ client().admin().cluster().prepareReroute().get();
+ assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
+ logger.info("--> explicitly allocate primary");
+ state = client().admin().cluster().prepareReroute()
+ .setExplain(randomBoolean())
+ .add(new AllocateAllocationCommand(new ShardId("test", 0), node_1, true))
+ .execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> get the state, verify shard 1 primary allocated");
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(1));
+ assertThat(state.routingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
+
+ }
+
+ @Test
+ public void rerouteExplain() {
+ Settings commonSettings = settingsBuilder().build();
+
+ logger.info("--> starting a node");
+ String node_1 = internalCluster().startNode(commonSettings);
+
+ assertThat(cluster().size(), equalTo(1));
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("1").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> create an index with 1 shard");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ ensureGreen("test");
+
+ logger.info("--> disable allocation");
+ Settings newSettings = settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet();
+
+ logger.info("--> starting a second node");
+ String node_2 = internalCluster().startNode(commonSettings);
+ assertThat(cluster().size(), equalTo(2));
+ healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> try to move the shard from node1 to node2");
+ MoveAllocationCommand cmd = new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2);
+ ClusterRerouteResponse resp = client().admin().cluster().prepareReroute().add(cmd).setExplain(true).execute().actionGet();
+ RoutingExplanations e = resp.getExplanations();
+ assertThat(e.explanations().size(), equalTo(1));
+ RerouteExplanation explanation = e.explanations().get(0);
+ assertThat(explanation.command().name(), equalTo(cmd.name()));
+ assertThat(((MoveAllocationCommand)explanation.command()).shardId(), equalTo(cmd.shardId()));
+ assertThat(((MoveAllocationCommand)explanation.command()).fromNode(), equalTo(cmd.fromNode()));
+ assertThat(((MoveAllocationCommand)explanation.command()).toNode(), equalTo(cmd.toNode()));
+ assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES));
+ }
+
+ @Test
+ public void testClusterRerouteWithBlocks() throws Exception {
+ List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+
+ logger.info("--> create an index with 1 shard and 0 replicas");
+ assertAcked(prepareCreate("test-blocks").setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)));
+ ensureGreen("test-blocks");
+
+ logger.info("--> check that the index has 1 shard");
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ List<ShardRouting> shards = state.routingTable().allShards("test-blocks");
+ assertThat(shards, hasSize(1));
+
+ logger.info("--> check that the shard is allocated");
+ ShardRouting shard = shards.get(0);
+ assertThat(shard.assignedToNode(), equalTo(true));
+
+ logger.info("--> retrieve the node where the shard is allocated");
+ DiscoveryNode node = state.nodes().resolveNode(shard.currentNodeId());
+ assertNotNull(node);
+
+ // toggle is used to move the shard from one node to another
+ int toggle = nodesIds.indexOf(node.getName());
+
+ // Rerouting shards is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test-blocks", blockSetting);
+ assertAcked(client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test-blocks", 0), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ } finally {
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+
+ // Rerouting shards is blocked when the cluster is read only
+ try {
+ setClusterReadOnly(true);
+ assertBlocked(client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test-blocks", 0), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); // shard 0: the index was created with a single shard, so id 1 does not exist
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
new file mode 100644
index 0000000000..dbc97dddfb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationTests.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope= Scope.TEST, numDataNodes =0)
+public class FilteringAllocationTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(FilteringAllocationTests.class);
+
+ @Test
+ public void testDecommissionNodeNoReplicas() throws Exception {
+ logger.info("--> starting 2 nodes");
+ List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+ final String node_0 = nodesIds.get(0);
+ final String node_1 = nodesIds.get(1);
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+ ensureGreen();
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+
+ logger.info("--> decommission the second node");
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.exclude._name", node_1))
+ .execute().actionGet();
+ waitForRelocation();
+
+ logger.info("--> verify all are allocated on node1 now");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_0));
+ }
+ }
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ }
+
+ @Test
+ public void testDisablingAllocationFiltering() throws Exception {
+ logger.info("--> starting 2 nodes");
+ List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+ final String node_0 = nodesIds.get(0);
+ final String node_1 = nodesIds.get(1);
+ assertThat(cluster().size(), equalTo(2));
+
+ logger.info("--> creating an index with no replicas");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ ensureGreen();
+
+ logger.info("--> index some data");
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(100l));
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
+ int numShardsOnNode0 = 0;
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (node_0.equals(clusterState.nodes().get(shardRouting.currentNodeId()).name())) { // count shards on the node we are about to exclude (literal "node1" never matched a real node name)
+ numShardsOnNode0++;
+ }
+ }
+ }
+
+ if (numShardsOnNode0 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(settingsBuilder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode0)).execute().actionGet();
+ // make sure we can recover all the nodes at once otherwise we might run into a state where one of the shards has not yet started relocating
+ // but we already fired up the request to wait for 0 relocating shards.
+ }
+ logger.info("--> remove index from the first node");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", node_0))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify all shards are allocated on node_1 now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ indexRoutingTable = clusterState.routingTable().index("test");
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ assertThat(clusterState.nodes().get(shardRouting.currentNodeId()).name(), equalTo(node_1));
+ }
+ }
+
+ logger.info("--> disable allocation filtering ");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(settingsBuilder().put("index.routing.allocation.exclude._name", ""))
+ .execute().actionGet();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+
+ logger.info("--> verify that there are shards allocated on both nodes now");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(clusterState.routingTable().index("test").numberOfNodesShardsAreAllocatedOn(), equalTo(2));
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
new file mode 100644
index 0000000000..ac5741e93f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ShardsAllocatorModuleTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocatorModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.instanceOf;
+
+@ClusterScope(scope= Scope.TEST, numDataNodes =0)
+public class ShardsAllocatorModuleTests extends ElasticsearchIntegrationTest {
+
+ public void testLoadDefaultShardsAllocator() throws IOException {
+ assertAllocatorInstance(Settings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class);
+ }
+
+ public void testLoadByShortKeyShardsAllocator() throws IOException {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "even_shard") // legacy just to make sure we don't barf
+ .build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, ShardsAllocatorModule.BALANCED_ALLOCATOR_KEY).build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+ }
+
+ public void testLoadByClassNameShardsAllocator() throws IOException {
+ Settings build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY, "BalancedShards").build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+
+ build = settingsBuilder().put(ShardsAllocatorModule.TYPE_KEY,
+ "org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator").build();
+ assertAllocatorInstance(build, BalancedShardsAllocator.class);
+ }
+
+ private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) throws IOException {
+ while (cluster().size() != 0) {
+ internalCluster().stopRandomDataNode();
+ }
+ internalCluster().startNode(settings);
+ ShardsAllocator instance = internalCluster().getInstance(ShardsAllocator.class);
+ assertThat(instance, instanceOf(clazz));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
new file mode 100644
index 0000000000..06659cbd67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.allocation;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleAllocationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ return 3;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 1;
+ }
+
+ /**
+ * Test for
+ * https://groups.google.com/d/msg/elasticsearch/y-SY_HyoB-8/EZdfNt9VO44J
+ */
+ @Test
+ public void testSaneAllocation() {
+ assertAcked(prepareCreate("test", 3));
+ ensureGreen();
+
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(2));
+ }
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet();
+ ensureGreen();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(1));
+ }
+ }
+
+ // create another index
+ assertAcked(prepareCreate("test2", 3));
+ ensureGreen();
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet();
+ ensureGreen();
+ state = client().admin().cluster().prepareState().execute().actionGet().getState();
+
+ assertThat(state.routingNodes().unassigned().size(), equalTo(0));
+ for (RoutingNode node : state.routingNodes()) {
+ if (!node.isEmpty()) {
+ assertThat(node.size(), equalTo(4));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java
new file mode 100644
index 0000000000..659f552e0a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.block;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.EnumSet;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class ClusterBlockTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSerialization() throws Exception {
+ int iterations = randomIntBetween(10, 100);
+ for (int i = 0; i < iterations; i++) {
+ // Get a random version
+ Version version = randomVersion(random());
+
+ // Get a random list of ClusterBlockLevels
+ EnumSet<ClusterBlockLevel> levels = EnumSet.noneOf(ClusterBlockLevel.class);
+ int nbLevels = randomIntBetween(1, ClusterBlockLevel.values().length);
+ for (int j = 0; j < nbLevels; j++) {
+ levels.add(randomFrom(ClusterBlockLevel.values()));
+ }
+
+ ClusterBlock clusterBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(),
+ randomBoolean(), randomFrom(RestStatus.values()), levels);
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(version);
+ clusterBlock.writeTo(out);
+
+ StreamInput in = StreamInput.wrap(out.bytes());
+ in.setVersion(version);
+ ClusterBlock result = ClusterBlock.readClusterBlock(in);
+
+ assertThat(result.id(), equalTo(clusterBlock.id()));
+ assertThat(result.status(), equalTo(clusterBlock.status()));
+ assertThat(result.description(), equalTo(clusterBlock.description()));
+ assertThat(result.retryable(), equalTo(clusterBlock.retryable()));
+ assertThat(result.disableStatePersistence(), equalTo(clusterBlock.disableStatePersistence()));
+ assertArrayEquals(result.levels().toArray(), clusterBlock.levels().toArray());
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
new file mode 100644
index 0000000000..18ebbc8c43
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MappingMetaDataParserTests.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MappingMetaDataParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParseIdAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testFailIfIdIsNoValue() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startArray("id").value("id").endArray().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+ // bogus its an array
+ }
+
+ bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("id").field("x", "id").endObject().field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ parseContext = md.createParseContext(null, "routing_value", "1");
+ try {
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ fail();
+ } catch (MapperParsingException ex) {
+ // bogus its an object
+ }
+ }
+
+ @Test
+ public void testParseRoutingAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "1");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampAlone() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseTimestampEquals() throws Exception {
+ MappingMetaData md1 = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ MappingMetaData md2 = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ assertThat(md1, equalTo(md2));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestamp() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "routing"),
+ new MappingMetaData.Timestamp(true, "timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value", "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.idResolved(), equalTo(true));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRoutingWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext("id", null, "2");
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), nullValue());
+ assertThat(parseContext.timestampResolved(), equalTo(false));
+ }
+
+ @Test
+ public void testParseTimestampWithPath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj2.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj2").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, "routing_value1", null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.idResolved(), equalTo(false));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.routingResolved(), equalTo(true));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ assertThat(parseContext.timestampResolved(), equalTo(true));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePath() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").field("routing", "routing_value").field("timestamp", "1").endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithinSamePathAndMoreLevels() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.obj0.id"),
+ new MappingMetaData.Routing(true, "obj1.obj2.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.obj3.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1")
+ .startObject("obj0")
+ .field("id", "id")
+ .endObject()
+ .startObject("obj2")
+ .field("routing", "routing_value")
+ .endObject()
+ .startObject("obj3")
+ .field("timestamp", "1")
+ .endObject()
+ .endObject()
+ .startObject("obj2").field("field1", "value1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+
+ @Test
+ public void testParseIdAndRoutingAndTimestampWithSameRepeatedObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("obj1.id"),
+ new MappingMetaData.Routing(true, "obj1.routing"),
+ new MappingMetaData.Timestamp(true, "obj1.timestamp", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+ byte[] bytes = jsonBuilder().startObject().field("field1", "value1").field("field2", "value2")
+ .startObject("obj0").field("field1", "value1").field("field2", "value2").endObject()
+ .startObject("obj1").field("id", "id").endObject()
+ .startObject("obj1").field("routing", "routing_value").endObject()
+ .startObject("obj1").field("timestamp", "1").endObject()
+ .endObject().bytes().toBytes();
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("id"));
+ assertThat(parseContext.routing(), equalTo("routing_value"));
+ assertThat(parseContext.timestamp(), equalTo("1"));
+ }
+
+ //
+ @Test
+ public void testParseIdRoutingTimestampWithRepeatedField() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("field1"),
+ new MappingMetaData.Routing(true, "field1.field1"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .field("field1", "bar")
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), equalTo("foo"));
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseNoIdRoutingWithRepeatedFieldAndObject() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id("id"),
+ new MappingMetaData.Routing(true, "field1.field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), nullValue());
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+
+ @Test
+ public void testParseRoutingWithRepeatedFieldAndValidRouting() throws Exception {
+ MappingMetaData md = new MappingMetaData("type1", new CompressedXContent("{}"),
+ new MappingMetaData.Id(null),
+ new MappingMetaData.Routing(true, "field1.field2"),
+ new MappingMetaData.Timestamp(true, "field1", "dateOptionalTime", TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP, null), false);
+
+ byte[] bytes = jsonBuilder().startObject()
+ .field("aaa", "wr")
+ .array("arr1", "1", "2", "3")
+ .field("field1", "foo")
+ .startObject("field1").field("field2", "bar").endObject()
+ .field("test", "value")
+ .field("zzz", "wr")
+ .endObject().bytes().toBytes();
+
+ MappingMetaData.ParseContext parseContext = md.createParseContext(null, null, null);
+ md.parse(XContentFactory.xContent(bytes).createParser(bytes), parseContext);
+ assertThat(parseContext.id(), nullValue());
+ assertThat(parseContext.routing(), equalTo("bar"));
+ assertThat(parseContext.timestamp(), equalTo("foo"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
new file mode 100644
index 0000000000..7d780e95aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -0,0 +1,895 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.IndexMetaData.State;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.indices.IndexClosedException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIndexOptions_strict() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar")))
+ .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar")))
+ .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.strictExpandOpen(), IndicesOptions.strictExpand()};
+
+ for (IndicesOptions options : indicesOptions) {
+ String[] results = md.concreteIndices(options, "foo");
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ try {
+ md.concreteIndices(options, "bar");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("bar"));
+ }
+
+ results = md.concreteIndices(options, "foofoo", "foobar");
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(options, "foofoobar");
+ assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")),
+ new HashSet<>(Arrays.asList(results)));
+
+ try {
+ md.concreteIndices(options, "bar");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("bar"));
+ }
+
+ try {
+ md.concreteIndices(options, "foo", "bar");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("bar"));
+ }
+
+ results = md.concreteIndices(options, "barbaz", "foobar");
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ try {
+ md.concreteIndices(options, "barbaz", "bar");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("bar"));
+ }
+
+ results = md.concreteIndices(options, "baz*");
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(options, "foo", "baz*");
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ String[] results = md.concreteIndices(IndicesOptions.strictExpandOpen(), Strings.EMPTY_ARRAY);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(IndicesOptions.strictExpandOpen(), null);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(IndicesOptions.strictExpand(), Strings.EMPTY_ARRAY);
+ assertEquals(4, results.length);
+
+ results = md.concreteIndices(IndicesOptions.strictExpand(), null);
+ assertEquals(4, results.length);
+
+ results = md.concreteIndices(IndicesOptions.strictExpandOpen(), "foofoo*");
+ assertEquals(3, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo"));
+
+ results = md.concreteIndices(IndicesOptions.strictExpand(), "foofoo*");
+ assertEquals(4, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed"));
+ }
+
+ @Test
+ public void testIndexOptions_lenient() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar")))
+ .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar")))
+ .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions lenientExpand = IndicesOptions.fromOptions(true, true, true, true);
+ IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand};
+
+ for (IndicesOptions options : indicesOptions) {
+ String[] results = md.concreteIndices(options, "foo");
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ results = md.concreteIndices(options, "bar");
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(options, "foofoo", "foobar");
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(options, "foofoobar");
+ assertEquals(2, results.length);
+ assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")),
+ new HashSet<>(Arrays.asList(results)));
+
+ results = md.concreteIndices(options, "foo", "bar");
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo"));
+
+ results = md.concreteIndices(options, "barbaz", "foobar");
+ assertEquals(2, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
+
+ results = md.concreteIndices(options, "barbaz", "bar");
+ assertEquals(1, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foofoo"));
+
+ results = md.concreteIndices(options, "baz*");
+ assertThat(results, emptyArray());
+
+ results = md.concreteIndices(options, "foo", "baz*");
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+ }
+
+ String[] results = md.concreteIndices(IndicesOptions.lenientExpandOpen(), Strings.EMPTY_ARRAY);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(lenientExpand, Strings.EMPTY_ARRAY);
+ assertEquals(4, results.length);
+
+ results = md.concreteIndices(IndicesOptions.lenientExpandOpen(), "foofoo*");
+ assertEquals(3, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo"));
+
+ results = md.concreteIndices(lenientExpand, "foofoo*");
+ assertEquals(4, results.length);
+ assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed"));
+ }
+
+ @Test
+ public void testIndexOptions_allowUnavailableDisallowEmpty() {
+ MetaData.Builder mdBuilder = MetaData.builder()
+ .put(indexBuilder("foo"))
+ .put(indexBuilder("foobar"))
+ .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE))
+ .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+ MetaData md = mdBuilder.build();
+
+ IndicesOptions expandOpen = IndicesOptions.fromOptions(true, false, true, false);
+ IndicesOptions expand = IndicesOptions.fromOptions(true, false, true, true);
+ IndicesOptions[] indicesOptions = new IndicesOptions[]{expandOpen, expand};
+
+ for (IndicesOptions options : indicesOptions) {
+ String[] results = md.concreteIndices(options, "foo");
+ assertEquals(1, results.length);
+ assertEquals("foo", results[0]);
+
+ try {
+ md.concreteIndices(options, "bar");
+ fail();
+ } catch(IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("bar"));
+ }
+
+ try {
+ md.concreteIndices(options, "baz*");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("baz*"));
+ }
+
+ try {
+ md.concreteIndices(options, "foo", "baz*");
+ fail();
+ } catch (IndexMissingException e) {
+ assertThat(e.index().name(), equalTo("baz*"));
+ }
+ }
+
+ String[] results = md.concreteIndices(expandOpen, Strings.EMPTY_ARRAY);
+ assertEquals(3, results.length);
+
+ results = md.concreteIndices(expand, Strings.EMPTY_ARRAY);
+ assertEquals(4, results.length);
+ }
+
+    @Test
+    public void testIndexOptions_wildcardExpansion() {
+        // Cluster with one closed index ("foo"), one open index ("bar") and one open
+        // index ("foobar") that carries the alias "barbaz".
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("foo").state(IndexMetaData.State.CLOSE))
+                .put(indexBuilder("bar"))
+                .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("barbaz")));
+        MetaData md = mdBuilder.build();
+
+        // Only closed (fromOptions args here: ignoreUnavailable, allowNoIndices,
+        // expandWildcardsOpen, expandWildcardsClosed)
+        IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true);
+        String[] results = md.concreteIndices(options, Strings.EMPTY_ARRAY);
+        assertEquals(1, results.length);
+        assertEquals("foo", results[0]);
+
+        results = md.concreteIndices(options, "foo*");
+        assertEquals(1, results.length);
+        assertEquals("foo", results[0]);
+
+        // no wildcards, so wildcard expansion doesn't apply
+        results = md.concreteIndices(options, "bar");
+        assertEquals(1, results.length);
+        assertEquals("bar", results[0]);
+
+        // Only open
+        options = IndicesOptions.fromOptions(false, true, true, false);
+        results = md.concreteIndices(options, Strings.EMPTY_ARRAY);
+        assertEquals(2, results.length);
+        assertThat(results, arrayContainingInAnyOrder("bar", "foobar"));
+
+        results = md.concreteIndices(options, "foo*");
+        assertEquals(1, results.length);
+        assertEquals("foobar", results[0]);
+
+        results = md.concreteIndices(options, "bar");
+        assertEquals(1, results.length);
+        assertEquals("bar", results[0]);
+
+        // Open and closed
+        options = IndicesOptions.fromOptions(false, true, true, true);
+        results = md.concreteIndices(options, Strings.EMPTY_ARRAY);
+        assertEquals(3, results.length);
+        assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo"));
+
+        results = md.concreteIndices(options, "foo*");
+        assertEquals(2, results.length);
+        assertThat(results, arrayContainingInAnyOrder("foobar", "foo"));
+
+        results = md.concreteIndices(options, "bar");
+        assertEquals(1, results.length);
+        assertEquals("bar", results[0]);
+
+        // exclusion patterns ("-" prefix) remove the matching indices from the result
+        results = md.concreteIndices(options, "-foo*");
+        assertEquals(1, results.length);
+        assertEquals("bar", results[0]);
+
+        results = md.concreteIndices(options, "-*");
+        assertEquals(0, results.length);
+
+        // with allowNoIndices=false an empty resolution must throw
+        options = IndicesOptions.fromOptions(false, false, true, true);
+        try {
+            md.concreteIndices(options, "-*");
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("[-*]"));
+        }
+    }
+
+    @Test
+    public void testIndexOptions_noExpandWildcards() {
+        // With wildcard expansion disabled, a wildcard pattern either resolves to
+        // nothing or triggers IndexMissingException, depending on allowNoIndices /
+        // ignoreUnavailable. Aliases still resolve to their backing indices.
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar")))
+                .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar")))
+                .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE))
+                .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+        MetaData md = mdBuilder.build();
+
+        //ignore unavailable and allow no indices
+        {
+            IndicesOptions noExpandLenient = IndicesOptions.fromOptions(true, true, false, false);
+
+            String[] results = md.concreteIndices(noExpandLenient, "baz*");
+            assertThat(results, emptyArray());
+
+            results = md.concreteIndices(noExpandLenient, "foo", "baz*");
+            assertEquals(1, results.length);
+            assertEquals("foo", results[0]);
+
+            // the alias expands to both indices it points at
+            results = md.concreteIndices(noExpandLenient, "foofoobar");
+            assertEquals(2, results.length);
+            assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
+
+            // null and empty index lists both resolve to nothing under these options
+            results = md.concreteIndices(noExpandLenient, null);
+            assertEquals(0, results.length);
+
+            results = md.concreteIndices(noExpandLenient, Strings.EMPTY_ARRAY);
+            assertEquals(0, results.length);
+        }
+
+        //ignore unavailable but don't allow no indices
+        {
+            IndicesOptions noExpandDisallowEmpty = IndicesOptions.fromOptions(true, false, false, false);
+
+            try {
+                md.concreteIndices(noExpandDisallowEmpty, "baz*");
+                fail();
+            } catch (IndexMissingException e) {
+                assertThat(e.index().name(), equalTo("baz*"));
+            }
+
+            String[] results = md.concreteIndices(noExpandDisallowEmpty, "foo", "baz*");
+            assertEquals(1, results.length);
+            assertEquals("foo", results[0]);
+
+            results = md.concreteIndices(noExpandDisallowEmpty, "foofoobar");
+            assertEquals(2, results.length);
+            assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
+        }
+
+        //error on unavailable but allow no indices
+        {
+            IndicesOptions noExpandErrorUnavailable = IndicesOptions.fromOptions(false, true, false, false);
+
+            String[] results = md.concreteIndices(noExpandErrorUnavailable, "baz*");
+            assertThat(results, emptyArray());
+
+            try {
+                md.concreteIndices(noExpandErrorUnavailable, "foo", "baz*");
+                fail();
+            } catch (IndexMissingException e) {
+                assertThat(e.index().name(), equalTo("baz*"));
+            }
+
+            results = md.concreteIndices(noExpandErrorUnavailable, "foofoobar");
+            assertEquals(2, results.length);
+            assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
+        }
+
+        //error on both unavailable and no indices
+        {
+            IndicesOptions noExpandStrict = IndicesOptions.fromOptions(false, false, false, false);
+
+            try {
+                md.concreteIndices(noExpandStrict, "baz*");
+                fail();
+            } catch (IndexMissingException e) {
+                assertThat(e.index().name(), equalTo("baz*"));
+            }
+
+            try {
+                md.concreteIndices(noExpandStrict, "foo", "baz*");
+                fail();
+            } catch (IndexMissingException e) {
+                assertThat(e.index().name(), equalTo("baz*"));
+            }
+
+            String[] results = md.concreteIndices(noExpandStrict, "foofoobar");
+            assertEquals(2, results.length);
+            assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
+        }
+    }
+
+    @Test
+    public void testIndexOptions_singleIndexNoExpandWildcards() {
+        // strictSingleIndexNoExpandForbidClosed: no wildcard expansion, every name
+        // must resolve to exactly one open index.
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("foo").putAlias(AliasMetaData.builder("foofoobar")))
+                .put(indexBuilder("foobar").putAlias(AliasMetaData.builder("foofoobar")))
+                .put(indexBuilder("foofoo-closed").state(IndexMetaData.State.CLOSE))
+                .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
+        MetaData md = mdBuilder.build();
+
+        //error on both unavailable and no indices + every alias needs to expand to a single index
+
+        try {
+            md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "baz*");
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("baz*"));
+        }
+
+        try {
+            md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "baz*");
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("baz*"));
+        }
+
+        // "foofoobar" points at two indices, which is rejected under these options
+        try {
+            md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoobar");
+            fail();
+        } catch(IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));
+        }
+
+        try {
+            md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "foofoobar");
+            fail();
+        } catch(IllegalArgumentException e) {
+            assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));
+        }
+
+        // closed indices are forbidden under these options
+        try {
+            md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoo-closed", "foofoobar");
+            fail();
+        } catch(IndexClosedException e) {
+            assertThat(e.getMessage(), equalTo("closed"));
+            assertEquals(e.index().getName(), "foofoo-closed");
+        }
+
+        // "barbaz" points at exactly one open index ("foofoo"), so this succeeds
+        String[] results = md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "barbaz");
+        assertEquals(2, results.length);
+        assertThat(results, arrayContainingInAnyOrder("foo", "foofoo"));
+    }
+
+    @Test
+    public void testIndexOptions_emptyCluster() {
+        // Resolving index expressions against an empty cluster state.
+        MetaData md = MetaData.builder().build();
+        IndicesOptions options = IndicesOptions.strictExpandOpen();
+
+        // strict: explicit names must exist, wildcards may resolve to nothing
+        String[] results = md.concreteIndices(options, Strings.EMPTY_ARRAY);
+        assertThat(results, emptyArray());
+        try {
+            md.concreteIndices(options, "foo");
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("foo"));
+        }
+        results = md.concreteIndices(options, "foo*");
+        assertThat(results, emptyArray());
+        try {
+            md.concreteIndices(options, "foo*", "bar");
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("bar"));
+        }
+
+        // lenient: everything resolves to an empty result
+        options = IndicesOptions.lenientExpandOpen();
+        results = md.concreteIndices(options, Strings.EMPTY_ARRAY);
+        assertThat(results, emptyArray());
+        results = md.concreteIndices(options, "foo");
+        assertThat(results, emptyArray());
+        results = md.concreteIndices(options, "foo*");
+        assertThat(results, emptyArray());
+        results = md.concreteIndices(options, "foo*", "bar");
+        assertThat(results, emptyArray());
+
+        // allowNoIndices=false: resolving _all on an empty cluster must throw.
+        // Added the previously missing fail() — without it this block silently
+        // passed even when no exception was thrown.
+        options = IndicesOptions.fromOptions(true, false, true, false);
+        try {
+            md.concreteIndices(options, Strings.EMPTY_ARRAY);
+            fail();
+        } catch (IndexMissingException e) {
+            assertThat(e.index().name(), equalTo("_all"));
+        }
+    }
+
+    @Test
+    public void testConvertWildcardsJustIndicesTests() {
+        // convertFromWildcards with plain index names and wildcard patterns (no aliases).
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX"))
+                .put(indexBuilder("testXYY"))
+                .put(indexBuilder("testYYY"))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testYYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testXXX", "ku*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "kuku")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"test*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*", "kuku"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
+    }
+
+    @Test
+    public void testConvertWildcardsTests() {
+        // convertFromWildcards with alias names, exclusions ("-") and inclusions ("+").
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX").putAlias(AliasMetaData.builder("alias1")).putAlias(AliasMetaData.builder("alias2")))
+                .put(indexBuilder("testXYY").putAlias(AliasMetaData.builder("alias2")))
+                .put(indexBuilder("testYYY").putAlias(AliasMetaData.builder("alias3")))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        // wildcards matching alias names are kept as alias names, not expanded to indices
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testYY*", "alias*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("alias1", "alias2", "alias3", "testYYY")));
+        // a leading exclusion implicitly starts from all indices
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"-kuku"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"+test*", "-testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY")));
+        // order of +patterns does not affect the final set
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testX*", "+testYYY"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"+testYYY", "+testX*"}, IndicesOptions.lenientExpandOpen())), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+    }
+
+    @Test
+    public void testConvertWildcardsOpenClosedIndicesTests() {
+        // convertFromWildcards must honor the expand-open / expand-closed flags.
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX").state(State.OPEN))
+                .put(indexBuilder("testXXY").state(State.OPEN))
+                .put(indexBuilder("testXYY").state(State.CLOSE))
+                .put(indexBuilder("testYYY").state(State.OPEN))
+                .put(indexBuilder("testYYX").state(State.CLOSE))
+                .put(indexBuilder("kuku").state(State.OPEN));
+        MetaData md = mdBuilder.build();
+        // Can't test when wildcard expansion is turned off here as convertFromWildcards shouldn't be called in this case. Tests for this are covered in the concreteIndices() tests
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, true, true))), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, false, true))), equalTo(newHashSet("testXYY")));
+        assertThat(newHashSet(md.convertFromWildcards(new String[]{"testX*"}, IndicesOptions.fromOptions(true, true, true, false))), equalTo(newHashSet("testXXX", "testXXY")));
+    }
+
+    // Minimal single-shard, zero-replica index metadata builder shared by the tests above.
+    private IndexMetaData.Builder indexBuilder(String index) {
+        return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
+    }
+
+    @Test(expected = IndexMissingException.class)
+    public void testConcreteIndicesIgnoreIndicesOneMissingIndex() {
+        // strict options: a single missing explicit index must throw IndexMissingException
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX"))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        md.concreteIndices(IndicesOptions.strictExpandOpen(), "testZZZ");
+    }
+
+    @Test
+    public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() {
+        // lenient options: the missing index is silently dropped, the existing one is kept
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX"))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.lenientExpandOpen(), "testXXX", "testZZZ")), equalTo(newHashSet("testXXX")));
+    }
+
+    @Test(expected = IndexMissingException.class)
+    public void testConcreteIndicesIgnoreIndicesAllMissing() {
+        // strict options: resolving only missing indices must throw
+        // IndexMissingException (declared via expected= above).
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX"))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        // The assertion that previously wrapped this call was dead code (the call
+        // always throws) and asserted a set this request could never produce.
+        md.concreteIndices(IndicesOptions.strictExpandOpen(), "testMo", "testMahdy");
+    }
+
+    @Test
+    public void testConcreteIndicesIgnoreIndicesEmptyRequest() {
+        // an empty index list with lenient options resolves to all indices
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX"))
+                .put(indexBuilder("kuku"));
+        MetaData md = mdBuilder.build();
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.lenientExpandOpen(), new String[]{})), equalTo(Sets.newHashSet("kuku", "testXXX")));
+    }
+
+    @Test
+    public void testConcreteIndicesWildcardExpansion() {
+        // concreteIndices must honor the expand-open / expand-closed flags
+        // (fromOptions args: ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed).
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("testXXX").state(State.OPEN))
+                .put(indexBuilder("testXXY").state(State.OPEN))
+                .put(indexBuilder("testXYY").state(State.CLOSE))
+                .put(indexBuilder("testYYY").state(State.OPEN))
+                .put(indexBuilder("testYYX").state(State.OPEN));
+        MetaData md = mdBuilder.build();
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, false, false), "testX*")), equalTo(new HashSet<String>()));
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, true, false), "testX*")), equalTo(newHashSet("testXXX", "testXXY")));
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, false, true), "testX*")), equalTo(newHashSet("testXYY")));
+        assertThat(newHashSet(md.concreteIndices(IndicesOptions.fromOptions(true, true, true, true), "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
+    }
+
+    /**
+     * test resolving _all pattern (null, empty array or "_all") for random IndicesOptions
+     */
+    @Test
+    public void testConcreteIndicesAllPatternRandom() {
+        for (int i = 0; i < 10; i++) {
+            // pick one of the three equivalent "all indices" spellings at random
+            String[] allIndices = null;
+            switch (randomIntBetween(0, 2)) {
+                case 0:
+                    break;
+                case 1:
+                    allIndices = new String[0];
+                    break;
+                case 2:
+                    allIndices = new String[] { MetaData.ALL };
+                    break;
+            }
+
+            IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+            MetaData metadata = MetaData.builder().build();
+
+            // with no indices, asking for all indices should return empty list or exception, depending on indices options
+            if (indicesOptions.allowNoIndices()) {
+                String[] concreteIndices = metadata.concreteIndices(indicesOptions, allIndices);
+                assertThat(concreteIndices, notNullValue());
+                assertThat(concreteIndices.length, equalTo(0));
+            } else {
+                checkCorrectException(metadata, indicesOptions, allIndices);
+            }
+
+            // with existing indices, asking for all indices should return all open/closed indices depending on options
+            metadata = MetaData.builder()
+                    .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1")))
+                    .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1")))
+                    .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1")))
+                    .build();
+            if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed() || indicesOptions.allowNoIndices()) {
+                String[] concreteIndices = metadata.concreteIndices(indicesOptions, allIndices);
+                assertThat(concreteIndices, notNullValue());
+                // two open indices plus one closed one, counted per enabled flag
+                int expectedNumberOfIndices = 0;
+                if (indicesOptions.expandWildcardsOpen()) {
+                    expectedNumberOfIndices += 2;
+                }
+                if (indicesOptions.expandWildcardsClosed()) {
+                    expectedNumberOfIndices += 1;
+                }
+                assertThat(concreteIndices.length, equalTo(expectedNumberOfIndices));
+            } else {
+                checkCorrectException(metadata, indicesOptions, allIndices);
+            }
+        }
+    }
+
+    /**
+     * check for correct exception type depending on indicesOptions and provided index name list
+     */
+    private void checkCorrectException(MetaData metadata, IndicesOptions indicesOptions, String[] allIndices) {
+        // two different exception types possible
+        if (!(indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed())
+                && (allIndices == null || allIndices.length == 0)) {
+            try {
+                metadata.concreteIndices(indicesOptions, allIndices);
+                // message fixed: the caught type is IllegalArgumentException, not the
+                // previously named ElasticsearchIllegalArgumentException
+                fail("no wildcard expansion and null or empty list argument should trigger IllegalArgumentException");
+            } catch (IllegalArgumentException e) {
+                // expected
+            }
+        } else {
+            try {
+                metadata.concreteIndices(indicesOptions, allIndices);
+                fail("wildcard expansion should trigger IndexMissingException");
+            } catch (IndexMissingException e) {
+                // expected
+            }
+        }
+    }
+
+    /**
+     * test resolving a wildcard pattern that matches no index or alias for random IndicesOptions
+     */
+    @Test
+    public void testConcreteIndicesWildcardNoMatch() {
+        for (int i = 0; i < 10; i++) {
+            IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+            // (removed a dead initial assignment of an empty MetaData that was
+            // immediately overwritten by the builder below)
+            MetaData metadata = MetaData.builder()
+                    .put(indexBuilder("aaa").state(State.OPEN).putAlias(AliasMetaData.builder("aaa_alias1")))
+                    .put(indexBuilder("bbb").state(State.OPEN).putAlias(AliasMetaData.builder("bbb_alias1")))
+                    .put(indexBuilder("ccc").state(State.CLOSE).putAlias(AliasMetaData.builder("ccc_alias1")))
+                    .build();
+
+            // asking for non existing wildcard pattern should return empty list or exception
+            if (indicesOptions.allowNoIndices()) {
+                String[] concreteIndices = metadata.concreteIndices(indicesOptions, "Foo*");
+                assertThat(concreteIndices, notNullValue());
+                assertThat(concreteIndices.length, equalTo(0));
+            } else {
+                try {
+                    metadata.concreteIndices(indicesOptions, "Foo*");
+                    // typo fixes: "exeption" -> "exception", "allowNoIndicec" -> "allowNoIndices"
+                    fail("expecting exception when result empty and allowNoIndices=false");
+                } catch (IndexMissingException e) {
+                    // expected exception
+                }
+            }
+        }
+    }
+
+    @Test
+    public void testIsAllIndices_null() throws Exception {
+        // a null index list means "all indices"
+        assertThat(MetaData.isAllIndices(null), equalTo(true));
+    }
+
+    @Test
+    public void testIsAllIndices_empty() throws Exception {
+        // an empty index list means "all indices"
+        assertThat(MetaData.isAllIndices(new String[0]), equalTo(true));
+    }
+
+    @Test
+    public void testIsAllIndices_explicitAll() throws Exception {
+        // the "_all" keyword on its own means "all indices"
+        assertThat(MetaData.isAllIndices(new String[]{"_all"}), equalTo(true));
+    }
+
+    @Test
+    public void testIsAllIndices_explicitAllPlusOther() throws Exception {
+        // "_all" combined with other names is not treated as "all indices"
+        assertThat(MetaData.isAllIndices(new String[]{"_all", "other"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsAllIndices_normalIndexes() throws Exception {
+        // an explicit list of concrete names is not "all indices"
+        assertThat(MetaData.isAllIndices(new String[]{"index1", "index2", "index3"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsAllIndices_wildcard() throws Exception {
+        // "*" is a wildcard pattern, not the _all keyword
+        assertThat(MetaData.isAllIndices(new String[]{"*"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_null() throws Exception {
+        // null is an implicit, not an explicit, _all pattern
+        assertThat(MetaData.isExplicitAllPattern(null), equalTo(false));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_empty() throws Exception {
+        // an empty list is an implicit, not an explicit, _all pattern
+        assertThat(MetaData.isExplicitAllPattern(new String[0]), equalTo(false));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_explicitAll() throws Exception {
+        // only a lone "_all" counts as an explicit all-pattern
+        assertThat(MetaData.isExplicitAllPattern(new String[]{"_all"}), equalTo(true));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_explicitAllPlusOther() throws Exception {
+        // "_all" mixed with other names is not an explicit all-pattern
+        assertThat(MetaData.isExplicitAllPattern(new String[]{"_all", "other"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_normalIndexes() throws Exception {
+        // concrete index names are not an explicit all-pattern
+        assertThat(MetaData.isExplicitAllPattern(new String[]{"index1", "index2", "index3"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsExplicitAllIndices_wildcard() throws Exception {
+        // "*" matches everything but is not the explicit "_all" keyword
+        assertThat(MetaData.isExplicitAllPattern(new String[]{"*"}), equalTo(false));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_explicitList() throws Exception {
+        //even though it does identify all indices, it's not a pattern but just an explicit list of them
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(concreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(concreteIndices, concreteIndices), equalTo(false));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_onlyWildcard() throws Exception {
+        // a lone "*" pattern matches every index in the cluster
+        String[] indicesOrAliases = new String[]{"*"};
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(concreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_matchingTrailingWildcard() throws Exception {
+        // "index*" covers every index in the cluster, so it matches all
+        String[] indicesOrAliases = new String[]{"index*"};
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(concreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcard() throws Exception {
+        // cluster also contains "a" and "b", which "index*" does not cover
+        String[] indicesOrAliases = new String[]{"index*"};
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        String[] allConcreteIndices = new String[]{"index1", "index2", "index3", "a", "b"};
+        MetaData metaData = metaDataBuilder(allConcreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_matchingSingleExclusion() throws Exception {
+        // exclusion followed by re-inclusion of the same index nets out to all indices
+        String[] indicesOrAliases = new String[]{"-index1", "+index1"};
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(concreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_nonMatchingSingleExclusion() throws Exception {
+        // "-index1" leaves index1 out, so the pattern does not match all indices
+        String[] indicesOrAliases = new String[]{"-index1"};
+        String[] concreteIndices = new String[]{"index2", "index3"};
+        String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(allConcreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_matchingTrailingWildcardAndExclusion() throws Exception {
+        // wildcard plus exclusion/re-inclusion still nets out to all indices
+        String[] indicesOrAliases = new String[]{"index*", "-index1", "+index1"};
+        String[] concreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(concreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(true));
+    }
+
+    @Test
+    public void testIsPatternMatchingAllIndices_nonMatchingTrailingWildcardAndExclusion() throws Exception {
+        // wildcard covers everything but "-index1" removes one index, so not all
+        String[] indicesOrAliases = new String[]{"index*", "-index1"};
+        String[] concreteIndices = new String[]{"index2", "index3"};
+        String[] allConcreteIndices = new String[]{"index1", "index2", "index3"};
+        MetaData metaData = metaDataBuilder(allConcreteIndices);
+        assertThat(metaData.isPatternMatchingAllIndices(indicesOrAliases, concreteIndices), equalTo(false));
+    }
+
+    @Test
+    public void testIndexOptions_failClosedIndicesAndAliases() {
+        // Two closed indices and one open index; alias "foobar1-closed" points at a
+        // single closed index, alias "foobar2-closed" points at all three indices.
+        MetaData.Builder mdBuilder = MetaData.builder()
+                .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed")))
+                .put(indexBuilder("foo2-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar2-closed")))
+                .put(indexBuilder("foo3").putAlias(AliasMetaData.builder("foobar2-closed")));
+        MetaData md = mdBuilder.build();
+
+        // forbidClosed: resolving a closed index (directly or via alias) must throw.
+        // Fail messages fixed: they used to claim "foo1-closed should be closed, but
+        // it is open" even for the alias cases, describing the failure backwards.
+        IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();
+        try {
+            md.concreteIndices(options, "foo1-closed");
+            fail("expected IndexClosedException for closed index foo1-closed");
+        } catch (IndexClosedException e) {
+            // expected
+        }
+
+        try {
+            md.concreteIndices(options, "foobar1-closed");
+            fail("expected IndexClosedException for alias foobar1-closed pointing to closed index foo1-closed");
+        } catch (IndexClosedException e) {
+            // expected
+        }
+
+        // with ignoreUnavailable=true the closed index is silently dropped
+        options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
+        String[] results = md.concreteIndices(options, "foo1-closed");
+        assertThat(results, emptyArray());
+
+        results = md.concreteIndices(options, "foobar1-closed");
+        assertThat(results, emptyArray());
+
+        // lenient default options allow resolving the closed index
+        options = IndicesOptions.lenientExpandOpen();
+        results = md.concreteIndices(options, "foo1-closed");
+        assertThat(results, arrayWithSize(1));
+        assertThat(results, arrayContaining("foo1-closed"));
+
+        results = md.concreteIndices(options, "foobar1-closed");
+        assertThat(results, arrayWithSize(1));
+        assertThat(results, arrayContaining("foo1-closed"));
+
+        // testing an alias pointing to three indices:
+        options = IndicesOptions.strictExpandOpenAndForbidClosed();
+        try {
+            md.concreteIndices(options, "foobar2-closed");
+            fail("expected IndexClosedException for alias foobar2-closed pointing to closed indices");
+        } catch (IndexClosedException e) {
+            // expected
+        }
+
+        // ignoring unavailable indices keeps only the open index behind the alias
+        options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
+        results = md.concreteIndices(options, "foobar2-closed");
+        assertThat(results, arrayWithSize(1));
+        assertThat(results, arrayContaining("foo3"));
+
+        options = IndicesOptions.lenientExpandOpen();
+        results = md.concreteIndices(options, "foobar2-closed");
+        assertThat(results, arrayWithSize(3));
+        assertThat(results, arrayContainingInAnyOrder("foo1-closed", "foo2-closed", "foo3"));
+    }
+
+    // Builds a MetaData containing one minimal index per given name.
+    private MetaData metaDataBuilder(String... indices) {
+        MetaData.Builder mdBuilder = MetaData.builder();
+        for (String concreteIndex : indices) {
+            mdBuilder.put(indexBuilder(concreteIndex));
+        }
+        return mdBuilder.build();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
new file mode 100644
index 0000000000..c5063bfc80
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class ToAndFromJsonMetaDataTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonFromAndTo() throws IOException {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1")
+ .settings(settings(Version.CURRENT))
+ .numberOfShards(1)
+ .numberOfReplicas(2))
+ .put(IndexMetaData.builder("test2")
+ .settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(2)
+ .numberOfReplicas(3))
+ .put(IndexMetaData.builder("test3")
+ .settings(settings(Version.CURRENT))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1))
+ .put(IndexMetaData.builder("test4")
+ .settings(settings(Version.CURRENT))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .creationDate(2l))
+ .put(IndexMetaData.builder("test5")
+ .settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2))
+ .put(IndexMetaData.builder("test6")
+ .settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .creationDate(2l))
+ .put(IndexMetaData.builder("test7")
+ .settings(settings(Version.CURRENT))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .creationDate(2l)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2))
+ .put(IndexMetaData.builder("test8")
+ .settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test9")
+ .settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
+ .creationDate(2l)
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test10")
+ .settings(settings(Version.CURRENT)
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1"))
+ .putAlias(newAliasMetaDataBuilder("alias2")))
+ .put(IndexMetaData.builder("test11")
+ .settings(settings(Version.CURRENT)
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
+ .putAlias(newAliasMetaDataBuilder("alias2"))
+ .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1)
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar1"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
+ .put(IndexMetaData.builder("test12")
+ .settings(settings(Version.CURRENT)
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .creationDate(2l)
+ .numberOfShards(1)
+ .numberOfReplicas(2)
+ .putMapping("mapping1", MAPPING_SOURCE1)
+ .putMapping("mapping2", MAPPING_SOURCE2)
+ .putAlias(newAliasMetaDataBuilder("alias1").filter(ALIAS_FILTER1))
+ .putAlias(newAliasMetaDataBuilder("alias2"))
+ .putAlias(newAliasMetaDataBuilder("alias4").filter(ALIAS_FILTER2)))
+ .put(IndexTemplateMetaData.builder("foo")
+ .template("bar")
+ .order(1)
+ .settings(settingsBuilder()
+ .put("setting1", "value1")
+ .put("setting2", "value2"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar1"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar2").filter("{\"term\":{\"user\":\"kimchy\"}}"))
+ .putAlias(newAliasMetaDataBuilder("alias-bar3").routing("routing-bar")))
+ .build();
+
+ String metaDataSource = MetaData.Builder.toXContent(metaData);
+// System.out.println("ToJson: " + metaDataSource);
+
+ MetaData parsedMetaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.JSON).createParser(metaDataSource));
+
+ IndexMetaData indexMetaData = parsedMetaData.index("test1");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test2");
+ assertThat(indexMetaData.numberOfShards(), equalTo(2));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(3));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test3");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
+ assertThat(indexMetaData.mappings().size(), equalTo(1));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+
+ indexMetaData = parsedMetaData.index("test4");
+ assertThat(indexMetaData.creationDate(), equalTo(2l));
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test5");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+
+ indexMetaData = parsedMetaData.index("test6");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(2l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(0));
+
+ indexMetaData = parsedMetaData.index("test7");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(2l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+
+ indexMetaData = parsedMetaData.index("test8");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(2));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+
+ indexMetaData = parsedMetaData.index("test9");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(2l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(2));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+
+ indexMetaData = parsedMetaData.index("test10");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(2));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+
+ indexMetaData = parsedMetaData.index("test11");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(-1l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(3));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
+ assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
+
+ indexMetaData = parsedMetaData.index("test12");
+ assertThat(indexMetaData.numberOfShards(), equalTo(1));
+ assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
+ assertThat(indexMetaData.creationDate(), equalTo(2l));
+ assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
+ assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
+ assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
+ assertThat(indexMetaData.mappings().size(), equalTo(2));
+ assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
+ assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
+ assertThat(indexMetaData.aliases().size(), equalTo(3));
+ assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
+ assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
+ assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
+ assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
+ assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
+ assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
+
+ // templates
+ assertThat(parsedMetaData.templates().get("foo").name(), is("foo"));
+ assertThat(parsedMetaData.templates().get("foo").template(), is("bar"));
+ assertThat(parsedMetaData.templates().get("foo").settings().get("index.setting1"), is("value1"));
+ assertThat(parsedMetaData.templates().get("foo").settings().getByPrefix("index.").get("setting2"), is("value2"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().size(), equalTo(3));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar1").alias(), equalTo("alias-bar1"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").alias(), equalTo("alias-bar2"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").alias(), equalTo("alias-bar3"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").indexRouting(), equalTo("routing-bar"));
+ assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").searchRouting(), equalTo("routing-bar"));
+ }
+
+ private static final String MAPPING_SOURCE1 = "{\"mapping1\":{\"text1\":{\"type\":\"string\"}}}";
+ private static final String MAPPING_SOURCE2 = "{\"mapping2\":{\"text2\":{\"type\":\"string\"}}}";
+ private static final String ALIAS_FILTER1 = "{\"field1\":\"value1\"}";
+ private static final String ALIAS_FILTER2 = "{\"field2\":\"value2\"}";
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
new file mode 100644
index 0000000000..c86508f756
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.node;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
+import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DiscoveryNodeFiltersTests extends ElasticsearchTestCase {
+
+ @Test
+ public void nameMatch() {
+ Settings settings = Settings.settingsBuilder()
+ .put("xxx.name", "name1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idMatch() {
+ Settings settings = Settings.settingsBuilder()
+ .put("xxx._id", "id1")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void idOrNameMatch() {
+ Settings settings = Settings.settingsBuilder()
+ .put("xxx._id", "id1,blah")
+ .put("xxx.name", "blah,name2")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void tagAndGroupMatch() {
+ Settings settings = Settings.settingsBuilder()
+ .put("xxx.tag", "A")
+ .put("xxx.group", "B")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(AND, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name2", "id2", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "B", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+
+ node = new DiscoveryNode("name3", "id3", DummyTransportAddress.INSTANCE,
+ ImmutableMap.<String, String>of("tag", "A", "group", "F", "name", "X"), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+
+ node = new DiscoveryNode("name4", "id4", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(false));
+ }
+
+ @Test
+ public void starMatch() {
+ Settings settings = Settings.settingsBuilder()
+ .put("xxx.name", "*")
+ .build();
+ DiscoveryNodeFilters filters = DiscoveryNodeFilters.buildFromSettings(OR, "xxx.", settings);
+
+ DiscoveryNode node = new DiscoveryNode("name1", "id1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT);
+ assertThat(filters.match(node), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java
new file mode 100644
index 0000000000..ff20115b20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.node;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.ThrowableObjectInputStream;
+import org.elasticsearch.common.io.ThrowableObjectOutputStream;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+
+public class DiscoveryNodeTests extends ElasticsearchTestCase {
+
+
+ @Test
+ public void testJavaSerializablilty() throws IOException, ClassNotFoundException {
+ final int iters = scaledRandomIntBetween(100, 300);
+ for (int i = 0; i < iters; i++) {
+ final String id = randomUnicodeOfLengthBetween(3, 20);
+ final String nodeName = randomUnicodeOfLengthBetween(3, 20);
+ final String hostName = randomUnicodeOfLengthBetween(3, 20);
+ final String hostAddress = randomUnicodeOfLengthBetween(3, 20);
+ final TransportAddress transportAddress = new LocalTransportAddress(randomUnicodeOfLengthBetween(3, 20));
+ final Map<String, String> attributes = new HashMap<>();
+ for (int a = randomInt(10); a > 0; a--) {
+ attributes.put(randomUnicodeOfLengthBetween(3, 20), randomUnicodeOfLengthBetween(3, 20));
+ }
+ final Version version = randomVersion(random());
+ DiscoveryNode discoveryNode = new DiscoveryNode(nodeName, id, hostName, hostAddress, transportAddress, attributes, version);
+ BytesStreamOutput bytesOutput = new BytesStreamOutput();
+ ThrowableObjectOutputStream too = new ThrowableObjectOutputStream(bytesOutput);
+ too.writeObject(discoveryNode);
+ too.close();
+ ThrowableObjectInputStream from = new ThrowableObjectInputStream(StreamInput.wrap(bytesOutput.bytes()));
+ DiscoveryNode readDiscoveryNode = (DiscoveryNode) from.readObject();
+ from.close();
+ assertThat(readDiscoveryNode, Matchers.equalTo(discoveryNode));
+ assertThat(readDiscoveryNode.id(), Matchers.equalTo(id));
+ assertThat(readDiscoveryNode.name(), Matchers.equalTo(nodeName));
+ assertThat(readDiscoveryNode.getHostName(), Matchers.equalTo(hostName));
+ assertThat(readDiscoveryNode.getHostAddress(), Matchers.equalTo(hostAddress));
+ assertThat(readDiscoveryNode.address(), Matchers.equalTo(transportAddress));
+ assertThat(readDiscoveryNode.attributes(), Matchers.equalTo(attributes));
+ assertThat(readDiscoveryNode.version(), Matchers.equalTo(version));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java
new file mode 100644
index 0000000000..f2957d75d0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.nio.file.Path;
+import java.util.Arrays;
+
+public class RoutingBackwardCompatibilityTests extends ElasticsearchTestCase {
+
+ public void testBackwardCompatibility() throws Exception {
+ Path baseDir = createTempDir();
+ Node node = new Node(Settings.builder().put("path.home", baseDir.toString()).build(), false);
+ try {
+ try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class.getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) {
+ for (String line = reader.readLine(); line != null; line = reader.readLine()) {
+ if (line.startsWith("#")) { // comment
+ continue;
+ }
+ String[] parts = line.split("\t");
+ assertEquals(Arrays.toString(parts), 7, parts.length);
+ final String index = parts[0];
+ final int numberOfShards = Integer.parseInt(parts[1]);
+ final String type = parts[2];
+ final String id = parts[3];
+ final String routing = "null".equals(parts[4]) ? null : parts[4];
+ final int pre20ExpectedShardId = Integer.parseInt(parts[5]);
+ final int currentExpectedShard = Integer.parseInt(parts[6]);
+
+ OperationRouting operationRouting = node.injector().getInstance(OperationRouting.class);
+ for (Version version : VersionUtils.allVersions()) {
+ final Settings settings = settings(version).build();
+ IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards).numberOfReplicas(randomInt(3)).build();
+ MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false);
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
+ ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ final int shardId = operationRouting.indexShards(clusterState, index, type, id, routing).shardId().getId();
+ if (version.before(Version.V_2_0_0)) {
+ assertEquals(pre20ExpectedShardId, shardId);
+ } else {
+ assertEquals(currentExpectedShard, shardId);
+ }
+ }
+ }
+ }
+ } finally {
+ node.close();
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeTests.java
new file mode 100644
index 0000000000..54a8c964e5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityUponUpgradeTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.nio.file.Path;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, minNumDataNodes = 0, maxNumDataNodes = 0)
+@LuceneTestCase.SuppressFileSystems("*") // extra files break the single data cluster expectation when unzipping the static index
+public class RoutingBackwardCompatibilityUponUpgradeTests extends ElasticsearchIntegrationTest {
+
+ public void testDefaultRouting() throws Exception {
+ test("default_routing_1_x", DjbHashFunction.class, false);
+ }
+
+ public void testCustomRouting() throws Exception {
+ test("custom_routing_1_x", SimpleHashFunction.class, true);
+ }
+
+ private void test(String name, Class<? extends HashFunction> expectedHashFunction, boolean expectedUseType) throws Exception {
+ Path zippedIndexDir = getDataPath("/org/elasticsearch/cluster/routing/" + name + ".zip");
+ Settings baseSettings = prepareBackwardsDataDir(zippedIndexDir);
+ internalCluster().startNode(Settings.builder()
+ .put(baseSettings)
+ .put(Node.HTTP_ENABLED, true)
+ .build());
+ ensureYellow("test");
+ GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get();
+ assertArrayEquals(new String[] {"test"}, getIndexResponse.indices());
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertEquals(expectedHashFunction.getName(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION));
+ assertEquals(Boolean.valueOf(expectedUseType).toString(), getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE));
+ SearchResponse allDocs = client().prepareSearch("test").get();
+ assertSearchResponse(allDocs);
+ assertHitCount(allDocs, 4);
+ // Make sure routing works
+ for (SearchHit hit : allDocs.getHits().hits()) {
+ GetResponse get = client().prepareGet(hit.index(), hit.type(), hit.id()).get();
+ assertTrue(get.isExists());
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java
new file mode 100644
index 0000000000..23fad2de2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTest.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.node.DiscoveryNodes.Builder;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+// Unit tests for RoutingTable lookups (hasIndex/index/allShards) and the
+// grouped shard iterators, exercised while the shards of two randomly sized
+// test indices move through the unassigned -> initializing -> started
+// lifecycle.
+public class RoutingTableTest extends ElasticsearchAllocationTestCase {
+
+ private static final String TEST_INDEX_1 = "test1";
+ private static final String TEST_INDEX_2 = "test2";
+ private RoutingTable emptyRoutingTable;
+ private RoutingTable testRoutingTable;
+ private int numberOfShards;
+ private int numberOfReplicas;
+ // shardsPerIndex = numberOfShards * (numberOfReplicas + 1);
+ // totalNumberOfShards = shardsPerIndex * 2 (both test indices together)
+ private int shardsPerIndex;
+ private int totalNumberOfShards;
+ private final static Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ private final AllocationService ALLOCATION_SERVICE = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .build());
+ private ClusterState clusterState;
+
+ // Builds a routing table holding two freshly created indices whose shards
+ // are all still unassigned, plus an empty table for the degenerate cases.
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ this.numberOfShards = randomIntBetween(1, 5);
+ this.numberOfReplicas = randomIntBetween(1, 5);
+ this.shardsPerIndex = this.numberOfShards * (this.numberOfReplicas + 1);
+ this.totalNumberOfShards = this.shardsPerIndex * 2;
+ logger.info("Setup test with " + this.numberOfShards + " shards and " + this.numberOfReplicas + " replicas.");
+ this.emptyRoutingTable = new RoutingTable.Builder().build();
+ MetaData metaData = MetaData.builder()
+ .put(createIndexMetaData(TEST_INDEX_1))
+ .put(createIndexMetaData(TEST_INDEX_2))
+ .build();
+
+ this.testRoutingTable = new RoutingTable.Builder()
+ .add(new IndexRoutingTable.Builder(TEST_INDEX_1).initializeAsNew(metaData.index(TEST_INDEX_1)).build())
+ .add(new IndexRoutingTable.Builder(TEST_INDEX_2).initializeAsNew(metaData.index(TEST_INDEX_2)).build())
+ .build();
+ this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(testRoutingTable).build();
+ }
+
+ /**
+ * puts primary shard routings into initializing state by adding enough
+ * nodes to hold one copy of every shard and rerouting; asserts that the
+ * reroute actually changed the routing table.
+ */
+ private void initPrimaries() {
+ logger.info("adding " + (this.numberOfReplicas + 1) + " nodes and performing rerouting");
+ Builder discoBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < this.numberOfReplicas + 1; i++) {
+ discoBuilder = discoBuilder.put(newNode("node" + i));
+ }
+ this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
+ RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.reroute(clusterState);
+ this.testRoutingTable = rerouteResult.routingTable();
+ assertThat(rerouteResult.changed(), is(true));
+ this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ }
+
+ // Promotes the given index's INITIALIZING shards to STARTED, keeping the
+ // testRoutingTable / clusterState fields in sync with the reroute result.
+ private void startInitializingShards(String index) {
+ this.clusterState = ClusterState.builder(clusterState).routingTable(this.testRoutingTable).build();
+ logger.info("start primary shards for index " + index);
+ RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.routingNodes().shardsWithState(index, INITIALIZING));
+ this.clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ this.testRoutingTable = rerouteResult.routingTable();
+ }
+
+ // Index metadata using the randomized shard/replica counts chosen in setUp().
+ private IndexMetaData.Builder createIndexMetaData(String indexName) {
+ return new IndexMetaData.Builder(indexName)
+ .settings(DEFAULT_SETTINGS)
+ .numberOfReplicas(this.numberOfReplicas)
+ .numberOfShards(this.numberOfShards);
+ }
+
+ // allShards() counts every shard copy; the index-scoped overload throws
+ // IndexMissingException for an unknown index.
+ @Test
+ public void testAllShards() {
+ assertThat(this.emptyRoutingTable.allShards().size(), is(0));
+ assertThat(this.testRoutingTable.allShards().size(), is(this.totalNumberOfShards));
+
+ assertThat(this.testRoutingTable.allShards(TEST_INDEX_1).size(), is(this.shardsPerIndex));
+ try {
+ assertThat(this.testRoutingTable.allShards("not_existing").size(), is(0));
+ fail("Exception expected when calling allShards() with non existing index name");
+ } catch (IndexMissingException e) {
+ // expected
+ }
+ }
+
+ // hasIndex() is a simple membership check on the routing table.
+ @Test
+ public void testHasIndex() {
+ assertThat(this.testRoutingTable.hasIndex(TEST_INDEX_1), is(true));
+ assertThat(this.testRoutingTable.hasIndex("foobar"), is(false));
+ }
+
+ // index() returns the per-index routing table, or null when unknown.
+ @Test
+ public void testIndex() {
+ assertThat(this.testRoutingTable.index(TEST_INDEX_1).getIndex(), is(TEST_INDEX_1));
+ assertThat(this.testRoutingTable.index("foobar"), is(nullValue()));
+ }
+
+ // indicesRouting() and its getter alias expose the same underlying map.
+ @Test
+ public void testIndicesRouting() {
+ assertThat(this.testRoutingTable.indicesRouting().size(), is(2));
+ assertThat(this.testRoutingTable.getIndicesRouting().size(), is(2));
+ assertSame(this.testRoutingTable.getIndicesRouting(), this.testRoutingTable.indicesRouting());
+ }
+
+ // Walks both indices through the lifecycle and checks shardsWithState()
+ // bookkeeping at every step: all unassigned -> primaries initializing ->
+ // primaries started (replicas initializing) -> everything started.
+ @Test
+ public void testShardsWithState() {
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards));
+
+ initPrimaries();
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - 2 * this.numberOfShards));
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(2 * this.numberOfShards));
+
+ startInitializingShards(TEST_INDEX_1);
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(this.numberOfShards));
+ // index 1 replicas plus index 2 primaries are now initializing
+ int initializingExpected = this.numberOfShards + this.numberOfShards * this.numberOfReplicas;
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - this.numberOfShards));
+
+ startInitializingShards(TEST_INDEX_2);
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(2 * this.numberOfShards));
+ initializingExpected = 2 * this.numberOfShards * this.numberOfReplicas;
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected));
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards));
+
+ // now start all replicas too
+ startInitializingShards(TEST_INDEX_1);
+ startInitializingShards(TEST_INDEX_2);
+ assertThat(this.testRoutingTable.shardsWithState(ShardRoutingState.STARTED).size(), is(this.totalNumberOfShards));
+ }
+
+ // Grouped iterator over active primaries only; with includeEmpty=true the
+ // group count equals the shard count even before anything is active.
+ // Unknown indices must raise IndexMissingException.
+ @Test
+ public void testActivePrimaryShardsGrouped() {
+ assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], true).size(), is(0));
+ assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0));
+
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+
+ initPrimaries();
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+
+ startInitializingShards(TEST_INDEX_1);
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards));
+
+ startInitializingShards(TEST_INDEX_2);
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards));
+
+ try {
+ this.testRoutingTable.activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
+ fail("Calling with non-existing index name should raise IndexMissingException");
+ } catch (IndexMissingException e) {
+ // expected
+ }
+ }
+
+ // Grouped iterator over all active copies (primaries and replicas);
+ // unlike the primaries variant, an unknown index is silently ignored.
+ @Test
+ public void testAllActiveShardsGrouped() {
+ assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], true).size(), is(0));
+ assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], false).size(), is(0));
+
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+
+ initPrimaries();
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+
+ startInitializingShards(TEST_INDEX_1);
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+
+ startInitializingShards(TEST_INDEX_2);
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
+
+ try {
+ this.testRoutingTable.allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true);
+ } catch (IndexMissingException e) {
+ fail("Calling with non-existing index should be ignored at the moment");
+ }
+ }
+
+ // Grouped iterator over all assigned copies (initializing counts too);
+ // unknown indices are silently ignored here as well.
+ @Test
+ public void testAllAssignedShardsGrouped() {
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0));
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+
+ initPrimaries();
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards));
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex));
+
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards));
+ assertThat(this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards));
+
+ try {
+ this.testRoutingTable.allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false);
+ } catch (IndexMissingException e) {
+ fail("Calling with non-existing index should be ignored at the moment");
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
new file mode 100644
index 0000000000..46424270fa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -0,0 +1,436 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import com.google.common.collect.Lists;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class AddIncrementallyTests extends ElasticsearchAllocationTestCase {
+ private final ESLogger logger = Loggers.getLogger(AddIncrementallyTests.class);
+
+ // Starts from a 1-node cluster holding 3 indices (3 shards, 1 replica each,
+ // so 9 primaries started and 9 replicas unassigned), then grows the cluster
+ // one node at a time and adds two more indices. After every step the
+ // balancer must keep the per-node, per-index started-shard count within the
+ // asserted bound and leave the expected number of shards unassigned.
+ @Test
+ public void testAddNodesAndIndices() {
+ Settings.Builder settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ AllocationService service = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ // second node picks up all replicas; nothing unassigned any more
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertAtLeastOneIndexShardPerNode(clusterState);
+ clusterState = removeNodes(clusterState, service, 1);
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+
+ // new index "test3" (2 shards, 3 replicas): 2 copies cannot be placed
+ clusterState = addIndex(clusterState, service, 3, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, "test3", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+
+ clusterState = addIndex(clusterState, service, 4, 2, 3);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, "test4", Matchers.equalTo(2));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ // shrinking the cluster re-creates unassigned copies; growing it again heals
+ clusterState = removeNodes(clusterState, service, 1);
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(4));
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertNumIndexShardsPerNode(clusterState, Matchers.lessThanOrEqualTo(2));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ // With node_concurrent_recoveries limited to 2, adding a third node to a
+ // balanced 2-node cluster must move shards over in batches of at most 2 per
+ // reroute round, and stop once node2 holds its fair share (6 shards):
+ // the final reroute must return the same routing table instance (no-op).
+ @Test
+ public void testMinimalRelocations() {
+ Settings.Builder settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 2);
+ AllocationService service = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // first round: exactly 2 shards start initializing on the new node
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ // 2 started, next batch of 2 begins
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // node2 reached its fair share of 6; no further relocations are queued
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // fixed point: applying an empty started-shards list returns the same table
+ assertThat(prev, Matchers.sameInstance(routingTable))
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+ // Same scenario as testMinimalRelocations, but with recovery throttling
+ // effectively disabled (limits raised to 100). The assertions show the
+ // balancer itself still relocates only 2 shards per round — i.e. the
+ // minimal-relocation behavior is not an artifact of the recovery limit.
+ @Test
+ public void testMinimalRelocationsNoLimit() {
+ Settings.Builder settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100);
+ AllocationService service = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(9));
+ int nodeOffset = 1;
+ clusterState = addNodes(clusterState, service, 1, nodeOffset++);
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), Matchers.equalTo(9));
+ assertThat(clusterState.routingNodes().unassigned().size(), Matchers.equalTo(0));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(3));
+
+ logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ nodes.put(newNode("node2"));
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // despite the high recovery limit only 2 shards relocate per round
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+
+ RoutingTable prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(4));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(2));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // node2 reached its fair share of 6; nothing further relocates
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), Matchers.equalTo(6));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node0").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), Matchers.equalTo(0));
+ assertThat(prev, Matchers.not(Matchers.sameInstance(routingTable)));
+
+ prev = routingTable;
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // fixed point reached: routing table instance is unchanged
+ assertThat(prev, Matchers.sameInstance(routingTable));
+ assertNumIndexShardsPerNode(clusterState, Matchers.equalTo(2));
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ }
+
+
+ // Asserts that, for every index in the routing table, each node's count of
+ // STARTED shards of that index satisfies the given matcher.
+ private void assertNumIndexShardsPerNode(ClusterState state, Matcher<Integer> matcher) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+ assertNumIndexShardsPerNode(state, index, matcher);
+ }
+ }
+
+ // Asserts the per-node count of STARTED shards of a single index.
+ private void assertNumIndexShardsPerNode(ClusterState state, String index, Matcher<Integer> matcher) {
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), matcher);
+ }
+ }
+
+
+ // Asserts that every node holds at least one STARTED shard of every index,
+ // i.e. no index is completely absent from any node.
+ private void assertAtLeastOneIndexShardPerNode(ClusterState state) {
+ for (String index : state.routingTable().indicesRouting().keySet()) {
+
+ for (RoutingNode node : state.routingNodes()) {
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(1));
+ }
+ }
+
+ }
+
+ // Adds numNodes nodes (named "node<offset>", "node<offset+1>", ...) to the
+ // cluster, reroutes, then repeatedly promotes INITIALIZING shards to
+ // STARTED until the routing table stops changing (allocation fixed point).
+ // Returns the resulting cluster state.
+ private ClusterState addNodes(ClusterState clusterState, AllocationService service, int numNodes, int nodeOffset) {
+ logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = 0; i < numNodes; i++) {
+ nodes.put(newNode("node" + (i + nodeOffset)));
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // move initializing to started
+
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // same instance returned means nothing changed -> allocation settled
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ // Builds a fresh cluster: creates numberOfIndices indices ("test0"...) with
+ // the given shard/replica counts, starts numberOfNodes nodes ("node0"...),
+ // performs the initial allocation, starts primaries then replicas, and
+ // finally loops applyStartedShards until the routing table stops changing
+ // so that rebalancing is fully settled. Returns the resulting state.
+ private ClusterState initCluster(AllocationService service, int numberOfNodes, int numberOfIndices, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing")
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // unchanged instance means allocation reached its fixed point
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ private ClusterState addIndex(ClusterState clusterState, AllocationService service, int indexOrdinal, int numberOfShards,
+ int numberOfReplicas) {
+ MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.getMetaData());
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable());
+
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + indexOrdinal).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(
+ numberOfReplicas);
+ IndexMetaData imd = index.build();
+ metaDataBuilder = metaDataBuilder.put(imd, true);
+ routingTableBuilder.addAsNew(imd);
+
+ MetaData metaData = metaDataBuilder.build();
+ RoutingTable routingTable = routingTableBuilder.build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService service, int numNodes) {
+ logger.info("Removing [{}] nodes", numNodes);
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+ ArrayList<DiscoveryNode> discoveryNodes = Lists.newArrayList(clusterState.nodes());
+ Collections.shuffle(discoveryNodes, getRandom());
+ for (DiscoveryNode node : discoveryNodes) {
+ nodes.remove(node.id());
+ numNodes--;
+ if (numNodes <= 0) {
+ break;
+ }
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ while (true) {
+ logger.debug("ClusterState: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
new file mode 100644
index 0000000000..2b7607422b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocatePostApiFlagTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/** Verifies that a shard's primaryAllocatedPostApi flag stays false until the primary shard has been started.
+ */
+public class AllocatePostApiFlagTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocatePostApiFlagTests.class);
+
+ @Test
+ public void simpleFlagTests() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(false));
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryAllocatedPostApi(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
new file mode 100644
index 0000000000..24cd971848
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/** Tests for allocation commands (move, allocate, cancel), including their serialization and XContent parsing.
+ */
+public class AllocationCommandsTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AllocationCommandsTests.class);
+
+ @Test
+ public void moveShardCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("creating an index with 1 shard, no replica");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("start primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("move the shard");
+ String existingNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String toNodeId;
+ if ("node1".equals(existingNodeId)) {
+ toNodeId = "node2";
+ } else {
+ toNodeId = "node1";
+ }
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), existingNodeId, toNodeId)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(existingNodeId).get(0).state(), equalTo(ShardRoutingState.RELOCATING));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.INITIALIZING));
+
+ logger.info("finish moving the shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(existingNodeId).isEmpty(), equalTo(true));
+ assertThat(clusterState.routingNodes().node(toNodeId).get(0).state(), equalTo(ShardRoutingState.STARTED));
+ }
+
+ @Test
+ public void allocateCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ .put(newNode("node4", ImmutableMap.of("data", Boolean.FALSE.toString())))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to false, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> allocating to non-data node, should fail");
+ try {
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node4", true)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on the primary shard node, should fail");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> verify that we fail when there are no unassigned shards");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node3", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ }
+
+ @Test
+ public void cancelCommand() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+ .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 3 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ ).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> allocating with primary flag set to true");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", true)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> start the primary shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+
+ logger.info("--> cancel primary allocation, make sure it fails...");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the relocation allocation");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the primary being replicated, make sure it fails");
+ try {
+ allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false)));
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> cancel allocation of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node2", false)));
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> allocate the replica shard on on the second node");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node2", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the replica shard");
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+ logger.info("--> move the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new MoveAllocationCommand(new ShardId("test", 0), "node2", "node3")));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node3").shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> cancel the move of the replica shard");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node3", false)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).size(), equalTo(1));
+
+
+ logger.info("--> cancel the primary allocation (with allow_primary set to true)");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", true)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(rerouteResult.changed(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node2").shardsWithState(STARTED).get(0).primary(), equalTo(true));
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(0));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+ }
+
+ @Test
+ public void serialization() throws Exception {
+ AllocationCommands commands = new AllocationCommands(
+ new AllocateAllocationCommand(new ShardId("test", 1), "node1", true),
+ new MoveAllocationCommand(new ShardId("test", 3), "node2", "node3"),
+ new CancelAllocationCommand(new ShardId("test", 4), "node5", true)
+ );
+ BytesStreamOutput bytes = new BytesStreamOutput();
+ AllocationCommands.writeTo(commands, bytes);
+ AllocationCommands sCommands = AllocationCommands.readFrom(StreamInput.wrap(bytes.bytes()));
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+
+ @Test
+ public void xContent() throws Exception {
+ String commands = "{\n" +
+ " \"commands\" : [\n" +
+ " {\"allocate\" : {\"index\" : \"test\", \"shard\" : 1, \"node\" : \"node1\", \"allow_primary\" : true}}\n" +
+ " ,{\"move\" : {\"index\" : \"test\", \"shard\" : 3, \"from_node\" : \"node2\", \"to_node\" : \"node3\"}} \n" +
+ " ,{\"cancel\" : {\"index\" : \"test\", \"shard\" : 4, \"node\" : \"node5\", \"allow_primary\" : true}} \n" +
+ " ]\n" +
+ "}\n";
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(commands);
+ // move two tokens, parser expected to be "on" `commands` field
+ parser.nextToken();
+ parser.nextToken();
+ AllocationCommands sCommands = AllocationCommands.fromXContent(parser);
+
+ assertThat(sCommands.commands().size(), equalTo(3));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).shardId(), equalTo(new ShardId("test", 1)));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).node(), equalTo("node1"));
+ assertThat(((AllocateAllocationCommand) (sCommands.commands().get(0))).allowPrimary(), equalTo(true));
+
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).shardId(), equalTo(new ShardId("test", 3)));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).fromNode(), equalTo("node2"));
+ assertThat(((MoveAllocationCommand) (sCommands.commands().get(1))).toNode(), equalTo("node3"));
+
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).shardId(), equalTo(new ShardId("test", 4)));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).node(), equalTo("node5"));
+ assertThat(((CancelAllocationCommand) (sCommands.commands().get(2))).allowPrimary(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
new file mode 100644
index 0000000000..c8d1926b6d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -0,0 +1,829 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class AwarenessAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class);
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded1() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded3() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(STARTED)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(RELOCATING)) {
+ logger.info(shard.toString());
+ }
+ for (ShardRouting shard : clusterState.routingNodes().shardsWithState(UNASSIGNED)) {
+ logger.info(shard.toString());
+ }
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded4() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded4'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ for (int i = 0; i < 2; i++) {
+ logger.info("--> complete initializing round: [{}]", i);
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+ assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(5));
+ assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(5));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded5() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded5'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ @Test
+ public void moveShardOnceNewNodeWithAttributeAdded6() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'moveShardOnceNewNodeWithAttributeAdded6'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(3))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node4", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node5"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, we will have another relocation");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node6", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).get(0).relocatingNodeId(), equalTo("node6"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> make sure another reroute does not move things");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ // Forced-awareness test for a 1-shard / 1-replica index: with
+ // "awareness.force.rack_id.values" = "1,2", the replica must remain
+ // unassigned while all nodes share rack_id "1", and may only initialize
+ // once a node carrying the second rack value joins.
+ @Test
+ public void fullAwareness1() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness1'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Only the primary initializes; the replica is held back by forced awareness.
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // The replica now initializes, and it must land on the rack-"2" node.
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ // sameInstance: an unchanged routing table proves the reroute was a no-op.
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ // rack_id "3" is outside the forced value list "1,2"; the routing table must stay identical.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ // Same forced-awareness scenario as fullAwareness1, but with THREE nodes on
+ // the first rack: extra capacity on one rack still must not allow the
+ // replica to allocate before a second rack value appears.
+ @Test
+ public void fullAwareness2() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness2'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node3", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> replica will not start because we have only one rack value");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // The replica initializes only now, and only on the rack-"2" node.
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node4"));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, make sure nothing moves");
+ // rack_id "3" is outside the forced value list; routing table must be unchanged.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node5", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable, sameInstance(clusterState.routingTable()));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ }
+
+ // Forced awareness at larger scale: two indices with 5 shards / 1 replica
+ // each (20 shard copies total). Balance factors put all weight on replica
+ // (shard-count) balance so allocation decisions are deterministic for the
+ // assertions below.
+ @Test
+ public void fullAwareness3() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
+ .put("cluster.routing.allocation.awareness.attributes", "rack_id")
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table for 'fullAwareness3'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("rack_id", "1")))
+ .put(newNode("node2", ImmutableMap.of("rack_id", "1")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // All 10 primaries initialize; replicas wait for a second rack value.
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(10));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+
+ logger.info("--> add a new node with a new rack and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", ImmutableMap.of("rack_id", "2")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // All 10 replicas now initialize on the rack-"2" node.
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ logger.info("--> complete initializing");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> run it again, since we still might have relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+
+ logger.info("--> add another node with a new rack, some more relocation should happen");
+ // Unlike the 1-shard tests, here rebalancing is expected when capacity grows.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4", ImmutableMap.of("rack_id", "3")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), greaterThan(0));
+
+ logger.info("--> complete relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(20));
+
+ logger.info("--> do another reroute, make sure nothing moves");
+ assertThat(strategy.reroute(clusterState).routingTable(), sameInstance(clusterState.routingTable()));
+ }
+
+ // Awareness across unbalanced zones: one index (5 shards / 1 replica) over
+ // zones "a" and "b". After adding a second node to zone "a" the test pins
+ // the exact expected shard distribution: A-1 gets 2, A-0 keeps 3, and the
+ // lone B-0 node must keep all 5 of zone "b"'s copies.
+ @Test
+ public void testUnbalancedZones() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
+ .put("cluster.routing.allocation.awareness.attributes", "zone")
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table for 'testUnbalancedZones'");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("A-0", ImmutableMap.of("zone", "a")))
+ .put(newNode("B-0", ImmutableMap.of("zone", "b")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Primaries started; the 5 replicas begin initializing on the other zone.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> all replicas are allocated and started since we have on node in each zone");
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> add a new node in zone 'a' and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("A-1", ImmutableMap.of("zone", "a")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Two copies move within zone "a" to the new node; zone "b" is untouched.
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo("A-1"));
+ logger.info("--> starting initializing shards on the new node");
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Final distribution: A-1=2, A-0=3, B-0=5 (zone "b" stays concentrated on its only node).
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(10));
+ assertThat(clusterState.getRoutingNodes().node("A-1").size(), equalTo(2));
+ assertThat(clusterState.getRoutingNodes().node("A-0").size(), equalTo(3));
+ assertThat(clusterState.getRoutingNodes().node("B-0").size(), equalTo(5));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
new file mode 100644
index 0000000000..53167a4cf2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -0,0 +1,474 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class BalanceConfigurationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(BalanceConfigurationTests.class);
+ // Fixed cluster/index dimensions shared by every test in this class.
+ // TODO maybe we can randomize these numbers somehow
+ final int numberOfNodes = 25;
+ final int numberOfIndices = 12;
+ final int numberOfShards = 2;
+ final int numberOfReplicas = 2;
+
+ // Balance check with all weight on the index-balance factor (index=1.0,
+ // shard=0.0, threshold=1.0): after initial allocation, after adding one
+ // node, and after removing half the nodes, per-index shard counts on each
+ // node must stay within the threshold of the per-index average.
+ @Test
+ public void testIndexBalance() {
+ /* Tests balance over indices only */
+ final float indexBalance = 1.0f;
+ final float replicaBalance = 0.0f;
+ final float balanceTreshold = 1.0f;
+
+ Settings.Builder settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ clusterState = addNode(clusterState, strategy);
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ clusterState = removeNodes(clusterState, strategy);
+ // (numberOfNodes + 1) - (numberOfNodes + 1) / 2 nodes survive removeNodes().
+ assertIndexBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ }
+
+ // Mirror of testIndexBalance with the weights flipped (index=0.0,
+ // shard=1.0): total shard counts per node must stay within the threshold
+ // of the cluster-wide average through init, node addition, and removal.
+ @Test
+ public void testReplicaBalance() {
+ /* Tests balance over replicas only */
+ final float indexBalance = 0.0f;
+ final float replicaBalance = 1.0f;
+ final float balanceTreshold = 1.0f;
+
+ Settings.Builder settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold);
+
+ AllocationService strategy = createAllocationService(settings.build());
+
+ ClusterState clusterState = initCluster(strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ clusterState = addNode(clusterState, strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), numberOfNodes + 1, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ clusterState = removeNodes(clusterState, strategy);
+ assertReplicaBalance(logger, clusterState.getRoutingNodes(), (numberOfNodes + 1) - (numberOfNodes + 1) / 2, numberOfIndices, numberOfReplicas, numberOfShards, balanceTreshold);
+
+ }
+
+ // Builds a cluster of numberOfNodes nodes hosting numberOfIndices indices
+ // (numberOfShards shards, numberOfReplicas replicas each), then drives
+ // allocation to a fully-started, stable state.
+ // Returns the stabilized ClusterState.
+ private ClusterState initCluster(AllocationService strategy) {
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ IndexMetaData.Builder index = IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(numberOfReplicas);
+ metaDataBuilder = metaDataBuilder.put(index);
+ }
+
+ MetaData metaData = metaDataBuilder.build();
+
+ for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+ routingTableBuilder.addAsNew(cursor.value);
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+
+
+ logger.info("start " + numberOfNodes + " nodes");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfNodes; i++) {
+ nodes.put(newNode("node" + i));
+ }
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("restart all the primary shards, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ // Iterate until applyStartedShards becomes a no-op; reference equality
+ // (routingTable == prev) signals the routing table stopped changing.
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ // Adds one extra node ("node" + numberOfNodes), reroutes, and drives
+ // allocation until the routing table stabilizes. Rebalancing is expected
+ // because the tests configure ALLOW_REBALANCE = always.
+ private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
+ logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node" + numberOfNodes)))
+ .build();
+
+ RoutingTable routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ // move initializing to started
+
+ // Same stabilization loop as initCluster(): stop when the routing table
+ // instance no longer changes between iterations.
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+ // Removes the upper half of the nodes (indices (numberOfNodes+1)/2 through
+ // numberOfNodes, inclusive — including the node added by addNode()), then
+ // reroutes and drives allocation until the routing table stabilizes.
+ private ClusterState removeNodes(ClusterState clusterState, AllocationService strategy) {
+ logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")");
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+
+ for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
+ nodes.remove("node" + i);
+ }
+
+ clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("rebalancing");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("complete rebalancing");
+ // Stabilization loop: stop when applyStartedShards no longer produces a
+ // new routing table instance.
+ RoutingTable prev = routingTable;
+ while (true) {
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ if (routingTable == prev)
+ break;
+ prev = routingTable;
+ }
+
+ return clusterState;
+ }
+
+
+ // Asserts that every node's count of STARTED shards lies within
+ // [floor(avg - treshold), ceil(avg + treshold)], where avg is total shard
+ // copies divided by node count.
+ // NOTE(review): the nested Math.round(Math.round(...)) looks redundant —
+ // the inner round(double) already yields a long; the outer call only
+ // narrows via the float overload. Behavior is kept as-is here.
+ private void assertReplicaBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+ final int numShards = numberOfIndices * numberOfShards * (numberOfReplicas + 1);
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold)));
+ final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold)));
+
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() + ": " + node.shardsWithState(INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+
+ // Per-index variant of assertReplicaBalance: for each index, every node's
+ // count of that index's STARTED shards must lie within the threshold band
+ // around the per-index average (numShards here is copies of ONE index).
+ private void assertIndexBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+
+ final int numShards = numberOfShards * (numberOfReplicas + 1);
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold)));
+ final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold)));
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+// logger.info(node.nodeId() +":"+index+ ": " + node.shardsWithState(index, INITIALIZING, STARTED).size() + " shards ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(node.shardsWithState(index, STARTED).size(), Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+ // Primary-only balance check: for each index, counts primaries among a
+ // node's STARTED shards and asserts the count falls in the threshold band
+ // around numberOfShards / numberOfNodes.
+ // NOTE(review): not referenced by any test visible in this hunk —
+ // presumably kept for primary-balance coverage; confirm usage elsewhere.
+ private void assertPrimaryBalance(ESLogger logger, RoutingNodes nodes, int numberOfNodes, int numberOfIndices, int numberOfReplicas, int numberOfShards, float treshold) {
+
+ final int numShards = numberOfShards;
+ final float avgNumShards = (float) (numShards) / (float) (numberOfNodes);
+ final int minAvgNumberOfShards = Math.round(Math.round(Math.floor(avgNumShards - treshold)));
+ final int maxAvgNumberOfShards = Math.round(Math.round(Math.ceil(avgNumShards + treshold)));
+
+ for (String index : nodes.getRoutingTable().indicesRouting().keySet()) {
+ for (RoutingNode node : nodes) {
+ int primaries = 0;
+ for (ShardRouting shard : node.shardsWithState(index, STARTED)) {
+ primaries += shard.primary() ? 1 : 0;
+ }
+// logger.info(node.nodeId() + ": " + primaries + " primaries ("+minAvgNumberOfShards+" to "+maxAvgNumberOfShards+")");
+ assertThat(primaries, Matchers.greaterThanOrEqualTo(minAvgNumberOfShards));
+ assertThat(primaries, Matchers.lessThanOrEqualTo(maxAvgNumberOfShards));
+ }
+ }
+ }
+
+ // Verifies BalancedShardsAllocator reads its balance factors from initial
+ // settings and updates them via the NodeSettingsService listener:
+ // an unrelated refresh leaves the values untouched, a refresh carrying new
+ // factor values replaces them.
+ @Test
+ public void testPersistedSettings() {
+ Settings.Builder settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0);
+ // Capture the listener the allocator registers so the test can push
+ // settings refreshes to it directly; also asserts it registers once.
+ final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1];
+ NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) {
+
+ @Override
+ public void addListener(Listener listener) {
+ assertNull("addListener was called twice while only one time was expected", listeners[0]);
+ listeners[0] = listener;
+ }
+
+ };
+ BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service);
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ // Refresh with an unrelated setting: factors must keep their old values.
+ settings = settingsBuilder();
+ settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));
+
+ // Refresh with new factor values: the allocator must pick them up.
+ settings = settingsBuilder();
+ settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5);
+ settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1);
+ settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0);
+ listeners[0].onRefreshSettings(settings.build());
+ assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f));
+ assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f));
+ assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f));
+ }
+
+    /**
+     * Reproduces the layout sketched in the ASCII routing table inside the
+     * anonymous allocator below: node1 ends up holding two primaries while the
+     * cluster is otherwise balanced by shard count. After all shards are
+     * started, a reroute with the real allocator must leave every shard
+     * STARTED, i.e. primary overload alone does not trigger a rebalance.
+     */
+    @Test
+    public void testNoRebalanceOnPrimaryOverload() {
+        Settings.Builder settings = settingsBuilder();
+        // Hand-rolled allocator used only for the initial placement: rebalance
+        // and move always refuse, and allocateUnassigned pins every shard copy
+        // to a fixed node so the test starts from a known layout.
+        AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
+                new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(),
+                NoopGatewayAllocator.INSTANCE, new ShardsAllocator() {
+
+            @Override
+            public boolean rebalance(RoutingAllocation allocation) {
+                return false;
+            }
+
+            @Override
+            public boolean move(MutableShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                return false;
+            }
+
+            @Override
+            public void applyStartedShards(StartedRerouteAllocation allocation) {
+
+
+            }
+
+            @Override
+            public void applyFailedShards(FailedRerouteAllocation allocation) {
+            }
+
+            /*
+             * // this allocator tries to rebuild this scenario where a rebalance is
+             * // triggered solely by the primary overload on node [1] where a shard
+             * // is rebalanced to node 0
+              routing_nodes:
+              -----node_id[0][V]
+              --------[test][0], node[0], [R], s[STARTED]
+              --------[test][4], node[0], [R], s[STARTED]
+              -----node_id[1][V]
+              --------[test][0], node[1], [P], s[STARTED]
+              --------[test][1], node[1], [P], s[STARTED]
+              --------[test][3], node[1], [R], s[STARTED]
+              -----node_id[2][V]
+              --------[test][1], node[2], [R], s[STARTED]
+              --------[test][2], node[2], [R], s[STARTED]
+              --------[test][4], node[2], [P], s[STARTED]
+              -----node_id[3][V]
+              --------[test][2], node[3], [P], s[STARTED]
+              --------[test][3], node[3], [P], s[STARTED]
+              ---- unassigned
+             */
+            @Override
+            public boolean allocateUnassigned(RoutingAllocation allocation) {
+                RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
+                boolean changed = !unassigned.isEmpty();
+                // Pin each shard id (0..4) to the nodes shown in the table above.
+                for (MutableShardRouting sr : unassigned) {
+                    switch (sr.id()) {
+                        case 0:
+                            if (sr.primary()) {
+                                allocation.routingNodes().assign(sr, "node1");
+                            } else {
+                                allocation.routingNodes().assign(sr, "node0");
+                            }
+                            break;
+                        case 1:
+                            if (sr.primary()) {
+                                allocation.routingNodes().assign(sr, "node1");
+                            } else {
+                                allocation.routingNodes().assign(sr, "node2");
+                            }
+                            break;
+                        case 2:
+                            if (sr.primary()) {
+                                allocation.routingNodes().assign(sr, "node3");
+                            } else {
+                                allocation.routingNodes().assign(sr, "node2");
+                            }
+                            break;
+                        case 3:
+                            if (sr.primary()) {
+                                allocation.routingNodes().assign(sr, "node3");
+                            } else {
+                                allocation.routingNodes().assign(sr, "node1");
+                            }
+                            break;
+                        case 4:
+                            if (sr.primary()) {
+                                allocation.routingNodes().assign(sr, "node2");
+                            } else {
+                                allocation.routingNodes().assign(sr, "node0");
+                            }
+                            break;
+                    }
+
+                }
+                unassigned.clear();
+                return changed;
+            }
+        }), ClusterInfoService.EMPTY);
+        MetaData.Builder metaDataBuilder = MetaData.builder();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+        IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
+        metaDataBuilder = metaDataBuilder.put(indexMeta);
+        MetaData metaData = metaDataBuilder.build();
+        for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
+            routingTableBuilder.addAsNew(cursor.value);
+        }
+        RoutingTable routingTable = routingTableBuilder.build();
+        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+        for (int i = 0; i < 4; i++) {
+            DiscoveryNode node = newNode("node" + i);
+            nodes.put(node);
+        }
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        RoutingNodes routingNodes = clusterState.routingNodes();
+
+        // After the pinning allocator ran, every shard copy is initializing.
+        for (RoutingNode routingNode : routingNodes) {
+            for (MutableShardRouting mutableShardRouting : routingNode) {
+                assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.INITIALIZING));
+            }
+        }
+        // Switch to the real (balanced) allocator for the rest of the test.
+        strategy = createAllocationService(settings.build());
+
+        logger.info("use the new allocator and check if it moves shards");
+        routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+        for (RoutingNode routingNode : routingNodes) {
+            for (MutableShardRouting mutableShardRouting : routingNode) {
+                assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+            }
+        }
+
+        logger.info("start the replica shards");
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        for (RoutingNode routingNode : routingNodes) {
+            for (MutableShardRouting mutableShardRouting : routingNode) {
+                assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+            }
+        }
+
+        logger.info("rebalancing");
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // The reroute must not relocate anything: all copies remain STARTED.
+        for (RoutingNode routingNode : routingNodes) {
+            for (MutableShardRouting mutableShardRouting : routingNode) {
+                assertThat(mutableShardRouting.state(), Matchers.equalTo(ShardRoutingState.STARTED));
+            }
+        }
+
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTest.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTest.java
new file mode 100644
index 0000000000..481a036eaf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceUnbalancedClusterTest.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * see issue #9023
+ */
+@Slow
+public class BalanceUnbalancedClusterTest extends CatAllocationTestBase {
+
+    /**
+     * Unzips the recorded cat/shards snapshot for issue #9023 into a temp
+     * directory and returns the path of the extracted dump.
+     */
+    @Override
+    protected Path getCatPath() throws IOException {
+        Path tmp = createTempDir();
+        try (InputStream stream = Files.newInputStream(getDataPath("/org/elasticsearch/cluster/routing/issue_9023.zip"))) {
+            TestUtil.unzip(stream, tmp);
+        }
+        return tmp.resolve("issue_9023");
+    }
+
+    /**
+     * Adds a new 5-shard / 1-replica index to the rebalanced cluster,
+     * allocates it to completion, and asserts that its 10 shard copies
+     * spread evenly over the 4 nodes (each node gets 2 or 3 copies).
+     */
+    @Override
+    protected ClusterState allocateNew(ClusterState state) {
+        String index = "tweets-2014-12-29:00";
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .build());
+        MetaData metaData = MetaData.builder(state.metaData())
+                .put(IndexMetaData.builder(index).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder(state.routingTable())
+                .addAsNew(metaData.index(index))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(state).metaData(metaData).routingTable(routingTable).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // Drive allocation until no shard of any index is left initializing.
+        while (true) {
+            if (routingTable.shardsWithState(INITIALIZING).isEmpty()) {
+                break;
+            }
+            routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+            clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        }
+        // Count how many copies of the new index landed on each node.
+        Map<String, Integer> counts = new HashMap<>();
+        for (IndexShardRoutingTable table : routingTable.index(index)) {
+            for (ShardRouting r : table) {
+                String s = r.currentNodeId();
+                Integer count = counts.get(s);
+                if (count == null) {
+                    count = 0;
+                }
+                count++;
+                counts.put(s, count);
+            }
+        }
+        for (Map.Entry<String, Integer> count : counts.entrySet()) {
+            // we have 10 shards and 4 nodes so 2 nodes have 3 shards and 2 nodes have 2 shards
+            assertTrue("Node: " + count.getKey() + " has shard mismatch: " + count.getValue(), count.getValue() >= 2);
+            assertTrue("Node: " + count.getKey() + " has shard mismatch: " + count.getValue(), count.getValue() <= 3);
+
+        }
+        return clusterState;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestBase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestBase.java
new file mode 100644
index 0000000000..2ab363abd8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestBase.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * A base test case that allows running tests based on the output of the CAT API
+ * The input is a line based cat/shards output like:
+ * kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35
+ *
+ * The test builds up a cluster state from the cat input and optionally runs a full balance on it.
+ * This can be used to debug cluster allocation decisions.
+ */
+@Ignore
+public abstract class CatAllocationTestBase extends ElasticsearchAllocationTestCase {
+
+    /** Path to a line-based cat/shards dump used to seed the cluster state. */
+    protected abstract Path getCatPath() throws IOException;
+
+    /**
+     * Parses the cat/shards dump, builds a cluster state from it, optionally
+     * runs a full rebalance ({@link #balanceFirst()}), then hands the state
+     * to {@link #allocateNew(ClusterState)} for scenario-specific assertions.
+     */
+    @Test
+    public void run() throws IOException {
+        Set<String> nodes = new HashSet<>();
+        Map<String, Idx> indices = new HashMap<>();
+        try (BufferedReader reader = Files.newBufferedReader(getCatPath(), Charsets.UTF_8)) {
+            String line = null;
+            // regexp FTW
+            // groups: 1=index, 2=shard id, 3=r|p, 4=state, 5=node ip
+            Pattern pattern = Pattern.compile("^(.+)\\s+(\\d)\\s+([rp])\\s+(STARTED|RELOCATING|INITIALIZING|UNASSIGNED)\\s+\\d+\\s+[0-9.a-z]+\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+).*$");
+            while((line = reader.readLine()) != null) {
+                final Matcher matcher;
+                if ((matcher = pattern.matcher(line)).matches()) {
+                    final String index = matcher.group(1);
+                    Idx idx = indices.get(index);
+                    if (idx == null) {
+                        idx = new Idx(index);
+                        indices.put(index, idx);
+                    }
+                    final int shard = Integer.parseInt(matcher.group(2));
+                    final boolean primary = matcher.group(3).equals("p");
+                    ShardRoutingState state = ShardRoutingState.valueOf(matcher.group(4));
+                    String ip = matcher.group(5);
+                    nodes.add(ip);
+                    MutableShardRouting routing = new MutableShardRouting(index, shard, ip, primary, state, 1);
+                    idx.add(routing);
+                    logger.debug("Add routing {}", routing);
+                } else {
+                    fail("can't read line: " + line);
+                }
+            }
+
+        }
+
+        logger.info("Building initial routing table");
+        MetaData.Builder builder = MetaData.builder();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+        for(Idx idx : indices.values()) {
+            IndexMetaData idxMeta = IndexMetaData.builder(idx.name).settings(settings(Version.CURRENT)).numberOfShards(idx.numShards()).numberOfReplicas(idx.numReplicas()).build();
+            builder.put(idxMeta, false);
+            IndexRoutingTable.Builder tableBuilder = new IndexRoutingTable.Builder(idx.name).initializeAsRecovery(idxMeta);
+            // Fold all copies of the same shard id into one IndexShardRoutingTable.
+            Map<Integer, IndexShardRoutingTable> shardIdToRouting = new HashMap<>();
+            for (MutableShardRouting r : idx.routing) {
+                IndexShardRoutingTable refData = new IndexShardRoutingTable.Builder(new ShardId(idx.name, r.id()), true).addShard(r).build();
+                if (shardIdToRouting.containsKey(r.getId())) {
+                    refData = new IndexShardRoutingTable.Builder(shardIdToRouting.get(r.getId())).addShard(r).build();
+                }
+                shardIdToRouting.put(r.getId(), refData);
+
+            }
+            for (IndexShardRoutingTable t: shardIdToRouting.values()) {
+                tableBuilder.addIndexShard(t);
+            }
+            IndexRoutingTable table = tableBuilder.build();
+            routingTableBuilder.add(table);
+        }
+        MetaData metaData = builder.build();
+
+        RoutingTable routingTable = routingTableBuilder.build();
+        DiscoveryNodes.Builder builderDiscoNodes = DiscoveryNodes.builder();
+        for (String node : nodes) {
+            builderDiscoNodes.put(newNode(node));
+        }
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build();
+        if (balanceFirst()) {
+            clusterState = rebalance(clusterState);
+        }
+        clusterState = allocateNew(clusterState);
+    }
+
+    /** Hook for the concrete test: allocate something new and assert on it. */
+    protected abstract ClusterState allocateNew(ClusterState clusterState);
+
+    /** Whether to run a full rebalance before {@link #allocateNew(ClusterState)}. */
+    protected boolean balanceFirst() {
+        return true;
+    }
+
+    /**
+     * Reroutes and starts initializing shards in a loop until the cluster is
+     * fully balanced, logging the number of relocations it took.
+     */
+    private ClusterState rebalance(ClusterState clusterState) {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .build());
+        RoutingAllocation.Result reroute = strategy.reroute(clusterState);
+        RoutingTable routingTable = reroute.routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingTable = clusterState.routingTable();
+        int numRelocations = 0;
+        while (true) {
+            List<ShardRouting> initializing = routingTable.shardsWithState(INITIALIZING);
+            if (initializing.isEmpty()) {
+                break;
+            }
+            logger.debug(initializing.toString());
+            numRelocations += initializing.size();
+            routingTable = strategy.applyStartedShards(clusterState, initializing).routingTable();
+            clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        }
+        logger.debug("--> num relocations to get balance: " + numRelocations);
+        return clusterState;
+    }
+
+
+
+    /** Accumulates the parsed shard routings of a single index. */
+    public class Idx {
+        final String name;
+        final List<MutableShardRouting> routing = new ArrayList<>();
+
+        public Idx(String name) {
+            this.name = name;
+        }
+
+
+        public void add(MutableShardRouting r) {
+            routing.add(r);
+        }
+
+        /** Number of replicas, inferred from the non-primary copies of shard 0. */
+        public int numReplicas() {
+            int count = 0;
+            for (MutableShardRouting msr : routing) {
+                if (msr.primary() == false && msr.id()==0) {
+                    count++;
+                }
+            }
+            return count;
+        }
+
+        /** Number of shards, inferred from the highest primary shard id seen. */
+        public int numShards() {
+            int max = 0;
+            for (MutableShardRouting msr : routing) {
+                if (msr.primary()) {
+                    max = Math.max(msr.getId()+1, max);
+                }
+            }
+            return max;
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
new file mode 100644
index 0000000000..d738a8cbd6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -0,0 +1,633 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ClusterRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);
+
+    /**
+     * With rebalance type ALWAYS, adding a third node triggers a rebalance of
+     * the fully-started index test1 even though test2 is still allocating.
+     */
+    @Test
+    public void testAlways() {
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test1"))
+                .addAsNew(metaData.index("test2"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("start two nodes");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+            assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+            assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        }
+
+        for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+            assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+            assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        }
+
+        logger.info("start all the primary shards for test1, replicas will start initializing");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+            assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+//            assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+            assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+        }
+
+        for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+            assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+            assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        }
+
+        logger.info("start the test1 replica shards");
+        routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+            assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+            assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+        }
+
+        for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+            assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+            assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        }
+
+        logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node3")))
+                .build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // The rebalanced shard on node3 must come from the started index test1.
+        assertThat(routingNodes.node("node3").size(), equalTo(1));
+        assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+    }
+
+
+ @Test
+ public void testClusterPrimariesActive1() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), equalTo("test1"));
+ }
+
+ @Test
+ public void testClusterPrimariesActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ /**
+ * INDICES_ALL_ACTIVE policy: the decider permits rebalancing only once
+ * every primary AND replica shard of every index is active. Both test1
+ * and test2 are driven to fully started here, so bringing up a third
+ * node must result in exactly one shard relocating onto it.
+ */
+ @Test
+ public void testClusterAllActive1() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the test2 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // every shard in the cluster is active, so the decider lets one shard rebalance onto node3
+ assertThat(routingNodes.node("node3").size(), equalTo(1));
+ assertThat(routingNodes.node("node3").get(0).shardId().index().name(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ /**
+ * INDICES_ALL_ACTIVE policy, negative case: test2's primary shards are
+ * never started, so the cluster is not fully active and a newly added
+ * third node must receive no shards on reroute.
+ */
+ @Test
+ public void testClusterAllActive2() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // test2 is not fully active, so the decider blocked rebalancing: node3 stays empty
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+
+ /**
+ * INDICES_ALL_ACTIVE policy, negative case: test2's primaries are started
+ * but its replicas are still initializing, so the cluster is not yet fully
+ * active and a newly added third node must receive no shards on reroute.
+ */
+ @Test
+ public void testClusterAllActive3() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start the test1 replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test2, replicas will start initializing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test2", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // test2 replicas are still initializing, so rebalancing stays blocked: node3 empty
+ assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
new file mode 100644
index 0000000000..2cf4f13a3b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Verifies that cluster.routing.allocation.cluster_concurrent_rebalance (set to 3
+ * below) caps how many shards may relocate at the same time, draining the backlog
+ * in batches of at most 3 as each relocation round completes.
+ */
+public class ConcurrentRebalanceRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ConcurrentRebalanceRoutingTests.class);
+
+ @Test
+ public void testClusterConcurrentRebalance() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("start the replica shards, rebalancing should start, but, only 3 should be rebalancing");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // cluster_concurrent_rebalance is 3, so at most 3 of the 10 shards may relocate concurrently
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 3 more should relocate now");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // next batch: still capped at 3 concurrent relocations
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7))
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+ logger.info("finalize this session relocation, 2 more should relocate now")
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // only 2 shards are left to move, which is below the concurrency cap of 3
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(2));
+
+ logger.info("finalize this session relocation, no more relocation");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // all shards have reached their final nodes; nothing is left to relocate
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(0));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
new file mode 100644
index 0000000000..5fc7cb58ab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link AllocationService} behaviour when nodes that hold started
+ * or relocating shards are removed from the cluster ("dead" nodes).
+ */
+public class DeadNodesAllocationTests extends ElasticsearchAllocationTestCase {
+
+    private final ESLogger logger = Loggers.getLogger(DeadNodesAllocationTests.class);
+
+    /**
+     * Starts a primary and a replica on two nodes, removes the node holding
+     * the primary, and verifies the surviving copy ends up as a started
+     * primary on the remaining node.
+     */
+    @Test
+    public void simpleDeadNodeOnStartedPrimaryShard() {
+        AllocationService allocation = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .build());
+
+        logger.info("--> building initial routing table");
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding 2 nodes on same rack and do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+
+        RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // starting primaries
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+        // starting replicas
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        logger.info("--> verifying all is allocated");
+        assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+        logger.info("--> fail node with primary");
+        String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+        String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+        // rebuilding the node set without the failed node is how these tests simulate a node death
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode(nodeIdRemaining))
+        ).build();
+
+        rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // the surviving copy must now be a started primary
+        assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).primary(), equalTo(true));
+        assertThat(clusterState.routingNodes().node(nodeIdRemaining).get(0).state(), equalTo(STARTED));
+    }
+
+    /**
+     * While the primary is relocating to a freshly added third node, that
+     * target node dies; the relocation must be cancelled and both original
+     * copies must remain started on their nodes.
+     */
+    @Test
+    public void deadNodeWhileRelocatingOnToNode() {
+        AllocationService allocation = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .build());
+
+        logger.info("--> building initial routing table");
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding 2 nodes on same rack and do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+
+        RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // starting primaries
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+        // starting replicas
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        logger.info("--> verifying all is allocated");
+        assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+        logger.info("--> adding additional node");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                        .put(newNode("node3"))
+        ).build();
+        rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // node3 joins empty; nothing moves until explicitly commanded below
+        assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+        String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+        String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+        logger.info("--> moving primary shard to node3");
+        rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+                        new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+        );
+        assertThat(rerouteResult.changed(), equalTo(true));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+        assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+        assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+        logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode(origPrimaryNodeId))
+                        .put(newNode(origReplicaNodeId))
+        ).build();
+        rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // relocation is cancelled: both original copies are back to STARTED
+        assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+    }
+
+    /**
+     * While the primary is relocating to a third node, the relocation
+     * <em>source</em> node dies; the copy on the target node keeps
+     * initializing and the replica stays started.
+     */
+    @Test
+    public void deadNodeWhileRelocatingOnFromNode() {
+        AllocationService allocation = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .build());
+
+        logger.info("--> building initial routing table");
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding 2 nodes on same rack and do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+
+        RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // starting primaries
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+        // starting replicas
+        rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        logger.info("--> verifying all is allocated");
+        assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+        logger.info("--> adding additional node");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                        .put(newNode("node3"))
+        ).build();
+        rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+        String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+        String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+        logger.info("--> moving primary shard to node3");
+        rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+                        new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+        );
+        assertThat(rerouteResult.changed(), equalTo(true));
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+        assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+        assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+        logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node3"))
+                        .put(newNode(origReplicaNodeId))
+        ).build();
+        rerouteResult = allocation.reroute(clusterState);
+        clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+        // replica survives untouched; the copy on node3 keeps initializing
+        assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+        assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
new file mode 100644
index 0000000000..fb6a3198ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DisableAllocationTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the cluster- and index-level "disable allocation" settings
+ * interpreted by {@link DisableAllocationDecider}.
+ */
+public class DisableAllocationTests extends ElasticsearchAllocationTestCase {
+
+    private final ESLogger logger = Loggers.getLogger(DisableAllocationTests.class);
+
+    /**
+     * With cluster-wide allocation of new and existing shards disabled,
+     * no shard may start initializing even after nodes join.
+     */
+    @Test
+    public void testClusterDisableAllocation() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)
+                .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)
+                .build());
+
+        logger.info("Building initial routing table");
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding two nodes and do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // allocation is fully disabled, so nothing may initialize
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+    }
+
+    /**
+     * With only replica allocation disabled, the primary is allowed to
+     * initialize and start, but its replica must never be assigned.
+     */
+    @Test
+    public void testClusterDisableReplicaAllocation() {
+        // use the decider's constant instead of a hard-coded settings key,
+        // consistent with the other tests in this class
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put(DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_REPLICA_ALLOCATION, true)
+                .build());
+
+        logger.info("Building initial routing table");
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding two nodes do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // only the primary initializes; the replica is blocked
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        // with the primary started, the replica must still not initialize
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+    }
+
+    /**
+     * Per-index disable settings must only block the index that carries them:
+     * the "enabled" index allocates fully while "disabled" stays unassigned.
+     */
+    @Test
+    public void testIndexDisableAllocation() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .build());
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("disabled").settings(settings(Version.CURRENT).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true).put(DisableAllocationDecider.INDEX_ROUTING_ALLOCATION_DISABLE_NEW_ALLOCATION, true)).numberOfShards(1).numberOfReplicas(1))
+                .put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("disabled"))
+                .addAsNew(metaData.index("enabled"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding two nodes and do rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                        .put(newNode("node1"))
+                        .put(newNode("node2"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // only the "enabled" index's primary may initialize
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logger.info("--> start the shards (replicas)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("--> verify only enabled index has been routed");
+        assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+        assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
new file mode 100644
index 0000000000..eb341deded
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that when the node holding a primary is killed while that shard's
+ * replica is relocating, every shard still ends up with an active primary.
+ */
+public class ElectReplicaAsPrimaryDuringRelocationTests extends ElasticsearchAllocationTestCase {
+
+    private final ESLogger logger = Loggers.getLogger(ElectReplicaAsPrimaryDuringRelocationTests.class);
+
+    @Test
+    public void testElectReplicaAsPrimaryDuringRelocation() {
+        AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+        logger.info("Building initial routing table");
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("Adding two nodes and performing rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Start the primary shards");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Start the replica shards");
+        routingNodes = clusterState.routingNodes();
+        // keep the previous table so we can assert the replica start produced a new one
+        RoutingTable prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        assertThat(prevRoutingTable != routingTable, equalTo(true));
+        assertThat(routingTable.index("test").shards().size(), equalTo(2));
+        assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+        assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+
+        logger.info("Start another node and perform rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("find the replica shard that gets relocated");
+        IndexShardRoutingTable indexShardRoutingTable = null;
+        if (routingTable.index("test").shard(0).replicaShards().get(0).relocating()) {
+            indexShardRoutingTable = routingTable.index("test").shard(0);
+        } else if (routingTable.index("test").shard(1).replicaShards().get(0).relocating()) {
+            indexShardRoutingTable = routingTable.index("test").shard(1);
+        }
+
+        // we might have primary relocating, and the test is only for replicas, so only test in the case of replica allocation
+        if (indexShardRoutingTable != null) {
+            logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
+            clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+            routingTable = strategy.reroute(clusterState).routingTable();
+            clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+            logger.info("make sure all the primary shards are active");
+            assertThat(routingTable.index("test").shard(0).primaryShard().active(), equalTo(true));
+            assertThat(routingTable.index("test").shard(1).primaryShard().active(), equalTo(true));
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
new file mode 100644
index 0000000000..5eab884605
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class FailedNodeRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);
+
+    /**
+     * Allocates a primary/replica pair for two indices across four nodes,
+     * then removes the two nodes holding the primaries and verifies that
+     * after the reroute each remaining node carries one started shard and
+     * one initializing shard.
+     */
+    @Test
+    public void simpleFailedNodeTest() {
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test1"))
+                .addAsNew(metaData.index("test2"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("start 4 nodes");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("start all the primary shards, replicas will start initializing");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("start the replica shards");
+        routingNodes = clusterState.routingNodes();
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // with 4 shard copies on 4 nodes, each node holds exactly one started shard
+        assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+        assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+        assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+        assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+        logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                        .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+                        .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+        )
+                .build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // each survivor keeps its started shard and receives one new initializing copy
+        for (RoutingNode routingNode : routingNodes) {
+            assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+            assertThat(routingNode.numberOfShardsWithState(INITIALIZING), equalTo(1));
+        }
+    }
+
+ @Test
+ public void simpleFailedNodeTestNoReassign() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start 4 nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ logger.info("start the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node4").numberOfShardsWithState(STARTED), equalTo(1));
+
+
+ logger.info("remove 2 nodes where primaries are allocated, reroute");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .remove(routingTable.index("test1").shard(0).primaryShard().currentNodeId())
+ .remove(routingTable.index("test2").shard(0).primaryShard().currentNodeId())
+ )
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.rerouteWithNoReassign(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(1));
+ }
+ assertThat(routingNodes.unassigned().size(), equalTo(2));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
new file mode 100644
index 0000000000..505de07e16
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
@@ -0,0 +1,581 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class FailedShardsRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FailedShardsRoutingTests.class);
+
+ @Test
+ public void testFailedShardPrimaryRelocatingToAndFrom() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("--> building initial routing table");
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding 2 nodes on same rack and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // starting primaries
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ // starting replicas
+ rerouteResult = allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("--> verifying all is allocated");
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+
+ logger.info("--> adding additional node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node("node1").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node1").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node2").size(), equalTo(1));
+ assertThat(clusterState.routingNodes().node("node2").get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ String origPrimaryNodeId = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String origReplicaNodeId = clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId();
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node3 being initialized");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node("node3").get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingNodes().node("node3").size(), equalTo(0));
+
+ logger.info("--> moving primary shard to node3");
+ rerouteResult = allocation.reroute(clusterState, new AllocationCommands(
+ new MoveAllocationCommand(clusterState.routingTable().index("test").shard(0).primaryShard().shardId(), clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), "node3"))
+ );
+ assertThat(rerouteResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().node(origPrimaryNodeId).get(0).state(), equalTo(RELOCATING));
+ assertThat(clusterState.routingNodes().node("node3").get(0).state(), equalTo(INITIALIZING));
+
+ logger.info("--> fail primary shard recovering instance on node1 being relocated");
+ rerouteResult = allocation.applyFailedShard(clusterState, new ImmutableShardRouting(clusterState.routingNodes().node(origPrimaryNodeId).get(0)));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ // check promotion of replica to primary
+ assertThat(clusterState.routingNodes().node(origReplicaNodeId).get(0).state(), equalTo(STARTED));
+ assertThat(clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(), equalTo(origReplicaNodeId));
+ assertThat(clusterState.routingTable().index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(origPrimaryNodeId), equalTo("node3")));
+ }
+
+ @Test
+ public void failPrimaryStartedCheckReplicaElected() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("fail the primary shard, will have no place to be rerouted to (single node), so stays unassigned");
+ ShardRouting shardToFail = new ImmutableShardRouting(routingTable.index("test").shard(0).primaryShard());
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, shardToFail).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), not(equalTo(shardToFail.currentNodeId())));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+
+ logger.info("fail the shard again, check that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, shardToFail).changed(), equalTo(false));
+ }
+
+ @Test
+ public void firstAllocationFailureSingleNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding single node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will have no place to be rerouted to (single node), so stays unassigned");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, "node1", true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void singleShardMultipleAllocationFailures() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("Building initial routing table");
+ int numberOfReplicas = scaledRandomIntBetween(2, 10);
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(numberOfReplicas))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1);
+ DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder();
+ for (int i = 0; i < numberOfReplicas + 1; i++) {
+ nodeBuilder.put(newNode("node" + Integer.toString(i)));
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
+ while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) {
+ // start all initializing
+ clusterState = ClusterState.builder(clusterState)
+ .routingTable(strategy
+ .applyStartedShards(clusterState, clusterState.routingTable().shardsWithState(INITIALIZING)).routingTable()
+ )
+ .build();
+ // and assign more unassigned
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+ }
+
+ int shardsToFail = randomIntBetween(1, numberOfReplicas);
+ ArrayList<ShardRouting> failedShards = new ArrayList<>();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ for (int i = 0; i < shardsToFail; i++) {
+ String n = "node" + Integer.toString(randomInt(numberOfReplicas));
+ logger.info("failing shard on node [{}]", n);
+ ShardRouting shardToFail = routingNodes.node(n).get(0);
+ failedShards.add(new MutableShardRouting(shardToFail));
+ }
+
+ routingTable = strategy.applyFailedShards(clusterState, failedShards).routingTable();
+
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ for (ShardRouting failedShard : failedShards) {
+ if (!routingNodes.node(failedShard.currentNodeId()).isEmpty()) {
+ fail("shard " + failedShard + " was re-assigned to it's node");
+ }
+ }
+ }
+
+ @Test
+ public void firstAllocationFailureTwoNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the first shard, will start INITIALIZING on the second node");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), not(equalTo(nodeHoldingPrimary)));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("fail the shard again, see that nothing happens");
+ assertThat(strategy.applyFailedShard(clusterState, new ImmutableShardRouting("test", 0, nodeHoldingPrimary, true, INITIALIZING, 0)).changed(), equalTo(false));
+ }
+
+ @Test
+ public void rebalanceFailure() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the shards (primaries)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Start the shards (backups)");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), anyOf(equalTo("node1"), equalTo("node2")));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), anyOf(equalTo("node2"), equalTo("node1")));
+ }
+
+ logger.info("Adding third node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+
+ logger.info("Fail the shards on node 3");
+ ShardRouting shardToFail = routingNodes.node("node3").get(0);
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, new ImmutableShardRouting(shardToFail)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(3));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // make sure the failedShard is not INITIALIZING again on node3
+ assertThat(routingNodes.node("node3").get(0).shardId(), not(equalTo(shardToFail.shardId())));
+ }
+
+ @Test
+ public void testFailAllReplicasInitializingOnPrimaryFail() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // add 4 nodes
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
+ // start primary shards
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // fail the primary shard, check replicas get removed as well...
+ ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
+ RoutingAllocation.Result routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(routingResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingResult.routingTable()).build();
+ // the primary gets allocated on another node, replicas are unassigned
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
+ assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
+
+ // start the primary shard
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // simulate another failure coming in, with the "old" shard routing, verify that nothing changes, and we ignore it
+ routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(routingResult.changed(), equalTo(false));
+ }
+
+ @Test
+ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToElect() {
+ AllocationService allocation = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // add 4 nodes
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
+ // start primary shards
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // start another replica shard, while keeping one initializing
+ clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, ImmutableList.of(clusterState.routingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ // fail the primary shard, check one replica gets elected to primary, others become INITIALIZING (from it)
+ ShardRouting primaryShardToFail = clusterState.routingTable().index("test").shard(0).primaryShard();
+ RoutingAllocation.Result routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(routingResult.changed(), equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingResult.routingTable()).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ ShardRouting newPrimaryShard = clusterState.routingTable().index("test").shard(0).primaryShard();
+ assertThat(newPrimaryShard, not(equalTo(primaryShardToFail)));
+
+ // simulate another failure coming in, with the "old" shard routing, verify that nothing changes, and we ignore it
+ routingResult = allocation.applyFailedShard(clusterState, primaryShardToFail);
+ assertThat(routingResult.changed(), equalTo(false));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
new file mode 100644
index 0000000000..786a0f9e56
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class FilterRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(FilterRoutingTests.class);
+
+ @Test
+ public void testClusterFilters() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.include.tag1", "value1,value2")
+ .put("cluster.routing.allocation.exclude.tag1", "value3,value4")
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding four nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+ }
+
+ @Test
+ public void testIndexFilters() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value2")
+ .put("index.routing.allocation.exclude.tag1", "value3,value4")
+ .build()))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding four nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))
+ .put(newNode("node3", ImmutableMap.of("tag1", "value3")))
+ .put(newNode("node4", ImmutableMap.of("tag1", "value4")))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> make sure shards are only allocated on tag1 with value1 and value2");
+ List<MutableShardRouting> startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node2")));
+ }
+
+ logger.info("--> switch between value2 and value4, shards should be relocating");
+
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .put("index.routing.allocation.include.tag1", "value1,value4")
+ .put("index.routing.allocation.exclude.tag1", "value2,value3")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.RELOCATING).size(), equalTo(2));
+
+ logger.info("--> finish relocation");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ startedShards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(startedShards.size(), equalTo(4));
+ for (MutableShardRouting startedShard : startedShards) {
+ assertThat(startedShard.currentNodeId(), Matchers.anyOf(equalTo("node1"), equalTo("node4")));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
new file mode 100644
index 0000000000..85bcfb8a3e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
@@ -0,0 +1,539 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests that shards of multiple indices end up balanced across nodes as nodes are added and shards are started.
+ */
+public class IndexBalanceTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(IndexBalanceTests.class);
+
+ @Test
+ public void testBalanceAllNodesStarted() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the remaining shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ @Test
+ public void testBalanceIncrementallyStartNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test1").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+ }
+
+ /**
+  * Allocates a 3-shard / 1-replica index ("test") across three nodes until
+  * every copy is STARTED, then adds a second identical index ("test1") and
+  * verifies the allocator balances it the same way: each node ends up with
+  * two started shards of each index (4 started shards per node in total).
+  */
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // Before any node joins, every shard copy is UNASSIGNED with no node id.
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // First reroute assigns only primaries (INITIALIZING); replicas stay
+ // unassigned until their primary has started.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Node set is unchanged, so reroute must yield the very same table instance.
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Primaries are now STARTED, which lets the replicas begin initializing.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // NOTE(review): prevRoutingTable was not refreshed before the
+ // applyStartedShards call above, so this compares against a stale table
+ // and is trivially true -- confirm the intent.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ // All six copies of "test" started: two per node.
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
+ // NOTE(review): this assignment is overwritten below before it is ever
+ // compared -- it looks dead.
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Same lifecycle for the new index: primaries initialize first.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they
+ // recover from primary *started* shards in the
+ // IndicesClusterStateService
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().size(), equalTo(1));
+ }
+
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+
+ // Final state: both indices balanced -- 4 started shards per node,
+ // 2 of them belonging to "test1".
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
new file mode 100644
index 0000000000..006fa12208
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -0,0 +1,343 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class NodeVersionAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(NodeVersionAllocationDeciderTests.class);
+
+ /**
+  * A previous-version node ("node3") joining a current-version cluster must
+  * receive no replica copies -- the remaining replicas stay UNASSIGNED --
+  * while a current-version node ("node4") added afterwards does receive
+  * them and they start normally.
+  */
+ @Test
+ public void testDoNotAllocateFromPrimary() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // 5 shards x (1 primary + 2 replicas), all unassigned before nodes join.
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
+
+ logger.info("start two nodes and fully start the shards");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ }
+
+ logger.info("start all the primary shards, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // With only two nodes, one replica per shard can initialize; the other
+ // has nowhere to go and remains unassigned.
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+ // Add a node running the *previous* version: the decider must keep the
+ // remaining replicas UNASSIGNED rather than allocate them to it.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3", VersionUtils.getPreviousVersion())))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(UNASSIGNED).size(), equalTo(1));
+ }
+
+
+ // A current-version node CAN take the remaining replica.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node4")))
+ .build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ }
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShardsWithState(STARTED).size(), equalTo(2));
+ }
+ }
+
+
+ /**
+  * Randomized churn: repeatedly replace the node set with a random number of
+  * nodes of mixed versions, then {@link #stabilize} -- which asserts after
+  * every allocation step that each recovery's source/target node pair
+  * satisfies the version constraint.
+  */
+ @Test
+ public void testRandom() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+ MetaData.Builder builder = MetaData.builder();
+ RoutingTable.Builder rtBuilder = RoutingTable.builder();
+ int numIndices = between(1, 20);
+ for (int i = 0; i < numIndices; i++) {
+ builder.put(IndexMetaData.builder("test_" + i).settings(settings(Version.CURRENT)).numberOfShards(between(1, 5)).numberOfReplicas(between(0, 2)));
+ }
+ MetaData metaData = builder.build();
+
+ for (int i = 0; i < numIndices; i++) {
+ rtBuilder.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rtBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(routingTable.allShards().size()));
+ List<DiscoveryNode> nodes = new ArrayList<>();
+ int nodeIdx = 0;
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ int numNodes = between(1, 20);
+ // Either shrink the node set to a random subset, or grow it with new
+ // nodes of random (mostly current/previous) versions.
+ if (nodes.size() > numNodes) {
+ Collections.shuffle(nodes, getRandom());
+ nodes = nodes.subList(0, numNodes);
+ } else {
+ for (int j = nodes.size(); j < numNodes; j++) {
+ if (frequently()) {
+ nodes.add(newNode("node" + (nodeIdx++), randomBoolean() ? VersionUtils.getPreviousVersion() : Version.CURRENT));
+ } else {
+ nodes.add(newNode("node" + (nodeIdx++), randomVersion(random())));
+ }
+ }
+ }
+ for (DiscoveryNode node : nodes) {
+ nodesBuilder.put(node);
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ clusterState = stabilize(clusterState, service);
+ }
+ }
+
+ /**
+  * Simulates a rolling upgrade: previous-version nodes are replaced one step
+  * at a time by current-version nodes, stabilizing after each step; at the
+  * end all shard copies are STARTED on the upgraded nodes.
+  */
+ @Test
+ public void testRollingRestart() {
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(5));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
+ }
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", VersionUtils.getPreviousVersion()))
+ .put(newNode("old1", VersionUtils.getPreviousVersion()))
+ .put(newNode("old2", VersionUtils.getPreviousVersion()))).build();
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("old0", VersionUtils.getPreviousVersion()))
+ .put(newNode("old1", VersionUtils.getPreviousVersion()))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ // NOTE(review): "node0" breaks the earlier "old*" naming; behavior-wise
+ // it is just another previous-version node, but confirm it is intentional.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node0", VersionUtils.getPreviousVersion()))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("new2"))
+ .put(newNode("new1"))
+ .put(newNode("new0"))).build();
+
+ clusterState = stabilize(clusterState, service);
+ routingTable = clusterState.routingTable();
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(2).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), notNullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), notNullValue());
+ }
+ }
+
+ /**
+  * Reroutes, then repeatedly applies started shards until the routing table
+  * stops changing (same instance returned), asserting the recovery version
+  * constraint after each step. Fails the test if no stable state is reached
+  * within the iteration limit.
+  */
+ private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
+ logger.trace("RoutingNodes: {}", clusterState.routingNodes().prettyPrint());
+
+ RoutingTable routingTable = service.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertRecoveryNodeVersions(routingNodes);
+
+ logger.info("complete rebalancing");
+ RoutingTable prev = routingTable;
+ boolean stable = false;
+ for (int i = 0; i < 1000; i++) { // at most 1000 iters - this should be enough for all tests
+ logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
+ routingTable = service.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+ // Intentional assignment-in-condition: stable once the table instance
+ // no longer changes between rounds.
+ if (stable = (routingTable == prev)) {
+ break;
+ }
+ assertRecoveryNodeVersions(routingNodes);
+ prev = routingTable;
+ }
+ logger.info("stabilized success [{}]", stable);
+ assertThat(stable, is(true));
+ return clusterState;
+ }
+
+ /**
+  * Asserts that every RELOCATING shard, and every INITIALIZING non-primary
+  * that is recovering from an active primary (not relocating), targets a
+  * node whose version is on or after the source node's version.
+  */
+ // NOTE(review): 'final' is redundant on a private method.
+ private final void assertRecoveryNodeVersions(RoutingNodes routingNodes) {
+ logger.trace("RoutingNodes: {}", routingNodes.prettyPrint());
+
+ List<MutableShardRouting> mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.RELOCATING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ String toId = r.relocatingNodeId();
+ String fromId = r.currentNodeId();
+ assertThat(fromId, notNullValue());
+ assertThat(toId, notNullValue());
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+
+ mutableShardRoutings = routingNodes.shardsWithState(ShardRoutingState.INITIALIZING);
+ for (MutableShardRouting r : mutableShardRoutings) {
+ if (r.initializing() && r.relocatingNodeId() == null && !r.primary()) {
+ MutableShardRouting primary = routingNodes.activePrimary(r);
+ assertThat(primary, notNullValue());
+ String fromId = primary.currentNodeId();
+ String toId = r.currentNodeId();
+ logger.trace("From: " + fromId + " with Version: " + routingNodes.node(fromId).node().version() + " to: " + toId + " with Version: " + routingNodes.node(toId).node().version());
+ assertTrue(routingNodes.node(toId).node().version().onOrAfter(routingNodes.node(fromId).node().version()));
+ }
+ }
+
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
new file mode 100644
index 0000000000..1c5b2ea7eb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that when a node rejoins the cluster, unassigned primaries are
+ * recovered as fresh local initializations (throttled by
+ * {@code node_initial_primaries_recoveries}) in preference to relocating
+ * primaries that an allocation filter is pushing off another node.
+ */
+public class PreferLocalPrimariesToRelocatingPrimariesTests extends ElasticsearchAllocationTestCase {
+ @Test
+ public void testPreferLocalPrimaryAllocationOverFiltered() {
+ // Randomized throttles and shard counts vary the scenario per run; the
+ // invariant asserted at the bottom must hold for any combination.
+ int concurrentRecoveries = randomIntBetween(1, 10);
+ int primaryRecoveries = randomIntBetween(1, 10);
+ int numberOfShards = randomIntBetween(5, 20);
+ int totalNumberOfShards = numberOfShards * 2;
+
+ logger.info("create an allocation with [{}] initial primary recoveries and [{}] concurrent recoveries", primaryRecoveries, concurrentRecoveries);
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", concurrentRecoveries)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", primaryRecoveries)
+ .build());
+
+ logger.info("create 2 indices with [{}] no replicas, and wait till all are allocated", numberOfShards);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(numberOfShards).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // The nodes carry distinct "tag1" attribute values so that an exclude
+ // filter can later target node2 (tag1=value2) specifically.
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))
+ .put(newNode("node2", ImmutableMap.of("tag1", "value2")))).build();
+
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Drive the cluster to a fully started state.
+ while (!clusterState.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ logger.info("remove one of the nodes and apply filter to move everything from another node");
+
+ // Rebuild the index settings with an exclude filter on tag1=value2, so
+ // shards remaining on node2 must be moved away.
+ metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", 0)
+ .put("index.routing.allocation.exclude.tag1", "value2")
+ .build()))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards);
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(numberOfShards));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingTable().shardsWithState(UNASSIGNED).size(), equalTo(numberOfShards));
+
+ logger.info("start node back up");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node1", ImmutableMap.of("tag1", "value1")))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Until the cluster is fully started again, every batch of non-relocation
+ // (local) initializations must match the initial-primaries throttle, or
+ // whatever smaller amount still needs to be initialized.
+ while (clusterState.routingNodes().shardsWithState(STARTED).size() < totalNumberOfShards) {
+ int localInitializations = 0;
+ int relocatingInitializations = 0;
+ for (MutableShardRouting routing : clusterState.routingNodes().shardsWithState(INITIALIZING)) {
+ if (routing.relocatingNodeId() == null) {
+ localInitializations++;
+ } else {
+ relocatingInitializations++;
+ }
+ }
+ int needToInitialize = totalNumberOfShards - clusterState.routingNodes().shardsWithState(STARTED).size() - clusterState.routingNodes().shardsWithState(RELOCATING).size();
+ logger.info("local initializations: [{}], relocating: [{}], need to initialize: {}", localInitializations, relocatingInitializations, needToInitialize);
+ assertThat(localInitializations, equalTo(Math.min(primaryRecoveries, needToInitialize)));
+ clusterState = startRandomInitializingShard(clusterState, strategy);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
new file mode 100644
index 0000000000..d9607e9ac4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class PreferPrimaryAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PreferPrimaryAllocationTests.class);
+
+ /**
+ * With recoveries throttled to one per node, a newly created index's
+ * primaries still get their initialization slots ahead of the pending
+ * replica recoveries of the existing indices.
+ */
+ @Test
+ public void testPreferPrimaryAllocationOverReplicas() {
+ logger.info("create an allocation with 1 initial recoveries");
+ AllocationService service = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 1)
+ .build());
+
+ logger.info("create several indices with no replicas, and wait till all are allocated");
+
+ MetaData meta = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(0))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(0))
+ .build();
+
+ RoutingTable table = RoutingTable.builder()
+ .addAsNew(meta.index("test1"))
+ .addAsNew(meta.index("test2"))
+ .build();
+
+ ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(meta).routingTable(table).build();
+
+ logger.info("adding two nodes and performing rerouting till all are allocated");
+ state = ClusterState.builder(state).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ table = service.reroute(state).routingTable();
+ state = ClusterState.builder(state).routingTable(table).build();
+
+ // Repeatedly start whatever is initializing until the cluster is stable.
+ while (!state.routingNodes().shardsWithState(INITIALIZING).isEmpty()) {
+ table = service.applyStartedShards(state, state.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ state = ClusterState.builder(state).routingTable(table).build();
+ }
+
+ logger.info("increasing the number of replicas to 1, and perform a reroute (to get the replicas allocation going)");
+ table = RoutingTable.builder(table).updateNumberOfReplicas(1).build();
+ meta = MetaData.builder(state.metaData()).updateNumberOfReplicas(1).build();
+ state = ClusterState.builder(state).routingTable(table).metaData(meta).build();
+
+ table = service.reroute(state).routingTable();
+ state = ClusterState.builder(state).routingTable(table).build();
+
+ logger.info("2 replicas should be initializing now for the existing indices (we throttle to 1)");
+ assertThat(state.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ logger.info("create a new index");
+ meta = MetaData.builder(state.metaData())
+ .put(IndexMetaData.builder("new_index").settings(settings(Version.CURRENT)).numberOfShards(4).numberOfReplicas(0))
+ .build();
+
+ table = RoutingTable.builder(state.routingTable())
+ .addAsNew(meta.index("new_index"))
+ .build();
+
+ state = ClusterState.builder(state).metaData(meta).routingTable(table).build();
+
+ logger.info("reroute, verify that primaries for the new index primary shards are allocated");
+ table = service.reroute(state).routingTable();
+ state = ClusterState.builder(state).routingTable(table).build();
+
+ // Two of the four new primaries fit within the per-node throttle of one.
+ assertThat(state.routingTable().index("new_index").shardsWithState(INITIALIZING).size(), equalTo(2));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
new file mode 100644
index 0000000000..7d900e889f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PrimaryElectionRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryElectionRoutingTests.class);
+
+ /**
+ * When the node holding the primary leaves, the started replica is promoted
+ * to primary and a new replica is allocated onto the remaining spare node.
+ */
+ @Test
+ public void testBackupElectionToPrimaryWhenPrimaryCanBeAllocatedToAnotherNode() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the backup shard (on node2)");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Adding third node and reroute and kill first node");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build();
+ // Capture the pre-reroute table once, only to assert below that the
+ // reroute produced a new table. (Earlier intermediate copies of this
+ // variable were dead stores and have been removed.)
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingNodes.node("node1"), nullValue());
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ // verify where the primary is
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+
+ /**
+ * If the node holding a primary dies while that primary's replica is still
+ * initializing, the initializing replica is removed and the shard is
+ * re-initialized as a primary on the surviving node.
+ */
+ @Test
+ public void testRemovingInitializingReplicasIfPrimariesFails() {
+ AllocationService allocation = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ rerouteResult = allocation.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ // Both primaries started; both replicas still initializing.
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(2));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(2));
+
+ // now, fail one node, while the replica is initializing, and it also holds a primary
+ logger.info("--> fail node with primary");
+ String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
+ String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode(nodeIdRemaining))
+ ).build();
+ rerouteResult = allocation.reroute(clusterState);
+ clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
+ routingNodes = clusterState.routingNodes();
+
+ // One primary survives started; the other shard re-initializes as a
+ // primary on the remaining node rather than staying a replica.
+ assertThat(routingNodes.shardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingNodes.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingNodes.node(nodeIdRemaining).shardsWithState(INITIALIZING).get(0).primary(), equalTo(true));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
new file mode 100644
index 0000000000..ae3df56926
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(PrimaryNotRelocatedWhileBeingRecoveredTests.class);
+
+ /**
+ * While replicas are still recovering from their primaries, adding another
+ * node must not trigger a relocation of those primaries: the shard counts
+ * (5 STARTED primaries, 5 INITIALIZING replicas) stay unchanged.
+ */
+ @Test
+ public void testPrimaryNotRelocatedWhileBeingRecoveredFrom() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // Only node1 joins at this point (the previous log message incorrectly
+ // claimed two nodes were added).
+ logger.info("Adding a single node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+
+ logger.info("start another node, replica will start recovering from primary");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+
+ logger.info("start another node, make sure the primary is not relocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Same counts as before node3 joined: no primary was relocated and no
+ // extra recoveries were started.
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
new file mode 100644
index 0000000000..df15051c22
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaData.Builder;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RandomAllocationDeciderTests extends ElasticsearchAllocationTestCase {
+
+ /* This test will make random allocation decision on a growing and shrinking
+ * cluster leading to a random distribution of the shards. After a certain
+ * amount of iterations the test allows allocation unless the same shard is
+ * already allocated on a node and balances the cluster to gain optimal
+ * balance.*/
+ @Test
+ public void testRandomDecisions() {
+ RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom());
+ AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(Settings.EMPTY,
+ new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY),
+ randomAllocationDecider))), new ShardsAllocators(NoopGatewayAllocator.INSTANCE), ClusterInfoService.EMPTY);
+ // Build a randomized index layout, tracking the total number of shard
+ // copies so the fully-allocated end state can be verified.
+ int indices = scaledRandomIntBetween(1, 20);
+ Builder metaBuilder = MetaData.builder();
+ int maxNumReplicas = 1;
+ int totalNumShards = 0;
+ for (int i = 0; i < indices; i++) {
+ int replicas = scaledRandomIntBetween(0, 6);
+ maxNumReplicas = Math.max(maxNumReplicas, replicas + 1);
+ int numShards = scaledRandomIntBetween(1, 20);
+ totalNumShards += numShards * (replicas + 1);
+ metaBuilder.put(IndexMetaData.builder("INDEX_" + i).settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(replicas));
+
+ }
+ MetaData metaData = metaBuilder.build();
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < indices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("INDEX_" + i));
+ }
+
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ int numIters = scaledRandomIntBetween(5, 15);
+ int nodeIdCounter = 0;
+ int atMostNodes = scaledRandomIntBetween(Math.max(1, maxNumReplicas), 15);
+ final boolean frequentNodes = randomBoolean();
+ // Randomly grow and shrink the cluster while the decider answers randomly.
+ for (int i = 0; i < numIters; i++) {
+ logger.info("Start iteration [{}]", i);
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+
+ if (clusterState.nodes().size() <= atMostNodes &&
+ (nodeIdCounter == 0 || (frequentNodes ? frequently() : rarely()))) {
+ int numNodes = scaledRandomIntBetween(1, 3);
+ for (int j = 0; j < numNodes; j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ }
+
+ if (nodeIdCounter > 1 && rarely()) {
+ int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2);
+ logger.info("removing node [{}]", nodeId);
+ newNodesBuilder.remove("NODE_" + nodeId);
+ }
+
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+ }
+ logger.info("Fill up nodes such that every shard can be allocated");
+ if (clusterState.nodes().size() < maxNumReplicas) {
+ ClusterState.Builder stateBuilder = ClusterState.builder(clusterState);
+ DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int j = 0; j < (maxNumReplicas - clusterState.nodes().size()); j++) {
+ logger.info("adding node [{}]", nodeIdCounter);
+ newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
+ }
+ stateBuilder.nodes(newNodesBuilder.build());
+ clusterState = stateBuilder.build();
+ }
+
+
+ randomAllocationDecider.alwaysSayYes = true;
+ logger.info("now say YES to everything");
+ int iterations = 0;
+ do {
+ iterations++;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ if (clusterState.routingNodes().shardsWithState(INITIALIZING).size() > 0) {
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING))
+ .routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ // Parenthesized so the iteration cap applies to BOTH conditions.
+ // Previously '&&' bound tighter than '||', which made the cap apply only
+ // to the UNASSIGNED check and allowed an endless loop while shards
+ // remained INITIALIZING.
+ } while ((clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size() != 0 ||
+ clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size() != 0) && iterations < 200);
+ logger.info("Done Balancing after [{}] iterations", iterations);
+ // we stop after 200 iterations; if it didn't stabilize by then something is likely to be wrong
+ assertThat("max num iteration exceeded", iterations, Matchers.lessThan(200));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(0));
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
+ int shards = clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size();
+ assertThat(shards, equalTo(totalNumShards));
+ // NOTE(review): shards / numNodes is integer division before the 1.10/0.90
+ // scaling, so the bounds are slightly coarser than a true +/-10% — confirm
+ // this slack is intended.
+ final int numNodes = clusterState.nodes().size();
+ final int upperBound = (int) Math.round(((shards / numNodes) * 1.10));
+ final int lowerBound = (int) Math.round(((shards / numNodes) * 0.90));
+ for (int i = 0; i < nodeIdCounter; i++) {
+ if (clusterState.getRoutingNodes().node("NODE_" + i) == null) {
+ continue;
+ }
+ assertThat(clusterState.getRoutingNodes().node("NODE_" + i).size(), Matchers.anyOf(
+ Matchers.anyOf(equalTo((shards / numNodes) + 1), equalTo((shards / numNodes) - 1), equalTo((shards / numNodes))),
+ Matchers.allOf(Matchers.greaterThanOrEqualTo(lowerBound), Matchers.lessThanOrEqualTo(upperBound))));
+ }
+ }
+
+ /** Allocation decider answering randomly until {@link #alwaysSayYes} is set. */
+ private static final class RandomAllocationDecider extends AllocationDecider {
+
+ private final Random random;
+
+ public RandomAllocationDecider(Random random) {
+ super(Settings.EMPTY);
+ this.random = random;
+ }
+
+ // Once true, every decision becomes YES so the cluster can converge.
+ public boolean alwaysSayYes = false;
+
+ @Override
+ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ // Roughly: 5/10 NO, 1/10 THROTTLE, 3/10 YES, 1/10 ALWAYS.
+ private Decision getRandomDecision() {
+ if (alwaysSayYes) {
+ return Decision.YES;
+ }
+ switch (random.nextInt(10)) {
+ case 9:
+ case 8:
+ case 7:
+ case 6:
+ case 5:
+ return Decision.NO;
+ case 4:
+ return Decision.THROTTLE;
+ case 3:
+ case 2:
+ case 1:
+ return Decision.YES;
+ default:
+ return Decision.ALWAYS;
+ }
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ @Override
+ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return getRandomDecision();
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
new file mode 100644
index 0000000000..a8d44c1df1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Verifies that shard rebalancing/relocation starts only once every shard
+ * copy of the index (all primaries and all replicas) has reached the
+ * STARTED state, and that relocation then spreads shards evenly over the
+ * cluster one wave at a time.
+ */
+public class RebalanceAfterActiveTests extends ElasticsearchAllocationTestCase {
+
+    private final ESLogger logger = Loggers.getLogger(RebalanceAfterActiveTests.class);
+
+    @Test
+    public void testRebalanceOnlyAfterAllShardsAreActive() {
+        // "always" plus unlimited concurrent rebalances (-1): the settings do
+        // not throttle relocation, so the deferral asserted below is driven by
+        // shard state alone.
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build());
+
+        logger.info("Building initial routing table");
+
+        // 5 primaries with 1 replica each -> 10 shard copies in total.
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        // Before any node joins, both copies of every shard are unassigned.
+        assertThat(routingTable.index("test").shards().size(), equalTo(5));
+        for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+            assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+            assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+            assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+            assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+        }
+
+        logger.info("start two nodes and fully start the shards");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+        // NOTE(review): prevRoutingTable is reassigned throughout but never
+        // asserted on in this test.
+        RoutingTable prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        // The first reroute only assigns primaries; each replica waits for its
+        // primary to start.
+        for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+            assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+            assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        }
+
+        logger.info("start all the primary shards, replicas will start initializing");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+            assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+            assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+        }
+
+        logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
+                .build();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // Replicas are still INITIALIZING, so despite 8 empty nodes nothing moved.
+        for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+            assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+            assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+            assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+        }
+
+        logger.info("start the replica shards, rebalancing should start");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // we only allow one relocation at a time
+        assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+        assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(5));
+
+        logger.info("complete relocation, other half of relocation should happen");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // we now only relocate 3, since 2 remain where they are!
+        assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(7));
+        assertThat(routingTable.shardsWithState(RELOCATING).size(), equalTo(3));
+
+        logger.info("complete relocation, thats it!");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+        // make sure we have an even relocation
+        // (10 shard copies over 10 nodes -> exactly one per node)
+        for (RoutingNode routingNode : routingNodes) {
+            assertThat(routingNode.size(), equalTo(1));
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
new file mode 100644
index 0000000000..268fed8876
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Verifies that a replica is only assigned after its primary has been
+ * allocated and started, and that the replica lands on a different node
+ * than the primary.
+ */
+public class ReplicaAllocatedAfterPrimaryTests extends ElasticsearchAllocationTestCase {
+
+    private final ESLogger logger = Loggers.getLogger(ReplicaAllocatedAfterPrimaryTests.class);
+
+    @Test
+    public void testBackupIsAllocatedAfterPrimary() {
+        AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+        logger.info("Building initial routing table");
+
+        // One shard with one replica -> exactly two shard copies to track.
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        // Without nodes, both copies are unassigned and carry no node id.
+        assertThat(routingTable.index("test").shards().size(), equalTo(1));
+        assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+        assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+        assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+        assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+        // NOTE(review): the log message says "one node" but two nodes are added.
+        logger.info("Adding one node and performing rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+        RoutingTable prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+
+        // Only the primary may initialize on the first pass; the replica must
+        // remain unassigned until the primary is started.
+        assertThat(prevRoutingTable != routingTable, equalTo(true));
+        assertThat(routingTable.index("test").shards().size(), equalTo(1));
+        assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+        assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+        assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+        assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+        assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+        logger.info("Start all the primary shards");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.node(nodeHoldingPrimary).shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+        // The replica must start initializing on the *other* node.
+        assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+        assertThat(prevRoutingTable != routingTable, equalTo(true));
+        assertThat(routingTable.index("test").shards().size(), equalTo(1));
+        assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+        assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+        assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+        assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+        assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+        assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
new file mode 100644
index 0000000000..620ad859c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
@@ -0,0 +1,416 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class RoutingNodesIntegrityTests extends ElasticsearchAllocationTestCase {
+
+    // Fixed: the logger was constructed with IndexBalanceTests.class (a
+    // copy/paste slip from a sibling test), so log output from this class was
+    // attributed to the wrong logger category.
+    private final ESLogger logger = Loggers.getLogger(RoutingNodesIntegrityTests.class);
+
+    /**
+     * Walks two 3-shard/1-replica indices from fully unassigned to fully
+     * started on a three-node cluster, checking the RoutingNodes
+     * inactive/unassigned bookkeeping flags after every step.
+     */
+    @Test
+    public void testBalanceAllNodesStarted() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+        logger.info("Building initial routing table");
+
+        // Two indices, 3 primaries + 1 replica each -> 12 shard copies total.
+        MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+        RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+        RoutingNodes routingNodes = clusterState.routingNodes();
+
+        logger.info("Adding three node and performing rerouting");
+        clusterState = ClusterState.builder(clusterState)
+                .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+        routingNodes = clusterState.routingNodes();
+
+        // assertShardStats() is a helper defined later in this test class;
+        // presumably it validates RoutingNodes internal bookkeeping — confirm.
+        assertThat(assertShardStats(routingNodes), equalTo(true));
+        // all shards are unassigned. so no inactive shards or primaries.
+        assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+        assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+        assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+        // NOTE(review): prevRoutingTable is reassigned throughout but never
+        // asserted on in this test.
+        RoutingTable prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // After the first reroute primaries are initializing: inactive shards
+        // and inactive primaries exist, but no primary is unassigned anymore.
+        assertThat(assertShardStats(routingNodes), equalTo(true));
+        assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+        assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+        assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+        logger.info("Another round of rebalancing");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        logger.info("Reroute, nothing should change");
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        // NOTE(review): clusterState is not rebuilt with this routingTable, and
+        // no assertion actually verifies that "nothing changed" here.
+
+        logger.info("Start the more shards");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        // Everything started: no inactive or unassigned shards remain.
+        assertThat(assertShardStats(routingNodes), equalTo(true));
+        assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+        assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+        assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+        // NOTE(review): this final round has no assertions after it; it only
+        // confirms the call completes without throwing.
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+    }
+
+    /**
+     * Adds nodes one at a time while starting shards incrementally and checks
+     * that the final allocation is perfectly even: 4 started shard copies per
+     * node, 2 from each of the two indices.
+     */
+    @Test
+    public void testBalanceIncrementallyStartNodes() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+        logger.info("Building initial routing table");
+
+        // Two indices, 3 primaries + 1 replica each -> 12 shard copies total.
+        MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+        RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).addAsNew(metaData.index("test1")).build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("Adding one node and performing rerouting");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+        // NOTE(review): prevRoutingTable is mostly write-only; only the two
+        // identity assertions near the end actually read it.
+        RoutingTable prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+        clusterState = ClusterState.builder(clusterState)
+                .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Start the primary shard");
+        RoutingNodes routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Reroute, nothing should change");
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        // NOTE(review): the reroute result above is never folded back into
+        // clusterState and is not asserted on.
+
+        logger.info("Start the backup shard");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+        clusterState = ClusterState.builder(clusterState)
+                .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("Reroute, nothing should change");
+        prevRoutingTable = routingTable;
+        routingTable = strategy.reroute(clusterState).routingTable();
+
+        logger.info("Start the backup shard");
+        routingNodes = clusterState.routingNodes();
+        prevRoutingTable = routingTable;
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        assertThat(prevRoutingTable != routingTable, equalTo(true));
+        assertThat(routingTable.index("test").shards().size(), equalTo(3));
+
+        routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        routingNodes = clusterState.routingNodes();
+
+        assertThat(prevRoutingTable != routingTable, equalTo(true));
+        assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+        // Final balance: 4 started copies per node, 2 per index per node.
+        assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+        assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+        assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+        assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+        assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+        assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+        assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+        assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+        assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+    }
+
+ @Test
+ public void testBalanceAllNodesStartedAddIndex() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(3).numberOfReplicas(1)).build();
+
+ RoutingTable routingTable = RoutingTable.builder().addAsNew(metaData.index("test")).build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Adding three node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
+
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Another round of rebalancing");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(1));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(1));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(1));
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the more shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(2));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test", STARTED).size(), equalTo(2));
+
+ logger.info("Add new index 3 shards 1 replica");
+
+ prevRoutingTable = routingTable;
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(true));
+
+ assertThat(routingTable.index("test1").shards().size(), equalTo(3));
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Reroute, assign");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(true));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Reroute, start the primaries");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Reroute, start the replicas");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(4));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(4));
+
+ assertThat(routingNodes.node("node1").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node2").shardsWithState("test1", STARTED).size(), equalTo(2));
+ assertThat(routingNodes.node("node3").shardsWithState("test1", STARTED).size(), equalTo(2));
+
+ logger.info("kill one node");
+ IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0);
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ // replica got promoted to primary
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 1");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(true));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ logger.info("Start Recovering shards round 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ assertThat(assertShardStats(routingNodes), equalTo(true));
+ assertThat(routingNodes.hasInactiveShards(), equalTo(false));
+ assertThat(routingNodes.hasInactivePrimaries(), equalTo(false));
+ assertThat(routingNodes.hasUnassignedPrimaries(), equalTo(false));
+
+ }
+
+ // Thin wrapper so tests can write assertThat(assertShardStats(...), equalTo(true));
+ // delegates to the static check in RoutingNodes — presumably validates the node's
+ // internal shard accounting invariants (confirm in RoutingNodes.assertShardStats).
+ private boolean assertShardStats(RoutingNodes routingNodes) {
+ return RoutingNodes.assertShardStats(routingNodes);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
new file mode 100644
index 0000000000..74106e91db
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesUtils.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+
+/**
+ * Static helpers shared by the allocation test suite.
+ */
+public class RoutingNodesUtils {
+
+ /**
+ * Counts, across every routing node, how many shards are currently in the
+ * given state.
+ */
+ public static int numberOfShardsOfType(RoutingNodes nodes, ShardRoutingState state) {
+ int total = 0;
+ for (RoutingNode node : nodes) {
+ total += node.numberOfShardsWithState(state);
+ }
+ return total;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
new file mode 100644
index 0000000000..355f917b1c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests {@link SameShardAllocationDecider} with {@code SAME_HOST_SETTING} enabled:
+ * a replica must never be allocated to a node that reports the same host as the
+ * node holding its primary.
+ */
+public class SameShardRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SameShardRoutingTests.class);
+
+ @Test
+ public void sameHost() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(SameShardAllocationDecider.SAME_HOST_SETTING, true).build());
+
+ // 2 primaries + 1 replica each = 4 shard copies total.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes with the same host");
+ // Both nodes report host name/address "test1", so they count as the same host.
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(new DiscoveryNode("node1", "node1", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))
+ .put(new DiscoveryNode("node2", "node2", "test1", "test1", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Only the 2 primaries may initialize; their replicas are blocked by the decider.
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("--> start all primary shards, no replica will be started since its on the same host");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(0));
+
+ logger.info("--> add another node, with a different host, replicas will be allocating");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(new DiscoveryNode("node3", "node3", "test2", "test2", DummyTransportAddress.INSTANCE, ImmutableMap.<String, String>of(), Version.CURRENT))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.STARTED), equalTo(2));
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), ShardRoutingState.INITIALIZING), equalTo(2));
+ // Every newly initializing replica must have landed on node3 (the different host).
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().shardsWithState(INITIALIZING)) {
+ assertThat(shardRouting.currentNodeId(), equalTo("node3"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
new file mode 100644
index 0000000000..4ca86bfcf7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests that shard routing versions are bumped as shards move through the
+ * allocation lifecycle: starting the primaries of one index increments their
+ * version (1 -> 2) and the replica inherits it, while the untouched index's
+ * shards keep their initial version.
+ */
+public class ShardVersioningTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);
+
+ @Test
+ public void simple() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+ ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());
+
+ // Two single-shard indices, one replica each.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test1"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // After the initial reroute both indices' primaries initialize at version 1.
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ }
+
+ logger.info("start all the primary shards for test1, replicas will start initializing");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test1", INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // test1's primary version is bumped to 2 on start and the now-initializing
+ // replica carries the same version.
+ for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
+ assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(2l));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).version(), equalTo(2l));
+ }
+
+ // test2 was not started, so its shards still carry version 1 and its
+ // replica remains unassigned.
+ for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
+ assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).version(), equalTo(1l));
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
new file mode 100644
index 0000000000..68732d9fa6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link ShardsLimitAllocationDecider}: the per-index
+ * {@link ShardsLimitAllocationDecider#INDEX_TOTAL_SHARDS_PER_NODE} setting caps
+ * how many shards of a single index may be allocated to any one node, both at
+ * initial allocation time and when the limit is tightened afterwards.
+ */
+public class ShardsLimitAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ShardsLimitAllocationTests.class);
+
+ @Test
+ public void indexLevelShardsLimitAllocate() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ // 4 primaries + 4 replicas = 8 shard copies, but at most 2 per node.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 2)))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Each node takes exactly the 2 shards its limit allows.
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(2));
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // With 2 nodes x limit 2 only 4 of the 8 copies fit; the other 4 stay unassigned.
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(0));
+ assertThat(clusterState.readOnlyRoutingNodes().unassigned().size(), equalTo(4));
+
+ logger.info("Do another reroute, make sure its still not allocated");
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ @Test
+ public void indexLevelShardsLimitRemain() {
+ // Balance purely on replica count so index placement is deterministic here.
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+ logger.info("Adding one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(5));
+
+ logger.info("add another index with 5 shards");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ))
+ .build();
+ routingTable = RoutingTable.builder(routingTable)
+ .addAsNew(metaData.index("test1"))
+ .build();
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("Add another one node and reroute");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(numberOfShardsOfType(clusterState.readOnlyRoutingNodes(), STARTED), equalTo(10));
+
+ // All of "test" stayed on node1; all of "test1" went to node2.
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node1")) {
+ assertThat(shardRouting.index(), equalTo("test"));
+ }
+ for (MutableShardRouting shardRouting : clusterState.readOnlyRoutingNodes().node("node2")) {
+ assertThat(shardRouting.index(), equalTo("test1"));
+ }
+
+ logger.info("update " + ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE + " for test, see that things move");
+ metaData = MetaData.builder(metaData)
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 3)
+ ))
+ .build();
+
+
+ clusterState = ClusterState.builder(clusterState).metaData(metaData).build();
+
+ logger.info("reroute after setting");
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Tightening "test" to 3 shards/node forces 2 of its 5 shards off node1.
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(3));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(RELOCATING), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(3));
+ // the first move will destroy the balance and the balancer will move 2 shards from node2 to node one right after
+ // moving the nodes to node2 since we consider INITIALIZING nodes during rebalance
+ routingNodes = clusterState.routingNodes();
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // now we are done compared to EvenShardCountAllocator since the Balancer is not solely based on the average
+ assertThat(clusterState.readOnlyRoutingNodes().node("node1").numberOfShardsWithState(STARTED), equalTo(5));
+ assertThat(clusterState.readOnlyRoutingNodes().node("node2").numberOfShardsWithState(STARTED), equalTo(5));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
new file mode 100644
index 0000000000..7044d34ec7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
@@ -0,0 +1,414 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Set;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.cluster.routing.allocation.RoutingNodesUtils.numberOfShardsOfType;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SingleShardNoReplicasRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardNoReplicasRoutingTests.class);
+
+ @Test
+ public void testSingleIndexStartedShard() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Rerouting again, nothing should change");
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Marking the shard as started");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Starting another node and making sure nothing changed");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Killing node1 where the shard is, checking the shard is relocated");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(routingTable == prevRoutingTable, equalTo(true));
+
+ logger.info("Start the shard on node 2");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable != prevRoutingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ @Test
+ public void testSingleIndexShardFailed() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
+
+ logger.info("Marking the shard as failed");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyFailedShard(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING).get(0)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ @Test
+ public void testMultiIndexEvenDistribution() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 50;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding " + (numberOfIndices / 2) + " nodes");
+ DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+ List<DiscoveryNode> nodes = newArrayList();
+ for (int i = 0; i < (numberOfIndices / 2); i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ RoutingTable prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ // make sure we still have 2 shards initializing per node on the first 25 nodes
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ Set<String> encounteredIndices = newHashSet();
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(0));
+ assertThat(routingNode.size(), equalTo(2));
+            // make sure we still have 2 shards initializing per node on the first 25 nodes
+ int nodeIndex = Integer.parseInt(routingNode.nodeId().substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ // check that we don't have a shard associated with a node with the same index name (we have a single shard)
+ for (MutableShardRouting shardRoutingEntry : routingNode) {
+ assertThat(encounteredIndices, not(hasItem(shardRoutingEntry.index())));
+ encounteredIndices.add(shardRoutingEntry.index());
+ }
+ }
+
+ logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
+ nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
+ for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
+ nodesBuilder.put(newNode("node" + i));
+ }
+ prevRoutingTable = routingTable;
+ clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+
+ logger.info("Marking the shard as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ int numberOfRelocatingShards = 0;
+ int numberOfStartedShards = 0;
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).unassigned(), equalTo(false));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(STARTED), equalTo(RELOCATING)));
+ if (routingTable.index("test" + i).shard(0).shards().get(0).state() == STARTED) {
+ numberOfStartedShards++;
+ } else if (routingTable.index("test" + i).shard(0).shards().get(0).state() == RELOCATING) {
+ numberOfRelocatingShards++;
+ }
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).primary(), equalTo(true));
+ // make sure we still have 2 shards either relocating or started on the first 25 nodes (still)
+ String nodeId = routingTable.index("test" + i).shard(0).shards().get(0).currentNodeId();
+ int nodeIndex = Integer.parseInt(nodeId.substring("node".length()));
+ assertThat(nodeIndex, lessThan(25));
+ }
+ assertThat(numberOfRelocatingShards, equalTo(25));
+ assertThat(numberOfStartedShards, equalTo(25));
+ }
+
+ @Test
+ public void testMultiIndexUnevenNodes() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build());
+
+ final int numberOfIndices = 10;
+ logger.info("Building initial routing table with " + numberOfIndices + " indices");
+
+ MetaData.Builder metaDataBuilder = MetaData.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ metaDataBuilder.put(IndexMetaData.builder("test" + i).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0));
+ }
+ MetaData metaData = metaDataBuilder.build();
+
+ RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+ for (int i = 0; i < numberOfIndices; i++) {
+ routingTableBuilder.addAsNew(metaData.index("test" + i));
+ }
+ RoutingTable routingTable = routingTableBuilder.build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.indicesRouting().size(), equalTo(numberOfIndices));
+
+ logger.info("Starting 3 nodes and rerouting");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")))
+ .build();
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), equalTo(INITIALIZING));
+ }
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(numberOfIndices));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(INITIALIZING), anyOf(equalTo(3), equalTo(4)));
+
+ logger.info("Start two more nodes, things should remain the same");
+ clusterState = ClusterState.builder(clusterState)
+ .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5")))
+ .build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
+ routingNodes = clusterState.routingNodes();
+ assertThat("4 source shard routing are relocating", numberOfShardsOfType(routingNodes, RELOCATING), equalTo(4));
+ assertThat("4 target shard routing are initializing", numberOfShardsOfType(routingNodes, INITIALIZING), equalTo(4));
+
+ logger.info("Now, mark the relocated as started");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+// routingTable = strategy.reroute(new RoutingStrategyInfo(metaData, routingTable), nodes);
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ for (int i = 0; i < numberOfIndices; i++) {
+ assertThat(routingTable.index("test" + i).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().size(), equalTo(1));
+ assertThat(routingTable.index("test" + i).shard(0).shards().get(0).state(), anyOf(equalTo(RELOCATING), equalTo(STARTED)));
+ }
+ routingNodes = clusterState.routingNodes();
+ assertThat(numberOfShardsOfType(routingNodes, STARTED), equalTo(numberOfIndices));
+ for (RoutingNode routingNode : routingNodes) {
+ assertThat(routingNode.numberOfShardsWithState(STARTED), equalTo(2));
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
new file mode 100644
index 0000000000..9b5b955d42
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SingleShardOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(SingleShardOneReplicaRoutingTests.class);
+
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+
+ logger.info("Kill node1, backup shard should become primary");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
+
+ logger.info("Start another node, backup shard should start initializing");
+
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo("node2"));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo("node3"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
new file mode 100644
index 0000000000..54ac32c01a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TenShardsOneReplicaRoutingTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(TenShardsOneReplicaRoutingTests.class);
+
+ // Drives a 10-shard / 1-replica index through a full allocation lifecycle:
+ // primaries initialize on node1, replicas stay UNASSIGNED until the primaries
+ // are started, replicas then initialize on node2, and adding node3 triggers
+ // relocations until the 20 shard copies are spread 7/7/6.
+ @Test
+ public void testSingleIndexFirstStartPrimaryThenBackups() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ // balance weights: only the replica (shard-count) factor is non-zero —
+ // presumably to make the later 7/7/6 rebalance deterministic; TODO confirm
+ .put("cluster.routing.allocation.balance.index", 0.0f)
+ .put("cluster.routing.allocation.balance.replica", 1.0f)
+ .put("cluster.routing.allocation.balance.primary", 0.0f)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ // With no nodes in the cluster, every copy (primary + replica) is unassigned.
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(i).shards().get(1).currentNodeId(), nullValue());
+ }
+
+ logger.info("Adding one node and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // A new table instance signals that the reroute changed the allocation:
+ // all primaries initialize on the only node; replicas cannot be placed yet.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), nullValue());
+ }
+
+ logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Same instance returned => the reroute was a no-op: replicas are not
+ // allocated while their primaries are still INITIALIZING.
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the primary shard (on node1)");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node1").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ // backup shards are initializing as well, we make sure that they recover from primary *started* shards in the IndicesClusterStateService
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+
+ logger.info("Reroute, nothing should change");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ assertThat(prevRoutingTable == routingTable, equalTo(true));
+
+ logger.info("Start the backup shard");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node2").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // Steady state: 10 primaries STARTED on node1, 10 replicas STARTED on node2.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
+ assertThat(routingTable.index("test").shard(i).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(routingTable.index("test").shard(i).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(i).replicaShards().get(0).currentNodeId(), equalTo("node2"));
+ }
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
+
+ logger.info("Add another node and perform rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // Rebalancing kicks in: each of node1/node2 keeps 10 copies counting
+ // RELOCATING ones, but fewer than 10 remain plainly STARTED, while 6 copies
+ // initialize on node3 (heading toward the 7/7/6 split asserted below).
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED, RELOCATING), equalTo(10));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), lessThan(10));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(INITIALIZING), equalTo(6));
+
+ logger.info("Start the shards on node 3");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.node("node3").shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ routingNodes = clusterState.routingNodes();
+
+ // Final spread of the 20 copies across the three nodes: 7 / 7 / 6.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(10));
+ assertThat(routingNodes.node("node1").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(7));
+ assertThat(routingNodes.node("node3").numberOfShardsWithState(STARTED), equalTo(6));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
new file mode 100644
index 0000000000..7479bd1db3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class ThrottlingAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(ThrottlingAllocationTests.class);
+
+ // With recovery concurrency capped at 3 per node, a 10-shard / 1-replica index
+ // (20 copies total) must start its primaries in waves of at most 3: the
+ // UNASSIGNED count drains 17 -> 14 -> 11 -> 10 while 10 replica copies stay
+ // unassigned (there is only one node, so replicas can never be placed).
+ @Test
+ public void testPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.node_concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(17));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(14));
+
+ logger.info("start initializing, another 3 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(11));
+
+ logger.info("start initializing, another 1 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Only one primary remains, so the final wave is a single shard.
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(9));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // All 10 primaries STARTED; the 10 replicas remain forever UNASSIGNED on a
+ // single-node cluster.
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(10));
+ }
+
+ // Same throttling exercise for a 5-shard / 1-replica index, this time adding
+ // a second node so replica recoveries are throttled too (waves of 3 then 2).
+ @Test
+ public void testReplicaAndPrimaryRecoveryThrottling() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ // NOTE(review): this test uses the combined "concurrent_recoveries" key
+ // while the test above uses "node_concurrent_recoveries" — confirm the
+ // difference is intentional and both keys are honored.
+ .put("cluster.routing.allocation.concurrent_recoveries", 3)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("start one node, do reroute, only 3 should initialize");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(7));
+
+ logger.info("start initializing, another 2 should initialize");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start initializing, all primaries should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
+
+ logger.info("start another node, replicas should start being allocated");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Replica recovery is throttled identically: only 3 of the 5 replicas may
+ // recover concurrently.
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(3));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
+
+ logger.info("start initializing replicas");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(2));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+
+ logger.info("start initializing replicas, all should be started");
+ routingTable = strategy.applyStartedShards(clusterState, routingTable.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(10));
+ assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(0));
+ assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(0));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000000..4749ab5ad8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class UpdateNumberOfReplicasTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(UpdateNumberOfReplicasTests.class);
+
+ // Verifies dynamic replica-count changes on a 1-shard index: start with one
+ // replica across two nodes, raise the count to 2 (new copy UNASSIGNED until a
+ // third node joins and it recovers), then drop back to 1 and check the extra
+ // copy disappears from the routing table without further reroute changes.
+ @Test
+ public void testUpdateNumberOfReplicas() {
+ AllocationService strategy = createAllocationService(settingsBuilder().put("cluster.routing.allocation.concurrent_recoveries", 10).build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(1).state(), equalTo(UNASSIGNED));
+ assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
+ assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
+
+
+ logger.info("Adding two nodes and performing rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
+
+ RoutingTable prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the primary shards");
+ RoutingNodes routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("Start all the replica shards");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Which node got the primary vs. the replica is allocator-dependent, so
+ // capture the actual placement instead of hard-coding node names.
+ final String nodeHoldingPrimary = routingTable.index("test").shard(0).primaryShard().currentNodeId();
+ final String nodeHoldingReplica = routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId();
+ assertThat(nodeHoldingPrimary, not(equalTo(nodeHoldingReplica)));
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).shards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+
+
+ logger.info("add another replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ // Replica count must be raised in both the routing table and the metadata
+ // to keep the cluster state self-consistent.
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(2).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(2));
+
+ // The new replica copy exists immediately but is UNASSIGNED — there is no
+ // third node for it yet.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
+
+ logger.info("Add another node and start the added replica");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), equalTo(nodeHoldingReplica));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(INITIALIZING).get(0).currentNodeId(), equalTo("node3"));
+
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(3));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).size(), equalTo(2));
+ // Replica ordering within the shard table is not guaranteed, hence anyOf().
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+ assertThat(routingTable.index("test").shard(0).replicaShardsWithState(STARTED).get(1).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("now remove a replica");
+ routingNodes = clusterState.routingNodes();
+ prevRoutingTable = routingTable;
+ routingTable = RoutingTable.builder(routingTable).updateNumberOfReplicas(1).build();
+ metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
+
+ assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(1));
+
+ // One replica copy is dropped; which of the two survives is unspecified.
+ assertThat(prevRoutingTable != routingTable, equalTo(true));
+ assertThat(routingTable.index("test").shards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).size(), equalTo(2));
+ assertThat(routingTable.index("test").shard(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).primaryShard().currentNodeId(), equalTo(nodeHoldingPrimary));
+ assertThat(routingTable.index("test").shard(0).replicaShards().size(), equalTo(1));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).state(), equalTo(STARTED));
+ assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), anyOf(equalTo(nodeHoldingReplica), equalTo("node3")));
+
+ logger.info("do a reroute, should remain the same");
+ prevRoutingTable = routingTable;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ // Allocation is already optimal, so reroute returns the same instance.
+ assertThat(prevRoutingTable != routingTable, equalTo(false));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
new file mode 100644
index 0000000000..707d54ee51
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java
@@ -0,0 +1,923 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase {
+
+    // Builds the allocator set used by every test in this class: real gateway
+    // allocation is replaced by a no-op so only the deciders under test decide.
+    private static ShardsAllocators makeShardsAllocators() {
+        return new ShardsAllocators(NoopGatewayAllocator.INSTANCE);
+    }
+
+    /**
+     * Percentage-based watermarks (low 70%, high 80%): verifies that shards are
+     * never allocated to node1 (90% used), that a replica stays unassigned until
+     * a node with enough free space joins, and that tightening the watermarks
+     * later forces the shard off the now-over-limit node once node4 appears.
+     */
+    @Test
+    public void diskThresholdTest() {
+        Settings diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();
+
+        Map<String, DiskUsage> usages = new HashMap<>();
+        usages.put("node1", new DiskUsage("node1", "node1", 100, 10)); // 90% used
+        usages.put("node2", new DiskUsage("node2", "node2", 100, 35)); // 65% used
+        usages.put("node3", new DiskUsage("node3", "node3", 100, 60)); // 40% used
+        usages.put("node4", new DiskUsage("node4", "node4", 100, 80)); // 20% used
+
+        Map<String, Long> shardSizes = new HashMap<>();
+        shardSizes.put("[test][0][p]", 10L); // 10 bytes
+        shardSizes.put("[test][0][r]", 10L);
+        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+        AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        // Fake service that always reports the static usages/sizes built above.
+        ClusterInfoService cis = new ClusterInfoService() {
+            @Override
+            public ClusterInfo getClusterInfo() {
+                logger.info("--> calling fake getClusterInfo");
+                return clusterInfo;
+            }
+
+            @Override
+            public void addListener(Listener listener) {
+                // noop
+            }
+        };
+
+        AllocationService strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding two nodes");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1"))
+                .put(newNode("node2"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Primary shard should be initializing, replica should not
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that we're able to start the primary
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+        // Assert that node1 didn't get any shards because its disk usage is too high
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+
+        logger.info("--> start the shards (replicas)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that the replica couldn't be started since node1 doesn't have enough space
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+
+        logger.info("--> adding node3");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node3"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that the replica is initialized now that node3 is available with enough space
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(1));
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> start the shards (replicas)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that the replica is now started on node3, which has enough free space
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> changing decider settings");
+
+        // Set the low threshold to 60 instead of 70
+        // Set the high threshold to 70 instead of 80
+        // node2 now should not have new shards allocated to it, but shards can remain
+        diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build();
+
+        deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Shards remain started
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> changing settings again");
+
+        // Set the low threshold to 50 instead of 60
+        // Set the high threshold to 60 instead of 70
+        // node2 now should not have new shards allocated to it, and shards cannot remain
+        diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build();
+
+        deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Shards remain started
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+        // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> adding node4");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node4"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // One shard begins moving to node4; only the node3 shard is still STARTED
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> apply INITIALIZING shards");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+        // Node4 is available now, so the shard is moved off of node2
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+    }
+
+    /**
+     * Byte-based watermarks (minimum free bytes, not used percentages): with
+     * two replicas, verifies that allocation follows free-space headroom as the
+     * watermarks are tightened and as nodes 3-5 join, ending with the shards on
+     * node3, node4 and node5 only.
+     */
+    @Test
+    public void diskThresholdWithAbsoluteSizesTest() {
+        Settings diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "9b").build();
+
+        Map<String, DiskUsage> usages = new HashMap<>();
+        usages.put("node1", new DiskUsage("node1", "n1", 100, 10)); // 90% used
+        usages.put("node2", new DiskUsage("node2", "n2", 100, 10)); // 90% used
+        usages.put("node3", new DiskUsage("node3", "n3", 100, 60)); // 40% used
+        usages.put("node4", new DiskUsage("node4", "n4", 100, 80)); // 20% used
+        usages.put("node5", new DiskUsage("node5", "n5", 100, 85)); // 15% used
+
+        Map<String, Long> shardSizes = new HashMap<>();
+        shardSizes.put("[test][0][p]", 10L); // 10 bytes
+        shardSizes.put("[test][0][r]", 10L);
+        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+        AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        // Fake service returning the static usages/sizes built above.
+        ClusterInfoService cis = new ClusterInfoService() {
+            @Override
+            public ClusterInfo getClusterInfo() {
+                logger.info("--> calling fake getClusterInfo");
+                return clusterInfo;
+            }
+
+            @Override
+            public void addListener(Listener listener) {
+                // noop
+            }
+        };
+
+        AllocationService strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        logger.info("--> adding node1 and node2 node");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1"))
+                .put(newNode("node2"))
+        ).build();
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Primary should initialize, even though both nodes are over the watermark limit
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        String nodeWithPrimary, nodeWithoutPrimary;
+        if (clusterState.getRoutingNodes().node("node1").size() == 1) {
+            nodeWithPrimary = "node1";
+            nodeWithoutPrimary = "node2";
+        } else {
+            nodeWithPrimary = "node2";
+            nodeWithoutPrimary = "node1";
+        }
+        logger.info("--> nodeWithPrimary: {}", nodeWithPrimary);
+        logger.info("--> nodeWithoutPrimary: {}", nodeWithoutPrimary);
+
+        // Make node without the primary now habitable to replicas
+        usages.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", 100, 35)); // 65% used
+        final ClusterInfo clusterInfo2 = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        cis = new ClusterInfoService() {
+            @Override
+            public ClusterInfo getClusterInfo() {
+                logger.info("--> calling fake getClusterInfo");
+                return clusterInfo2;
+            }
+
+            @Override
+            public void addListener(Listener listener) {
+                // noop
+            }
+        };
+        strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Now the replica should be able to initialize
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(2));
+
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that we're able to start the primary and replica, since they were both initializing
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+        // Assert that node1 got a single shard (the primary), even though its disk usage is too high
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+        // Assert that node2 got a single shard (a replica)
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+
+        // Assert that one replica is still unassigned
+        //assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
+
+        logger.info("--> adding node3");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node3"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that the replica is initialized now that node3 is available with enough space
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(2));
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> start the shards (replicas)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Assert that all replicas could be started
+        assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(3));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> changing decider settings");
+
+        // Set the low watermark to 40 bytes free instead of 30
+        // Set the high watermark to 30 bytes free instead of 9
+        // node2 now should not have new shards allocated to it, but shards can remain
+        diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build();
+
+        deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Shards remain started
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> changing settings again");
+
+        // Set the low watermark to 50 bytes free instead of 40
+        // Set the high watermark to 40 bytes free instead of 30
+        // node2 now should not have new shards allocated to it, and shards cannot remain
+        diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build();
+
+        deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Shards remain started
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(3));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+        // Shard hasn't been moved off of node2 yet because there's nowhere for it to go
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+
+        logger.info("--> adding node4");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node4"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Two shards remain started; the third is on its way to node4
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+        // One shard is relocating off of node1
+        assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(1));
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> apply INITIALIZING shards");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // primary shard already has been relocated away
+        assertThat(clusterState.getRoutingNodes().node(nodeWithPrimary).size(), equalTo(0));
+        // node with increased space still has its shard
+        assertThat(clusterState.getRoutingNodes().node(nodeWithoutPrimary).size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+
+        logger.info("--> adding node5");
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+                .put(newNode("node5"))
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logShardStates(clusterState);
+        // Shards remain started on node3 and node4
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(2));
+        // One shard is relocating off of node2 now
+        assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(1));
+        // Initializing on node5
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> apply INITIALIZING shards");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        logger.info("--> final cluster state:");
+        logShardStates(clusterState);
+        // Node1 still has no shards because it has no space for them
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(0));
+        // Node5 is available now, so the shard is moved off of node2
+        assertThat(clusterState.getRoutingNodes().node("node2").size(), equalTo(0));
+        assertThat(clusterState.getRoutingNodes().node("node3").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node4").size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node5").size(), equalTo(1));
+    }
+
+    /**
+     * Verifies the *projected* usage check: node1 is just under the 70% low
+     * watermark (69% used), but placing the 10-byte shard would push it past
+     * the 71% high watermark, so the shard must stay unassigned.
+     */
+    @Test
+    public void diskThresholdWithShardSizes() {
+        Settings diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "71%").build();
+
+        Map<String, DiskUsage> usages = new HashMap<>();
+        usages.put("node1", new DiskUsage("node1", "n1", 100, 31)); // 69% used
+        usages.put("node2", new DiskUsage("node2", "n2", 100, 1));  // 99% used
+
+        Map<String, Long> shardSizes = new HashMap<>();
+        shardSizes.put("[test][0][p]", 10L); // 10 bytes
+        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+        AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        // Fake service returning the static usages/sizes built above.
+        ClusterInfoService cis = new ClusterInfoService() {
+            @Override
+            public ClusterInfo getClusterInfo() {
+                logger.info("--> calling fake getClusterInfo");
+                return clusterInfo;
+            }
+
+            @Override
+            public void addListener(Listener listener) {
+                // noop
+            }
+        };
+
+        AllocationService strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+        logger.info("--> adding node1");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1"))
+                .put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // Shard can't be allocated to node1 (or node2) because it would cause too much usage
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+        // No shards are started, no nodes have enough disk for allocation
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(0));
+    }
+
+    /**
+     * Allocation when a node's disk usage is unknown: node1 is deliberately
+     * absent from the usages map, yet the never-allocated primary should still
+     * be assigned and started there (while node3, at 100% used, is refused).
+     */
+    @Test
+    public void unknownDiskUsageTest() {
+        Settings diskSettings = settingsBuilder()
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build();
+
+        // NOTE: no entry for node1 — its usage is intentionally unknown.
+        Map<String, DiskUsage> usages = new HashMap<>();
+        usages.put("node2", new DiskUsage("node2", "node2", 100, 50)); // 50% used
+        usages.put("node3", new DiskUsage("node3", "node3", 100, 0));  // 100% used
+
+        Map<String, Long> shardSizes = new HashMap<>();
+        shardSizes.put("[test][0][p]", 10L); // 10 bytes
+        shardSizes.put("[test][0][r]", 10L); // 10 bytes
+        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+        AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
+                new HashSet<>(Arrays.asList(
+                        new SameShardAllocationDecider(Settings.EMPTY),
+                        new DiskThresholdDecider(diskSettings))));
+
+        // Fake service returning the static usages/sizes built above.
+        ClusterInfoService cis = new ClusterInfoService() {
+            @Override
+            public ClusterInfo getClusterInfo() {
+                logger.info("--> calling fake getClusterInfo");
+                return clusterInfo;
+            }
+
+            @Override
+            public void addListener(Listener listener) {
+                // noop
+            }
+        };
+
+        AllocationService strategy = new AllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+                .build(), deciders, makeShardsAllocators(), cis);
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+        logger.info("--> adding node1");
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1"))
+                .put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
+        ).build();
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        // Shard can be allocated to node1, even though it only has 25% free,
+        // because it's a primary that's never been allocated before
+        assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+        logger.info("--> start the shards (primaries)");
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        logShardStates(clusterState);
+
+        // A single shard is started on node1, even though it normally would not
+        // be allowed, because it's a primary that hasn't been allocated, and node1
+        // is still below the high watermark (unlike node3)
+        assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(1));
+        assertThat(clusterState.getRoutingNodes().node("node1").size(), equalTo(1));
+    }
+
+ @Test
+ public void averageUsageUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<>();
+ usages.put("node2", new DiskUsage("node2", "n2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", "n3", 100, 0)); // 100% used
+
+ DiskUsage node1Usage = decider.averageUsage(rn, usages);
+ assertThat(node1Usage.getTotalBytes(), equalTo(100L));
+ assertThat(node1Usage.getFreeBytes(), equalTo(25L));
+ }
+
+ @Test
+ public void freeDiskPercentageAfterShardAssignedUnitTest() {
+ RoutingNode rn = new RoutingNode("node1", newNode("node1"));
+ DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY);
+
+ Map<String, DiskUsage> usages = new HashMap<>();
+ usages.put("node2", new DiskUsage("node2", "n2", 100, 50)); // 50% used
+ usages.put("node3", new DiskUsage("node3", "n3", 100, 0)); // 100% used
+
+ Double after = decider.freeDiskPercentageAfterShardAssigned(new DiskUsage("node2", "n2", 100, 30), 11L);
+ assertThat(after, equalTo(19.0));
+ }
+
+ @Test
+ public void testShardRelocationsTakenIntoAccount() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build();
+
+ Map<String, DiskUsage> usages = new HashMap<>();
+ usages.put("node1", new DiskUsage("node1", "n1", 100, 40)); // 60% used
+ usages.put("node2", new DiskUsage("node2", "n2", 100, 40)); // 60% used
+ usages.put("node3", new DiskUsage("node3", "n3", 100, 40)); // 60% used
+
+ Map<String, Long> shardSizes = new HashMap<>();
+ shardSizes.put("[test][0][p]", 14L); // 14 bytes
+ shardSizes.put("[test][0][r]", 14L);
+ shardSizes.put("[test2][0][p]", 1L); // 1 byte
+ shardSizes.put("[test2][0][r]", 1L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
+ new HashSet<>(Arrays.asList(
+ new SameShardAllocationDecider(Settings.EMPTY),
+ new DiskThresholdDecider(diskSettings))));
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+
+ @Override
+ public void addListener(Listener listener) {
+ // noop
+ }
+ };
+
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, makeShardsAllocators(), cis);
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .addAsNew(metaData.index("test2"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ // shards should be initializing
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
+
+ logger.info("--> start the shards");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logShardStates(clusterState);
+ // Assert that we're able to start the primary and replicas
+ assertThat(clusterState.routingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(4));
+
+ logger.info("--> adding node3");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
+ .put(newNode("node3"))
+ ).build();
+
+ AllocationCommand relocate1 = new MoveAllocationCommand(new ShardId("test", 0), "node2", "node3");
+ AllocationCommands cmds = new AllocationCommands(relocate1);
+
+ routingTable = strategy.reroute(clusterState, cmds).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logShardStates(clusterState);
+
+ AllocationCommand relocate2 = new MoveAllocationCommand(new ShardId("test2", 0), "node2", "node3");
+ cmds = new AllocationCommands(relocate2);
+
+ try {
+ // The shard for the "test" index is already being relocated to
+ // node3, which will put it over the low watermark when it
+ // completes, with shard relocations taken into account this should
+ // throw an exception about not being able to complete
+ strategy.reroute(clusterState, cmds).routingTable();
+ fail("should not have been able to reroute the shard");
+ } catch (IllegalArgumentException e) {
+ assertThat("can't allocate because there isn't enough room: " + e.getMessage(),
+ e.getMessage().contains("more than allowed [70.0%] used disk on node, free: [26.0%]"), equalTo(true));
+ }
+
+ }
+
+ @Test
+ public void testCanRemainWithShardRelocatingAway() {
+ Settings diskSettings = settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build();
+
+ // We have an index with 2 primary shards each taking 40 bytes. Each node has 100 bytes available
+ Map<String, DiskUsage> usages = new HashMap<>();
+ usages.put("node1", new DiskUsage("node1", "n1", 100, 20)); // 80% used
+ usages.put("node2", new DiskUsage("node2", "n2", 100, 100)); // 0% used
+
+ Map<String, Long> shardSizes = new HashMap<>();
+ shardSizes.put("[test][0][p]", 40L);
+ shardSizes.put("[test][1][p]", 40L);
+ final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+
+ DiskThresholdDecider diskThresholdDecider = new DiskThresholdDecider(diskSettings);
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNode discoveryNode1 = new DiscoveryNode("node1", new LocalTransportAddress("1"), Version.CURRENT);
+ DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", new LocalTransportAddress("2"), Version.CURRENT);
+ DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build();
+
+ ClusterState baseClusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
+ .metaData(metaData)
+ .routingTable(routingTable)
+ .nodes(discoveryNodes)
+ .build();
+
+ // Two shards consuming each 80% of disk space while 70% is allowed, so shard 0 isn't allowed here
+ MutableShardRouting firstRouting = new MutableShardRouting("test", 0, "node1", true, ShardRoutingState.STARTED, 1);
+ MutableShardRouting secondRouting = new MutableShardRouting("test", 1, "node1", true, ShardRoutingState.STARTED, 1);
+ RoutingNode firstRoutingNode = new RoutingNode("node1", discoveryNode1, Arrays.asList(firstRouting, secondRouting));
+ RoutingTable.Builder builder = RoutingTable.builder().add(
+ IndexRoutingTable.builder("test")
+ .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 0), false)
+ .addShard(firstRouting)
+ .build()
+ )
+ .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 1), false)
+ .addShard(secondRouting)
+ .build()
+ )
+ );
+ ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder).build();
+ RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo);
+ Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
+ assertThat(decision.type(), equalTo(Decision.Type.NO));
+
+ // Two shards consuming each 80% of disk space while 70% is allowed, but one is relocating, so shard 0 can stay
+ firstRouting = new MutableShardRouting("test", 0, "node1", true, ShardRoutingState.STARTED, 1);
+ secondRouting = new MutableShardRouting("test", 1, "node1", "node2", true, ShardRoutingState.RELOCATING, 1);
+ firstRoutingNode = new RoutingNode("node1", discoveryNode1, Arrays.asList(firstRouting, secondRouting));
+ builder = RoutingTable.builder().add(
+ IndexRoutingTable.builder("test")
+ .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 0), false)
+ .addShard(firstRouting)
+ .build()
+ )
+ .addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 1), false)
+ .addShard(secondRouting)
+ .build()
+ )
+ );
+ clusterState = ClusterState.builder(baseClusterState).routingTable(builder).build();
+ routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo);
+ decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
+ assertThat(decision.type(), equalTo(Decision.Type.YES));
+
+ // Creating AllocationService instance and the services it depends on...
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ logger.info("--> calling fake getClusterInfo");
+ return clusterInfo;
+ }
+
+ @Override
+ public void addListener(Listener listener) {
+ // noop
+ }
+ };
+ AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
+ new SameShardAllocationDecider(Settings.EMPTY), diskThresholdDecider
+ )));
+ AllocationService strategy = new AllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 10)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
+ .build(), deciders, makeShardsAllocators(), cis);
+ // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away
+ // and therefore we will have sufficient disk space on node1.
+ RoutingAllocation.Result result = strategy.reroute(clusterState);
+ assertThat(result.changed(), is(false));
+ assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
+ assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
+ assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
+ assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node1"));
+ assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
+ }
+
+ public void logShardStates(ClusterState state) {
+ RoutingNodes rn = state.routingNodes();
+ logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shards(new Predicate<MutableShardRouting>() {
+ @Override
+ public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
+ return true;
+ }
+ }).size(),
+ rn.shardsWithState(UNASSIGNED).size(),
+ rn.shardsWithState(INITIALIZING).size(),
+ rn.shardsWithState(RELOCATING).size(),
+ rn.shardsWithState(STARTED).size());
+ logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shardsWithState(UNASSIGNED),
+ rn.shardsWithState(INITIALIZING),
+ rn.shardsWithState(RELOCATING),
+ rn.shardsWithState(STARTED));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java
new file mode 100644
index 0000000000..083dc79acb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Unit tests for the DiskThresholdDecider
+ */
+public class DiskThresholdDeciderUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDynamicSettings() {
+ NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY);
+
+ ClusterInfoService cis = new ClusterInfoService() {
+ @Override
+ public ClusterInfo getClusterInfo() {
+ Map<String, DiskUsage> usages = new HashMap<>();
+ Map<String, Long> shardSizes = new HashMap<>();
+ return new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+ }
+
+ @Override
+ public void addListener(Listener listener) {
+ // noop
+ }
+ };
+ DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null);
+
+ assertThat(decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test")));
+ assertThat(decider.getFreeDiskThresholdHigh(), equalTo(10.0d));
+ assertThat(decider.getFreeBytesThresholdLow(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test")));
+ assertThat(decider.getFreeDiskThresholdLow(), equalTo(15.0d));
+ assertThat(decider.getUsedDiskThresholdLow(), equalTo(85.0d));
+ assertThat(decider.getRerouteInterval().seconds(), equalTo(60L));
+ assertTrue(decider.isEnabled());
+ assertTrue(decider.isIncludeRelocations());
+
+ DiskThresholdDecider.ApplySettings applySettings = decider.newApplySettings();
+
+ Settings newSettings = Settings.builder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, false)
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "500mb")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "30s")
+ .build();
+
+ applySettings.onRefreshSettings(newSettings);
+
+ assertThat("high threshold bytes should be unset",
+ decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test")));
+ assertThat("high threshold percentage should be changed",
+ decider.getFreeDiskThresholdHigh(), equalTo(30.0d));
+ assertThat("low threshold bytes should be set to 500mb",
+ decider.getFreeBytesThresholdLow(), equalTo(ByteSizeValue.parseBytesSizeValue("500mb", "test")));
+ assertThat("low threshold bytes should be unset",
+ decider.getFreeDiskThresholdLow(), equalTo(0.0d));
+ assertThat("reroute interval should be changed to 30 seconds",
+ decider.getRerouteInterval().seconds(), equalTo(30L));
+ assertFalse("disk threshold decider should now be disabled", decider.isEnabled());
+ assertFalse("relocations should now be disabled", decider.isIncludeRelocations());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIntegrationTest.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIntegrationTest.java
new file mode 100644
index 0000000000..12924234ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIntegrationTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Simple integration for {@link EnableAllocationDecider} there is a more exhaustive unittest in
+ * {@link EnableAllocationTests} this test is meant to check if the actual update of the settings
+ * works as expected.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class EnableAllocationDeciderIntegrationTest extends ElasticsearchIntegrationTest {
+
+ public void testEnableRebalance() throws InterruptedException {
+ final String firstNode = internalCluster().startNode();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get();
+ // we test with 2 shards since otherwise it's pretty fragile if there are differences in the number of shards such that
+ // all shards are relocated to the second node which is not what we want here. It's solely a test for the settings to take effect
+ final int numShards = 2;
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
+ assertAcked(prepareCreate("test_1").setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)));
+ ensureGreen();
+ assertAllShardsOnNodes("test", firstNode);
+ assertAllShardsOnNodes("test_1", firstNode);
+
+ final String secondNode = internalCluster().startNode();
+ // prevent via index setting but only on index test
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get();
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen();
+ assertAllShardsOnNodes("test", firstNode);
+ assertAllShardsOnNodes("test_1", firstNode);
+
+ // now enable the index test to relocate since index settings override cluster settings
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
+ logger.info("--> balance index [test]");
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen("test");
+ Set<String> test = assertAllShardsOnNodes("test", firstNode, secondNode);
+ assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
+
+ // flip the cluster wide setting such that we can also balance for index test_1 eventually we should have one shard of each index on each node
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get();
+ logger.info("--> balance index [test_1]");
+ client().admin().cluster().prepareReroute().get();
+ ensureGreen("test_1");
+ Set<String> test_1 = assertAllShardsOnNodes("test_1", firstNode, secondNode);
+ assertThat("index: [test_1] expected to be rebalanced on both nodes", test_1.size(), equalTo(2));
+
+ test = assertAllShardsOnNodes("test", firstNode, secondNode);
+ assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
new file mode 100644
index 0000000000..52c4bb27a1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.EnumSet;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE;
+import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class EnableAllocationTests extends ElasticsearchAllocationTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(EnableAllocationTests.class);
+
+ @Test
+ public void testClusterEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ @Test
+ public void testClusterEnableOnlyPrimaries() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name())
+ .build());
+
+ logger.info("Building initial routing table");
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+ }
+
+ @Test
+ public void testIndexEnableNone() {
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .build());
+
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("disabled").settings(settings(Version.CURRENT)
+ .put(INDEX_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()))
+ .numberOfShards(1).numberOfReplicas(1))
+ .put(IndexMetaData.builder("enabled").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("disabled"))
+ .addAsNew(metaData.index("enabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding two nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ logger.info("--> start the shards (replicas)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+ logger.info("--> verify only enabled index has been routed");
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("enabled", STARTED).size(), equalTo(2));
+ assertThat(clusterState.readOnlyRoutingNodes().shardsWithState("disabled", STARTED).size(), equalTo(0));
+ }
+
+
+
+
+ /**
+ * Verifies that shard rebalancing honours the "rebalance enable" setting:
+ * while it is NONE no shards relocate, and once switched to PRIMARIES,
+ * REPLICAS or ALL only the matching shard copies of the "test" index
+ * relocate ("always_disabled" keeps its index-level NONE override).
+ * Randomly exercises both the cluster-level and the per-index setting path.
+ */
+ @Test
+ public void testEnableClusterBalance() {
+ final boolean useClusterSetting = randomBoolean();
+ final Rebalance allowedOnes = RandomPicks.randomFrom(getRandom(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL));
+ Settings build = settingsBuilder()
+ .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings
+ .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3)
+ .build();
+ NodeSettingsService nodeSettingsService = new NodeSettingsService(build);
+ AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom());
+ // When testing the cluster setting, the index carries no override; otherwise
+ // the index-level NONE is what disables rebalancing initially.
+ Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build();
+
+ logger.info("Building initial routing table");
+ // "test": 3 shards x (primary + replica) = 6 copies;
+ // "always_disabled": 1 shard x 2 copies, rebalance disabled at index level.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(3).numberOfReplicas(1))
+ .put(IndexMetaData.builder("always_disabled").settings(settings(Version.CURRENT).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE)).numberOfShards(1).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .addAsNew(metaData.index("always_disabled"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding one nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // The 4 primaries (3 "test" + 1 "always_disabled") initialize first.
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Primaries are started; their 4 replicas now initialize.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(4));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(4));
+
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // All 8 shard copies are started on the two nodes.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> adding one nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ ).build();
+ ClusterState prevState = clusterState;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Rebalancing is still effectively NONE, so adding node3 must not relocate anything.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(0));
+
+ // Enable rebalancing for the randomly chosen subset (allowedOnes), either
+ // via a transient cluster setting or by rewriting the "test" index setting.
+ if (useClusterSetting) {
+ prevState = clusterState;
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder()
+ .put(CLUSTER_ROUTING_REBALANCE_ENABLE, allowedOnes)
+ .build())).build();
+ } else {
+ prevState = clusterState;
+ IndexMetaData meta = clusterState.getMetaData().index("test");
+ IndexMetaData meta1 = clusterState.getMetaData().index("always_disabled");
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices().put(IndexMetaData.builder(meta1))
+ .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, allowedOnes).build())))
+ .build();
+
+ }
+ // Propagate the dynamic settings change to the deciders before rerouting.
+ nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState));
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(6));
+ assertThat("expected 2 shards to relocate useClusterSettings: " + useClusterSetting, clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(2));
+ List<MutableShardRouting> mutableShardRoutings = clusterState.routingNodes().shardsWithState(RELOCATING);
+ // Relocating copies must match the enabled kind and belong to "test" only.
+ switch (allowedOnes) {
+ case PRIMARIES:
+ for (MutableShardRouting routing : mutableShardRoutings) {
+ assertTrue("only primaries are allowed to relocate", routing.primary());
+ assertThat("only test index can rebalance", routing.getIndex(), equalTo("test"));
+ }
+ break;
+ case REPLICAS:
+ for (MutableShardRouting routing : mutableShardRoutings) {
+ assertFalse("only replicas are allowed to relocate", routing.primary());
+ assertThat("only test index can rebalance", routing.getIndex(), equalTo("test"));
+ }
+ break;
+ case ALL:
+ for (MutableShardRouting routing : mutableShardRoutings) {
+ assertThat("only test index can rebalance", routing.getIndex(), equalTo("test"));
+ }
+ break;
+ default:
+ fail("only replicas, primaries or all are allowed");
+ }
+ // Finish the relocations; the cluster settles back to 8 started copies.
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(8));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ }
+
+ /**
+ * Same scenario as testEnableClusterBalance but with a replica-free index
+ * (6 primaries, 0 replicas): once rebalancing is enabled (PRIMARIES or ALL),
+ * adding a third node relocates exactly 2 primaries. Randomly exercises the
+ * cluster-level vs. per-index setting path.
+ */
+ @Test
+ public void testEnableClusterBalanceNoReplicas() {
+ final boolean useClusterSetting = randomBoolean();
+ Settings build = settingsBuilder()
+ .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings
+ .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3)
+ .build();
+ NodeSettingsService nodeSettingsService = new NodeSettingsService(build);
+ AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom());
+ Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build();
+
+ logger.info("Building initial routing table");
+ // 6 primary shards, no replicas.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put(indexSettings)).numberOfShards(6).numberOfReplicas(0))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+ logger.info("--> adding one nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ ).build();
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(6));
+ logger.info("--> start the shards (primaries)");
+ routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // All 6 primaries start; with no replicas nothing else initializes.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(clusterState.routingNodes().shardsWithState(INITIALIZING).size(), equalTo(0));
+
+ logger.info("--> adding one nodes and do rerouting");
+ clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+ .put(newNode("node1"))
+ .put(newNode("node2"))
+ .put(newNode("node3"))
+ ).build();
+ ClusterState prevState = clusterState;
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ // Rebalancing is still effectively NONE: node3 gets nothing yet.
+ assertThat(clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(6));
+ assertThat(clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(0));
+ // Enable primary (or all) rebalancing via whichever setting path is under test.
+ if (useClusterSetting) {
+ prevState = clusterState;
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder()
+ .put(CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL)
+ .build())).build();
+ } else {
+ prevState = clusterState;
+ IndexMetaData meta = clusterState.getMetaData().index("test");
+ clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices()
+ .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build();
+ }
+ // Propagate the dynamic settings change before rerouting.
+ nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState));
+ routingTable = strategy.reroute(clusterState).routingTable();
+ clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+ assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.routingNodes().shardsWithState(STARTED).size(), equalTo(4));
+ assertThat("expected 2 primaries to relocate useClusterSettings: " + useClusterSetting, clusterState.routingNodes().shardsWithState(RELOCATING).size(), equalTo(2));
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java
new file mode 100644
index 0000000000..db4bb57655
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesTests.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation.decider;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Integration test that plugs a mock ClusterInfoService into the cluster so
+ * disk usages can be faked, then verifies the disk-threshold decider reroutes
+ * shards away from a node once it crosses the high watermark.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class MockDiskUsagesTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ // Use the mock internal cluster info service, which has fake-able disk usages
+ .put(ClusterModule.CLUSTER_SERVICE_IMPL, MockInternalClusterInfoService.class.getName())
+ // Update more frequently
+ .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "2s")
+ .build();
+ }
+
+ // NOTE(review): method name has a typo ("Diskpassing"); left as-is since this
+ // is a recorded patch and renaming would change the commit content.
+ @Test
+ //@TestLogging("org.elasticsearch.cluster:TRACE,org.elasticsearch.cluster.routing.allocation.decider:TRACE")
+ public void testRerouteOccursOnDiskpassingHighWatermark() throws Exception {
+ List<String> nodes = internalCluster().startNodesAsync(3).get();
+
+ // Wait for all 3 nodes to be up
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ NodesStatsResponse resp = client().admin().cluster().prepareNodesStats().get();
+ assertThat(resp.getNodes().length, equalTo(3));
+ }
+ });
+
+ // Start with all nodes at 50% usage
+ final MockInternalClusterInfoService cis = (MockInternalClusterInfoService)
+ internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName());
+ cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", 100, 50));
+ cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", 100, 50));
+ cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", 100, 50));
+
+ // Set low/high watermarks (randomly byte- or percent-based) and a fast
+ // reroute interval so the decider reacts quickly to usage changes.
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, randomFrom("20b", "80%"))
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, randomFrom("10b", "90%"))
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "1s")).get();
+
+ // Create an index with 10 shards so we can check allocation for it
+ prepareCreate("test").setSettings(settingsBuilder()
+ .put("number_of_shards", 10)
+ .put("number_of_replicas", 0)
+ .put("index.routing.allocation.exclude._name", "")).get();
+ ensureGreen("test");
+
+ // Block until the "fake" cluster info is retrieved at least once
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterInfo info = cis.getClusterInfo();
+ logger.info("--> got: {} nodes", info.getNodeDiskUsages().size());
+ assertThat(info.getNodeDiskUsages().size(), greaterThan(0));
+ }
+ });
+
+ // Collect the real (generated) node ids in routing order.
+ List<String> realNodeNames = newArrayList();
+ ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+ Iterator<RoutingNode> iter = resp.getState().getRoutingNodes().iterator();
+ while (iter.hasNext()) {
+ RoutingNode node = iter.next();
+ realNodeNames.add(node.nodeId());
+ logger.info("--> node {} has {} shards",
+ node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+ }
+
+ // Update the disk usages so one node has now passed the high watermark
+ cis.setN1Usage(realNodeNames.get(0), new DiskUsage(nodes.get(0), "n1", 100, 50));
+ cis.setN2Usage(realNodeNames.get(1), new DiskUsage(nodes.get(1), "n2", 100, 50));
+ cis.setN3Usage(realNodeNames.get(2), new DiskUsage(nodes.get(2), "n3", 100, 0)); // nothing free on node3
+
+ // Cluster info gathering interval is 2 seconds, give reroute 2 seconds to kick in
+ // NOTE(review): fixed sleep is timing-sensitive; an assertBusy-style wait
+ // would be more robust — confirm before changing recorded history.
+ Thread.sleep(4000);
+
+ // Retrieve the count of shards on each node
+ resp = client().admin().cluster().prepareState().get();
+ iter = resp.getState().getRoutingNodes().iterator();
+ Map<String, Integer> nodesToShardCount = newHashMap();
+ while (iter.hasNext()) {
+ RoutingNode node = iter.next();
+ logger.info("--> node {} has {} shards",
+ node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+ nodesToShardCount.put(node.nodeId(), resp.getState().getRoutingNodes().node(node.nodeId()).numberOfOwningShards());
+ }
+ // The 10 shards must have drained off node3 and split evenly across node1/node2.
+ assertThat("node1 has 5 shards", nodesToShardCount.get(realNodeNames.get(0)), equalTo(5));
+ assertThat("node2 has 5 shards", nodesToShardCount.get(realNodeNames.get(1)), equalTo(5));
+ assertThat("node3 has 0 shards", nodesToShardCount.get(realNodeNames.get(2)), equalTo(0));
+ }
+
+ /** Create a fake NodeStats for the given node and usage */
+ public static NodeStats makeStats(String nodeName, DiskUsage usage) {
+ FsStats.Info[] infos = new FsStats.Info[1];
+ FsStats.Info info = new FsStats.Info("/path.data", null, null,
+ usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes(), -1, -1, -1, -1, -1, -1);
+ infos[0] = info;
+ FsStats fsStats = new FsStats(System.currentTimeMillis(), infos);
+ // Only the fs stats matter here; every other stats section is left null.
+ return new NodeStats(new DiscoveryNode(nodeName, null, Version.V_2_0_0),
+ System.currentTimeMillis(),
+ null, null, null, null, null, null,
+ fsStats,
+ null, null, null);
+ }
+
+ /**
+ * Fake ClusterInfoService class that allows updating the nodes stats disk
+ * usage with fake values
+ */
+ public static class MockInternalClusterInfoService extends InternalClusterInfoService {
+
+ private final ClusterName clusterName;
+ // One fabricated NodeStats slot per test node; replaced via setN*Usage.
+ private volatile NodeStats[] stats = new NodeStats[3];
+
+ @Inject
+ public MockInternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+ TransportNodesStatsAction transportNodesStatsAction,
+ TransportIndicesStatsAction transportIndicesStatsAction,
+ ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, nodeSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool);
+ this.clusterName = ClusterName.clusterNameFromSettings(settings);
+ // Default: all three nodes report 100% free disk.
+ stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", 100, 100));
+ stats[1] = makeStats("node_t2", new DiskUsage("node_t2", "n2", 100, 100));
+ stats[2] = makeStats("node_t3", new DiskUsage("node_t3", "n3", 100, 100));
+ }
+
+ public void setN1Usage(String nodeName, DiskUsage newUsage) {
+ stats[0] = makeStats(nodeName, newUsage);
+ }
+
+ public void setN2Usage(String nodeName, DiskUsage newUsage) {
+ stats[1] = makeStats(nodeName, newUsage);
+ }
+
+ public void setN3Usage(String nodeName, DiskUsage newUsage) {
+ stats[2] = makeStats(nodeName, newUsage);
+ }
+
+ @Override
+ public CountDownLatch updateNodeStats(final ActionListener<NodesStatsResponse> listener) {
+ // Answer immediately with the fabricated stats instead of hitting transport.
+ NodesStatsResponse response = new NodesStatsResponse(clusterName, stats);
+ listener.onResponse(response);
+ return new CountDownLatch(0);
+ }
+
+ @Override
+ public CountDownLatch updateIndicesStats(final ActionListener<IndicesStatsResponse> listener) {
+ // Not used, so noop
+ return new CountDownLatch(0);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java
new file mode 100644
index 0000000000..d19433853a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/operation/hash/murmur3/Murmur3HashFunctionTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.operation.hash.murmur3;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.hash.HashFunction;
+import com.google.common.hash.Hashing;
+import org.elasticsearch.cluster.routing.Murmur3HashFunction;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+/**
+ * Checks that the routing Murmur3HashFunction agrees with Guava's
+ * murmur3_32 implementation over random realistic unicode ids.
+ */
+public class Murmur3HashFunctionTests extends ElasticsearchTestCase {
+
+ public void test() {
+ // Make sure that we agree with guava
+ Murmur3HashFunction murmur3 = new Murmur3HashFunction();
+ HashFunction guavaMurmur3 = Hashing.murmur3_32();
+ for (int i = 0; i < 100; ++i) {
+ // Random id of 1-20 unicode codepoints; compare the two 32-bit hashes.
+ final String id = RandomStrings.randomRealisticUnicodeOfCodepointLength(getRandom(), RandomInts.randomIntBetween(getRandom(), 1, 20));
+ //final String id = "0";
+ final int hash1 = guavaMurmur3.newHasher().putUnencodedChars(id).hash().asInt();
+ final int hash2 = murmur3.hash(id);
+ assertEquals(hash1, hash2);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
new file mode 100644
index 0000000000..71e5e62f67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Round-trips ClusterState and RoutingTable through their binary
+ * serialization and verifies the deserialized result matches the original.
+ */
+public class ClusterSerializationTests extends ElasticsearchAllocationTestCase {
+
+ /** Serializes a full ClusterState to bytes and back, comparing name and routing table. */
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
+
+ ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1")).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ // Allocate shards first so the serialized routing table is non-trivial.
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
+ ClusterState serializedClusterState = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), newNode("node1"));
+
+ assertThat(serializedClusterState.getClusterName().value(), equalTo(clusterState.getClusterName().value()));
+
+ // prettyPrint comparison covers the full routing table content.
+ assertThat(serializedClusterState.routingTable().prettyPrint(), equalTo(clusterState.routingTable().prettyPrint()));
+ }
+
+
+ /** Streams a RoutingTable through BytesStreamOutput/StreamInput and compares. */
+ @Test
+ public void testRoutingTableSerialization() throws Exception {
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ AllocationService strategy = createAllocationService();
+ RoutingTable source = strategy.reroute(clusterState).routingTable();
+
+ BytesStreamOutput outStream = new BytesStreamOutput();
+ source.writeTo(outStream);
+ StreamInput inStream = StreamInput.wrap(outStream.bytes().toBytes());
+ RoutingTable target = RoutingTable.Builder.readFrom(inStream);
+
+ assertThat(target.prettyPrint(), equalTo(source.prettyPrint()));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
new file mode 100644
index 0000000000..9c812dd7b7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.serialization;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * Verifies that ClusterState#toString() contains the index, template and
+ * node names present in the state.
+ */
+public class ClusterStateToStringTests extends ElasticsearchAllocationTestCase {
+ @Test
+ public void testClusterStateSerialization() throws Exception {
+ // State with one index, one template, and one node acting as local + master.
+ MetaData metaData = MetaData.builder()
+ .put(IndexMetaData.builder("test_idx").settings(settings(Version.CURRENT)).numberOfShards(10).numberOfReplicas(1))
+ .put(IndexTemplateMetaData.builder("test_template").build())
+ .build();
+
+ RoutingTable routingTable = RoutingTable.builder()
+ .addAsNew(metaData.index("test_idx"))
+ .build();
+
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", DummyTransportAddress.INSTANCE, Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build();
+
+ ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
+
+ // Reroute so the routing table is populated before rendering.
+ AllocationService strategy = createAllocationService();
+ clusterState = ClusterState.builder(clusterState).routingTable(strategy.reroute(clusterState).routingTable()).build();
+
+ String clusterStateString = clusterState.toString();
+ assertNotNull(clusterStateString);
+
+ assertThat(clusterStateString, containsString("test_idx"));
+ assertThat(clusterStateString, containsString("test_template"));
+ assertThat(clusterStateString, containsString("node_foo"));
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
new file mode 100644
index 0000000000..0cc1c8c5ba
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.serialization;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.cluster.Diff;
+import org.elasticsearch.cluster.DiffableUtils;
+import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
+import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.stream.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests DiffableUtils map diffing: a diff computed between a "before" and an
+ * "after" map, serialized and deserialized, must reproduce "after" when
+ * applied to "before" (covering remove, update and insert of entries).
+ */
+public class DiffableTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testImmutableMapDiff() throws IOException {
+ ImmutableMap.Builder<String, TestDiffable> builder = ImmutableMap.builder();
+ builder.put("foo", new TestDiffable("1"));
+ builder.put("bar", new TestDiffable("2"));
+ builder.put("baz", new TestDiffable("3"));
+ ImmutableMap<String, TestDiffable> before = builder.build();
+ // after = before with "bar" removed, "baz" changed, "new" added.
+ Map<String, TestDiffable> map = newHashMap();
+ map.putAll(before);
+ map.remove("bar");
+ map.put("baz", new TestDiffable("4"));
+ map.put("new", new TestDiffable("5"));
+ ImmutableMap<String, TestDiffable> after = ImmutableMap.copyOf(map);
+ Diff diff = DiffableUtils.diff(before, after);
+ // Round-trip the diff through a byte stream before applying it.
+ BytesStreamOutput out = new BytesStreamOutput();
+ diff.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ ImmutableMap<String, TestDiffable> serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before);
+ assertThat(serialized.size(), equalTo(3));
+ assertThat(serialized.get("foo").value(), equalTo("1"));
+ assertThat(serialized.get("baz").value(), equalTo("4"));
+ assertThat(serialized.get("new").value(), equalTo("5"));
+ }
+
+ @Test
+ public void testImmutableOpenMapDiff() throws IOException {
+ ImmutableOpenMap.Builder<String, TestDiffable> builder = ImmutableOpenMap.builder();
+ builder.put("foo", new TestDiffable("1"));
+ builder.put("bar", new TestDiffable("2"));
+ builder.put("baz", new TestDiffable("3"));
+ ImmutableOpenMap<String, TestDiffable> before = builder.build();
+ // after = before with "bar" removed, "baz" changed, "new" added.
+ builder = ImmutableOpenMap.builder(before);
+ builder.remove("bar");
+ builder.put("baz", new TestDiffable("4"));
+ builder.put("new", new TestDiffable("5"));
+ ImmutableOpenMap<String, TestDiffable> after = builder.build();
+ Diff diff = DiffableUtils.diff(before, after);
+ BytesStreamOutput out = new BytesStreamOutput();
+ diff.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ // Read back via an explicit KeyedReader instead of a prototype instance.
+ ImmutableOpenMap<String, TestDiffable> serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<TestDiffable>() {
+ @Override
+ public TestDiffable readFrom(StreamInput in, String key) throws IOException {
+ return new TestDiffable(in.readString());
+ }
+
+ @Override
+ public Diff<TestDiffable> readDiffFrom(StreamInput in, String key) throws IOException {
+ return AbstractDiffable.readDiffFrom(new StreamableReader<TestDiffable>() {
+ @Override
+ public TestDiffable readFrom(StreamInput in) throws IOException {
+ return new TestDiffable(in.readString());
+ }
+ }, in);
+ }
+ }).apply(before);
+ assertThat(serialized.size(), equalTo(3));
+ assertThat(serialized.get("foo").value(), equalTo("1"));
+ assertThat(serialized.get("baz").value(), equalTo("4"));
+ assertThat(serialized.get("new").value(), equalTo("5"));
+
+ }
+ /** Minimal Diffable wrapping a single string value, used as test payload. */
+ public static class TestDiffable extends AbstractDiffable<TestDiffable> {
+
+ // Prototype instance used by readImmutableMapDiff to deserialize entries.
+ public static final TestDiffable PROTO = new TestDiffable("");
+
+ private final String value;
+
+ public TestDiffable(String value) {
+ this.value = value;
+ }
+
+ public String value() {
+ return value;
+ }
+
+ @Override
+ public TestDiffable readFrom(StreamInput in) throws IOException {
+ return new TestDiffable(in.readString());
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(value);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
new file mode 100644
index 0000000000..2faf0dd4b3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder;
+import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = TEST)
+public class ClusterSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void clusterNonExistingSettingsUpdate() {
+ String key1 = "no_idea_what_you_are_talking_about";
+ int value1 = 10;
+
+ ClusterUpdateSettingsResponse response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(key1, value1).build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable());
+ }
+
+ @Test
+ public void clusterSettingsUpdateResponse() {
+ String key1 = IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC;
+ int value1 = 10;
+
+ String key2 = DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+ boolean value2 = true;
+
+ Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build();
+ Settings persistentSettings1 = Settings.builder().put(key2, value2).build();
+
+ ClusterUpdateSettingsResponse response1 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings1)
+ .setPersistentSettings(persistentSettings1)
+ .execute()
+ .actionGet();
+
+ assertAcked(response1);
+ assertThat(response1.getTransientSettings().get(key1), notNullValue());
+ assertThat(response1.getTransientSettings().get(key2), nullValue());
+ assertThat(response1.getPersistentSettings().get(key1), nullValue());
+ assertThat(response1.getPersistentSettings().get(key2), notNullValue());
+
+ Settings transientSettings2 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build();
+ Settings persistentSettings2 = Settings.EMPTY;
+
+ ClusterUpdateSettingsResponse response2 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings2)
+ .setPersistentSettings(persistentSettings2)
+ .execute()
+ .actionGet();
+
+ assertAcked(response2);
+ assertThat(response2.getTransientSettings().get(key1), notNullValue());
+ assertThat(response2.getTransientSettings().get(key2), notNullValue());
+ assertThat(response2.getPersistentSettings().get(key1), nullValue());
+ assertThat(response2.getPersistentSettings().get(key2), nullValue());
+
+ Settings transientSettings3 = Settings.EMPTY;
+ Settings persistentSettings3 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build();
+
+ ClusterUpdateSettingsResponse response3 = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(transientSettings3)
+ .setPersistentSettings(persistentSettings3)
+ .execute()
+ .actionGet();
+
+ assertAcked(response3);
+ assertThat(response3.getTransientSettings().get(key1), nullValue());
+ assertThat(response3.getTransientSettings().get(key2), nullValue());
+ assertThat(response3.getPersistentSettings().get(key1), notNullValue());
+ assertThat(response3.getPersistentSettings().get(key2), notNullValue());
+ }
+
+ @Test
+ public void testUpdateDiscoveryPublishTimeout() {
+
+ DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class);
+
+ assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.DEFAULT_PUBLISH_TIMEOUT));
+
+ ClusterUpdateSettingsResponse response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "1s").build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT), equalTo("1s"));
+ assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
+
+ response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "whatever").build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable());
+ assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
+
+ response = client().admin().cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, -1).build())
+ .get();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable());
+ assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
+ }
+
+ @Test
+ public void testClusterUpdateSettingsWithBlocks() {
+ String key1 = "cluster.routing.allocation.enable";
+ Settings transientSettings = Settings.builder().put(key1, false).build();
+
+ String key2 = "cluster.routing.allocation.node_concurrent_recoveries";
+ Settings persistentSettings = Settings.builder().put(key2, "5").build();
+
+ ClusterUpdateSettingsRequestBuilder request = client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(transientSettings)
+ .setPersistentSettings(persistentSettings);
+
+ // Cluster settings updates are blocked when the cluster is read only
+ try {
+ setClusterReadOnly(true);
+ assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK);
+
+ // But it's possible to update the settings to update the "cluster.blocks.read_only" setting
+ Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+
+ } finally {
+ setClusterReadOnly(false);
+ }
+
+ // It should work now
+ ClusterUpdateSettingsResponse response = request.execute().actionGet();
+
+ assertAcked(response);
+ assertThat(response.getTransientSettings().get(key1), notNullValue());
+ assertThat(response.getTransientSettings().get(key2), nullValue());
+ assertThat(response.getPersistentSettings().get(key1), nullValue());
+ assertThat(response.getPersistentSettings().get(key2), notNullValue());
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testMissingUnits() {
+ assertAcked(prepareCreate("test"));
+
+ // Should fail (missing units for refresh_interval):
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "10")).execute().actionGet();
+ }
+
+ @Test
+ public void testMissingUnitsLenient() {
+ try {
+ createNode(Settings.builder().put(Settings.SETTINGS_REQUIRE_UNITS, "false").build());
+ assertAcked(prepareCreate("test"));
+ ensureGreen();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "10")).execute().actionGet();
+ } finally {
+ // Restore the default so subsequent tests require units:
+ assertFalse(Settings.getSettingsRequireUnits());
+ Settings.setSettingsRequireUnits(true);
+ }
+ }
+
+ private void createNode(Settings settings) {
+ internalCluster().startNode(Settings.builder()
+ .put(ClusterName.SETTING, "ClusterSettingsTests")
+ .put("node.name", "ClusterSettingsTests")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(EsExecutors.PROCESSORS, 1) // limit the number of threads created
+ .put("http.enabled", false)
+ .put("config.ignore_system_properties", true) // make sure we get what we set :)
+ .put(settings)
+ );
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringTests.java b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringTests.java
new file mode 100644
index 0000000000..e8876577ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsFilteringTests.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+@ClusterScope(scope = SUITE, numDataNodes = 1)
+public class SettingsFilteringTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", SettingsFilteringPlugin.class.getName())
+ .build();
+ }
+
+ public static class SettingsFilteringPlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "settings-filtering";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "Settings Filtering Plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> indexModules() {
+ Collection<Class<? extends Module>> modules = newArrayList();
+ modules.add(SettingsFilteringModule.class);
+ return modules;
+ }
+ }
+
+ public static class SettingsFilteringModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(SettingsFilteringService.class).asEagerSingleton();
+ }
+ }
+
+ public static class SettingsFilteringService {
+ @Inject
+ public SettingsFilteringService(SettingsFilter settingsFilter) {
+ settingsFilter.addFilter("index.filter_test.foo");
+ settingsFilter.addFilter("index.filter_test.bar*");
+ }
+ }
+
+
+ @Test
+ public void testSettingsFiltering() {
+
+ assertAcked(client().admin().indices().prepareCreate("test-idx").setSettings(Settings.builder()
+ .put("filter_test.foo", "test")
+ .put("filter_test.bar1", "test")
+ .put("filter_test.bar2", "test")
+ .put("filter_test.notbar", "test")
+ .put("filter_test.notfoo", "test")
+ .build()).get());
+ GetSettingsResponse response = client().admin().indices().prepareGetSettings("test-idx").get();
+ Settings settings = response.getIndexToSettings().get("test-idx");
+
+ assertThat(settings.get("index.filter_test.foo"), nullValue());
+ assertThat(settings.get("index.filter_test.bar1"), nullValue());
+ assertThat(settings.get("index.filter_test.bar2"), nullValue());
+ assertThat(settings.get("index.filter_test.notbar"), equalTo("test"));
+ assertThat(settings.get("index.filter_test.notfoo"), equalTo("test"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
new file mode 100644
index 0000000000..5208a7bb8d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/settings/SettingsValidatorTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.settings;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public class SettingsValidatorTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testValidators() throws Exception {
+ assertThat(Validator.EMPTY.validate("", "anything goes"), nullValue());
+
+ assertThat(Validator.TIME.validate("", "10m"), nullValue());
+ assertThat(Validator.TIME.validate("", "10g"), notNullValue());
+ assertThat(Validator.TIME.validate("", "bad timing"), notNullValue());
+
+ assertThat(Validator.BYTES_SIZE.validate("", "10m"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "10g"), nullValue());
+ assertThat(Validator.BYTES_SIZE.validate("", "bad"), notNullValue());
+
+ assertThat(Validator.FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_FLOAT.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "2.0"), nullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "1.0"), notNullValue());
+ assertThat(Validator.DOUBLE_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "0.0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "-1.0"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_DOUBLE.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.INTEGER.validate("", "10"), nullValue());
+ assertThat(Validator.INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.INTEGER_GTE_2.validate("", "2"), nullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "1"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "0"), notNullValue());
+ assertThat(Validator.INTEGER_GTE_2.validate("", "10.2.3"), notNullValue());
+
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "0"), nullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.NON_NEGATIVE_INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "2"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "1"), nullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "0"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "-1"), notNullValue());
+ assertThat(Validator.POSITIVE_INTEGER.validate("", "10.2"), notNullValue());
+
+ assertThat(Validator.PERCENTAGE.validate("", "asdasd"), notNullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "-1"), notNullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "20"), notNullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "-1%"), notNullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "101%"), notNullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "100%"), nullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "99%"), nullValue());
+ assertThat(Validator.PERCENTAGE.validate("", "0%"), nullValue());
+
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "asdasd"), notNullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20"), notNullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "20mb"), nullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "-1%"), notNullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "101%"), notNullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "100%"), nullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "99%"), nullValue());
+ assertThat(Validator.BYTES_SIZE_OR_PERCENTAGE.validate("", "0%"), nullValue());
+ }
+
+ @Test
+ public void testDynamicValidators() throws Exception {
+ DynamicSettings ds = new DynamicSettings();
+ ds.addDynamicSetting("my.test.*", Validator.POSITIVE_INTEGER);
+ String valid = ds.validateDynamicSetting("my.test.setting", "-1");
+ assertThat(valid, equalTo("the value of the setting my.test.setting must be a positive integer"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
new file mode 100644
index 0000000000..2daaa66d7e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster.shards;
+
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
+import org.elasticsearch.cluster.metadata.AliasAction;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(scope= Scope.SUITE, numDataNodes = 2)
+public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ switch(nodeOrdinal) {
+ case 1:
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("node.tag", "B").build();
+ case 0:
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("node.tag", "A").build();
+ }
+ return super.nodeSettings(nodeOrdinal);
+ }
+
+ @Test
+ public void testSingleShardAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "1").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("A").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(0));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(1));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ }
+
+ @Test
+ public void testMultipleShardsSingleNodeAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 0).put("index.routing.allocation.include.tag", "A")).execute().actionGet();
+ ensureGreen();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(4));
+ assertThat(response.getGroups()[0].getIndex(), equalTo("test"));
+ assertThat(response.getNodes().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShards()[0].currentNodeId(), equalTo(response.getNodes()[0].getId()));
+
+ response = client().admin().cluster().prepareSearchShards("test").setRouting("ABC").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+
+ response = client().admin().cluster().prepareSearchShards("test").setPreference("_shards:2").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(1));
+ assertThat(response.getGroups()[0].getShardId(), equalTo(2));
+ }
+
+ @Test
+ public void testMultipleIndicesAllocation() throws Exception {
+ client().admin().indices().prepareCreate("test1").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareCreate("test2").setSettings(settingsBuilder()
+ .put("index.number_of_shards", "4").put("index.number_of_replicas", 1)).execute().actionGet();
+ client().admin().indices().prepareAliases()
+ .addAliasAction(AliasAction.newAddAliasAction("test1", "routing_alias").routing("ABC"))
+ .addAliasAction(AliasAction.newAddAliasAction("test2", "routing_alias").routing("EFG"))
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("routing_alias").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(2));
+ assertThat(response.getGroups()[0].getShards().length, equalTo(2));
+ assertThat(response.getGroups()[1].getShards().length, equalTo(2));
+ boolean seenTest1 = false;
+ boolean seenTest2 = false;
+ for (ClusterSearchShardsGroup group : response.getGroups()) {
+ if (group.getIndex().equals("test1")) {
+ seenTest1 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else if (group.getIndex().equals("test2")) {
+ seenTest2 = true;
+ assertThat(group.getShards().length, equalTo(2));
+ } else {
+ fail();
+ }
+ }
+ assertThat(seenTest1, equalTo(true));
+ assertThat(seenTest2, equalTo(true));
+ assertThat(response.getNodes().length, equalTo(2));
+ }
+
+ @Test
+ public void testClusterSearchShardsWithBlocks() {
+ createIndex("test-blocks");
+
+ NumShards numShards = getNumShards("test-blocks");
+
+ int docs = between(10, 100);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet();
+ }
+ ensureGreen("test-blocks");
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test-blocks", blockSetting);
+ ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test-blocks").execute().actionGet();
+ assertThat(response.getGroups().length, equalTo(numShards.numPrimaries));
+ } finally {
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().cluster().prepareSearchShards("test-blocks"));
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
new file mode 100644
index 0000000000..8bde8877d2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java
@@ -0,0 +1,327 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.structure;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class RoutingIteratorTests extends ElasticsearchAllocationTestCase {
+
+    // An empty shard iterator must consistently report zero remaining shards
+    // and return null from nextOrNull(), even after repeated calls and across
+    // fresh iterator instances. The original spelled the same five assertions
+    // out four times verbatim; folded into a loop with identical behavior.
+    @Test
+    public void testEmptyIterator() {
+        ShardShuffler shuffler = new RotationShardShuffler(0);
+        for (int iteration = 0; iteration < 4; iteration++) {
+            ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.<ShardRouting>of()));
+            assertThat(shardIterator.remaining(), equalTo(0));
+            assertThat(shardIterator.nextOrNull(), nullValue());
+            assertThat(shardIterator.remaining(), equalTo(0));
+            assertThat(shardIterator.nextOrNull(), nullValue());
+            assertThat(shardIterator.remaining(), equalTo(0));
+        }
+    }
+
+    // One shard with two replicas: the iterator must yield three distinct
+    // ShardRouting instances, decrement remaining() as it goes, and keep
+    // returning null (with remaining() == 0) once exhausted.
+    @Test
+    public void testIterator1() {
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2))
+                .build();
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test1"))
+                .build();
+
+        ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+        assertThat(shardIterator.size(), equalTo(3));
+        ShardRouting shardRouting1 = shardIterator.nextOrNull();
+        assertThat(shardRouting1, notNullValue());
+        assertThat(shardIterator.remaining(), equalTo(2));
+        ShardRouting shardRouting2 = shardIterator.nextOrNull();
+        assertThat(shardRouting2, notNullValue());
+        assertThat(shardIterator.remaining(), equalTo(1));
+        assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+        ShardRouting shardRouting3 = shardIterator.nextOrNull();
+        assertThat(shardRouting3, notNullValue());
+        assertThat(shardRouting3, not(sameInstance(shardRouting1)));
+        assertThat(shardRouting3, not(sameInstance(shardRouting2)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.remaining(), equalTo(0));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.remaining(), equalTo(0));
+    }
+
+    // shardsIt(seed) rotates the two copies of the shard by seed % size:
+    // equal seeds (mod 2) must return the very same ShardRouting instances
+    // in the same order, and adjacent seeds the swapped order.
+    // Fixes copy-paste bugs in the seed-1 section: the original asserted on
+    // shardRouting1/shardRouting2 (from the seed-0 iterator) instead of the
+    // freshly fetched shardRouting3/shardRouting4, leaving them unchecked.
+    @Test
+    public void testIterator2() {
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test1"))
+                .addAsNew(metaData.index("test2"))
+                .build();
+
+        ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsIt(0);
+        assertThat(shardIterator.size(), equalTo(2));
+        ShardRouting shardRouting1 = shardIterator.nextOrNull();
+        assertThat(shardRouting1, notNullValue());
+        assertThat(shardIterator.remaining(), equalTo(1));
+        ShardRouting shardRouting2 = shardIterator.nextOrNull();
+        assertThat(shardRouting2, notNullValue());
+        assertThat(shardIterator.remaining(), equalTo(0));
+        assertThat(shardRouting2, not(sameInstance(shardRouting1)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.remaining(), equalTo(0));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.remaining(), equalTo(0));
+
+        shardIterator = routingTable.index("test1").shard(0).shardsIt(1);
+        assertThat(shardIterator.size(), equalTo(2));
+        ShardRouting shardRouting3 = shardIterator.nextOrNull();
+        assertThat(shardRouting3, notNullValue());
+        ShardRouting shardRouting4 = shardIterator.nextOrNull();
+        assertThat(shardRouting4, notNullValue());
+        assertThat(shardRouting4, not(sameInstance(shardRouting3)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+
+        // Seed 1 is the rotation of seed 0: same instances, swapped order.
+        assertThat(shardRouting1, not(sameInstance(shardRouting3)));
+        assertThat(shardRouting2, not(sameInstance(shardRouting4)));
+        assertThat(shardRouting1, sameInstance(shardRouting4));
+        assertThat(shardRouting2, sameInstance(shardRouting3));
+
+        // Seed 2 wraps around to the same ordering as seed 0.
+        shardIterator = routingTable.index("test1").shard(0).shardsIt(2);
+        assertThat(shardIterator.size(), equalTo(2));
+        ShardRouting shardRouting5 = shardIterator.nextOrNull();
+        assertThat(shardRouting5, notNullValue());
+        ShardRouting shardRouting6 = shardIterator.nextOrNull();
+        assertThat(shardRouting6, notNullValue());
+        assertThat(shardRouting6, not(sameInstance(shardRouting5)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+
+        assertThat(shardRouting5, sameInstance(shardRouting1));
+        assertThat(shardRouting6, sameInstance(shardRouting2));
+
+        // Seed 3 matches seed 1.
+        shardIterator = routingTable.index("test1").shard(0).shardsIt(3);
+        assertThat(shardIterator.size(), equalTo(2));
+        ShardRouting shardRouting7 = shardIterator.nextOrNull();
+        assertThat(shardRouting7, notNullValue());
+        ShardRouting shardRouting8 = shardIterator.nextOrNull();
+        assertThat(shardRouting8, notNullValue());
+        assertThat(shardRouting8, not(sameInstance(shardRouting7)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+
+        assertThat(shardRouting7, sameInstance(shardRouting3));
+        assertThat(shardRouting8, sameInstance(shardRouting4));
+
+        // Seed 4 matches seeds 0 and 2.
+        shardIterator = routingTable.index("test1").shard(0).shardsIt(4);
+        assertThat(shardIterator.size(), equalTo(2));
+        ShardRouting shardRouting9 = shardIterator.nextOrNull();
+        assertThat(shardRouting9, notNullValue());
+        ShardRouting shardRouting10 = shardIterator.nextOrNull();
+        assertThat(shardRouting10, notNullValue());
+        assertThat(shardRouting10, not(sameInstance(shardRouting9)));
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+
+        assertThat(shardRouting9, sameInstance(shardRouting5));
+        assertThat(shardRouting10, sameInstance(shardRouting6));
+    }
+
+    // Two consecutive random iterators over a 2-copy shard must start on
+    // different copies while handing out the same underlying instances.
+    // NOTE(review): the not-same-first-element assertion assumes
+    // shardsRandomIt() advances deterministically (rotation-style) between
+    // calls rather than shuffling independently at random — confirm.
+    @Test
+    public void testRandomRouting() {
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test1"))
+                .addAsNew(metaData.index("test2"))
+                .build();
+
+        ShardIterator shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+        ShardRouting shardRouting1 = shardIterator.nextOrNull();
+        assertThat(shardRouting1, notNullValue());
+        assertThat(shardIterator.nextOrNull(), notNullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+
+        shardIterator = routingTable.index("test1").shard(0).shardsRandomIt();
+        ShardRouting shardRouting2 = shardIterator.nextOrNull();
+        assertThat(shardRouting2, notNullValue());
+        ShardRouting shardRouting3 = shardIterator.nextOrNull();
+        assertThat(shardRouting3, notNullValue());
+        assertThat(shardIterator.nextOrNull(), nullValue());
+        // Second iterator starts on the other copy, but reuses the same
+        // ShardRouting instances.
+        assertThat(shardRouting1, not(sameInstance(shardRouting2)));
+        assertThat(shardRouting1, sameInstance(shardRouting3));
+    }
+
+    // With awareness attributes configured and node1 as the local node,
+    // preferring the "rack_id" attribute must order node1's copy (rack_1)
+    // before node2's (rack_2) — on every fresh iterator.
+    @Test
+    public void testAttributePreferenceRouting() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone")
+                .build());
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1", ImmutableMap.of("rack_id", "rack_1", "zone", "zone1")))
+                .put(newNode("node2", ImmutableMap.of("rack_id", "rack_2", "zone", "zone2")))
+                .localNodeId("node1")
+        ).build();
+        // Assign primaries, then start primaries and replicas in two rounds.
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        // after all are started, check routing iteration
+        ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+        ShardRouting shardRouting = shardIterator.nextOrNull();
+        assertThat(shardRouting, notNullValue());
+        assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+        shardRouting = shardIterator.nextOrNull();
+        assertThat(shardRouting, notNullValue());
+        assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+
+        // Same ordering must hold on a second, independent iterator.
+        shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes());
+        shardRouting = shardIterator.nextOrNull();
+        assertThat(shardRouting, notNullValue());
+        assertThat(shardRouting.currentNodeId(), equalTo("node1"));
+        shardRouting = shardIterator.nextOrNull();
+        assertThat(shardRouting, notNullValue());
+        assertThat(shardRouting.currentNodeId(), equalTo("node2"));
+    }
+
+
+    // A "_shards:<id>" preference must restrict search routing to that single
+    // shard, and combining it with "_prefer_node" must pin the returned copy
+    // to the named node on every call.
+    @Test
+    public void testShardsAndPreferNodeRouting() {
+        AllocationService strategy = createAllocationService(settingsBuilder()
+                .put("cluster.routing.allocation.concurrent_recoveries", 10)
+                .build());
+
+        MetaData metaData = MetaData.builder()
+                .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1))
+                .build();
+
+        RoutingTable routingTable = RoutingTable.builder()
+                .addAsNew(metaData.index("test"))
+                .build();
+
+        ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+                .put(newNode("node1"))
+                .put(newNode("node2"))
+                .localNodeId("node1")
+        ).build();
+        // Assign primaries, then start primaries and replicas in two rounds.
+        routingTable = strategy.reroute(clusterState).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        routingTable = strategy.applyStartedShards(clusterState, clusterState.routingNodes().shardsWithState(INITIALIZING)).routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+
+        OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
+
+        GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+
+        shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:1");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(1));
+
+        //check node preference, first without preference to see they switch
+        // (the trailing ';' in "_shards:0;" means shard restriction with an
+        // empty preference; the copy returned rotates between calls, as the
+        // not(equalTo(firstRoundNodeId)) assertion below relies on)
+        shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+        String firstRoundNodeId = shardIterators.iterator().next().nextOrNull().currentNodeId();
+
+        shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+        assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), not(equalTo(firstRoundNodeId)));
+
+        // With _prefer_node, the same node must be returned on every call.
+        shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+        assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+
+        shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, new String[]{"test"}, null, "_shards:0;_prefer_node:node1");
+        assertThat(shardIterators.size(), equalTo(1));
+        assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
+        assertThat(shardIterators.iterator().next().nextOrNull().currentNodeId(), equalTo("node1"));
+    }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/codecs/CodecTests.java b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java
new file mode 100644
index 0000000000..3dfbf8acc8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/codecs/CodecTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.codecs;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * Verifies mapping-time handling of custom per-field codec settings:
+ * indices created before 2.0 must accept {@code postings_format} and
+ * {@code doc_values_format}, while 2.0+ must reject them with a
+ * MapperParsingException naming the unsupported parameter.
+ */
+@Slow
+public class CodecTests extends ElasticsearchSingleNodeTest {
+
+    public void testAcceptPostingsFormat() throws IOException {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("field").field("type", "string").field("postings_format", Codec.getDefault().postingsFormat().getName()).endObject().endObject()
+                .endObject().endObject().string();
+        int i = 0;
+        // Exercise every known version to cover both sides of the 2.0 cutoff.
+        for (Version v : VersionUtils.allVersions()) {
+            IndexService indexService = createIndex("test-" + i++, Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
+            DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+            try {
+                parser.parse(mapping);
+                if (v.onOrAfter(Version.V_2_0_0)) {
+                    fail("Elasticsearch 2.0 should not support custom postings formats");
+                }
+            } catch (MapperParsingException e) {
+                if (v.before(Version.V_2_0_0)) {
+                    // Elasticsearch 1.x should ignore custom postings formats
+                    throw e;
+                }
+                Assert.assertThat(e.getMessage(), containsString("unsupported parameters: [postings_format"));
+            }
+        }
+    }
+
+    public void testAcceptDocValuesFormat() throws IOException {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("field").field("type", "string").field("doc_values_format", Codec.getDefault().docValuesFormat().getName()).endObject().endObject()
+                .endObject().endObject().string();
+        int i = 0;
+        for (Version v : VersionUtils.allVersions()) {
+            IndexService indexService = createIndex("test-" + i++, Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, v).build());
+            DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+            try {
+                parser.parse(mapping);
+                if (v.onOrAfter(Version.V_2_0_0)) {
+                    // Fixed copy-paste: this test is about doc values formats.
+                    fail("Elasticsearch 2.0 should not support custom doc values formats");
+                }
+            } catch (MapperParsingException e) {
+                if (v.before(Version.V_2_0_0)) {
+                    // Elasticsearch 1.x should ignore custom doc values formats
+                    throw e;
+                }
+                Assert.assertThat(e.getMessage(), containsString("unsupported parameters: [doc_values_format"));
+            }
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/Base64Test.java b/core/src/test/java/org/elasticsearch/common/Base64Test.java
new file mode 100644
index 0000000000..f80d6afb44
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/Base64Test.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests that {@code Base64.decode} rejects input carrying extra characters
+ * after the '=' padding instead of silently decoding it (issue #6334).
+ */
+public class Base64Test extends ElasticsearchTestCase {
+
+    @Test // issue #6334
+    public void testBase64DecodeWithExtraCharactersAfterPadding() throws Exception {
+        // Round-trip a random "name:value"-shaped string first.
+        String plain = randomAsciiOfLengthBetween(1, 20) + ":" + randomAsciiOfLengthBetween(1, 20);
+        String encoded = Base64.encodeBytes(plain.getBytes(Charsets.UTF_8));
+        assertValidBase64(encoded, plain);
+
+        // lets append some trash here, if the encoded string has been padded
+        char lastChar = encoded.charAt(encoded.length() - 1);
+        if (lastChar == '=') {
+            assertInvalidBase64(encoded + randomAsciiOfLength(3));
+        }
+    }
+
+    // Decodes the base64 input and asserts the plaintext round-trips.
+    private void assertValidBase64(String base64, String expected) throws IOException {
+        String decoded = new String(Base64.decode(base64.getBytes(Charsets.UTF_8)), Charsets.UTF_8);
+        assertThat(decoded, is(expected));
+    }
+
+    // Expects Base64.decode to reject the input with an IOException.
+    private void assertInvalidBase64(String base64) {
+        try {
+            Base64.decode(base64.getBytes(Charsets.UTF_8));
+            fail(String.format(Locale.ROOT, "Expected IOException to be thrown for string %s (len %d)", base64, base64.length()));
+        } catch (IOException e) {}
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/BooleansTests.java b/core/src/test/java/org/elasticsearch/common/BooleansTests.java
new file mode 100644
index 0000000000..3bacaed7f5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/BooleansTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Unit tests for the {@code Booleans} parsing helpers: recognition
+ * (isBoolean), lenient parsing with defaults (parseBoolean overloads),
+ * strict parsing (parseBooleanExact) and explicit-value checks.
+ */
+public class BooleansTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testIsBoolean() {
+        // Values recognized as booleans vs. lookalikes that must be rejected.
+        String[] booleans = new String[]{"true", "false", "on", "off", "yes", "no", "0", "1"};
+        String[] notBooleans = new String[]{"11", "00", "sdfsdfsf", "F", "T"};
+        assertThat(Booleans.isBoolean(null, 0, 1), is(false));
+
+        // isBoolean must honour the offset/length window, so each candidate
+        // is embedded between a prefix and a suffix.
+        for (String b : booleans) {
+            String t = "prefix" + b + "suffix";
+            assertThat("failed to recognize [" + b + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), b.length()), Matchers.equalTo(true));
+        }
+
+        for (String nb : notBooleans) {
+            String t = "prefix" + nb + "suffix";
+            assertThat("recognized [" + nb + "] as boolean", Booleans.isBoolean(t.toCharArray(), "prefix".length(), nb.length()), Matchers.equalTo(false));
+        }
+    }
+    @Test
+    public void parseBoolean() {
+        // String overload with a primitive default: case-insensitive truthy
+        // and falsy literals; null input falls back to the default.
+        assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes", "1"), randomBoolean()), is(true));
+        assertThat(Booleans.parseBoolean(randomFrom("false", "off", "no", "0"), randomBoolean()), is(false));
+        assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT), randomBoolean()), is(true));
+        assertThat(Booleans.parseBoolean(null, false), is(false));
+        assertThat(Booleans.parseBoolean(null, true), is(true));
+
+        // Boxed-Boolean overload: may return null when both the value and
+        // the default are null.
+        assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes", "1"), randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(true));
+        assertThat(Booleans.parseBoolean(randomFrom("false", "off", "no", "0"), randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(false));
+        assertThat(Booleans.parseBoolean(randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT),randomFrom(null, Boolean.TRUE, Boolean.FALSE)), is(true));
+        assertThat(Booleans.parseBoolean(null, Boolean.FALSE), is(false));
+        assertThat(Booleans.parseBoolean(null, Boolean.TRUE), is(true));
+        assertThat(Booleans.parseBoolean(null, null), nullValue());
+
+        // char[] overload with an explicit offset/length window.
+        char[] chars = randomFrom("true", "on", "yes", "1").toCharArray();
+        assertThat(Booleans.parseBoolean(chars, 0, chars.length, randomBoolean()), is(true));
+        chars = randomFrom("false", "off", "no", "0").toCharArray();
+        assertThat(Booleans.parseBoolean(chars,0, chars.length, randomBoolean()), is(false));
+        chars = randomFrom("true", "on", "yes").toUpperCase(Locale.ROOT).toCharArray();
+        assertThat(Booleans.parseBoolean(chars,0, chars.length, randomBoolean()), is(true));
+    }
+
+    @Test
+    public void parseBooleanExact() {
+        // Strict parsing: only recognized literals are accepted; anything
+        // else (including null) must raise IllegalArgumentException.
+        assertThat(Booleans.parseBooleanExact(randomFrom("true", "on", "yes", "1")), is(true));
+        assertThat(Booleans.parseBooleanExact(randomFrom("false", "off", "no", "0")), is(false));
+        try {
+            Booleans.parseBooleanExact(randomFrom(null, "fred", "foo", "barney"));
+            fail("Expected exception while parsing invalid boolean value ");
+        } catch (Exception ex) {
+            assertTrue(ex instanceof IllegalArgumentException);
+        }
+    }
+
+    // NOTE(review): unlike its siblings this method has no @Test annotation;
+    // presumably the test runner picks up public void test* methods — confirm
+    // it actually executes.
+    public void testIsExplicit() {
+        assertThat(Booleans.isExplicitFalse(randomFrom("true", "on", "yes", "1", "foo", null)), is(false));
+        assertThat(Booleans.isExplicitFalse(randomFrom("false", "off", "no", "0")), is(true));
+        assertThat(Booleans.isExplicitTrue(randomFrom("true", "on", "yes", "1")), is(true));
+        assertThat(Booleans.isExplicitTrue(randomFrom("false", "off", "no", "0", "foo", null)), is(false));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/ChannelsTests.java b/core/src/test/java/org/elasticsearch/common/ChannelsTests.java
new file mode 100644
index 0000000000..2fae109a6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/ChannelsTests.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.bytes.ByteBufferBytesReference;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Channels;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.FileLock;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.WritableByteChannel;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+public class ChannelsTests extends ElasticsearchTestCase {
+
+ byte[] randomBytes;
+ FileChannel fileChannel;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ Path tmpFile = createTempFile();
+ FileChannel randomAccessFile = FileChannel.open(tmpFile, StandardOpenOption.READ, StandardOpenOption.WRITE);
+ fileChannel = new MockFileChannel(randomAccessFile);
+ randomBytes = randomUnicodeOfLength(scaledRandomIntBetween(10, 100000)).getBytes("UTF-8");
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ fileChannel.close();
+ super.tearDown();
+ }
+
+ @Test
+ public void testReadWriteThoughArrays() throws Exception {
+ Channels.writeToChannel(randomBytes, fileChannel);
+ byte[] readBytes = Channels.readFromFileChannel(fileChannel, 0, randomBytes.length);
+ assertThat("read bytes didn't match written bytes", randomBytes, Matchers.equalTo(readBytes));
+ }
+
+
+ @Test
+ public void testPartialReadWriteThroughArrays() throws Exception {
+ int length = randomIntBetween(1, randomBytes.length / 2);
+ int offset = randomIntBetween(0, randomBytes.length - length);
+ Channels.writeToChannel(randomBytes, offset, length, fileChannel);
+
+ int lengthToRead = randomIntBetween(1, length);
+ int offsetToRead = randomIntBetween(0, length - lengthToRead);
+ byte[] readBytes = new byte[randomBytes.length];
+ Channels.readFromFileChannel(fileChannel, offsetToRead, readBytes, offset + offsetToRead, lengthToRead);
+
+ BytesReference source = new BytesArray(randomBytes, offset + offsetToRead, lengthToRead);
+ BytesReference read = new BytesArray(readBytes, offset + offsetToRead, lengthToRead);
+
+ assertThat("read bytes didn't match written bytes", source.toBytes(), Matchers.equalTo(read.toBytes()));
+ }
+
+ @Test(expected = EOFException.class)
+ public void testBufferReadPastEOFWithException() throws Exception {
+ int bytesToWrite = randomIntBetween(0, randomBytes.length - 1);
+ Channels.writeToChannel(randomBytes, 0, bytesToWrite, fileChannel);
+ Channels.readFromFileChannel(fileChannel, 0, bytesToWrite + 1 + randomInt(1000));
+ }
+
+ @Test
+ public void testBufferReadPastEOFWithoutException() throws Exception {
+ int bytesToWrite = randomIntBetween(0, randomBytes.length - 1);
+ Channels.writeToChannel(randomBytes, 0, bytesToWrite, fileChannel);
+ byte[] bytes = new byte[bytesToWrite + 1 + randomInt(1000)];
+ int read = Channels.readFromFileChannel(fileChannel, 0, bytes, 0, bytes.length);
+ assertThat(read, Matchers.lessThan(0));
+ }
+
+ @Test
+ public void testReadWriteThroughBuffers() throws IOException {
+ ByteBuffer source;
+ if (randomBoolean()) {
+ source = ByteBuffer.wrap(randomBytes);
+ } else {
+ source = ByteBuffer.allocateDirect(randomBytes.length);
+ source.put(randomBytes);
+ source.flip();
+ }
+ Channels.writeToChannel(source, fileChannel);
+ ByteBuffer copy;
+ if (randomBoolean()) {
+ copy = ByteBuffer.allocate(randomBytes.length);
+ } else {
+ copy = ByteBuffer.allocateDirect(randomBytes.length);
+ }
+ int read = Channels.readFromFileChannel(fileChannel, 0, copy);
+ assertThat(read, Matchers.equalTo(randomBytes.length));
+ byte[] copyBytes = new byte[read];
+ copy.flip();
+ copy.get(copyBytes);
+ assertThat("read bytes didn't match written bytes", randomBytes, Matchers.equalTo(copyBytes));
+ }
+
+ @Test
+ public void testPartialReadWriteThroughBuffers() throws IOException {
+ int length = randomIntBetween(1, randomBytes.length / 2);
+ int offset = randomIntBetween(0, randomBytes.length - length);
+ ByteBuffer source;
+ if (randomBoolean()) {
+ source = ByteBuffer.wrap(randomBytes, offset, length);
+ } else {
+ source = ByteBuffer.allocateDirect(length);
+ source.put(randomBytes, offset, length);
+ source.flip();
+ }
+ Channels.writeToChannel(source, fileChannel);
+
+ int lengthToRead = randomIntBetween(1, length);
+ int offsetToRead = randomIntBetween(0, length - lengthToRead);
+ ByteBuffer copy;
+ if (randomBoolean()) {
+ copy = ByteBuffer.allocate(lengthToRead);
+ } else {
+ copy = ByteBuffer.allocateDirect(lengthToRead);
+ }
+ int read = Channels.readFromFileChannel(fileChannel, offsetToRead, copy);
+ assertThat(read, Matchers.equalTo(lengthToRead));
+ copy.flip();
+
+ BytesReference sourceRef = new BytesArray(randomBytes, offset + offsetToRead, lengthToRead);
+ BytesReference copyRef = new ByteBufferBytesReference(copy);
+
+ assertTrue("read bytes didn't match written bytes", sourceRef.equals(copyRef));
+ }
+
+
+ @Test
+ public void testWriteFromChannel() throws IOException {
+ int length = randomIntBetween(1, randomBytes.length / 2);
+ int offset = randomIntBetween(0, randomBytes.length - length);
+ ByteBuffer byteBuffer = ByteBuffer.wrap(randomBytes);
+ ChannelBuffer source = new ByteBufferBackedChannelBuffer(byteBuffer);
+ Channels.writeToChannel(source, offset, length, fileChannel);
+
+ BytesReference copyRef = new BytesArray(Channels.readFromFileChannel(fileChannel, 0, length));
+ BytesReference sourceRef = new BytesArray(randomBytes, offset, length);
+
+ assertTrue("read bytes didn't match written bytes", sourceRef.equals(copyRef));
+ }
+
+ class MockFileChannel extends FileChannel {
+
+ FileChannel delegate;
+
+ public MockFileChannel(FileChannel delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public int read(ByteBuffer dst) throws IOException {
+ // delay buffer read..
+ int willActuallyRead = randomInt(dst.remaining());
+ ByteBuffer mockDst = dst.duplicate();
+ mockDst.limit(mockDst.position() + willActuallyRead);
+ try {
+ return delegate.read(mockDst);
+ } finally {
+ dst.position(mockDst.position());
+ }
+ }
+
+ @Override
+ public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
+ return delegate.read(dsts, offset, length);
+ }
+
+ @Override
+ public int write(ByteBuffer src) throws IOException {
+ // delay buffer write..
+ int willActuallyWrite = randomInt(src.remaining());
+ ByteBuffer mockSrc = src.duplicate();
+ mockSrc.limit(mockSrc.position() + willActuallyWrite);
+ try {
+ return delegate.write(mockSrc);
+ } finally {
+ src.position(mockSrc.position());
+ }
+ }
+
+ @Override
+ public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
+ return delegate.write(srcs, offset, length);
+ }
+
+ @Override
+ public long position() throws IOException {
+ return delegate.position();
+ }
+
+ @Override
+ public FileChannel position(long newPosition) throws IOException {
+ return delegate.position(newPosition);
+ }
+
+ @Override
+ public long size() throws IOException {
+ return delegate.size();
+ }
+
+ @Override
+ public FileChannel truncate(long size) throws IOException {
+ return delegate.truncate(size);
+ }
+
+ @Override
+ public void force(boolean metaData) throws IOException {
+ delegate.force(metaData);
+ }
+
+ @Override
+ public long transferTo(long position, long count, WritableByteChannel target) throws IOException {
+ return delegate.transferTo(position, count, target);
+ }
+
+ @Override
+ public long transferFrom(ReadableByteChannel src, long position, long count) throws IOException {
+ return delegate.transferFrom(src, position, count);
+ }
+
+ @Override
+ public int read(ByteBuffer dst, long position) throws IOException {
+ return delegate.read(dst, position);
+ }
+
+ @Override
+ public int write(ByteBuffer src, long position) throws IOException {
+ return delegate.write(src, position);
+ }
+
+ @Override
+ public MappedByteBuffer map(MapMode mode, long position, long size) throws IOException {
+ return delegate.map(mode, position, size);
+ }
+
+ @Override
+ public FileLock lock(long position, long size, boolean shared) throws IOException {
+ return delegate.lock(position, size, shared);
+ }
+
+ @Override
+ public FileLock tryLock(long position, long size, boolean shared) throws IOException {
+ return delegate.tryLock(position, size, shared);
+ }
+
+ @Override
+ protected void implCloseChannel() throws IOException {
+ delegate.close();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java
new file mode 100644
index 0000000000..7b0dacf850
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/ParseFieldTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.apache.commons.lang3.ArrayUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.EnumSet;
+
+import static org.hamcrest.CoreMatchers.*;
+
+public class ParseFieldTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParse() {
+ String[] values = new String[]{"foo_bar", "fooBar"};
+ ParseField field = new ParseField(randomFrom(values));
+ String[] deprecated = new String[]{"barFoo", "bar_foo"};
+ ParseField withDeprecations = field.withDeprecation("Foobar", randomFrom(deprecated));
+ assertThat(field, not(sameInstance(withDeprecations)));
+ assertThat(field.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(field.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+ assertThat(field.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(false));
+ assertThat(field.match("barFoo", ParseField.EMPTY_FLAGS), is(false));
+
+ assertThat(withDeprecations.match(randomFrom(values), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(withDeprecations.match("foo bar", ParseField.EMPTY_FLAGS), is(false));
+ assertThat(withDeprecations.match(randomFrom(deprecated), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(withDeprecations.match("barFoo", ParseField.EMPTY_FLAGS), is(true));
+
+ // now with strict mode
+ EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
+ assertThat(field.match(randomFrom(values), flags), is(true));
+ assertThat(field.match("foo bar", flags), is(false));
+ assertThat(field.match(randomFrom(deprecated), flags), is(false));
+ assertThat(field.match("barFoo", flags), is(false));
+
+ assertThat(withDeprecations.match(randomFrom(values), flags), is(true));
+ assertThat(withDeprecations.match("foo bar", flags), is(false));
+ try {
+ withDeprecations.match(randomFrom(deprecated), flags);
+ fail();
+ } catch (IllegalArgumentException ex) {
+
+ }
+
+ try {
+ withDeprecations.match("barFoo", flags);
+ fail();
+ } catch (IllegalArgumentException ex) {
+
+ }
+ }
+
+ @Test
+ public void testAllDeprecated() {
+ String[] values = new String[]{"like_text", "likeText"};
+
+ boolean withDeprecatedNames = randomBoolean();
+ String[] deprecated = new String[]{"text", "same_as_text"};
+ String[] allValues = values;
+ if (withDeprecatedNames) {
+ allValues = ArrayUtils.addAll(values, deprecated);
+ }
+
+ ParseField field = new ParseField(randomFrom(values));
+ if (withDeprecatedNames) {
+ field = field.withDeprecation(deprecated);
+ }
+ field = field.withAllDeprecated("like");
+
+ // strict mode off
+ assertThat(field.match(randomFrom(allValues), ParseField.EMPTY_FLAGS), is(true));
+ assertThat(field.match("not a field name", ParseField.EMPTY_FLAGS), is(false));
+
+ // now with strict mode
+ EnumSet<ParseField.Flag> flags = EnumSet.of(ParseField.Flag.STRICT);
+ try {
+ field.match(randomFrom(allValues), flags);
+ fail();
+ } catch (IllegalArgumentException ex) {
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/PidFileTests.java b/core/src/test/java/org/elasticsearch/common/PidFileTests.java
new file mode 100644
index 0000000000..ab098924bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/PidFileTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+/**
+ * UnitTest for {@link org.elasticsearch.common.PidFile}
+ */
+public class PidFileTests extends ElasticsearchTestCase {
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testParentIsFile() throws IOException {
+ Path dir = createTempDir();
+ Path parent = dir.resolve("foo");
+ try(BufferedWriter stream = Files.newBufferedWriter(parent, Charsets.UTF_8, StandardOpenOption.CREATE_NEW)) {
+ stream.write("foo");
+ }
+
+ PidFile.create(parent.resolve("bar.pid"), false);
+ }
+
+ @Test
+ public void testPidFile() throws IOException {
+ Path dir = createTempDir();
+ Path parent = dir.resolve("foo");
+ if (randomBoolean()) {
+ Files.createDirectories(parent);
+ if (randomBoolean()) {
+ try {
+ Path link = dir.resolve("link_to_real_path");
+ Files.createSymbolicLink(link, parent.getFileName());
+ parent = link;
+ } catch (UnsupportedOperationException | IOException | SecurityException ex) {
+ // fine - no links on this system
+ }
+
+ }
+ }
+ Path pidFile = parent.resolve("foo.pid");
+ long pid = randomLong();
+ if (randomBoolean() && Files.exists(parent)) {
+ try (BufferedWriter stream = Files.newBufferedWriter(pidFile, Charsets.UTF_8, StandardOpenOption.CREATE_NEW)) {
+ stream.write("foo");
+ }
+ }
+
+ final PidFile inst = PidFile.create(pidFile, false, pid);
+ assertEquals(pidFile, inst.getPath());
+ assertEquals(pid, inst.getPid());
+ assertFalse(inst.isDeleteOnExit());
+ assertTrue(Files.exists(pidFile));
+ assertEquals(pid, Long.parseLong(new String(Files.readAllBytes(pidFile), Charsets.UTF_8)));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/StringsTests.java b/core/src/test/java/org/elasticsearch/common/StringsTests.java
new file mode 100644
index 0000000000..e6f75aa9a1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/StringsTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class StringsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testToCamelCase() {
+ assertEquals("foo", Strings.toCamelCase("foo"));
+ assertEquals("fooBar", Strings.toCamelCase("fooBar"));
+ assertEquals("FooBar", Strings.toCamelCase("FooBar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar"));
+ assertEquals("fooBarFooBar", Strings.toCamelCase("foo_bar_foo_bar"));
+ assertEquals("fooBar", Strings.toCamelCase("foo_bar_"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/TableTests.java b/core/src/test/java/org/elasticsearch/common/TableTests.java
new file mode 100644
index 0000000000..032299cd1b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/TableTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+public class TableTests extends ElasticsearchTestCase {
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnStartRowWithoutHeader() {
+ Table table = new Table();
+ table.startRow();
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnEndHeadersWithoutStart() {
+ Table table = new Table();
+ table.endHeaders();
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnAddCellWithoutHeader() {
+ Table table = new Table();
+ table.addCell("error");
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnAddCellWithoutRow() {
+ Table table = this.getTableWithHeaders();
+ table.addCell("error");
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnEndRowWithoutStart() {
+ Table table = this.getTableWithHeaders();
+ table.endRow();
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnLessCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(true);
+ }
+
+ @Test
+ public void testOnLessCellsThanDeclaredUnchecked() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.endRow(false);
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailOnMoreCellsThanDeclared() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo");
+ table.addCell("bar");
+ table.addCell("foobar");
+ }
+
+ @Test
+ public void testSimple() {
+ Table table = this.getTableWithHeaders();
+ table.startRow();
+ table.addCell("foo1");
+ table.addCell("bar1");
+ table.endRow();
+ table.startRow();
+ table.addCell("foo2");
+ table.addCell("bar2");
+ table.endRow();
+
+ // Check headers
+ List<Table.Cell> headers = table.getHeaders();
+ assertEquals(2, headers.size());
+ assertEquals("foo", headers.get(0).value.toString());
+ assertEquals(2, headers.get(0).attr.size());
+ assertEquals("f", headers.get(0).attr.get("alias"));
+ assertEquals("foo", headers.get(0).attr.get("desc"));
+ assertEquals("bar", headers.get(1).value.toString());
+ assertEquals(2, headers.get(1).attr.size());
+ assertEquals("b", headers.get(1).attr.get("alias"));
+ assertEquals("bar", headers.get(1).attr.get("desc"));
+
+ // Check rows
+ List<List<Table.Cell>> rows = table.getRows();
+ assertEquals(2, rows.size());
+ List<Table.Cell> row = rows.get(0);
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("bar1", row.get(1).value.toString());
+ row = rows.get(1);
+ assertEquals("foo2", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getAsMap
+ Map<String, List<Table.Cell>> map = table.getAsMap();
+ assertEquals(2, map.size());
+ row = map.get("foo");
+ assertEquals("foo1", row.get(0).value.toString());
+ assertEquals("foo2", row.get(1).value.toString());
+ row = map.get("bar");
+ assertEquals("bar1", row.get(0).value.toString());
+ assertEquals("bar2", row.get(1).value.toString());
+
+ // Check getHeaderMap
+ Map<String, Table.Cell> headerMap = table.getHeaderMap();
+ assertEquals(2, headerMap.size());
+ Table.Cell cell = headerMap.get("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = headerMap.get("bar");
+ assertEquals("bar", cell.value.toString());
+
+ // Check findHeaderByName
+ cell = table.findHeaderByName("foo");
+ assertEquals("foo", cell.value.toString());
+ cell = table.findHeaderByName("missing");
+ assertNull(cell);
+ }
+
+ private Table getTableWithHeaders() {
+ Table table = new Table();
+ table.startHeaders();
+ table.addCell("foo", "alias:f;desc:foo");
+ table.addCell("bar", "alias:b;desc:bar");
+ table.endHeaders();
+ return table;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/UUIDTests.java b/core/src/test/java/org/elasticsearch/common/UUIDTests.java
new file mode 100644
index 0000000000..23af5fb141
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/UUIDTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashSet;
+
+public class UUIDTests extends ElasticsearchTestCase {
+
+ static UUIDGenerator timeUUIDGen = new TimeBasedUUIDGenerator();
+ static UUIDGenerator randomUUIDGen = new RandomBasedUUIDGenerator();
+
+ @Test
+ public void testRandomUUID() {
+ verifyUUIDSet(100000, randomUUIDGen);
+ }
+
+ @Test
+ public void testTimeUUID() {
+ verifyUUIDSet(100000, timeUUIDGen);
+ }
+
+ @Test
+ public void testThreadedTimeUUID() {
+ testUUIDThreaded(timeUUIDGen);
+ }
+
+ @Test
+ public void testThreadedRandomUUID() {
+ testUUIDThreaded(randomUUIDGen);
+ }
+
+ HashSet verifyUUIDSet(int count, UUIDGenerator uuidSource) {
+ HashSet<String> uuidSet = new HashSet<>();
+ for (int i = 0; i < count; ++i) {
+ uuidSet.add(uuidSource.getBase64UUID());
+ }
+ assertEquals(count, uuidSet.size());
+ return uuidSet;
+ }
+
+ class UUIDGenRunner implements Runnable {
+ int count;
+ public HashSet<String> uuidSet = null;
+ UUIDGenerator uuidSource;
+
+ public UUIDGenRunner(int count, UUIDGenerator uuidSource) {
+ this.count = count;
+ this.uuidSource = uuidSource;
+ }
+
+ @Override
+ public void run() {
+ uuidSet = verifyUUIDSet(count, uuidSource);
+ }
+ }
+
+ public void testUUIDThreaded(UUIDGenerator uuidSource) {
+ HashSet<UUIDGenRunner> runners = new HashSet<>();
+ HashSet<Thread> threads = new HashSet<>();
+ int count = 20;
+ int uuids = 10000;
+ for (int i = 0; i < count; ++i) {
+ UUIDGenRunner runner = new UUIDGenRunner(uuids, uuidSource);
+ Thread t = new Thread(runner);
+ threads.add(t);
+ runners.add(runner);
+ }
+ for (Thread t : threads) {
+ t.start();
+ }
+ boolean retry = false;
+ do {
+ for (Thread t : threads) {
+ try {
+ t.join();
+ } catch (InterruptedException ie) {
+ retry = true;
+ }
+ }
+ } while (retry);
+
+ HashSet<String> globalSet = new HashSet<>();
+ for (UUIDGenRunner runner : runners) {
+ globalSet.addAll(runner.uuidSet);
+ }
+ assertEquals(count*uuids, globalSet.size());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTest.java b/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTest.java
new file mode 100644
index 0000000000..54def2b866
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTest.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class BlobStoreTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testWriteRead() throws IOException {
+ final BlobStore store = newBlobStore();
+ final BlobContainer container = store.blobContainer(new BlobPath());
+ byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+ try (OutputStream stream = container.createOutput("foobar")) {
+ stream.write(data);
+ }
+ try (InputStream stream = container.openInput("foobar")) {
+ BytesRefBuilder target = new BytesRefBuilder();
+ while (target.length() < data.length) {
+ byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())];
+ int offset = scaledRandomIntBetween(0, buffer.length - 1);
+ int read = stream.read(buffer, offset, buffer.length - offset);
+ target.append(new BytesRef(buffer, offset, read));
+ }
+ assertEquals(data.length, target.length());
+ assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length()));
+ }
+ store.close();
+ }
+
+ @Test
+ public void testMoveAndList() throws IOException {
+ final BlobStore store = newBlobStore();
+ final BlobContainer container = store.blobContainer(new BlobPath());
+ assertThat(container.listBlobs().size(), equalTo(0));
+ int numberOfFooBlobs = randomIntBetween(0, 10);
+ int numberOfBarBlobs = randomIntBetween(3, 20);
+ Map<String, Long> generatedBlobs = newHashMap();
+ for (int i = 0; i < numberOfFooBlobs; i++) {
+ int length = randomIntBetween(10, 100);
+ String name = "foo-" + i + "-";
+ generatedBlobs.put(name, (long) length);
+ createRandomBlob(container, name, length);
+ }
+ for (int i = 1; i < numberOfBarBlobs; i++) {
+ int length = randomIntBetween(10, 100);
+ String name = "bar-" + i + "-";
+ generatedBlobs.put(name, (long) length);
+ createRandomBlob(container, name, length);
+ }
+ int length = randomIntBetween(10, 100);
+ String name = "bar-0-";
+ generatedBlobs.put(name, (long) length);
+ byte[] data = createRandomBlob(container, name, length);
+
+ Map<String, BlobMetaData> blobs = container.listBlobs();
+ assertThat(blobs.size(), equalTo(numberOfFooBlobs + numberOfBarBlobs));
+ for (Map.Entry<String, Long> generated : generatedBlobs.entrySet()) {
+ BlobMetaData blobMetaData = blobs.get(generated.getKey());
+ assertThat(generated.getKey(), blobMetaData, notNullValue());
+ assertThat(blobMetaData.name(), equalTo(generated.getKey()));
+ assertThat(blobMetaData.length(), equalTo(generated.getValue()));
+ }
+
+ assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(numberOfFooBlobs));
+ assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(numberOfBarBlobs));
+ assertThat(container.listBlobsByPrefix("baz-").size(), equalTo(0));
+
+ String newName = "bar-new";
+ // Move to a new location
+ container.move(name, newName);
+ assertThat(container.listBlobsByPrefix(name).size(), equalTo(0));
+ blobs = container.listBlobsByPrefix(newName);
+ assertThat(blobs.size(), equalTo(1));
+ assertThat(blobs.get(newName).length(), equalTo(generatedBlobs.get(name)));
+ assertThat(data, equalTo(readBlobFully(container, newName, length)));
+ store.close();
+ }
+
+ protected byte[] createRandomBlob(BlobContainer container, String name, int length) throws IOException {
+ byte[] data = randomBytes(length);
+ try (OutputStream stream = container.createOutput(name)) {
+ stream.write(data);
+ }
+ return data;
+ }
+
+ protected byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException {
+ byte[] data = new byte[length];
+ try (InputStream inputStream = container.openInput(name)) {
+ assertThat(inputStream.read(data), equalTo(length));
+ assertThat(inputStream.read(), equalTo(-1));
+ }
+ return data;
+ }
+
+ protected byte[] randomBytes(int length) {
+ byte[] data = new byte[length];
+ for (int i = 0; i < data.length; i++) {
+ data[i] = (byte) randomInt();
+ }
+ return data;
+ }
+
+ protected BlobStore newBlobStore() throws IOException {
+ Path tempDir = createTempDir();
+ Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+ FsBlobStore store = new FsBlobStore(settings, tempDir);
+ return store;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
new file mode 100644
index 0000000000..27946bacf4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.breaker;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.indices.breaker.BreakerSettings;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ * Tests for the Memory Aggregating Circuit Breaker
+ */
+public class MemoryCircuitBreakerTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testThreadedUpdatesToBreaker() throws Exception {
+        final int NUM_THREADS = scaledRandomIntBetween(3, 15);
+        final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500);
+        final Thread[] threads = new Thread[NUM_THREADS];
+        final AtomicBoolean tripped = new AtomicBoolean(false);
+        final AtomicReference<Throwable> lastException = new AtomicReference<>(null);
+
+        // limit is one byte below the total the threads will add (1 byte per iteration),
+        // so the breaker must trip exactly once
+        final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue((BYTES_PER_THREAD * NUM_THREADS) - 1), 1.0, logger);
+
+        for (int i = 0; i < NUM_THREADS; i++) {
+            threads[i] = new Thread(new Runnable() {
+                @Override
+                public void run() {
+                    for (int j = 0; j < BYTES_PER_THREAD; j++) {
+                        try {
+                            breaker.addEstimateBytesAndMaybeBreak(1L, "test");
+                        } catch (CircuitBreakingException e) {
+                            // NOTE(review): an AssertionError thrown below is NOT caught by the
+                            // Throwable catch that follows (sibling catch blocks don't cover each
+                            // other) — it kills this worker thread; presumably the test runner's
+                            // uncaught-exception handler surfaces it. TODO confirm.
+                            if (tripped.get()) {
+                                assertThat("tripped too many times", true, equalTo(false));
+                            } else {
+                                assertThat(tripped.compareAndSet(false, true), equalTo(true));
+                            }
+                        } catch (Throwable e2) {
+                            lastException.set(e2);
+                        }
+                    }
+                }
+            });
+
+            threads[i].start();
+        }
+
+        // wait for all workers before asserting
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
+        assertThat("breaker was tripped", tripped.get(), equalTo(true));
+        assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L));
+    }
+
+    @Test
+    public void testThreadedUpdatesToChildBreaker() throws Exception {
+        final int NUM_THREADS = scaledRandomIntBetween(3, 15);
+        final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500);
+        final Thread[] threads = new Thread[NUM_THREADS];
+        final AtomicBoolean tripped = new AtomicBoolean(false);
+        final AtomicReference<Throwable> lastException = new AtomicReference<>(null);
+
+        final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null);
+        // stub service: hands out the child breaker under test and never trips the parent,
+        // so only the child limit is exercised
+        final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) {
+
+            @Override
+            public CircuitBreaker getBreaker(String name) {
+                return breakerRef.get();
+            }
+
+            @Override
+            public void checkParentLimit(String label) throws CircuitBreakingException {
+                // never trip
+            }
+        };
+        // child limit is one byte below the total the threads will add, so it must trip exactly once
+        final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, (BYTES_PER_THREAD * NUM_THREADS) - 1, 1.0);
+        final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger,
+                (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST);
+        breakerRef.set(breaker);
+
+        for (int i = 0; i < NUM_THREADS; i++) {
+            threads[i] = new Thread(new Runnable() {
+                @Override
+                public void run() {
+                    for (int j = 0; j < BYTES_PER_THREAD; j++) {
+                        try {
+                            breaker.addEstimateBytesAndMaybeBreak(1L, "test");
+                        } catch (CircuitBreakingException e) {
+                            // see NOTE(review) in testThreadedUpdatesToBreaker: an AssertionError
+                            // here escapes the Throwable catch below
+                            if (tripped.get()) {
+                                assertThat("tripped too many times", true, equalTo(false));
+                            } else {
+                                assertThat(tripped.compareAndSet(false, true), equalTo(true));
+                            }
+                        } catch (Throwable e2) {
+                            lastException.set(e2);
+                        }
+                    }
+                }
+            });
+
+            threads[i].start();
+        }
+
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
+        assertThat("breaker was tripped", tripped.get(), equalTo(true));
+        assertThat("breaker was tripped at least once", breaker.getTrippedCount(), greaterThanOrEqualTo(1L));
+    }
+
+    @Test
+    public void testThreadedUpdatesToChildBreakerWithParentLimit() throws Exception {
+        final int NUM_THREADS = scaledRandomIntBetween(3, 15);
+        final int BYTES_PER_THREAD = scaledRandomIntBetween(500, 4500);
+        // parent limit is below the child limit, so the parent check fires first
+        final int parentLimit = (BYTES_PER_THREAD * NUM_THREADS) - 2;
+        final int childLimit = parentLimit + 10;
+        final Thread[] threads = new Thread[NUM_THREADS];
+        final AtomicInteger tripped = new AtomicInteger(0);
+        final AtomicReference<Throwable> lastException = new AtomicReference<>(null);
+
+        final AtomicInteger parentTripped = new AtomicInteger(0);
+        final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null);
+        final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) {
+
+            @Override
+            public CircuitBreaker getBreaker(String name) {
+                return breakerRef.get();
+            }
+
+            @Override
+            public void checkParentLimit(String label) throws CircuitBreakingException {
+                // Parent will trip right before regular breaker would trip
+                if (getBreaker(CircuitBreaker.REQUEST).getUsed() > parentLimit) {
+                    parentTripped.incrementAndGet();
+                    logger.info("--> parent tripped");
+                    throw new CircuitBreakingException("parent tripped");
+                }
+            }
+        };
+        final BreakerSettings settings = new BreakerSettings(CircuitBreaker.REQUEST, childLimit, 1.0);
+        final ChildMemoryCircuitBreaker breaker = new ChildMemoryCircuitBreaker(settings, logger,
+                (HierarchyCircuitBreakerService)service, CircuitBreaker.REQUEST);
+        breakerRef.set(breaker);
+
+        // unlike the two tests above, threads are only created here and started further
+        // down, after the parameters have been logged
+        for (int i = 0; i < NUM_THREADS; i++) {
+            threads[i] = new Thread(new Runnable() {
+                @Override
+                public void run() {
+                    for (int j = 0; j < BYTES_PER_THREAD; j++) {
+                        try {
+                            breaker.addEstimateBytesAndMaybeBreak(1L, "test");
+                        } catch (CircuitBreakingException e) {
+                            tripped.incrementAndGet();
+                        } catch (Throwable e2) {
+                            lastException.set(e2);
+                        }
+                    }
+                }
+            });
+        }
+
+        logger.info("--> NUM_THREADS: [{}], BYTES_PER_THREAD: [{}], TOTAL_BYTES: [{}], PARENT_LIMIT: [{}], CHILD_LIMIT: [{}]",
+                NUM_THREADS, BYTES_PER_THREAD, (BYTES_PER_THREAD * NUM_THREADS), parentLimit, childLimit);
+
+        logger.info("--> starting threads...");
+        for (Thread t : threads) {
+            t.start();
+        }
+
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        logger.info("--> child breaker: used: {}, limit: {}", breaker.getUsed(), breaker.getLimit());
+        logger.info("--> parent tripped: {}, total trip count: {} (expecting 1-2 for each)", parentTripped.get(), tripped.get());
+        assertThat("no other exceptions were thrown", lastException.get(), equalTo(null));
+        assertThat("breaker should be reset back to the parent limit after parent breaker trips",
+                breaker.getUsed(), greaterThanOrEqualTo((long)parentLimit - NUM_THREADS));
+        assertThat("parent breaker was tripped at least once", parentTripped.get(), greaterThanOrEqualTo(1));
+        assertThat("total breaker was tripped at least once", tripped.get(), greaterThanOrEqualTo(1));
+    }
+
+    @Test
+    public void testConstantFactor() throws Exception {
+        // limit 15 bytes with a 1.6 overhead factor: presumably the breaker compares
+        // (used + added) * 1.6 against the limit — the assertions below match that model
+        final MemoryCircuitBreaker breaker = new MemoryCircuitBreaker(new ByteSizeValue(15), 1.6, logger);
+        String field = "myfield";
+
+        // add only 7 bytes
+        breaker.addWithoutBreaking(7);
+
+        try {
+            // this won't actually add it because it trips the breaker: (7 + 3) * 1.6 = 16 > 15
+            breaker.addEstimateBytesAndMaybeBreak(3, field);
+            fail("should never reach this");
+        } catch (CircuitBreakingException cbe) {
+        }
+
+        // shouldn't throw an exception: (7 + 2) * 1.6 = 14.4 <= 15
+        breaker.addEstimateBytesAndMaybeBreak(2, field);
+
+        assertThat(breaker.getUsed(), equalTo(9L));
+
+        // adding 3 more bytes (now at 12)
+        breaker.addWithoutBreaking(3);
+
+        try {
+            // Adding no bytes still breaks: 12 * 1.6 = 19.2 > 15
+            breaker.addEstimateBytesAndMaybeBreak(0, field);
+            fail("should never reach this");
+        } catch (CircuitBreakingException cbe) {
+            assertThat("breaker was tripped exactly twice", breaker.getTrippedCount(), equalTo(2L));
+            assertThat(cbe.getMessage().contains("field [" + field + "]"), equalTo(true));
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java b/core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java
new file mode 100644
index 0000000000..ad289d6678
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/bytes/ByteBufferBytesReference.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.io.Channels;
+import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.util.CharsetUtil;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.CharBuffer;
+import java.nio.channels.GatheringByteChannel;
+import java.nio.charset.CharacterCodingException;
+import java.nio.charset.CharsetDecoder;
+import java.nio.charset.CoderResult;
+
+/**
+ * A {@link BytesReference} view over a {@link ByteBuffer}; the readable range is
+ * the buffer's position..limit window. Note: this is only used by one lone test method.
+ */
+public class ByteBufferBytesReference implements BytesReference {
+
+    private final ByteBuffer buffer;
+
+    public ByteBufferBytesReference(ByteBuffer buffer) {
+        this.buffer = buffer;
+    }
+
+    @Override
+    public byte get(int index) {
+        // index is relative to the buffer's current position
+        return buffer.get(buffer.position() + index);
+    }
+
+    @Override
+    public int length() {
+        return buffer.remaining();
+    }
+
+    @Override
+    public BytesReference slice(int from, int length) {
+        // duplicate shares the content but has independent position/limit
+        ByteBuffer dup = buffer.duplicate();
+        dup.position(buffer.position() + from);
+        dup.limit(buffer.position() + from + length);
+        return new ByteBufferBytesReference(dup);
+    }
+
+    @Override
+    public StreamInput streamInput() {
+        return new ByteBufferStreamInput(buffer);
+    }
+
+    @Override
+    public void writeTo(OutputStream os) throws IOException {
+        if (buffer.hasArray()) {
+            os.write(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+        } else {
+            byte[] tmp = new byte[8192];
+            ByteBuffer buf = buffer.duplicate();
+            while (buf.hasRemaining()) {
+                // only write the bytes actually copied this round; the original wrote the
+                // whole scratch array, emitting stale bytes whenever the final chunk was
+                // shorter than the scratch buffer
+                final int chunk = Math.min(tmp.length, buf.remaining());
+                buf.get(tmp, 0, chunk);
+                os.write(tmp, 0, chunk);
+            }
+        }
+    }
+
+    @Override
+    public void writeTo(GatheringByteChannel channel) throws IOException {
+        // NOTE(review): assumes Channels.writeToChannel does not consume the buffer's
+        // position (or that callers tolerate it) — TODO confirm
+        Channels.writeToChannel(buffer, channel);
+    }
+
+    @Override
+    public byte[] toBytes() {
+        if (!buffer.hasRemaining()) {
+            return BytesRef.EMPTY_BYTES;
+        }
+        byte[] tmp = new byte[buffer.remaining()];
+        // read from a duplicate so this reference's position is untouched
+        buffer.duplicate().get(tmp);
+        return tmp;
+    }
+
+    @Override
+    public BytesArray toBytesArray() {
+        if (buffer.hasArray()) {
+            // zero-copy view onto the backing array
+            return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+        }
+        return new BytesArray(toBytes());
+    }
+
+    @Override
+    public BytesArray copyBytesArray() {
+        return new BytesArray(toBytes());
+    }
+
+    @Override
+    public ChannelBuffer toChannelBuffer() {
+        return ChannelBuffers.wrappedBuffer(buffer);
+    }
+
+    @Override
+    public boolean hasArray() {
+        return buffer.hasArray();
+    }
+
+    @Override
+    public byte[] array() {
+        return buffer.array();
+    }
+
+    @Override
+    public int arrayOffset() {
+        return buffer.arrayOffset() + buffer.position();
+    }
+
+    @Override
+    public int hashCode() {
+        return Helper.bytesHashCode(this);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        // equals contract: return false (rather than throw ClassCastException, as the
+        // original unconditional cast did) for null or foreign types
+        if (!(obj instanceof BytesReference)) {
+            return false;
+        }
+        return Helper.bytesEqual(this, (BytesReference) obj);
+    }
+
+    @Override
+    public String toUtf8() {
+        if (!buffer.hasRemaining()) {
+            return "";
+        }
+        // decode from a duplicate: CharsetDecoder#decode advances the source buffer's
+        // position, which in the original consumed this reference on the first call
+        final ByteBuffer src = buffer.duplicate();
+        final CharsetDecoder decoder = CharsetUtil.getDecoder(Charsets.UTF_8);
+        final CharBuffer dst = CharBuffer.allocate(
+                (int) ((double) src.remaining() * decoder.maxCharsPerByte()));
+        try {
+            CoderResult cr = decoder.decode(src, dst, true);
+            if (!cr.isUnderflow()) {
+                cr.throwException();
+            }
+            cr = decoder.flush(dst);
+            if (!cr.isUnderflow()) {
+                cr.throwException();
+            }
+        } catch (CharacterCodingException x) {
+            // should never happen: the buffer sizing above makes overflow impossible
+            throw new IllegalStateException(x);
+        }
+        return dst.flip().toString();
+    }
+
+    @Override
+    public BytesRef toBytesRef() {
+        if (buffer.hasArray()) {
+            return new BytesRef(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());
+        }
+        return new BytesRef(toBytes());
+    }
+
+    @Override
+    public BytesRef copyBytesRef() {
+        return new BytesRef(toBytes());
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/bytes/BytesReferenceTests.java b/core/src/test/java/org/elasticsearch/common/bytes/BytesReferenceTests.java
new file mode 100644
index 0000000000..aaf2ef557f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/bytes/BytesReferenceTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+public class BytesReferenceTests extends ElasticsearchTestCase {
+
+    public void testEquals() {
+        // build two arrays containing the same len bytes at different offsets
+        final int len = randomIntBetween(0, randomBoolean() ? 10: 100000);
+        final int offset1 = randomInt(5);
+        final byte[] array1 = new byte[offset1 + len + randomInt(5)];
+        getRandom().nextBytes(array1);
+        final int offset2 = randomInt(offset1);
+        // array2 is a suffix copy of array1 shifted so that b2's window below matches b1's byte-for-byte
+        final byte[] array2 = Arrays.copyOfRange(array1, offset1 - offset2, array1.length);
+
+        final BytesArray b1 = new BytesArray(array1, offset1, len);
+        final BytesArray b2 = new BytesArray(array2, offset2, len);
+        assertTrue(BytesReference.Helper.bytesEqual(b1, b2));
+        assertTrue(BytesReference.Helper.bytesEquals(b1, b2));
+        assertEquals(Arrays.hashCode(b1.toBytes()), b1.hashCode());
+        // fast and slow hash implementations must agree
+        assertEquals(BytesReference.Helper.bytesHashCode(b1), BytesReference.Helper.slowHashCode(b2));
+
+        // test same instance
+        assertTrue(BytesReference.Helper.bytesEqual(b1, b1));
+        assertTrue(BytesReference.Helper.bytesEquals(b1, b1));
+        assertEquals(BytesReference.Helper.bytesHashCode(b1), BytesReference.Helper.slowHashCode(b1));
+
+        if (len > 0) {
+            // test different length
+            BytesArray differentLen = new BytesArray(array1, offset1, randomInt(len - 1));
+            assertFalse(BytesReference.Helper.bytesEqual(b1, differentLen));
+
+            // test changed bytes: += 13 always changes the byte value (13 is nonzero mod 256),
+            // and only array1 is mutated, so b1 and b2 must now differ
+            array1[offset1 + randomInt(len - 1)] += 13;
+            assertFalse(BytesReference.Helper.bytesEqual(b1, b2));
+            assertFalse(BytesReference.Helper.bytesEquals(b1, b2));
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java
new file mode 100644
index 0000000000..aa0f411039
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java
@@ -0,0 +1,582 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Arrays;
+
+public class PagedBytesReferenceTest extends ElasticsearchTestCase {
+
+ private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
+
+ private BigArrays bigarrays;
+
+    @Override
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        // BigArrays without a page recycler (null) and with circuit breaking disabled
+        bigarrays = new BigArrays(null, new NoneCircuitBreakerService());
+    }
+
+    @Override
+    @After
+    public void tearDown() throws Exception {
+        // nothing to release here; kept for symmetry with setUp
+        super.tearDown();
+    }
+
+    @Test
+    public void testGet() {
+        // get(i) on a slice must map back to the correct absolute position in the source
+        int length = randomIntBetween(1, PAGE_SIZE * 3);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(0, length / 2);
+        int sliceLength = Math.max(1, length - sliceOffset - 1);
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        assertEquals(pbr.get(sliceOffset), slice.get(0));
+        assertEquals(pbr.get(sliceOffset + sliceLength - 1), slice.get(sliceLength - 1));
+    }
+
+    public void testLength() {
+        // covers empty, sub-page, exactly one page, and multi-page sizes
+        int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomInt(PAGE_SIZE * 3)};
+
+        for (int i = 0; i < sizes.length; i++) {
+            BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
+            assertEquals(sizes[i], pbr.length());
+        }
+    }
+
+    public void testSlice() {
+        // a slice reports the requested length; array-backed slices expose the offset,
+        // multi-page slices must reject arrayOffset()
+        int length = randomInt(PAGE_SIZE * 3);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(0, length / 2);
+        int sliceLength = Math.max(0, length - sliceOffset - 1);
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        assertEquals(sliceLength, slice.length());
+
+        if (slice.hasArray()) {
+            assertEquals(sliceOffset, slice.arrayOffset());
+        } else {
+            try {
+                slice.arrayOffset();
+                fail("expected IllegalStateException");
+            } catch (IllegalStateException ise) {
+                // expected
+            }
+        }
+    }
+
+    public void testStreamInput() throws IOException {
+        // exercises the streamInput() view: single-byte reads, reset, zero-length bulk
+        // reads, int reads, full bulk reads, EOF behavior and over-reads
+        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        StreamInput si = pbr.streamInput();
+        assertNotNull(si);
+
+        // read single bytes one by one
+        assertEquals(pbr.get(0), si.readByte());
+        assertEquals(pbr.get(1), si.readByte());
+        assertEquals(pbr.get(2), si.readByte());
+
+        // reset the stream for bulk reading
+        si.reset();
+
+        // buffer for bulk reads
+        byte[] origBuf = new byte[length];
+        getRandom().nextBytes(origBuf);
+        byte[] targetBuf = Arrays.copyOf(origBuf, origBuf.length);
+
+        // bulk-read 0 bytes: must not modify buffer
+        // NOTE(review): targetBuf starts as a copy of origBuf, so the assertion below
+        // holds by construction and cannot actually detect a spurious write
+        si.readBytes(targetBuf, 0, 0);
+        assertEquals(origBuf[0], targetBuf[0]);
+        si.reset();
+
+        // read a few bytes as ints
+        int bytesToRead = randomIntBetween(1, length / 2);
+        for (int i = 0; i < bytesToRead; i++) {
+            int b = si.read();
+            assertEquals(pbr.get(i), b);
+        }
+        si.reset();
+
+        // bulk-read all
+        si.readFully(targetBuf);
+        assertArrayEquals(pbr.toBytes(), targetBuf);
+
+        // continuing to read should now fail with EOFException
+        try {
+            si.readByte();
+            fail("expected EOF");
+        } catch (EOFException eof) {
+            // yay
+        }
+
+        // try to read more than the stream contains
+        si.reset();
+        try {
+            si.readBytes(targetBuf, 0, length * 2);
+            fail("expected IndexOutOfBoundsException: le > stream.length");
+        } catch (IndexOutOfBoundsException ioob) {
+            // expected
+        }
+    }
+
+    public void testStreamInputBulkReadWithOffset() throws IOException {
+        // a bulk read must continue from the stream's current offset, not from zero
+        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        StreamInput si = pbr.streamInput();
+        assertNotNull(si);
+
+        // read a bunch of single bytes one by one
+        int offset = randomIntBetween(1, length / 2);
+        for (int i = 0; i < offset; i++) {
+            assertEquals(pbr.get(i), si.readByte());
+        }
+
+        // now do NOT reset the stream - keep the stream's offset!
+
+        // buffer to compare remaining bytes against bulk read
+        byte[] pbrBytesWithOffset = Arrays.copyOfRange(pbr.toBytes(), offset, length);
+        // randomized target buffer to ensure no stale slots
+        byte[] targetBytes = new byte[pbrBytesWithOffset.length];
+        getRandom().nextBytes(targetBytes);
+
+        // bulk-read all
+        si.readFully(targetBytes);
+        assertArrayEquals(pbrBytesWithOffset, targetBytes);
+    }
+
+    public void testRandomReads() throws IOException {
+        // randomly interleave single-byte, BytesRef and bulk-array reads until the whole
+        // reference has been consumed, then compare against toBytes()
+        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        StreamInput streamInput = pbr.streamInput();
+        BytesRefBuilder target = new BytesRefBuilder();
+        while (target.length() < pbr.length()) {
+            switch (randomIntBetween(0, 10)) {
+                case 6:
+                case 5:
+                    // single byte (2 in 11 odds)
+                    target.append(new BytesRef(new byte[]{streamInput.readByte()}));
+                    break;
+                case 4:
+                case 3:
+                    // BytesRef read of random size (2 in 11 odds)
+                    BytesRef bytesRef = streamInput.readBytesRef(scaledRandomIntBetween(1, pbr.length() - target.length()));
+                    target.append(bytesRef);
+                    break;
+                default:
+                    // bulk array read at a random offset (7 in 11 odds)
+                    byte[] buffer = new byte[scaledRandomIntBetween(1, pbr.length() - target.length())];
+                    int offset = scaledRandomIntBetween(0, buffer.length - 1);
+                    int read = streamInput.read(buffer, offset, buffer.length - offset);
+                    target.append(new BytesRef(buffer, offset, read));
+                    break;
+            }
+        }
+        assertEquals(pbr.length(), target.length());
+        BytesRef targetBytes = target.get();
+        assertArrayEquals(pbr.toBytes(), Arrays.copyOfRange(targetBytes.bytes, targetBytes.offset, targetBytes.length));
+    }
+
+    public void testSliceStreamInput() throws IOException {
+        // a stream over a slice must behave like a stream over a standalone reference
+        int length = randomIntBetween(10, scaledRandomIntBetween(PAGE_SIZE * 2, PAGE_SIZE * 20));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+
+        // test stream input over slice (upper half of original)
+        int sliceOffset = randomIntBetween(1, length / 2);
+        int sliceLength = length - sliceOffset;
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        StreamInput sliceInput = slice.streamInput();
+
+        // single reads
+        assertEquals(slice.get(0), sliceInput.readByte());
+        assertEquals(slice.get(1), sliceInput.readByte());
+        assertEquals(slice.get(2), sliceInput.readByte());
+
+        // reset the slice stream for bulk reading
+        sliceInput.reset();
+
+        // bulk read
+        byte[] sliceBytes = new byte[sliceLength];
+        sliceInput.readFully(sliceBytes);
+
+        // compare slice content with upper half of original
+        byte[] pbrSliceBytes = Arrays.copyOfRange(pbr.toBytes(), sliceOffset, length);
+        assertArrayEquals(pbrSliceBytes, sliceBytes);
+
+        // compare slice bytes with bytes read from slice via streamInput :D
+        byte[] sliceToBytes = slice.toBytes();
+        assertEquals(sliceBytes.length, sliceToBytes.length);
+        assertArrayEquals(sliceBytes, sliceToBytes);
+
+        // re-read the slice in two partial bulk reads into an oversized buffer at a random offset
+        sliceInput.reset();
+        byte[] buffer = new byte[sliceLength + scaledRandomIntBetween(1, 100)];
+        int offset = scaledRandomIntBetween(0, Math.max(1, buffer.length - sliceLength - 1));
+        int read = sliceInput.read(buffer, offset, sliceLength / 2);
+        sliceInput.read(buffer, offset + read, sliceLength);
+        assertArrayEquals(sliceBytes, Arrays.copyOfRange(buffer, offset, offset + sliceLength));
+    }
+
+    public void testWriteToOutputStream() throws IOException {
+        // writeTo(OutputStream) must emit exactly the reference's bytes
+        int length = randomIntBetween(10, PAGE_SIZE * 4);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        BytesStreamOutput out = new BytesStreamOutput();
+        pbr.writeTo(out);
+        assertEquals(pbr.length(), out.size());
+        assertArrayEquals(pbr.toBytes(), out.bytes().toBytes());
+        out.close();
+    }
+
+    public void testWriteToChannel() throws IOException {
+        // writeTo(GatheringByteChannel) must write all bytes and advance the channel position
+        int length = randomIntBetween(10, PAGE_SIZE * 4);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        Path tFile = createTempFile();
+        try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) {
+            pbr.writeTo(channel);
+            assertEquals(pbr.length(), channel.position());
+        }
+        assertArrayEquals(pbr.toBytes(), Files.readAllBytes(tFile));
+    }
+
+    public void testSliceWriteToOutputStream() throws IOException {
+        // writing a slice must emit only the slice's bytes, not the whole reference
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(1, length / 2);
+        int sliceLength = length - sliceOffset;
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        BytesStreamOutput sliceOut = new BytesStreamOutput(sliceLength);
+        slice.writeTo(sliceOut);
+        assertEquals(slice.length(), sliceOut.size());
+        assertArrayEquals(slice.toBytes(), sliceOut.bytes().toBytes());
+        sliceOut.close();
+    }
+
+    public void testSliceWriteToChannel() throws IOException {
+        // channel variant of testSliceWriteToOutputStream
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 5));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(1, length / 2);
+        int sliceLength = length - sliceOffset;
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        Path tFile = createTempFile();
+        try (FileChannel channel = FileChannel.open(tFile, StandardOpenOption.WRITE)) {
+            slice.writeTo(channel);
+            assertEquals(slice.length(), channel.position());
+        }
+        assertArrayEquals(slice.toBytes(), Files.readAllBytes(tFile));
+    }
+
+    public void testToBytes() {
+        // single-page references hand back the same array on repeated toBytes() calls;
+        // multi-page references must materialize a fresh copy each time
+        int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};
+
+        for (int i = 0; i < sizes.length; i++) {
+            BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
+            byte[] bytes = pbr.toBytes();
+            assertEquals(sizes[i], bytes.length);
+            // verify that toBytes() is cheap for small payloads
+            if (sizes[i] <= PAGE_SIZE) {
+                assertSame(bytes, pbr.toBytes());
+            } else {
+                assertNotSame(bytes, pbr.toBytes());
+            }
+        }
+    }
+
+    public void testToBytesArraySharedPage() {
+        // a reference that fits in one page shares its backing page across toBytesArray() calls
+        int length = randomIntBetween(10, PAGE_SIZE);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        BytesArray ba = pbr.toBytesArray();
+        BytesArray ba2 = pbr.toBytesArray();
+        assertNotNull(ba);
+        assertNotNull(ba2);
+        assertEquals(pbr.length(), ba.length());
+        assertEquals(ba.length(), ba2.length());
+        // single-page optimization
+        assertSame(ba.array(), ba2.array());
+    }
+
+    public void testToBytesArrayMaterializedPages() {
+        // we need a length != (n * pagesize) to avoid page sharing at boundaries
+        int length = 0;
+        while ((length % PAGE_SIZE) == 0) {
+            length = randomIntBetween(PAGE_SIZE, PAGE_SIZE * randomIntBetween(2, 5));
+        }
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        BytesArray ba = pbr.toBytesArray();
+        BytesArray ba2 = pbr.toBytesArray();
+        assertNotNull(ba);
+        assertNotNull(ba2);
+        assertEquals(pbr.length(), ba.length());
+        assertEquals(ba.length(), ba2.length());
+        // ensure no single-page optimization: each call materializes its own array
+        assertNotSame(ba.array(), ba2.array());
+    }
+
+    public void testCopyBytesArray() {
+        // small PBR which would normally share the first page — copyBytesArray() must
+        // still return independent copies
+        int length = randomIntBetween(10, PAGE_SIZE);
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        BytesArray ba = pbr.copyBytesArray();
+        BytesArray ba2 = pbr.copyBytesArray();
+        assertNotNull(ba);
+        assertNotSame(ba, ba2);
+        assertNotSame(ba.array(), ba2.array());
+    }
+
+    /** copyBytesArray() on a slice must return independent copies holding the slice's bytes. */
+    public void testSliceCopyBytesArray() {
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(0, pbr.length());
+        // slice to the end; the original called randomIntBetween(x, x), a degenerate
+        // single-value range that only burned a random draw
+        int sliceLength = pbr.length() - sliceOffset;
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+
+        BytesArray ba1 = slice.copyBytesArray();
+        BytesArray ba2 = slice.copyBytesArray();
+        assertNotNull(ba1);
+        assertNotNull(ba2);
+        // copies must not share backing storage but must hold equal content
+        assertNotSame(ba1.array(), ba2.array());
+        assertArrayEquals(slice.toBytes(), ba1.array());
+        assertArrayEquals(slice.toBytes(), ba2.array());
+        assertArrayEquals(ba1.array(), ba2.array());
+    }
+
+    public void testToChannelBuffer() {
+        // the netty ChannelBuffer view must expose exactly the reference's bytes
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        ChannelBuffer cb = pbr.toChannelBuffer();
+        assertNotNull(cb);
+        byte[] bufferBytes = new byte[length];
+        cb.getBytes(0, bufferBytes);
+        assertArrayEquals(pbr.toBytes(), bufferBytes);
+    }
+
+    public void testEmptyToChannelBuffer() {
+        // an empty reference maps to a zero-capacity ChannelBuffer
+        BytesReference pbr = getRandomizedPagedBytesReference(0);
+        ChannelBuffer cb = pbr.toChannelBuffer();
+        assertNotNull(cb);
+        assertEquals(0, pbr.length());
+        assertEquals(0, cb.capacity());
+    }
+
+    /** The ChannelBuffer view of a slice must expose exactly the slice's bytes. */
+    public void testSliceToChannelBuffer() {
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(2, 8));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        int sliceOffset = randomIntBetween(0, pbr.length());
+        // slice to the end; the original called randomIntBetween(x, x), a degenerate
+        // single-value range that only burned a random draw
+        int sliceLength = pbr.length() - sliceOffset;
+        BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+        ChannelBuffer cbSlice = slice.toChannelBuffer();
+        assertNotNull(cbSlice);
+        byte[] sliceBufferBytes = new byte[sliceLength];
+        cbSlice.getBytes(0, sliceBufferBytes);
+        assertArrayEquals(slice.toBytes(), sliceBufferBytes);
+    }
+
+    public void testHasArray() {
+        int length = randomIntBetween(10, PAGE_SIZE * randomIntBetween(1, 3));
+        BytesReference pbr = getRandomizedPagedBytesReference(length);
+        // must return true for <= pagesize
+        assertEquals(length <= PAGE_SIZE, pbr.hasArray());
+    }
+
+ public void testArray() {
+ int[] sizes = {0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5))};
+
+ for (int i = 0; i < sizes.length; i++) {
+ BytesReference pbr = getRandomizedPagedBytesReference(sizes[i]);
+ // verify that array() is cheap for small payloads
+ if (sizes[i] <= PAGE_SIZE) {
+ byte[] array = pbr.array();
+ assertNotNull(array);
+ assertEquals(sizes[i], array.length);
+ assertSame(array, pbr.array());
+ } else {
+ try {
+ pbr.array();
+ fail("expected IllegalStateException");
+ } catch (IllegalStateException isx) {
+ // expected
+ }
+ }
+ }
+ }
+
+ public void testArrayOffset() {
+ int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ if (pbr.hasArray()) {
+ assertEquals(0, pbr.arrayOffset());
+ } else {
+ try {
+ pbr.arrayOffset();
+ fail("expected IllegalStateException");
+ } catch (IllegalStateException ise) {
+ // expected
+ }
+ }
+ }
+
+ public void testSliceArrayOffset() {
+ int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5));
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ int sliceOffset = randomIntBetween(0, pbr.length());
+        int sliceLength = randomIntBetween(0, pbr.length() - sliceOffset);
+ BytesReference slice = pbr.slice(sliceOffset, sliceLength);
+ if (slice.hasArray()) {
+ assertEquals(sliceOffset, slice.arrayOffset());
+ } else {
+ try {
+ slice.arrayOffset();
+ fail("expected IllegalStateException");
+ } catch (IllegalStateException ise) {
+ // expected
+ }
+ }
+ }
+
+ public void testToUtf8() throws IOException {
+ // test empty
+ BytesReference pbr = getRandomizedPagedBytesReference(0);
+ assertEquals("", pbr.toUtf8());
+ // TODO: good way to test?
+ }
+
+ public void testToBytesRef() {
+ int length = randomIntBetween(0, PAGE_SIZE);
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ BytesRef ref = pbr.toBytesRef();
+ assertNotNull(ref);
+ assertEquals(pbr.arrayOffset(), ref.offset);
+ assertEquals(pbr.length(), ref.length);
+ }
+
+ public void testSliceToBytesRef() {
+ int length = randomIntBetween(0, PAGE_SIZE);
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ // get a BytesRef from a slice
+ int sliceOffset = randomIntBetween(0, pbr.length());
+        int sliceLength = randomIntBetween(0, pbr.length() - sliceOffset);
+ BytesRef sliceRef = pbr.slice(sliceOffset, sliceLength).toBytesRef();
+ // note that these are only true if we have <= than a page, otherwise offset/length are shifted
+ assertEquals(sliceOffset, sliceRef.offset);
+ assertEquals(sliceLength, sliceRef.length);
+ }
+
+ public void testCopyBytesRef() {
+ int length = randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5));
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ BytesRef ref = pbr.copyBytesRef();
+ assertNotNull(ref);
+ assertEquals(pbr.length(), ref.length);
+ }
+
+ public void testHashCode() {
+ // empty content must have hash 1 (JDK compat)
+ BytesReference pbr = getRandomizedPagedBytesReference(0);
+ assertEquals(Arrays.hashCode(BytesRef.EMPTY_BYTES), pbr.hashCode());
+
+ // test with content
+ pbr = getRandomizedPagedBytesReference(randomIntBetween(0, PAGE_SIZE * randomIntBetween(2, 5)));
+ int jdkHash = Arrays.hashCode(pbr.toBytes());
+ int pbrHash = pbr.hashCode();
+ assertEquals(jdkHash, pbrHash);
+
+ // test hashes of slices
+ int sliceFrom = randomIntBetween(0, pbr.length());
+        int sliceLength = randomIntBetween(0, pbr.length() - sliceFrom);
+ BytesReference slice = pbr.slice(sliceFrom, sliceLength);
+ int sliceJdkHash = Arrays.hashCode(slice.toBytes());
+ int sliceHash = slice.hashCode();
+ assertEquals(sliceJdkHash, sliceHash);
+ }
+
+ public void testEquals() {
+ int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
+ ByteArray ba1 = bigarrays.newByteArray(length, false);
+ ByteArray ba2 = bigarrays.newByteArray(length, false);
+
+ // copy contents
+ for (long i = 0; i < length; i++) {
+ ba2.set(i, ba1.get(i));
+ }
+
+ // get refs & compare
+ BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);
+ BytesReference pbr2 = new PagedBytesReference(bigarrays, ba2, length);
+ assertEquals(pbr, pbr2);
+ }
+
+ public void testEqualsPeerClass() {
+ int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
+ BytesReference pbr = getRandomizedPagedBytesReference(length);
+ BytesReference ba = new BytesArray(pbr.toBytes());
+ assertEquals(pbr, ba);
+ }
+
+ public void testSliceEquals() {
+ int length = randomIntBetween(100, PAGE_SIZE * randomIntBetween(2, 5));
+ ByteArray ba1 = bigarrays.newByteArray(length, false);
+ BytesReference pbr = new PagedBytesReference(bigarrays, ba1, length);
+
+ // test equality of slices
+ int sliceFrom = randomIntBetween(0, pbr.length());
+        int sliceLength = randomIntBetween(0, pbr.length() - sliceFrom);
+ BytesReference slice1 = pbr.slice(sliceFrom, sliceLength);
+ BytesReference slice2 = pbr.slice(sliceFrom, sliceLength);
+ assertArrayEquals(slice1.toBytes(), slice2.toBytes());
+
+ // test a slice with same offset but different length,
+ // unless randomized testing gave us a 0-length slice.
+ if (sliceLength > 0) {
+ BytesReference slice3 = pbr.slice(sliceFrom, sliceLength / 2);
+ assertFalse(Arrays.equals(slice1.toBytes(), slice3.toBytes()));
+ }
+ }
+
+ private BytesReference getRandomizedPagedBytesReference(int length) {
+ // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content
+ ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(length, bigarrays);
+ try {
+ for (int i = 0; i < length; i++) {
+ out.writeByte((byte) getRandom().nextInt(1 << 8));
+ }
+ } catch (IOException e) {
+ fail("should not happen " + e.getMessage());
+ }
+ assertThat(out.size(), Matchers.equalTo(length));
+ BytesReference ref = out.bytes();
+ assertThat(ref.length(), Matchers.equalTo(length));
+ assertThat(ref, Matchers.instanceOf(PagedBytesReference.class));
+ return ref;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java b/core/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java
new file mode 100644
index 0000000000..4914f589c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/cli/CheckFileCommandTests.java
@@ -0,0 +1,336 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.cli;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+import com.google.common.jimfs.Configuration;
+import com.google.common.jimfs.Jimfs;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.FileSystem;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.*;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class CheckFileCommandTests extends ElasticsearchTestCase {
+
+ private CliToolTestCase.CaptureOutputTerminal captureOutputTerminal = new CliToolTestCase.CaptureOutputTerminal();
+
+ private Configuration jimFsConfiguration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build();
+ private Configuration jimFsConfigurationWithoutPermissions = randomBoolean() ? Configuration.unix().toBuilder().setAttributeViews("basic").build() : Configuration.windows();
+
+ private enum Mode {
+ CHANGE, KEEP, DISABLED
+ }
+
+ @Test
+ public void testThatCommandLogsErrorMessageOnFail() throws Exception {
+ executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(containsString("Please ensure that the user account running Elasticsearch has read access to this file")));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingWhenPermissionRemains() throws Exception {
+ executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingWhenDisabled() throws Exception {
+ executeCommand(jimFsConfiguration, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfFilesystemDoesNotSupportPermissions() throws Exception {
+ executeCommand(jimFsConfigurationWithoutPermissions, new PermissionCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsOwnerChange() throws Exception {
+ executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Owner of file ["), containsString("] used to be ["), containsString("], but now is ["))));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfOwnerRemainsSame() throws Exception {
+ executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfOwnerIsDisabled() throws Exception {
+ executeCommand(jimFsConfiguration, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfFileSystemDoesNotSupportOwners() throws Exception {
+ executeCommand(jimFsConfigurationWithoutPermissions, new OwnerCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsIfGroupChanges() throws Exception {
+ executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.CHANGE));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasItem(allOf(containsString("Group of file ["), containsString("] used to be ["), containsString("], but now is ["))));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfGroupRemainsSame() throws Exception {
+ executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.KEEP));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfGroupIsDisabled() throws Exception {
+ executeCommand(jimFsConfiguration, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandLogsNothingIfFileSystemDoesNotSupportGroups() throws Exception {
+ executeCommand(jimFsConfigurationWithoutPermissions, new GroupCheckFileCommand(createTempDir(), captureOutputTerminal, Mode.DISABLED));
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandDoesNotLogAnythingOnFileCreation() throws Exception {
+ Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions;
+
+ try (FileSystem fs = Jimfs.newFileSystem(configuration)) {
+ Path path = fs.getPath(randomAsciiOfLength(10));
+ Settings settings = Settings.builder()
+ .put("path.home", createTempDir().toString())
+ .build();
+ new CreateFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings));
+ assertThat(Files.exists(path), is(true));
+ }
+
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ @Test
+ public void testThatCommandWorksIfFileIsDeletedByCommand() throws Exception {
+ Configuration configuration = randomBoolean() ? jimFsConfiguration : jimFsConfigurationWithoutPermissions;
+
+ try (FileSystem fs = Jimfs.newFileSystem(configuration)) {
+ Path path = fs.getPath(randomAsciiOfLength(10));
+ Files.write(path, "anything".getBytes(Charsets.UTF_8));
+
+ Settings settings = Settings.builder()
+ .put("path.home", createTempDir().toString())
+ .build();
+ new DeleteFileCommand(captureOutputTerminal, path).execute(settings, new Environment(settings));
+ assertThat(Files.exists(path), is(false));
+ }
+
+ assertThat(captureOutputTerminal.getTerminalOutput(), hasSize(0));
+ }
+
+ private void executeCommand(Configuration configuration, AbstractTestCheckFileCommand command) throws Exception {
+ try (FileSystem fs = Jimfs.newFileSystem(configuration)) {
+ command.execute(fs);
+ }
+ }
+
+ abstract class AbstractTestCheckFileCommand extends CheckFileCommand {
+
+ protected final Mode mode;
+ protected FileSystem fs;
+ protected Path[] paths;
+ final Path baseDir;
+
+ public AbstractTestCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException {
+ super(terminal);
+ this.mode = mode;
+ this.baseDir = baseDir;
+ }
+
+ public CliTool.ExitStatus execute(FileSystem fs) throws Exception {
+ this.fs = fs;
+ this.paths = new Path[] { writePath(fs, "p1", "anything"), writePath(fs, "p2", "anything"), writePath(fs, "p3", "anything") };
+ Settings settings = Settings.settingsBuilder()
+ .put("path.home", baseDir.toString())
+ .build();
+ return super.execute(Settings.EMPTY, new Environment(settings));
+ }
+
+ private Path writePath(FileSystem fs, String name, String content) throws IOException {
+ Path path = fs.getPath(name);
+ Files.write(path, content.getBytes(Charsets.UTF_8));
+ return path;
+ }
+
+ @Override
+ protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) {
+ return paths;
+ }
+ }
+
+ /**
+ * command that changes permissions from a file if enabled
+ */
+ class PermissionCheckFileCommand extends AbstractTestCheckFileCommand {
+
+ public PermissionCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException {
+ super(baseDir, terminal, mode);
+ }
+
+ @Override
+ public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception {
+ int randomInt = randomInt(paths.length - 1);
+ Path randomPath = paths[randomInt];
+ switch (mode) {
+ case CHANGE:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ Files.setPosixFilePermissions(randomPath, Sets.newHashSet(PosixFilePermission.OWNER_EXECUTE, PosixFilePermission.OTHERS_EXECUTE, PosixFilePermission.GROUP_EXECUTE));
+ break;
+ case KEEP:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ Set<PosixFilePermission> posixFilePermissions = Files.getPosixFilePermissions(randomPath);
+ Files.setPosixFilePermissions(randomPath, posixFilePermissions);
+ break;
+ }
+ return CliTool.ExitStatus.OK;
+ }
+
+ }
+
+ /**
+ * command that changes the owner of a file if enabled
+ */
+ class OwnerCheckFileCommand extends AbstractTestCheckFileCommand {
+
+ public OwnerCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException {
+ super(baseDir, terminal, mode);
+ }
+
+ @Override
+ public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception {
+ int randomInt = randomInt(paths.length - 1);
+ Path randomPath = paths[randomInt];
+ switch (mode) {
+ case CHANGE:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ UserPrincipal randomOwner = fs.getUserPrincipalLookupService().lookupPrincipalByName(randomAsciiOfLength(10));
+ Files.setOwner(randomPath, randomOwner);
+ break;
+ case KEEP:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ UserPrincipal originalOwner = Files.getOwner(randomPath);
+ Files.setOwner(randomPath, originalOwner);
+ break;
+ }
+
+ return CliTool.ExitStatus.OK;
+ }
+ }
+
+ /**
+ * command that changes the group of a file if enabled
+ */
+ class GroupCheckFileCommand extends AbstractTestCheckFileCommand {
+
+ public GroupCheckFileCommand(Path baseDir, Terminal terminal, Mode mode) throws IOException {
+ super(baseDir, terminal, mode);
+ }
+
+ @Override
+ public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception {
+ int randomInt = randomInt(paths.length - 1);
+ Path randomPath = paths[randomInt];
+ switch (mode) {
+ case CHANGE:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ GroupPrincipal randomPrincipal = fs.getUserPrincipalLookupService().lookupPrincipalByGroupName(randomAsciiOfLength(10));
+ Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(randomPrincipal);
+ break;
+ case KEEP:
+ Files.write(randomPath, randomAsciiOfLength(10).getBytes(Charsets.UTF_8));
+ GroupPrincipal groupPrincipal = Files.readAttributes(randomPath, PosixFileAttributes.class).group();
+ Files.getFileAttributeView(randomPath, PosixFileAttributeView.class).setGroup(groupPrincipal);
+ break;
+ }
+
+ return CliTool.ExitStatus.OK;
+ }
+ }
+
+ /**
+ * A command that creates a non existing file
+ */
+ class CreateFileCommand extends CheckFileCommand {
+
+ private final Path pathToCreate;
+
+ public CreateFileCommand(Terminal terminal, Path pathToCreate) {
+ super(terminal);
+ this.pathToCreate = pathToCreate;
+ }
+
+ @Override
+ public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception {
+ Files.write(pathToCreate, "anything".getBytes(Charsets.UTF_8));
+ return CliTool.ExitStatus.OK;
+ }
+
+ @Override
+ protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception {
+ return new Path[] { pathToCreate };
+ }
+ }
+
+ /**
+ * A command that deletes an existing file
+ */
+ class DeleteFileCommand extends CheckFileCommand {
+
+ private final Path pathToDelete;
+
+ public DeleteFileCommand(Terminal terminal, Path pathToDelete) {
+ super(terminal);
+ this.pathToDelete = pathToDelete;
+ }
+
+ @Override
+ public CliTool.ExitStatus doExecute(Settings settings, Environment env) throws Exception {
+ Files.delete(pathToDelete);
+ return CliTool.ExitStatus.OK;
+ }
+
+ @Override
+ protected Path[] pathsForPermissionsCheck(Settings settings, Environment env) throws Exception {
+ return new Path[] {pathToDelete};
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java b/core/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java
new file mode 100644
index 0000000000..278869388c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/cli/CliToolTestCase.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.cli;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ *
+ */
+@Ignore
+public abstract class CliToolTestCase extends ElasticsearchTestCase {
+
+ @Before
+ public void setPathHome() {
+ System.setProperty("es.default.path.home", createTempDir().toString());
+ }
+
+ @After
+ public void clearPathHome() {
+ System.clearProperty("es.default.path.home");
+ }
+
+ protected static String[] args(String command) {
+ if (!Strings.hasLength(command)) {
+ return Strings.EMPTY_ARRAY;
+ }
+ return command.split("\\s+");
+ }
+
+ /**
+ * A terminal implementation that discards everything
+ */
+ public static class MockTerminal extends Terminal {
+
+ private static final PrintWriter DEV_NULL = new PrintWriter(new DevNullWriter());
+
+ public MockTerminal() {
+ super(Verbosity.NORMAL);
+ }
+
+ public MockTerminal(Verbosity verbosity) {
+ super(verbosity);
+ }
+
+ @Override
+ protected void doPrint(String msg, Object... args) {
+ }
+
+ @Override
+ public String readText(String text, Object... args) {
+ return null;
+ }
+
+ @Override
+ public char[] readSecret(String text, Object... args) {
+ return new char[0];
+ }
+
+ @Override
+ public void print(String msg, Object... args) {
+ }
+
+ @Override
+ public void printStackTrace(Throwable t) {
+ return;
+ }
+
+ @Override
+ public PrintWriter writer() {
+ return DEV_NULL;
+ }
+
+ private static class DevNullWriter extends Writer {
+
+ @Override
+ public void write(char[] cbuf, int off, int len) throws IOException {
+ }
+
+ @Override
+ public void flush() throws IOException {
+ }
+
+ @Override
+ public void close() throws IOException {
+ }
+ }
+ }
+
+ /**
+ * A terminal implementation that captures everything written to it
+ */
+ public static class CaptureOutputTerminal extends MockTerminal {
+
+        List<String> terminalOutput = new ArrayList<>();
+
+ public CaptureOutputTerminal() {
+ super(Verbosity.NORMAL);
+ }
+
+ public CaptureOutputTerminal(Verbosity verbosity) {
+ super(verbosity);
+ }
+
+ @Override
+ protected void doPrint(String msg, Object... args) {
+ terminalOutput.add(String.format(Locale.ROOT, msg, args));
+ }
+
+ @Override
+ public void print(String msg, Object... args) {
+ doPrint(msg, args);
+ }
+
+ @Override
+ public void printStackTrace(Throwable t) {
+ terminalOutput.add(ExceptionsHelper.stackTrace(t));
+ }
+
+ public List<String> getTerminalOutput() {
+ return terminalOutput;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/cli/CliToolTests.java b/core/src/test/java/org/elasticsearch/common/cli/CliToolTests.java
new file mode 100644
index 0000000000..eea1d0614a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/cli/CliToolTests.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.cli;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.cli.CommandLine;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class CliToolTests extends CliToolTestCase {
+
+ @Test
+ public void testOK() throws Exception {
+ Terminal terminal = new MockTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) {
+ executed.set(true);
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ int status = tool.execute();
+ assertStatus(status, CliTool.ExitStatus.OK);
+ assertCommandHasBeenExecuted(executed);
+ }
+
+ @Test
+ public void testUsageError() throws Exception {
+ Terminal terminal = new MockTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) {
+ executed.set(true);
+ return CliTool.ExitStatus.USAGE;
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ int status = tool.execute();
+ assertStatus(status, CliTool.ExitStatus.USAGE);
+ assertCommandHasBeenExecuted(executed);
+ }
+
+ @Test
+ public void testIOError() throws Exception {
+ Terminal terminal = new MockTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ executed.set(true);
+ throw new IOException("io error");
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ int status = tool.execute();
+ assertStatus(status, CliTool.ExitStatus.IO_ERROR);
+ assertCommandHasBeenExecuted(executed);
+ }
+
+ @Test
+ public void testCodeError() throws Exception {
+ Terminal terminal = new MockTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ executed.set(true);
+ throw new Exception("random error");
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ int status = tool.execute();
+ assertStatus(status, CliTool.ExitStatus.CODE_ERROR);
+ assertCommandHasBeenExecuted(executed);
+ }
+
+ @Test
+ public void testMultiCommand() {
+ Terminal terminal = new MockTerminal();
+ int count = randomIntBetween(2, 7);
+ final AtomicReference<Boolean>[] executed = new AtomicReference[count];
+ for (int i = 0; i < executed.length; i++) {
+ executed[i] = new AtomicReference<>(false);
+ }
+ NamedCommand[] cmds = new NamedCommand[count];
+ for (int i = 0; i < count; i++) {
+ final int index = i;
+ cmds[i] = new NamedCommand("cmd" + index, terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ executed[index].set(true);
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ }
+ MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds);
+ int cmdIndex = randomIntBetween(0, count-1);
+ int status = tool.execute("cmd" + cmdIndex);
+ assertThat(status, is(CliTool.ExitStatus.OK.status()));
+ for (int i = 0; i < executed.length; i++) {
+ assertThat(executed[i].get(), is(i == cmdIndex));
+ }
+ }
+
+ @Test
+ public void testMultiCommand_UnknownCommand() {
+ Terminal terminal = new MockTerminal();
+ int count = randomIntBetween(2, 7);
+ final AtomicReference<Boolean>[] executed = new AtomicReference[count];
+ for (int i = 0; i < executed.length; i++) {
+ executed[i] = new AtomicReference<>(false);
+ }
+ NamedCommand[] cmds = new NamedCommand[count];
+ for (int i = 0; i < count; i++) {
+ final int index = i;
+ cmds[i] = new NamedCommand("cmd" + index, terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ executed[index].set(true);
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ }
+ MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds);
+ int status = tool.execute("cmd" + count); // "cmd" + count doesn't exist
+ assertThat(status, is(CliTool.ExitStatus.USAGE.status()));
+ for (int i = 0; i < executed.length; i++) {
+ assertThat(executed[i].get(), is(false));
+ }
+ }
+
+ @Test
+ public void testSingleCommand_ToolHelp() throws Exception {
+ CaptureOutputTerminal terminal = new CaptureOutputTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd1", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ executed.set(true);
+ throw new IOException("io error");
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ int status = tool.execute(args("-h"));
+ assertStatus(status, CliTool.ExitStatus.OK);
+ assertThat(terminal.getTerminalOutput(), hasSize(3));
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help")));
+ }
+
+ @Test
+ public void testMultiCommand_ToolHelp() {
+ CaptureOutputTerminal terminal = new CaptureOutputTerminal();
+ NamedCommand[] cmds = new NamedCommand[2];
+ cmds[0] = new NamedCommand("cmd0", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ cmds[1] = new NamedCommand("cmd1", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds);
+ int status = tool.execute(args("-h"));
+ assertStatus(status, CliTool.ExitStatus.OK);
+ assertThat(terminal.getTerminalOutput(), hasSize(3));
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString("tool help")));
+ }
+
+ @Test
+ public void testMultiCommand_CmdHelp() {
+ CaptureOutputTerminal terminal = new CaptureOutputTerminal();
+ NamedCommand[] cmds = new NamedCommand[2];
+ cmds[0] = new NamedCommand("cmd0", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ cmds[1] = new NamedCommand("cmd1", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ MultiCmdTool tool = new MultiCmdTool("tool", terminal, cmds);
+ int status = tool.execute(args("cmd1 -h"));
+ assertStatus(status, CliTool.ExitStatus.OK);
+ assertThat(terminal.getTerminalOutput(), hasSize(3));
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help")));
+ }
+
+ @Test
+ public void testThatThrowExceptionCanBeLogged() throws Exception {
+ CaptureOutputTerminal terminal = new CaptureOutputTerminal();
+ NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
+ throw new ElasticsearchException("error message");
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ assertStatus(tool.execute(), CliTool.ExitStatus.CODE_ERROR);
+ assertThat(terminal.getTerminalOutput(), hasSize(1));
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message")));
+
+ // set env... and log stack trace
+ try {
+ System.setProperty(Terminal.DEBUG_SYSTEM_PROPERTY, "true");
+ terminal = new CaptureOutputTerminal();
+ assertStatus(new SingleCmdTool("tool", terminal, cmd).execute(), CliTool.ExitStatus.CODE_ERROR);
+ assertThat(terminal.getTerminalOutput(), hasSize(2));
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message")));
+            // This class must be part of the stack trace
+ assertThat(terminal.getTerminalOutput(), hasItem(containsString(getClass().getName())));
+ } finally {
+ System.clearProperty(Terminal.DEBUG_SYSTEM_PROPERTY);
+ }
+ }
+
+ @Test
+ public void testMultipleLaunch() throws Exception {
+ Terminal terminal = new MockTerminal();
+ final AtomicReference<Boolean> executed = new AtomicReference<>(false);
+ final NamedCommand cmd = new NamedCommand("cmd", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) {
+ executed.set(true);
+ return CliTool.ExitStatus.OK;
+ }
+ };
+ SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
+ tool.parse("cmd", Strings.splitStringByCommaToArray("--verbose"));
+ tool.parse("cmd", Strings.splitStringByCommaToArray("--silent"));
+ tool.parse("cmd", Strings.splitStringByCommaToArray("--help"));
+ }
+
+ @Test
+ public void testPromptForSetting() throws Exception {
+ final AtomicInteger counter = new AtomicInteger();
+ final AtomicReference<String> promptedSecretValue = new AtomicReference<>(null);
+ final AtomicReference<String> promptedTextValue = new AtomicReference<>(null);
+ final Terminal terminal = new MockTerminal() {
+ @Override
+ public char[] readSecret(String text, Object... args) {
+ counter.incrementAndGet();
+ assertThat(args, arrayContaining((Object) "foo.password"));
+ return "changeit".toCharArray();
+ }
+
+ @Override
+ public String readText(String text, Object... args) {
+ counter.incrementAndGet();
+ assertThat(args, arrayContaining((Object) "replace"));
+ return "replaced";
+ }
+ };
+ final NamedCommand cmd = new NamedCommand("noop", terminal) {
+ @Override
+ public CliTool.ExitStatus execute(Settings settings, Environment env) {
+ promptedSecretValue.set(settings.get("foo.password"));
+ promptedTextValue.set(settings.get("replace"));
+ return CliTool.ExitStatus.OK;
+ }
+ };
+
+ System.setProperty("es.foo.password", InternalSettingsPreparer.SECRET_PROMPT_VALUE);
+ System.setProperty("es.replace", InternalSettingsPreparer.TEXT_PROMPT_VALUE);
+ try {
+ new SingleCmdTool("tool", terminal, cmd).execute();
+ } finally {
+ System.clearProperty("es.foo.password");
+ System.clearProperty("es.replace");
+ }
+
+ assertThat(counter.intValue(), is(2));
+ assertThat(promptedSecretValue.get(), is("changeit"));
+ assertThat(promptedTextValue.get(), is("replaced"));
+ }
+
+ private void assertStatus(int status, CliTool.ExitStatus expectedStatus) {
+ assertThat(status, is(expectedStatus.status()));
+ }
+
+ private void assertCommandHasBeenExecuted(AtomicReference<Boolean> executed) {
+ assertThat("Expected command atomic reference counter to be set to true", executed.get(), is(Boolean.TRUE));
+ }
+
+ private static class SingleCmdTool extends CliTool {
+
+ private final Command command;
+
+ private SingleCmdTool(String name, Terminal terminal, NamedCommand command) {
+ super(CliToolConfig.config(name, SingleCmdTool.class)
+ .cmds(cmd(command.name, command.getClass()))
+ .build(), terminal);
+ this.command = command;
+ }
+
+ @Override
+ protected Command parse(String cmdName, CommandLine cli) throws Exception {
+ return command;
+ }
+ }
+
+ private static class MultiCmdTool extends CliTool {
+
+ private final Map<String, Command> commands;
+
+ private MultiCmdTool(String name, Terminal terminal, NamedCommand... commands) {
+ super(CliToolConfig.config(name, MultiCmdTool.class)
+ .cmds(cmds(commands))
+ .build(), terminal);
+ ImmutableMap.Builder<String, Command> commandByName = ImmutableMap.builder();
+ for (int i = 0; i < commands.length; i++) {
+ commandByName.put(commands[i].name, commands[i]);
+ }
+ this.commands = commandByName.build();
+ }
+
+ @Override
+ protected Command parse(String cmdName, CommandLine cli) throws Exception {
+ return commands.get(cmdName);
+ }
+
+ private static CliToolConfig.Cmd[] cmds(NamedCommand... commands) {
+ CliToolConfig.Cmd[] cmds = new CliToolConfig.Cmd[commands.length];
+ for (int i = 0; i < commands.length; i++) {
+ cmds[i] = cmd(commands[i].name, commands[i].getClass()).build();
+ }
+ return cmds;
+ }
+ }
+
+ private static abstract class NamedCommand extends CliTool.Command {
+
+ private final String name;
+
+ private NamedCommand(String name, Terminal terminal) {
+ super(terminal);
+ this.name = name;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java
new file mode 100644
index 0000000000..da0347790b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/cli/TerminalTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.cli;
+
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class TerminalTests extends CliToolTestCase {
+
+ @Test
+ public void testVerbosity() throws Exception {
+ CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.SILENT);
+ assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
+ assertNotPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
+ assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
+
+ terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
+ assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
+ assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
+ assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
+
+ terminal = new CaptureOutputTerminal(Terminal.Verbosity.VERBOSE);
+ assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
+ assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
+ assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
+ }
+
+ private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
+ logTerminal.print(verbosity, text);
+ assertThat(logTerminal.getTerminalOutput(), hasSize(1));
+ assertThat(logTerminal.getTerminalOutput(), hasItem(is("text")));
+ logTerminal.terminalOutput.clear();
+ }
+
+ private void assertNotPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
+ logTerminal.print(verbosity, text);
+ assertThat(logTerminal.getTerminalOutput(), hasSize(0));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java b/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java
new file mode 100644
index 0000000000..f86f4cf855
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableMap;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class CopyOnWriteHashMapTests extends ElasticsearchTestCase {
+
+ private static class O {
+
+ private final int value, hashCode;
+
+ O(int value, int hashCode) {
+ super();
+ this.value = value;
+ this.hashCode = hashCode;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || !(obj instanceof O)) {
+ return false;
+ }
+ return value == ((O) obj).value;
+ }
+ }
+
+ public void testDuel() {
+ final int iters = scaledRandomIntBetween(2, 5);
+ for (int iter = 0; iter < iters; ++iter) {
+ final int valueBits = randomIntBetween(1, 30);
+ final int hashBits = randomInt(valueBits);
+ // we compute the total number of ops based on the bits of the hash
+ // since the test is much heavier when few bits are used for the hash
+ final int numOps = randomInt(10 + hashBits * 100);
+
+ Map<O, Integer> ref = new HashMap<>();
+ CopyOnWriteHashMap<O, Integer> map = new CopyOnWriteHashMap<>();
+ assertEquals(ref, map);
+ final int hashBase = randomInt();
+ for (int i = 0; i < numOps; ++i) {
+ final int v = randomInt(1 << valueBits);
+ final int h = (v & ((1 << hashBits) - 1)) ^ hashBase;
+ O key = new O(v, h);
+
+ Map<O, Integer> newRef = new HashMap<>(ref);
+ final CopyOnWriteHashMap<O, Integer> newMap;
+
+ if (randomBoolean()) {
+ // ADD
+ Integer value = v;
+ newRef.put(key, value);
+ newMap = map.copyAndPut(key, value);
+ } else {
+ // REMOVE
+ final Integer removed = newRef.remove(key);
+ newMap = map.copyAndRemove(key);
+ if (removed == null) {
+ assertSame(map, newMap);
+ }
+ }
+
+ assertEquals(ref, map); // make sure that the old copy has not been modified
+ assertEquals(newRef, newMap);
+ assertEquals(newMap, newRef);
+
+ ref = newRef;
+ map = newMap;
+ }
+ assertEquals(ref, CopyOnWriteHashMap.copyOf(ref));
+ assertEquals(ImmutableMap.of(), CopyOnWriteHashMap.copyOf(ref).copyAndRemoveAll(ref.keySet()));
+ }
+ }
+
+ public void testCollision() {
+ CopyOnWriteHashMap<O, Integer> map = new CopyOnWriteHashMap<>();
+ map = map.copyAndPut(new O(3, 0), 2);
+ assertEquals((Integer) 2, map.get(new O(3, 0)));
+ assertNull(map.get(new O(5, 0)));
+
+ map = map.copyAndPut(new O(5, 0), 5);
+ assertEquals((Integer) 2, map.get(new O(3, 0)));
+ assertEquals((Integer) 5, map.get(new O(5, 0)));
+
+ map = map.copyAndRemove(new O(3, 0));
+ assertNull(map.get(new O(3, 0)));
+ assertEquals((Integer) 5, map.get(new O(5, 0)));
+
+ map = map.copyAndRemove(new O(5, 0));
+ assertNull(map.get(new O(3, 0)));
+ assertNull(map.get(new O(5, 0)));
+ }
+
+ public void testUnsupportedAPIs() {
+ try {
+ new CopyOnWriteHashMap<>().put("a", "b");
+ fail();
+ } catch (UnsupportedOperationException e) {
+ // expected
+ }
+
+ try {
+ new CopyOnWriteHashMap<>().copyAndPut("a", "b").remove("a");
+ fail();
+ } catch (UnsupportedOperationException e) {
+ // expected
+ }
+ }
+
+ public void testUnsupportedValues() {
+ try {
+ new CopyOnWriteHashMap<>().copyAndPut("a", null);
+ fail();
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+
+ try {
+ new CopyOnWriteHashMap<>().copyAndPut(null, "b");
+ fail();
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java b/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java
new file mode 100644
index 0000000000..f489c47bc4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.collect;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.HashSet;
+import java.util.Set;
+
+public class CopyOnWriteHashSetTests extends ElasticsearchTestCase {
+
+ private static class O {
+
+ private final int value, hashCode;
+
+ O(int value, int hashCode) {
+ super();
+ this.value = value;
+ this.hashCode = hashCode;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null || !(obj instanceof O)) {
+ return false;
+ }
+ return value == ((O) obj).value;
+ }
+ }
+
+ public void testDuel() {
+ final int iters = scaledRandomIntBetween(2, 5);
+ for (int iter = 0; iter < iters; ++iter) {
+ final int valueBits = randomIntBetween(1, 30);
+ final int hashBits = randomInt(valueBits);
+ // we compute the total number of ops based on the bits of the hash
+ // since the test is much heavier when few bits are used for the hash
+ final int numOps = randomInt(10 + hashBits * 100);
+
+ Set<O> ref = new HashSet<>();
+ CopyOnWriteHashSet<O> set = new CopyOnWriteHashSet<>();
+ assertEquals(ref, set);
+ final int hashBase = randomInt();
+ for (int i = 0; i < numOps; ++i) {
+ final int v = randomInt(1 << valueBits);
+ final int h = (v & ((1 << hashBits) - 1)) ^ hashBase;
+ O key = new O(v, h);
+
+ Set<O> newRef = new HashSet<>(ref);
+ final CopyOnWriteHashSet<O> newSet;
+
+ if (randomBoolean()) {
+ // ADD
+ newRef.add(key);
+ newSet = set.copyAndAdd(key);
+ } else {
+ // REMOVE
+ final boolean modified = newRef.remove(key);
+ newSet = set.copyAndRemove(key);
+ if (!modified) {
+ assertSame(set, newSet);
+ }
+ }
+
+ assertEquals(ref, set); // make sure that the old copy has not been modified
+ assertEquals(newRef, newSet);
+ assertEquals(newSet, newRef);
+ assertEquals(ref.isEmpty(), set.isEmpty());
+ assertEquals(newRef.isEmpty(), newSet.isEmpty());
+
+ ref = newRef;
+ set = newSet;
+ }
+ assertEquals(ref, CopyOnWriteHashSet.copyOf(ref));
+ assertEquals(ImmutableSet.of(), CopyOnWriteHashSet.copyOf(ref).copyAndRemoveAll(ref));
+ }
+ }
+
+ public void testUnsupportedAPIs() {
+ try {
+ new CopyOnWriteHashSet<>().add("a");
+ fail();
+ } catch (UnsupportedOperationException e) {
+ // expected
+ }
+
+ try {
+ new CopyOnWriteHashSet<>().copyAndAdd("a").remove("a");
+ fail();
+ } catch (UnsupportedOperationException e) {
+ // expected
+ }
+ }
+
+ public void testUnsupportedValues() {
+ try {
+ new CopyOnWriteHashSet<>().copyAndAdd(null);
+ fail();
+ } catch (IllegalArgumentException e) {
+ // expected
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTests.java b/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTests.java
new file mode 100644
index 0000000000..e9404f4f20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedStreamTests.java
@@ -0,0 +1,435 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Test streaming compression (e.g. used for recovery)
+ */
+public abstract class AbstractCompressedStreamTests extends ElasticsearchTestCase {
+
+ private final Compressor compressor;
+
+ protected AbstractCompressedStreamTests(Compressor compressor) {
+ this.compressor = compressor;
+ }
+
+ public void testRandom() throws IOException {
+ Random r = getRandom();
+ for (int i = 0; i < 10; i++) {
+ byte bytes[] = new byte[TestUtil.nextInt(r, 1, 400000)];
+ r.nextBytes(bytes);
+ doTest(bytes);
+ }
+ }
+
+ public void testRandomThreads() throws Exception {
+ final Random r = getRandom();
+ int threadCount = TestUtil.nextInt(r, 2, 10);
+ Thread[] threads = new Thread[threadCount];
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ for (int tid=0; tid < threadCount; tid++) {
+ final long seed = r.nextLong();
+ threads[tid] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Random r = new Random(seed);
+ startingGun.await();
+ for (int i = 0; i < 10; i++) {
+ byte bytes[] = new byte[TestUtil.nextInt(r, 1, 100000)];
+ r.nextBytes(bytes);
+ doTest(bytes);
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[tid].start();
+ }
+ startingGun.countDown();
+ for (Thread t : threads) {
+ t.join();
+ }
+ }
+
+ public void testLineDocs() throws IOException {
+ Random r = getRandom();
+ LineFileDocs lineFileDocs = new LineFileDocs(r);
+ for (int i = 0; i < 10; i++) {
+ int numDocs = TestUtil.nextInt(r, 1, 200);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ for (int j = 0; j < numDocs; j++) {
+ String s = lineFileDocs.nextDoc().get("body");
+ bos.write(s.getBytes(StandardCharsets.UTF_8));
+ }
+ doTest(bos.toByteArray());
+ }
+ lineFileDocs.close();
+ }
+
+ public void testLineDocsThreads() throws Exception {
+ final Random r = getRandom();
+ int threadCount = TestUtil.nextInt(r, 2, 10);
+ Thread[] threads = new Thread[threadCount];
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ for (int tid=0; tid < threadCount; tid++) {
+ final long seed = r.nextLong();
+ threads[tid] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Random r = new Random(seed);
+ startingGun.await();
+ LineFileDocs lineFileDocs = new LineFileDocs(r);
+ for (int i = 0; i < 10; i++) {
+ int numDocs = TestUtil.nextInt(r, 1, 200);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ for (int j = 0; j < numDocs; j++) {
+ String s = lineFileDocs.nextDoc().get("body");
+ bos.write(s.getBytes(StandardCharsets.UTF_8));
+ }
+ doTest(bos.toByteArray());
+ }
+ lineFileDocs.close();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[tid].start();
+ }
+ startingGun.countDown();
+ for (Thread t : threads) {
+ t.join();
+ }
+ }
+
+ public void testRepetitionsL() throws IOException {
+ Random r = getRandom();
+ for (int i = 0; i < 10; i++) {
+ int numLongs = TestUtil.nextInt(r, 1, 10000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ long theValue = r.nextLong();
+ for (int j = 0; j < numLongs; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = r.nextLong();
+ }
+ bos.write((byte) (theValue >>> 56));
+ bos.write((byte) (theValue >>> 48));
+ bos.write((byte) (theValue >>> 40));
+ bos.write((byte) (theValue >>> 32));
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ }
+
+ public void testRepetitionsLThreads() throws Exception {
+ final Random r = getRandom();
+ int threadCount = TestUtil.nextInt(r, 2, 10);
+ Thread[] threads = new Thread[threadCount];
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ for (int tid=0; tid < threadCount; tid++) {
+ final long seed = r.nextLong();
+ threads[tid] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Random r = new Random(seed);
+ startingGun.await();
+ for (int i = 0; i < 10; i++) {
+ int numLongs = TestUtil.nextInt(r, 1, 10000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ long theValue = r.nextLong();
+ for (int j = 0; j < numLongs; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = r.nextLong();
+ }
+ bos.write((byte) (theValue >>> 56));
+ bos.write((byte) (theValue >>> 48));
+ bos.write((byte) (theValue >>> 40));
+ bos.write((byte) (theValue >>> 32));
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[tid].start();
+ }
+ startingGun.countDown();
+ for (Thread t : threads) {
+ t.join();
+ }
+ }
+
+ public void testRepetitionsI() throws IOException {
+ Random r = getRandom();
+ for (int i = 0; i < 10; i++) {
+ int numInts = TestUtil.nextInt(r, 1, 20000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int theValue = r.nextInt();
+ for (int j = 0; j < numInts; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = r.nextInt();
+ }
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ }
+
+ public void testRepetitionsIThreads() throws Exception {
+ final Random r = getRandom();
+ int threadCount = TestUtil.nextInt(r, 2, 10);
+ Thread[] threads = new Thread[threadCount];
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ for (int tid=0; tid < threadCount; tid++) {
+ final long seed = r.nextLong();
+ threads[tid] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Random r = new Random(seed);
+ startingGun.await();
+ for (int i = 0; i < 10; i++) {
+ int numInts = TestUtil.nextInt(r, 1, 20000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int theValue = r.nextInt();
+ for (int j = 0; j < numInts; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = r.nextInt();
+ }
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[tid].start();
+ }
+ startingGun.countDown();
+ for (Thread t : threads) {
+ t.join();
+ }
+ }
+
+ public void testRepetitionsS() throws IOException {
+ Random r = getRandom();
+ for (int i = 0; i < 10; i++) {
+ int numShorts = TestUtil.nextInt(r, 1, 40000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ short theValue = (short) r.nextInt(65535);
+ for (int j = 0; j < numShorts; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = (short) r.nextInt(65535);
+ }
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ }
+
+ public void testMixed() throws IOException {
+ Random r = getRandom();
+ LineFileDocs lineFileDocs = new LineFileDocs(r);
+ for (int i = 0; i < 2; ++i) {
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ int prevInt = r.nextInt();
+ long prevLong = r.nextLong();
+ while (bos.size() < 400000) {
+ switch (r.nextInt(4)) {
+ case 0:
+ addInt(r, prevInt, bos);
+ break;
+ case 1:
+ addLong(r, prevLong, bos);
+ break;
+ case 2:
+ addString(lineFileDocs, bos);
+ break;
+ case 3:
+ addBytes(r, bos);
+ break;
+ default:
+ throw new IllegalStateException("Random is broken");
+ }
+ }
+ doTest(bos.toByteArray());
+ }
+ }
+
+ private void addLong(Random r, long prev, ByteArrayOutputStream bos) {
+ long theValue = prev;
+ if (r.nextInt(10) != 0) {
+ theValue = r.nextLong();
+ }
+ bos.write((byte) (theValue >>> 56));
+ bos.write((byte) (theValue >>> 48));
+ bos.write((byte) (theValue >>> 40));
+ bos.write((byte) (theValue >>> 32));
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+
+ private void addInt(Random r, int prev, ByteArrayOutputStream bos) {
+ int theValue = prev;
+ if (r.nextInt(10) != 0) {
+ theValue = r.nextInt();
+ }
+ bos.write((byte) (theValue >>> 24));
+ bos.write((byte) (theValue >>> 16));
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+
+ private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException {
+ String s = lineFileDocs.nextDoc().get("body");
+ bos.write(s.getBytes(StandardCharsets.UTF_8));
+ }
+
+ private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException {
+ byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)];
+ r.nextBytes(bytes);
+ bos.write(bytes);
+ }
+
+ public void testRepetitionsSThreads() throws Exception {
+ final Random r = getRandom();
+ int threadCount = TestUtil.nextInt(r, 2, 10);
+ Thread[] threads = new Thread[threadCount];
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ for (int tid=0; tid < threadCount; tid++) {
+ final long seed = r.nextLong();
+ threads[tid] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Random r = new Random(seed);
+ startingGun.await();
+ for (int i = 0; i < 10; i++) {
+ int numShorts = TestUtil.nextInt(r, 1, 40000);
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ short theValue = (short) r.nextInt(65535);
+ for (int j = 0; j < numShorts; j++) {
+ if (r.nextInt(10) == 0) {
+ theValue = (short) r.nextInt(65535);
+ }
+ bos.write((byte) (theValue >>> 8));
+ bos.write((byte) theValue);
+ }
+ doTest(bos.toByteArray());
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[tid].start();
+ }
+ startingGun.countDown();
+ for (Thread t : threads) {
+ t.join();
+ }
+ }
+
+ private void doTest(byte bytes[]) throws IOException {
+ ByteBuffer bb = ByteBuffer.wrap(bytes);
+ StreamInput rawIn = new ByteBufferStreamInput(bb);
+ Compressor c = compressor;
+
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ OutputStreamStreamOutput rawOs = new OutputStreamStreamOutput(bos);
+ StreamOutput os = c.streamOutput(rawOs);
+
+ Random r = getRandom();
+ int bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
+ int prepadding = r.nextInt(70000);
+ int postpadding = r.nextInt(70000);
+ byte buffer[] = new byte[prepadding + bufferSize + postpadding];
+ r.nextBytes(buffer); // fill block completely with junk
+ int len;
+ while ((len = rawIn.read(buffer, prepadding, bufferSize)) != -1) {
+ os.write(buffer, prepadding, len);
+ }
+ os.close();
+ rawIn.close();
+
+ // now we have compressed byte array
+
+ byte compressed[] = bos.toByteArray();
+ ByteBuffer bb2 = ByteBuffer.wrap(compressed);
+ StreamInput compressedIn = new ByteBufferStreamInput(bb2);
+ StreamInput in = c.streamInput(compressedIn);
+
+ // randomize constants again
+ bufferSize = r.nextBoolean() ? 65535 : TestUtil.nextInt(getRandom(), 1, 70000);
+ prepadding = r.nextInt(70000);
+ postpadding = r.nextInt(70000);
+ buffer = new byte[prepadding + bufferSize + postpadding];
+ r.nextBytes(buffer); // fill block completely with junk
+
+ ByteArrayOutputStream uncompressedOut = new ByteArrayOutputStream();
+ while ((len = in.read(buffer, prepadding, bufferSize)) != -1) {
+ uncompressedOut.write(buffer, prepadding, len);
+ }
+ uncompressedOut.close();
+
+ assertArrayEquals(bytes, uncompressedOut.toByteArray());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTests.java
new file mode 100644
index 0000000000..ad424b7e33
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/AbstractCompressedXContentTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress;
+
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public abstract class AbstractCompressedXContentTests extends ElasticsearchTestCase {
+
+ private final Compressor compressor;
+
+ protected AbstractCompressedXContentTests(Compressor compressor) {
+ this.compressor = compressor;
+ }
+
+ private void assertEquals(CompressedXContent s1, CompressedXContent s2) {
+ Assert.assertEquals(s1, s2);
+ assertArrayEquals(s1.uncompressed(), s2.uncompressed());
+ assertEquals(s1.hashCode(), s2.hashCode());
+ }
+
+ public void simpleTests() throws IOException {
+ Compressor defaultCompressor = CompressorFactory.defaultCompressor();
+ try {
+ CompressorFactory.setDefaultCompressor(compressor);
+ String str = "---\nf:this is a simple string";
+ CompressedXContent cstr = new CompressedXContent(str);
+ assertThat(cstr.string(), equalTo(str));
+ assertThat(new CompressedXContent(str), equalTo(cstr));
+
+ String str2 = "---\nf:this is a simple string 2";
+ CompressedXContent cstr2 = new CompressedXContent(str2);
+ assertThat(cstr2.string(), not(equalTo(str)));
+ assertThat(new CompressedXContent(str2), not(equalTo(cstr)));
+ assertEquals(new CompressedXContent(str2), cstr2);
+ } finally {
+ CompressorFactory.setDefaultCompressor(defaultCompressor);
+ }
+ }
+
+ public void testRandom() throws IOException {
+ Compressor defaultCompressor = CompressorFactory.defaultCompressor();
+ try {
+ CompressorFactory.setDefaultCompressor(compressor);
+ Random r = getRandom();
+ for (int i = 0; i < 1000; i++) {
+ String string = TestUtil.randomUnicodeString(r, 10000);
+ // hack to make it detected as YAML
+ string = "---\n" + string;
+ CompressedXContent compressedXContent = new CompressedXContent(string);
+ assertThat(compressedXContent.string(), equalTo(string));
+ }
+ } finally {
+ CompressorFactory.setDefaultCompressor(defaultCompressor);
+ }
+ }
+
+ public void testDifferentCompressedRepresentation() throws Exception {
+ byte[] b = "---\nf:abcdefghijabcdefghij".getBytes("UTF-8");
+ BytesStreamOutput bout = new BytesStreamOutput();
+ StreamOutput out = compressor.streamOutput(bout);
+ out.writeBytes(b);
+ out.flush();
+ out.writeBytes(b);
+ out.close();
+ final BytesReference b1 = bout.bytes();
+
+ bout = new BytesStreamOutput();
+ out = compressor.streamOutput(bout);
+ out.writeBytes(b);
+ out.writeBytes(b);
+ out.close();
+ final BytesReference b2 = bout.bytes();
+
+ // because of the intermediate flush, the two compressed representations
+ // are different. It can also happen for other reasons like if hash tables
+ // of different size are being used
+ assertFalse(b1.equals(b2));
+ // we used the compressed representation directly and did not recompress
+ assertArrayEquals(b1.toBytes(), new CompressedXContent(b1).compressed());
+ assertArrayEquals(b2.toBytes(), new CompressedXContent(b2).compressed());
+        // but CompressedXContent instances are still equal
+ assertEquals(new CompressedXContent(b1), new CompressedXContent(b2));
+ }
+
+ public void testHashCode() throws IOException {
+ assertFalse(new CompressedXContent("{\"a\":\"b\"}").hashCode() == new CompressedXContent("{\"a\":\"c\"}").hashCode());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java
new file mode 100644
index 0000000000..6607274dfc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateCompressedStreamTests.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.deflate;
+
+import org.elasticsearch.common.compress.AbstractCompressedStreamTests;
+
+/** Runs the shared compressed-stream round-trip tests against the Deflate compressor. */
+public class DeflateCompressedStreamTests extends AbstractCompressedStreamTests {
+
+ public DeflateCompressedStreamTests() {
+ super(new DeflateCompressor());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java
new file mode 100644
index 0000000000..8b103c9799
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/deflate/DeflateXContentTests.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.deflate;
+
+import org.elasticsearch.common.compress.AbstractCompressedXContentTests;
+
+/** Runs the shared CompressedXContent tests against the Deflate compressor. */
+public class DeflateXContentTests extends AbstractCompressedXContentTests {
+
+ public DeflateXContentTests() {
+ super(new DeflateCompressor());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java
new file mode 100644
index 0000000000..3cf0bcd5cf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/CompressedStreamOutput.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Base class for compressing {@link StreamOutput} implementations.
+ * Buffers uncompressed bytes and hands full buffers to {@link #compress}
+ * for the concrete codec to encode into the wrapped stream.
+ * NOTE(review): subclasses are expected to allocate {@code uncompressed}
+ * and set {@code uncompressedLength} in their constructor — confirm against
+ * implementations such as LZFCompressedStreamOutput.
+ */
+public abstract class CompressedStreamOutput extends StreamOutput {
+
+ private final StreamOutput out;
+
+ // buffer of not-yet-compressed bytes; allocated/owned by the subclass
+ protected byte[] uncompressed;
+ // capacity of the uncompressed buffer (also the per-chunk compression size)
+ protected int uncompressedLength;
+ // number of valid bytes currently buffered in uncompressed[]
+ private int position = 0;
+
+ // guards against double-close releasing resources twice
+ private boolean closed;
+
+ public CompressedStreamOutput(StreamOutput out) throws IOException {
+ this.out = out;
+ super.setVersion(out.getVersion());
+ // codec-specific header is written once, up front
+ writeHeader(out);
+ }
+
+ @Override
+ public StreamOutput setVersion(Version version) {
+ // keep the wrapped stream's version in sync with ours
+ out.setVersion(version);
+ return super.setVersion(version);
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ if (position >= uncompressedLength) {
+ flushBuffer();
+ }
+ uncompressed[position++] = (byte) b;
+ }
+
+ @Override
+ public void writeByte(byte b) throws IOException {
+ if (position >= uncompressedLength) {
+ flushBuffer();
+ }
+ uncompressed[position++] = b;
+ }
+
+ @Override
+ public void writeBytes(byte[] input, int offset, int length) throws IOException {
+ // ES, check if length is 0, and don't write in this case
+ if (length == 0) {
+ return;
+ }
+ final int BUFFER_LEN = uncompressedLength;
+
+ // simple case first: buffering only (for trivially short writes)
+ int free = BUFFER_LEN - position;
+ if (free >= length) {
+ System.arraycopy(input, offset, uncompressed, position, length);
+ position += length;
+ return;
+ }
+ // fill partial input as much as possible and flush
+ if (position > 0) {
+ System.arraycopy(input, offset, uncompressed, position, free);
+ position += free;
+ flushBuffer();
+ offset += free;
+ length -= free;
+ }
+
+ // then write intermediate full block, if any, without copying:
+ while (length >= BUFFER_LEN) {
+ compress(input, offset, BUFFER_LEN, out);
+ offset += BUFFER_LEN;
+ length -= BUFFER_LEN;
+ }
+
+ // and finally, copy leftovers in input, if any
+ if (length > 0) {
+ System.arraycopy(input, offset, uncompressed, 0, length);
+ }
+ position = length;
+ }
+
+ @Override
+ public void flush() throws IOException {
+ flushBuffer();
+ out.flush();
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (!closed) {
+ // flush remaining buffered bytes before releasing codec resources
+ flushBuffer();
+ closed = true;
+ doClose();
+ out.close();
+ }
+ }
+
+ // releases codec-specific resources (buffers, encoders); called once from close()
+ protected abstract void doClose() throws IOException;
+
+ @Override
+ public void reset() throws IOException {
+ // discard buffered-but-uncompressed bytes, then reset the wrapped stream
+ position = 0;
+ out.reset();
+ }
+
+ // compresses and writes out whatever is buffered; no-op when the buffer is empty
+ private void flushBuffer() throws IOException {
+ if (position > 0) {
+ compress(uncompressed, 0, position, out);
+ position = 0;
+ }
+ }
+
+ // writes the codec's stream header (may be a no-op for chunk-framed codecs)
+ protected abstract void writeHeader(StreamOutput out) throws IOException;
+
+ /**
+ * Compresses the data into the output
+ */
+ protected abstract void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException;
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
new file mode 100644
index 0000000000..720a0331ee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/CorruptedCompressorTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.lzf.ChunkDecoder;
+import com.ning.compress.lzf.ChunkEncoder;
+import com.ning.compress.lzf.LZFChunk;
+import com.ning.compress.lzf.util.ChunkDecoderFactory;
+import com.ning.compress.lzf.util.ChunkEncoderFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+
+/**
+ * Test an extremely rare corruption produced by the pure java impl of ChunkEncoder.
+ */
+public class CorruptedCompressorTests extends ElasticsearchTestCase {
+
+ public void testCorruption() throws IOException {
+ // this test generates a hash collision: [0,1,153,64] hashes the same as [1,153,64,64]
+ // and then leverages the bug s/inPos/0/ to corrupt the array
+ // the first array is used to insert a reference from this hash to offset 6
+ // and then the hash table is reused and still thinks that there is such a hash at position 6
+ // and at position 7, it finds a sequence with the same hash
+ // so it inserts a buggy reference
+ byte[] b1 = new byte[] {0,1,2,3,4,(byte)153,64,64,64,9,9,9,9,9,9,9,9,9,9};
+ byte[] b2 = new byte[] {1,(byte)153,0,0,0,0,(byte)153,64,64,64,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ ChunkEncoder encoder = ChunkEncoderFactory.safeInstance();
+ ChunkDecoder decoder = ChunkDecoderFactory.safeInstance();
+ check(encoder, decoder, b1, 0, b1.length);
+ final int off = 6;
+ check(encoder, decoder, b2, off, b2.length - off);
+ }
+
+ /**
+ * Round-trips bytes[offset, offset+length) through the given encoder/decoder
+ * pair and asserts the decoded output matches the input exactly.
+ */
+ private void check(ChunkEncoder encoder, ChunkDecoder decoder, byte[] bytes, int offset, int length) throws IOException {
+ ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+ byte[] expected = new byte[length];
+ byte[] buffer = new byte[LZFChunk.MAX_CHUNK_LEN];
+ byte[] output = new byte[length];
+ System.arraycopy(bytes, offset, expected, 0, length);
+ encoder.encodeAndWriteChunk(bytes, offset, length, outputStream);
+ InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
+ // JUnit convention: expected value first, actual second (was reversed,
+ // which would produce a misleading failure message); also dropped the
+ // leftover System.out.println debug statements that polluted test output.
+ assertEquals(length, decoder.decodeChunk(inputStream, buffer, output));
+ assertArrayEquals(expected, output);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
new file mode 100644
index 0000000000..3aa2a5de80
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamOutput.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import com.ning.compress.BufferRecycler;
+import com.ning.compress.lzf.ChunkEncoder;
+import com.ning.compress.lzf.LZFChunk;
+import com.ning.compress.lzf.util.ChunkEncoderFactory;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * LZF chunk-based implementation of CompressedStreamOutput: each full buffer
+ * is encoded as a self-contained LZF chunk, so no stream header is needed.
+ */
+public class LZFCompressedStreamOutput extends CompressedStreamOutput {
+
+ // recycler owns the uncompressed buffer; it is returned in doClose()
+ private final BufferRecycler recycler;
+ private final ChunkEncoder encoder;
+
+ public LZFCompressedStreamOutput(StreamOutput out) throws IOException {
+ super(out);
+ this.recycler = BufferRecycler.instance();
+ this.uncompressed = this.recycler.allocOutputBuffer(LZFChunk.MAX_CHUNK_LEN);
+ this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
+ this.encoder = ChunkEncoderFactory.safeInstance(recycler);
+ }
+
+ @Override
+ public void writeHeader(StreamOutput out) throws IOException {
+ // nothing to do here, each chunk has a header of its own
+ }
+
+ @Override
+ protected void compress(byte[] data, int offset, int len, StreamOutput out) throws IOException {
+ encoder.encodeAndWriteChunk(data, offset, len, out);
+ }
+
+ @Override
+ protected void doClose() throws IOException {
+ // hand the buffer back to the recycler exactly once (uncompressed is nulled first)
+ byte[] buf = uncompressed;
+ if (buf != null) {
+ uncompressed = null;
+ recycler.releaseOutputBuffer(buf);
+ }
+ encoder.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java
new file mode 100644
index 0000000000..1d69fce1b9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFCompressedStreamTests.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import org.elasticsearch.common.compress.AbstractCompressedStreamTests;
+
+/** Runs the shared compressed-stream round-trip tests against the LZF test compressor. */
+public class LZFCompressedStreamTests extends AbstractCompressedStreamTests {
+
+ public LZFCompressedStreamTests() {
+ super(new LZFTestCompressor());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java
new file mode 100644
index 0000000000..8f21b0cbf0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFTestCompressor.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+// LZF compressor with write support, for testing only
+// (the production LZFCompressor presumably is read-only — this subclass adds
+// a writable stream so round-trip tests can produce LZF data; verify against
+// LZFCompressor)
+public class LZFTestCompressor extends LZFCompressor {
+
+ @Override
+ public StreamOutput streamOutput(StreamOutput out) throws IOException {
+ return new LZFCompressedStreamOutput(out);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java
new file mode 100644
index 0000000000..698a033755
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/compress/lzf/LZFXContentTests.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.compress.lzf;
+
+import org.elasticsearch.common.compress.AbstractCompressedXContentTests;
+
+/** Runs the shared CompressedXContent tests against the LZF test compressor. */
+public class LZFXContentTests extends AbstractCompressedXContentTests {
+
+ public LZFXContentTests() {
+ super(new LZFTestCompressor());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
new file mode 100644
index 0000000000..55ff0bdb21
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+
+
+/**
+ * Tests for {@link GeoHashUtils}
+ */
+public class GeoHashTests extends ElasticsearchTestCase {
+
+
+ @Test
+ public void testGeohashAsLongRoutines() {
+
+ //Ensure that for all points at all supported levels of precision
+ // that the long encoding of a geohash is compatible with its
+ // String based counterpart
+ for (double lat=-90;lat<90;lat++)
+ {
+ for (double lng=-180;lng<180;lng++)
+ {
+ // precision levels 1..12 are exercised for every 1-degree grid point
+ for(int p=1;p<=12;p++)
+ {
+ long geoAsLong = GeoHashUtils.encodeAsLong(lat,lng,p);
+ String geohash = GeoHashUtils.encode(lat,lng,p);
+
+ // long and String encodings must round-trip to the same geohash...
+ String geohashFromLong=GeoHashUtils.toString(geoAsLong);
+ assertEquals(geohash, geohashFromLong);
+ // ...and decode to the same point
+ GeoPoint pos=GeoHashUtils.decode(geohash);
+ GeoPoint pos2=GeoHashUtils.decode(geoAsLong);
+ assertEquals(pos, pos2);
+ }
+ }
+
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
new file mode 100644
index 0000000000..9a06b10d81
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java
@@ -0,0 +1,1003 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Circle;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.ShapeCollection;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT;
+
+
+/**
+ * Tests for {@link GeoJSONShapeParser}
+ */
+public class GeoJSONShapeParserTests extends ElasticsearchTestCase {
+
+ private final static GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory();
+
+ /** A GeoJSON "Point" parses into the equivalent JTS point. */
+ public void testParse_simplePoint() throws IOException {
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .endObject().string();
+
+ Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
+ }
+
+ /** A GeoJSON "LineString" parses into the equivalent JTS line string. */
+ public void testParse_lineString() throws IOException {
+ String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> lineCoordinates = new ArrayList<>();
+ lineCoordinates.add(new Coordinate(100, 0));
+ lineCoordinates.add(new Coordinate(101, 1));
+
+ LineString expected = GEOMETRY_FACTORY.createLineString(
+ lineCoordinates.toArray(new Coordinate[lineCoordinates.size()]));
+ assertGeometryEquals(jtsGeom(expected), lineGeoJson);
+ }
+
+ /** A GeoJSON "MultiLineString" parses into the equivalent JTS multi-line string. */
+ public void testParse_multiLineString() throws IOException {
+ String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiLineString")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .endArray()
+ .startArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .startArray().value(103.0).value(3.0).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString(new LineString[]{
+ GEOMETRY_FACTORY.createLineString(new Coordinate[]{
+ new Coordinate(100, 0),
+ new Coordinate(101, 1),
+ }),
+ GEOMETRY_FACTORY.createLineString(new Coordinate[]{
+ new Coordinate(102, 2),
+ new Coordinate(103, 3),
+ }),
+ });
+ assertGeometryEquals(jtsGeom(expected), multilinesGeoJson);
+ }
+
+ /** A "circle" shape with a distance radius parses into a spatial4j circle (radius converted to degrees). */
+ public void testParse_circle() throws IOException {
+ String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "circle")
+ .startArray("coordinates").value(100.0).value(0.0).endArray()
+ .field("radius", "100m")
+ .endObject().string();
+
+ // 100m converted to degrees of arc along the equator
+ Circle expected = SPATIAL_CONTEXT.makeCircle(100.0, 0.0, 360 * 100 / GeoUtils.EARTH_EQUATOR);
+ assertGeometryEquals(expected, multilinesGeoJson);
+ }
+
+ /** Coordinates with more than two dimensions parse fine; the extra dimensions are ignored. */
+ public void testParse_multiDimensionShapes() throws IOException {
+ // multi dimension point
+ String pointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Point")
+ .startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray()
+ .endObject().string();
+
+ Point expectedPt = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+ assertGeometryEquals(new JtsPoint(expectedPt, SPATIAL_CONTEXT), pointGeoJson);
+
+ // multi dimension linestring
+ String lineGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "LineString")
+ .startArray("coordinates")
+ .startArray().value(100.0).value(0.0).value(15.0).endArray()
+ .startArray().value(101.0).value(1.0).value(18.0).value(19.0).endArray()
+ .endArray()
+ .endObject().string();
+
+ List<Coordinate> lineCoordinates = new ArrayList<>();
+ lineCoordinates.add(new Coordinate(100, 0));
+ lineCoordinates.add(new Coordinate(101, 1));
+
+ LineString expectedLS = GEOMETRY_FACTORY.createLineString(
+ lineCoordinates.toArray(new Coordinate[lineCoordinates.size()]));
+ assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson);
+ }
+
+ /**
+ * "envelope" shapes parse regardless of corner order, and reject
+ * wrong coordinate counts (three corners, or none) with a parse exception.
+ */
+ public void testParse_envelope() throws IOException {
+ // test #1: envelope with expected coordinate order (TopLeft, BottomRight)
+ String multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
+ .startArray("coordinates")
+ .startArray().value(-50).value(30).endArray()
+ .startArray().value(50).value(-30).endArray()
+ .endArray()
+ .endObject().string();
+
+ Rectangle expected = SPATIAL_CONTEXT.makeRectangle(-50, 50, -30, 30);
+ assertGeometryEquals(expected, multilinesGeoJson);
+
+ // test #2: envelope with agnostic coordinate order (TopRight, BottomLeft)
+ multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
+ .startArray("coordinates")
+ .startArray().value(50).value(30).endArray()
+ .startArray().value(-50).value(-30).endArray()
+ .endArray()
+ .endObject().string();
+
+ expected = SPATIAL_CONTEXT.makeRectangle(-50, 50, -30, 30);
+ assertGeometryEquals(expected, multilinesGeoJson);
+
+ // test #3: "envelope" (actually a triangle) with invalid number of coordinates (TopRight, BottomLeft, BottomRight)
+ multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
+ .startArray("coordinates")
+ .startArray().value(50).value(30).endArray()
+ .startArray().value(-50).value(-30).endArray()
+ .startArray().value(50).value(-39).endArray()
+ .endArray()
+ .endObject().string();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(multilinesGeoJson);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+ // test #4: "envelope" with empty coordinates
+ multilinesGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "envelope")
+ .startArray("coordinates")
+ .endArray()
+ .endObject().string();
+ parser = JsonXContent.jsonXContent.createParser(multilinesGeoJson);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+ }
+
+ /** A simple "Polygon" (shell only, no holes) parses into the equivalent JTS polygon. */
+ public void testParse_polygonNoHoles() throws IOException {
+ String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+ .startArray("coordinates")
+ .startArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ // expected shell lists the same ring, starting from a different vertex
+ List<Coordinate> shellCoordinates = new ArrayList<>();
+ shellCoordinates.add(new Coordinate(100, 0));
+ shellCoordinates.add(new Coordinate(101, 0));
+ shellCoordinates.add(new Coordinate(101, 1));
+ shellCoordinates.add(new Coordinate(100, 1));
+ shellCoordinates.add(new Coordinate(100, 0));
+
+ LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+ Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null);
+ assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
+ }
+
+ /** Malformed "point" shapes (nested-array coordinates, or no coordinates) raise a parse exception. */
+ public void testParse_invalidPoint() throws IOException {
+ // test case 1: create an invalid point object with multipoint data format
+ String invalidPoint1 = XContentFactory.jsonBuilder().startObject().field("type", "point")
+ .startArray("coordinates")
+ .startArray().value(-74.011).value(40.753).endArray()
+ .endArray()
+ .endObject().string();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(invalidPoint1);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+ // test case 2: create an invalid point object with an empty number of coordinates
+ String invalidPoint2 = XContentFactory.jsonBuilder().startObject().field("type", "point")
+ .startArray("coordinates")
+ .endArray()
+ .endObject().string();
+ parser = JsonXContent.jsonXContent.createParser(invalidPoint2);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+ }
+
+ /** Malformed "multipoint" shapes (flat coordinates, empty, or empty inner array) raise a parse exception. */
+ public void testParse_invalidMultipoint() throws IOException {
+ // test case 1: create an invalid multipoint object with single coordinate
+ String invalidMultipoint1 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint")
+ .startArray("coordinates").value(-74.011).value(40.753).endArray()
+ .endObject().string();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(invalidMultipoint1);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+ // test case 2: create an invalid multipoint object with null coordinate
+ String invalidMultipoint2 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint")
+ .startArray("coordinates")
+ .endArray()
+ .endObject().string();
+ parser = JsonXContent.jsonXContent.createParser(invalidMultipoint2);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+ // test case 3: create a valid formatted multipoint object with invalid number (0) of coordinates
+ String invalidMultipoint3 = XContentFactory.jsonBuilder().startObject().field("type", "multipoint")
+ .startArray("coordinates")
+ .startArray().endArray()
+ .endArray()
+ .endObject().string();
+ parser = JsonXContent.jsonXContent.createParser(invalidMultipoint3);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+ }
+
+ /** A "MultiPolygon" whose holes lie outside the outer ring is rejected as an invalid shape. */
+ public void testParse_invalidMultiPolygon() throws IOException {
+ // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring)
+ String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
+ .startArray("coordinates")
+ .startArray()//one poly (with two holes)
+ .startArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .startArray().value(103.0).value(2.0).endArray()
+ .startArray().value(103.0).value(3.0).endArray()
+ .startArray().value(102.0).value(3.0).endArray()
+ .startArray().value(102.0).value(2.0).endArray()
+ .endArray()
+ .startArray()// first hole
+ .startArray().value(100.0).value(0.0).endArray()
+ .startArray().value(101.0).value(0.0).endArray()
+ .startArray().value(101.0).value(1.0).endArray()
+ .startArray().value(100.0).value(1.0).endArray()
+ .startArray().value(100.0).value(0.0).endArray()
+ .endArray()
+ .startArray()//second hole
+ .startArray().value(100.2).value(0.8).endArray()
+ .startArray().value(100.2).value(0.2).endArray()
+ .startArray().value(100.8).value(0.2).endArray()
+ .startArray().value(100.8).value(0.8).endArray()
+ .startArray().value(100.2).value(0.8).endArray()
+ .endArray()
+ .endArray()
+ .endArray()
+ .endObject().string();
+
+ XContentParser parser = JsonXContent.jsonXContent.createParser(multiPolygonGeoJson);
+ parser.nextToken();
+ ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
+ }
+
+    // OGC orientation handling for hole-less polygons: ccw (right-hand-rule) shells stay a
+    // single polygon unless they cross the dateline, in which case the parser splits them
+    // into a MultiPolygon; cw shells get the complementary treatment.
+    public void testParse_OGCPolygonWithoutHoles() throws IOException {
+        // test 1: ccw poly not crossing dateline
+        String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        XContentParser parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        Shape shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 2: ccw poly crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        // crossing the dateline forces a split into two lobes
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+
+        // test 3: cw poly not crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(180.0).value(10.0).endArray()
+                .startArray().value(180.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 4: cw poly crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(184.0).value(15.0).endArray()
+                .startArray().value(184.0).value(0.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(174.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        // longitudes > 180 wrap past the dateline, again yielding a split shape
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+    }
+
+    // Same orientation/dateline matrix as testParse_OGCPolygonWithoutHoles, but each
+    // shell carries one interior ring so hole handling is exercised alongside the split.
+    public void testParse_OGCPolygonWithHoles() throws IOException {
+        // test 1: ccw poly not crossing dateline
+        String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .startArray().value(174.0).value(10.0).endArray()
+                .startArray().value(-172.0).value(-8.0).endArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        XContentParser parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        Shape shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 2: ccw poly crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .startArray().value(-180.0).value(-8.0).endArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        // shell and hole both straddle the dateline; result must be a MultiPolygon
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+
+        // test 3: cw poly not crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(180.0).value(10.0).endArray()
+                .startArray().value(179.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(177.0).value(8.0).endArray()
+                .startArray().value(179.0).value(10.0).endArray()
+                .startArray().value(179.0).value(-8.0).endArray()
+                .startArray().value(177.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 4: cw poly crossing dateline
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(183.0).value(10.0).endArray()
+                .startArray().value(183.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(183.0).value(10.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .startArray().value(182.0).value(8.0).endArray()
+                .startArray().value(180.0).value(-8.0).endArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+    }
+
+    // Seven malformed-polygon cases; each must raise a specific exception rather than
+    // produce a shape. Note which cases yield ElasticsearchParseException (structural
+    // GeoJSON errors) versus IllegalArgumentException (null coordinate values).
+    public void testParse_invalidPolygon() throws IOException {
+        /**
+         * The following 3 test cases ensure proper error handling of invalid polygons
+         * per the GeoJSON specification
+         */
+        // test case 1: create an invalid polygon with only 2 points
+        String invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(-74.011).value(40.753).endArray()
+                .startArray().value(-75.022).value(41.783).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+        XContentParser parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+        // test case 2: create an invalid polygon with only 1 point
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(-74.011).value(40.753).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+        // test case 3: create an invalid polygon with 0 points
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+        // test case 4: create an invalid polygon with null value points
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().nullValue().nullValue().endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        // null ordinates are a value-level problem, hence IllegalArgumentException
+        ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class);
+
+        // test case 5: create an invalid polygon with 1 invalid LinearRing
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .nullValue().nullValue()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class);
+
+        // test case 6: create an invalid polygon with 0 LinearRings
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates").endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+
+        // test case 7: create an invalid polygon whose coordinates are a bare point
+        // array instead of being nested inside a LinearRing
+        invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon")
+                .startArray("coordinates")
+                .startArray().value(-74.011).value(40.753).endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(invalidPoly);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class);
+    }
+
+    // Parses a Polygon with one hole and compares against a hand-built JTS polygon.
+    public void testParse_polygonWithHole() throws IOException {
+        String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(100.0).value(1.0).endArray()
+                .startArray().value(101.0).value(1.0).endArray()
+                .startArray().value(101.0).value(0.0).endArray()
+                .startArray().value(100.0).value(0.0).endArray()
+                .startArray().value(100.0).value(1.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(100.2).value(0.8).endArray()
+                .startArray().value(100.2).value(0.2).endArray()
+                .startArray().value(100.8).value(0.2).endArray()
+                .startArray().value(100.8).value(0.8).endArray()
+                .startArray().value(100.2).value(0.8).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        // add 3d point to test ISSUE #10501
+        // NOTE(review): the parsed GeoJSON above is 2d; the z values below only appear in
+        // the expected geometry, so equality presumably ignores the z-ordinate — confirm
+        List<Coordinate> shellCoordinates = new ArrayList<>();
+        shellCoordinates.add(new Coordinate(100, 0, 15.0));
+        shellCoordinates.add(new Coordinate(101, 0));
+        shellCoordinates.add(new Coordinate(101, 1));
+        shellCoordinates.add(new Coordinate(100, 1, 10.0));
+        shellCoordinates.add(new Coordinate(100, 0));
+
+        List<Coordinate> holeCoordinates = new ArrayList<>();
+        holeCoordinates.add(new Coordinate(100.2, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+        LinearRing shell = GEOMETRY_FACTORY.createLinearRing(
+                shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+        LinearRing[] holes = new LinearRing[1];
+        holes[0] = GEOMETRY_FACTORY.createLinearRing(
+                holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+        Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes);
+        assertGeometryEquals(jtsGeom(expected), polygonGeoJson);
+    }
+
+    // A self-intersecting ring must be rejected with InvalidShapeException.
+    public void testParse_selfCrossingPolygon() throws IOException {
+        // test self crossing ccw poly not crossing dateline
+        String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(-177.0).value(15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        XContentParser parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class);
+    }
+
+    // A two-element MultiPoint parses into a ShapeCollection of spatial4j points.
+    public void testParse_multiPoint() throws IOException {
+        String multiPointGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPoint")
+                .startArray("coordinates")
+                .startArray().value(100.0).value(0.0).endArray()
+                .startArray().value(101.0).value(1.0).endArray()
+                .endArray()
+                .endObject().string();
+
+        ShapeCollection expected = shapeCollection(
+                SPATIAL_CONTEXT.makePoint(100, 0),
+                SPATIAL_CONTEXT.makePoint(101, 1.0));
+        assertGeometryEquals(expected, multiPointGeoJson);
+    }
+
+    // Two MultiPolygon scenarios: (1) two polygons, one with a hole, compared against a
+    // ShapeCollection; (2) a single-member MultiPolygon, which the parser collapses to a
+    // plain polygon (jtsGeom) rather than a collection.
+    public void testParse_multiPolygon() throws IOException {
+        // test #1: two polygons; one without hole, one with hole
+        String multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
+                .startArray("coordinates")
+                    .startArray()//first poly (without holes)
+                        .startArray()
+                            .startArray().value(102.0).value(2.0).endArray()
+                            .startArray().value(103.0).value(2.0).endArray()
+                            .startArray().value(103.0).value(3.0).endArray()
+                            .startArray().value(102.0).value(3.0).endArray()
+                            .startArray().value(102.0).value(2.0).endArray()
+                        .endArray()
+                    .endArray()
+                    .startArray()//second poly (with hole)
+                        .startArray()
+                            .startArray().value(100.0).value(0.0).endArray()
+                            .startArray().value(101.0).value(0.0).endArray()
+                            .startArray().value(101.0).value(1.0).endArray()
+                            .startArray().value(100.0).value(1.0).endArray()
+                            .startArray().value(100.0).value(0.0).endArray()
+                        .endArray()
+                        .startArray()//hole
+                            .startArray().value(100.2).value(0.8).endArray()
+                            .startArray().value(100.2).value(0.2).endArray()
+                            .startArray().value(100.8).value(0.2).endArray()
+                            .startArray().value(100.8).value(0.8).endArray()
+                            .startArray().value(100.2).value(0.8).endArray()
+                        .endArray()
+                    .endArray()
+                .endArray()
+                .endObject().string();
+
+        List<Coordinate> shellCoordinates = new ArrayList<>();
+        shellCoordinates.add(new Coordinate(100, 0));
+        shellCoordinates.add(new Coordinate(101, 0));
+        shellCoordinates.add(new Coordinate(101, 1));
+        shellCoordinates.add(new Coordinate(100, 1));
+        shellCoordinates.add(new Coordinate(100, 0));
+
+        List<Coordinate> holeCoordinates = new ArrayList<>();
+        holeCoordinates.add(new Coordinate(100.2, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.2));
+
+        LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+        LinearRing[] holes = new LinearRing[1];
+        holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+        Polygon withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
+
+        shellCoordinates = new ArrayList<>();
+        shellCoordinates.add(new Coordinate(102, 3));
+        shellCoordinates.add(new Coordinate(103, 3));
+        shellCoordinates.add(new Coordinate(103, 2));
+        shellCoordinates.add(new Coordinate(102, 2));
+        shellCoordinates.add(new Coordinate(102, 3));
+
+
+        shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+        Polygon withoutHoles = GEOMETRY_FACTORY.createPolygon(shell, null);
+
+        Shape expected = shapeCollection(withoutHoles, withHoles);
+
+        assertGeometryEquals(expected, multiPolygonGeoJson);
+
+        // test #2: multipolygon; one polygon with one hole
+        // this test converting the multipolygon from a ShapeCollection type
+        // to a simple polygon (jtsGeom)
+        multiPolygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "MultiPolygon")
+                .startArray("coordinates")
+                    .startArray()
+                        .startArray()
+                            .startArray().value(100.0).value(1.0).endArray()
+                            .startArray().value(101.0).value(1.0).endArray()
+                            .startArray().value(101.0).value(0.0).endArray()
+                            .startArray().value(100.0).value(0.0).endArray()
+                            .startArray().value(100.0).value(1.0).endArray()
+                        .endArray()
+                        .startArray()// hole
+                            .startArray().value(100.2).value(0.8).endArray()
+                            .startArray().value(100.2).value(0.2).endArray()
+                            .startArray().value(100.8).value(0.2).endArray()
+                            .startArray().value(100.8).value(0.8).endArray()
+                            .startArray().value(100.2).value(0.8).endArray()
+                        .endArray()
+                    .endArray()
+                .endArray()
+                .endObject().string();
+
+        shellCoordinates = new ArrayList<>();
+        shellCoordinates.add(new Coordinate(100, 1));
+        shellCoordinates.add(new Coordinate(101, 1));
+        shellCoordinates.add(new Coordinate(101, 0));
+        shellCoordinates.add(new Coordinate(100, 0));
+        shellCoordinates.add(new Coordinate(100, 1));
+
+        holeCoordinates = new ArrayList<>();
+        holeCoordinates.add(new Coordinate(100.2, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.2));
+        holeCoordinates.add(new Coordinate(100.8, 0.8));
+        holeCoordinates.add(new Coordinate(100.2, 0.8));
+
+        shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]));
+        holes = new LinearRing[1];
+        holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()]));
+        withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes);
+
+        assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson);
+    }
+
+    // A GeometryCollection of a LineString and a Point parses into a ShapeCollection
+    // whose members appear in document order.
+    public void testParse_geometryCollection() throws IOException {
+        String geometryCollectionGeoJson = XContentFactory.jsonBuilder().startObject()
+                .field("type", "GeometryCollection")
+                .startArray("geometries")
+                    .startObject()
+                        .field("type", "LineString")
+                        .startArray("coordinates")
+                            .startArray().value(100.0).value(0.0).endArray()
+                            .startArray().value(101.0).value(1.0).endArray()
+                        .endArray()
+                    .endObject()
+                    .startObject()
+                        .field("type", "Point")
+                        .startArray("coordinates").value(102.0).value(2.0).endArray()
+                    .endObject()
+                .endArray()
+                .endObject()
+                .string();
+
+        Shape[] expected = new Shape[2];
+        LineString expectedLineString = GEOMETRY_FACTORY.createLineString(new Coordinate[]{
+                new Coordinate(100, 0),
+                new Coordinate(101, 1),
+        });
+        expected[0] = jtsGeom(expectedLineString);
+        Point expectedPoint = GEOMETRY_FACTORY.createPoint(new Coordinate(102.0, 2.0));
+        expected[1] = new JtsPoint(expectedPoint, SPATIAL_CONTEXT);
+
+        //equals returns true only if geometries are in the same order
+        assertGeometryEquals(shapeCollection(expected), geometryCollectionGeoJson);
+    }
+
+    // The parser must pick the top-level "type"/"coordinates" pair and ignore unrelated
+    // members ("crs", "bbox", unknown fields) as well as look-alike keys nested in
+    // sub-objects ("nested.coordinates", "lala.type").
+    public void testThatParserExtractsCorrectTypeAndCoordinatesFromArbitraryJson() throws IOException {
+        String pointGeoJson = XContentFactory.jsonBuilder().startObject()
+                .startObject("crs")
+                    .field("type", "name")
+                    .startObject("properties")
+                        .field("name", "urn:ogc:def:crs:OGC:1.3:CRS84")
+                    .endObject()
+                .endObject()
+                .field("bbox", "foobar")
+                .field("type", "point")
+                .field("bubu", "foobar")
+                .startArray("coordinates").value(100.0).value(0.0).endArray()
+                .startObject("nested").startArray("coordinates").value(200.0).value(0.0).endArray().endObject()
+                .startObject("lala").field("type", "NotAPoint").endObject()
+                .endObject().string();
+
+        Point expected = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0));
+        assertGeometryEquals(new JtsPoint(expected, SPATIAL_CONTEXT), pointGeoJson);
+    }
+
+    // Exercises every accepted spelling of the non-standard "orientation" field:
+    // right/ccw/counterclockwise keep the ring as one polygon, while
+    // left/cw/clockwise invert the winding interpretation and (for these rings,
+    // which straddle the dateline) produce a MultiPolygon.
+    public void testParse_orientationOption() throws IOException {
+        // test 1: valid ccw (right handed system) poly not crossing dateline (with 'right' field)
+        String polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "right")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .startArray().value(174.0).value(10.0).endArray()
+                .startArray().value(-172.0).value(-8.0).endArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        XContentParser parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        Shape shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 2: valid ccw (right handed system) poly not crossing dateline (with 'ccw' field)
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "ccw")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .startArray().value(174.0).value(10.0).endArray()
+                .startArray().value(-172.0).value(-8.0).endArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 3: valid ccw (right handed system) poly not crossing dateline (with 'counterclockwise' field)
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "counterclockwise")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .startArray().value(174.0).value(10.0).endArray()
+                .startArray().value(-172.0).value(-8.0).endArray()
+                .startArray().value(-172.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertPolygon(shape);
+
+        // test 4: valid cw (left handed system) poly crossing dateline (with 'left' field)
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "left")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .startArray().value(180.0).value(-8.0).endArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+
+        // test 5: valid cw multipoly (left handed system) poly crossing dateline (with 'cw' field)
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "cw")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .startArray().value(180.0).value(-8.0).endArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+
+        // test 6: valid cw multipoly (left handed system) poly crossing dateline (with 'clockwise' field)
+        polygonGeoJson = XContentFactory.jsonBuilder().startObject().field("type", "Polygon")
+                .field("orientation", "clockwise")
+                .startArray("coordinates")
+                .startArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .startArray().value(-177.0).value(10.0).endArray()
+                .startArray().value(-177.0).value(-10.0).endArray()
+                .startArray().value(176.0).value(-15.0).endArray()
+                .startArray().value(172.0).value(0.0).endArray()
+                .startArray().value(176.0).value(15.0).endArray()
+                .endArray()
+                .startArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .startArray().value(178.0).value(8.0).endArray()
+                .startArray().value(180.0).value(-8.0).endArray()
+                .startArray().value(-178.0).value(8.0).endArray()
+                .endArray()
+                .endArray()
+                .endObject().string();
+
+        parser = JsonXContent.jsonXContent.createParser(polygonGeoJson);
+        parser.nextToken();
+        shape = ShapeBuilder.parse(parser).build();
+
+        ElasticsearchGeoAssertions.assertMultiPolygon(shape);
+    }
+
+    // Parses the given GeoJSON string, builds the shape, and asserts geometric equality
+    // with {@code expected}.
+    private void assertGeometryEquals(Shape expected, String geoJson) throws IOException {
+        XContentParser parser = JsonXContent.jsonXContent.createParser(geoJson);
+        parser.nextToken();
+        ElasticsearchGeoAssertions.assertEquals(expected, ShapeBuilder.parse(parser).build());
+    }
+
+    // Wraps the given shapes in a ShapeCollection bound to the test's spatial context.
+    private ShapeCollection<Shape> shapeCollection(Shape... shapes) {
+        return new ShapeCollection<>(Arrays.asList(shapes), SPATIAL_CONTEXT);
+    }
+
+    // Converts each JTS geometry to a spatial4j shape, then wraps them in a
+    // ShapeCollection bound to the test's spatial context.
+    private ShapeCollection<Shape> shapeCollection(Geometry... geoms) {
+        List<Shape> shapes = new ArrayList<>(geoms.length);
+        for (Geometry geom : geoms) {
+            shapes.add(jtsGeom(geom));
+        }
+        return new ShapeCollection<>(shapes, SPATIAL_CONTEXT);
+    }
+
+    // Adapts a JTS geometry to spatial4j; both boolean flags (dateline180Check,
+    // allowMultiOverlap) are disabled so the geometry is taken as-is.
+    private JtsGeometry jtsGeom(Geometry geom) {
+        return new JtsGeometry(geom, SPATIAL_CONTEXT, false, false);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
new file mode 100644
index 0000000000..d82b1bdd14
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -0,0 +1,607 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo;
+
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Circle;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.impl.PointImpl;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.LineString;
+import com.vividsolutions.jts.geom.Polygon;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.*;
+/**
+ * Tests for {@link ShapeBuilder}
+ */
+public class ShapeBuilderTests extends ElasticsearchTestCase {
+
+ public void testNewPoint() {
+ Point point = ShapeBuilder.newPoint(-100, 45).build();
+ assertEquals(-100D, point.getX(), 0.0d);
+ assertEquals(45D, point.getY(), 0.0d);
+ }
+
+ public void testNewRectangle() {
+ Rectangle rectangle = ShapeBuilder.newEnvelope().topLeft(-45, 30).bottomRight(45, -30).build();
+ assertEquals(-45D, rectangle.getMinX(), 0.0d);
+ assertEquals(-30D, rectangle.getMinY(), 0.0d);
+ assertEquals(45D, rectangle.getMaxX(), 0.0d);
+ assertEquals(30D, rectangle.getMaxY(), 0.0d);
+ }
+
+ public void testNewPolygon() {
+ Polygon polygon = ShapeBuilder.newPolygon()
+ .point(-45, 30)
+ .point(45, 30)
+ .point(45, -30)
+ .point(-45, -30)
+ .point(-45, 30).toPolygon();
+
+ LineString exterior = polygon.getExteriorRing();
+ assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
+ assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
+ assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
+ assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
+ }
+
+ public void testNewPolygon_coordinate() {
+ Polygon polygon = ShapeBuilder.newPolygon()
+ .point(new Coordinate(-45, 30))
+ .point(new Coordinate(45, 30))
+ .point(new Coordinate(45, -30))
+ .point(new Coordinate(-45, -30))
+ .point(new Coordinate(-45, 30)).toPolygon();
+
+ LineString exterior = polygon.getExteriorRing();
+ assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
+ assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
+ assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
+ assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
+ }
+
+ public void testNewPolygon_coordinates() {
+ Polygon polygon = ShapeBuilder.newPolygon()
+ .points(new Coordinate(-45, 30), new Coordinate(45, 30), new Coordinate(45, -30), new Coordinate(-45, -30), new Coordinate(-45, 30)).toPolygon();
+
+ LineString exterior = polygon.getExteriorRing();
+ assertEquals(exterior.getCoordinateN(0), new Coordinate(-45, 30));
+ assertEquals(exterior.getCoordinateN(1), new Coordinate(45, 30));
+ assertEquals(exterior.getCoordinateN(2), new Coordinate(45, -30));
+ assertEquals(exterior.getCoordinateN(3), new Coordinate(-45, -30));
+ }
+
+ public void testLineStringBuilder() {
+ // Building a simple LineString
+ ShapeBuilder.newLineString()
+ .point(-130.0, 55.0)
+ .point(-130.0, -40.0)
+ .point(-15.0, -40.0)
+ .point(-20.0, 50.0)
+ .point(-45.0, 50.0)
+ .point(-45.0, -15.0)
+ .point(-110.0, -15.0)
+ .point(-110.0, 55.0).build();
+
+ // Building a linestring that needs to be wrapped
+ ShapeBuilder.newLineString()
+ .point(100.0, 50.0)
+ .point(110.0, -40.0)
+ .point(240.0, -40.0)
+ .point(230.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, -30.0)
+ .point(130.0, -30.0)
+ .point(130.0, 60.0)
+ .build();
+
+ // Building a lineString on the dateline
+ ShapeBuilder.newLineString()
+ .point(-180.0, 80.0)
+ .point(-180.0, 40.0)
+ .point(-180.0, -40.0)
+ .point(-180.0, -80.0)
+ .build();
+
+ // Building a lineString on the dateline
+ ShapeBuilder.newLineString()
+ .point(180.0, 80.0)
+ .point(180.0, 40.0)
+ .point(180.0, -40.0)
+ .point(180.0, -80.0)
+ .build();
+ }
+
+ public void testMultiLineString() {
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(-100.0, 50.0)
+ .point(50.0, 50.0)
+ .point(50.0, 20.0)
+ .point(-100.0, 20.0)
+ .end()
+ .linestring()
+ .point(-100.0, 20.0)
+ .point(50.0, 20.0)
+ .point(50.0, 0.0)
+ .point(-100.0, 0.0)
+ .end()
+ .build();
+
+
+        // LineString that needs to be wrapped
+ ShapeBuilder.newMultiLinestring()
+ .linestring()
+ .point(150.0, 60.0)
+ .point(200.0, 60.0)
+ .point(200.0, 40.0)
+ .point(150.0, 40.0)
+ .end()
+ .linestring()
+ .point(150.0, 20.0)
+ .point(200.0, 20.0)
+ .point(200.0, 0.0)
+ .point(150.0, 0.0)
+ .end()
+ .build();
+ }
+
+ @Test(expected = InvalidShapeException.class)
+ public void testPolygonSelfIntersection() {
+ ShapeBuilder.newPolygon()
+ .point(-40.0, 50.0)
+ .point(40.0, 50.0)
+ .point(-40.0, -50.0)
+ .point(40.0, -50.0)
+ .close().build();
+ }
+
+ public void testGeoCircle() {
+ double earthCircumference = 40075016.69;
+ Circle circle = ShapeBuilder.newCircleBuilder().center(0, 0).radius("100m").build();
+ assertEquals((360 * 100) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(0, 0, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ circle = ShapeBuilder.newCircleBuilder().center(+180, 0).radius("100m").build();
+ assertEquals((360 * 100) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(180, 0, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ circle = ShapeBuilder.newCircleBuilder().center(-180, 0).radius("100m").build();
+ assertEquals((360 * 100) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(-180, 0, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ circle = ShapeBuilder.newCircleBuilder().center(0, 90).radius("100m").build();
+ assertEquals((360 * 100) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(0, 90, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ circle = ShapeBuilder.newCircleBuilder().center(0, -90).radius("100m").build();
+ assertEquals((360 * 100) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(0, -90, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ double randomLat = (randomDouble() * 180) - 90;
+ double randomLon = (randomDouble() * 360) - 180;
+ double randomRadius = randomIntBetween(1, (int) earthCircumference / 4);
+ circle = ShapeBuilder.newCircleBuilder().center(randomLon, randomLat).radius(randomRadius + "m").build();
+ assertEquals((360 * randomRadius) / earthCircumference, circle.getRadius(), 0.00000001);
+ assertEquals(new PointImpl(randomLon, randomLat, ShapeBuilder.SPATIAL_CONTEXT), circle.getCenter());
+ }
+
+ public void testPolygonWrapping() {
+ Shape shape = ShapeBuilder.newPolygon()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .close().build();
+
+ assertMultiPolygon(shape);
+ }
+
+ public void testLineStringWrapping() {
+ Shape shape = ShapeBuilder.newLineString()
+ .point(-150.0, 65.0)
+ .point(-250.0, 65.0)
+ .point(-250.0, -65.0)
+ .point(-150.0, -65.0)
+ .build();
+ assertMultiLineString(shape);
+ }
+
+ public void testDatelineOGC() {
+ // tests that the following shape (defined in counterclockwise OGC order)
+ // https://gist.github.com/anonymous/7f1bb6d7e9cd72f5977c crosses the dateline
+ // expected results: 3 polygons, 1 with a hole
+
+ // a giant c shape
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(174,0)
+ .point(-176,0)
+ .point(-176,3)
+ .point(177,3)
+ .point(177,5)
+ .point(-176,5)
+ .point(-176,8)
+ .point(174,8)
+ .point(174,0);
+
+ // 3/4 of an embedded 'c', crossing dateline once
+ builder.hole()
+ .point(175, 1)
+ .point(175, 7)
+ .point(-178, 7)
+ .point(-178, 6)
+ .point(176, 6)
+ .point(176, 2)
+ .point(179, 2)
+ .point(179,1)
+ .point(175, 1);
+
+ // embedded hole right of the dateline
+ builder.hole()
+ .point(-179, 1)
+ .point(-179, 2)
+ .point(-177, 2)
+ .point(-177,1)
+ .point(-179,1);
+
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ public void testDateline() {
+ // tests that the following shape (defined in clockwise non-OGC order)
+ // https://gist.github.com/anonymous/7f1bb6d7e9cd72f5977c crosses the dateline
+ // expected results: 3 polygons, 1 with a hole
+
+ // a giant c shape
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-186,0)
+ .point(-176,0)
+ .point(-176,3)
+ .point(-183,3)
+ .point(-183,5)
+ .point(-176,5)
+ .point(-176,8)
+ .point(-186,8)
+ .point(-186,0);
+
+ // 3/4 of an embedded 'c', crossing dateline once
+ builder.hole()
+ .point(-185,1)
+ .point(-181,1)
+ .point(-181,2)
+ .point(-184,2)
+ .point(-184,6)
+ .point(-178,6)
+ .point(-178,7)
+ .point(-185,7)
+ .point(-185,1);
+
+ // embedded hole right of the dateline
+ builder.hole()
+ .point(-179,1)
+ .point(-177,1)
+ .point(-177,2)
+ .point(-179,2)
+ .point(-179,1);
+
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ public void testComplexShapeWithHole() {
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-85.0018514,37.1311314)
+ .point(-85.0016645,37.1315293)
+ .point(-85.0016246,37.1317069)
+ .point(-85.0016526,37.1318183)
+ .point(-85.0017119,37.1319196)
+ .point(-85.0019371,37.1321182)
+ .point(-85.0019972,37.1322115)
+ .point(-85.0019942,37.1323234)
+ .point(-85.0019543,37.1324336)
+ .point(-85.001906,37.1324985)
+ .point(-85.001834,37.1325497)
+ .point(-85.0016965,37.1325907)
+ .point(-85.0016011,37.1325873)
+ .point(-85.0014816,37.1325353)
+ .point(-85.0011755,37.1323509)
+ .point(-85.000955,37.1322802)
+ .point(-85.0006241,37.1322529)
+ .point(-85.0000002,37.1322307)
+ .point(-84.9994,37.1323001)
+ .point(-84.999109,37.1322864)
+ .point(-84.998934,37.1322415)
+ .point(-84.9988639,37.1321888)
+ .point(-84.9987841,37.1320944)
+ .point(-84.9987208,37.131954)
+ .point(-84.998736,37.1316611)
+ .point(-84.9988091,37.131334)
+ .point(-84.9989283,37.1311337)
+ .point(-84.9991943,37.1309198)
+ .point(-84.9993573,37.1308459)
+ .point(-84.9995888,37.1307924)
+ .point(-84.9998746,37.130806)
+ .point(-85.0000002,37.1308358)
+ .point(-85.0004984,37.1310658)
+ .point(-85.0008008,37.1311625)
+ .point(-85.0009461,37.1311684)
+ .point(-85.0011373,37.1311515)
+ .point(-85.0016455,37.1310491)
+ .point(-85.0018514,37.1311314);
+
+ builder.hole()
+ .point(-85.0000002,37.1317672)
+ .point(-85.0001983,37.1317538)
+ .point(-85.0003378,37.1317582)
+ .point(-85.0004697,37.131792)
+ .point(-85.0008048,37.1319439)
+ .point(-85.0009342,37.1319838)
+ .point(-85.0010184,37.1319463)
+ .point(-85.0010618,37.13184)
+ .point(-85.0010057,37.1315102)
+ .point(-85.000977,37.1314403)
+ .point(-85.0009182,37.1313793)
+ .point(-85.0005366,37.1312209)
+ .point(-85.000224,37.1311466)
+ .point(-85.000087,37.1311356)
+ .point(-85.0000002,37.1311433)
+ .point(-84.9995021,37.1312336)
+ .point(-84.9993308,37.1312859)
+ .point(-84.9992567,37.1313252)
+ .point(-84.9991868,37.1314277)
+ .point(-84.9991593,37.1315381)
+ .point(-84.9991841,37.1316527)
+ .point(-84.9992329,37.1317117)
+ .point(-84.9993527,37.1317788)
+ .point(-84.9994931,37.1318061)
+ .point(-84.9996815,37.1317979)
+ .point(-85.0000002,37.1317672);
+
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+ }
+
+ public void testShapeWithHoleAtEdgeEndPoints() {
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-4, 2)
+ .point(4, 2)
+ .point(6, 0)
+ .point(4, -2)
+ .point(-4, -2)
+ .point(-6, 0)
+ .point(-4, 2);
+
+ builder.hole()
+ .point(4, 1)
+ .point(4, -1)
+ .point(-4, -1)
+ .point(-4, 1)
+ .point(4, 1);
+
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+ }
+
+ public void testShapeWithPointOnDateline() {
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(180, 0)
+ .point(176, 4)
+ .point(176, -4)
+ .point(180, 0);
+
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+ }
+
+ public void testShapeWithEdgeAlongDateline() {
+ // test case 1: test the positive side of the dateline
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(180, 0)
+ .point(176, 4)
+ .point(180, -4)
+ .point(180, 0);
+
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+
+ // test case 2: test the negative side of the dateline
+ builder = ShapeBuilder.newPolygon()
+ .point(-176, 4)
+ .point(-180, 0)
+ .point(-180, -4)
+ .point(-176, 4);
+
+ shape = builder.close().build();
+ assertPolygon(shape);
+ }
+
+ public void testShapeWithBoundaryHoles() {
+ // test case 1: test the positive side of the dateline
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-177, 10)
+ .point(176, 15)
+ .point(172, 0)
+ .point(176, -15)
+ .point(-177, -10)
+ .point(-177, 10);
+ builder.hole()
+ .point(176, 10)
+ .point(180, 5)
+ .point(180, -5)
+ .point(176, -10)
+ .point(176, 10);
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+
+ // test case 2: test the negative side of the dateline
+ builder = ShapeBuilder.newPolygon()
+ .point(-176, 15)
+ .point(179, 10)
+ .point(179, -10)
+ .point(-176, -15)
+ .point(-172, 0);
+ builder.hole()
+ .point(-176, 10)
+ .point(-176, -10)
+ .point(-180, -5)
+ .point(-180, 5)
+ .point(-176, 10);
+ shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ public void testShapeWithTangentialHole() {
+ // test a shape with one tangential (shared) vertex (should pass)
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(179, 10)
+ .point(168, 15)
+ .point(164, 0)
+ .point(166, -15)
+ .point(179, -10)
+ .point(179, 10);
+ builder.hole()
+ .point(-177, 10)
+ .point(-178, -10)
+ .point(-180, -5)
+ .point(-180, 5)
+ .point(-177, 10);
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ @Test(expected = InvalidShapeException.class)
+ public void testShapeWithInvalidTangentialHole() {
+ // test a shape with one invalid tangential (shared) vertex (should throw exception)
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(179, 10)
+ .point(168, 15)
+ .point(164, 0)
+ .point(166, -15)
+ .point(179, -10)
+ .point(179, 10);
+ builder.hole()
+ .point(164, 0)
+ .point(175, 10)
+ .point(175, 5)
+ .point(179, -10)
+ .point(164, 0);
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ public void testBoundaryShapeWithTangentialHole() {
+ // test a shape with one tangential (shared) vertex for each hole (should pass)
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-177, 10)
+ .point(176, 15)
+ .point(172, 0)
+ .point(176, -15)
+ .point(-177, -10)
+ .point(-177, 10);
+ builder.hole()
+ .point(-177, 10)
+ .point(-178, -10)
+ .point(-180, -5)
+ .point(-180, 5)
+ .point(-177, 10);
+ builder.hole()
+ .point(172, 0)
+ .point(176, 10)
+ .point(176, -5)
+ .point(172, 0);
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ @Test(expected = InvalidShapeException.class)
+ public void testBoundaryShapeWithInvalidTangentialHole() {
+ // test shape with two tangential (shared) vertices (should throw exception)
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-177, 10)
+ .point(176, 15)
+ .point(172, 0)
+ .point(176, -15)
+ .point(-177, -10)
+ .point(-177, 10);
+ builder.hole()
+ .point(-177, 10)
+ .point(172, 0)
+ .point(180, -5)
+ .point(176, -10)
+ .point(-177, 10);
+ Shape shape = builder.close().build();
+ assertMultiPolygon(shape);
+ }
+
+ /**
+ * Test an enveloping polygon around the max mercator bounds
+ */
+ @Test
+ public void testBoundaryShape() {
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-180, 90)
+ .point(180, 90)
+ .point(180, -90)
+ .point(-180, -90);
+
+ Shape shape = builder.close().build();
+
+ assertPolygon(shape);
+ }
+
+ @Test
+ public void testShapeWithAlternateOrientation() {
+ // cw: should produce a multi polygon spanning hemispheres
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(180, 0)
+ .point(176, 4)
+ .point(-176, 4)
+ .point(180, 0);
+
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+
+ // cw: geo core will convert to ccw across the dateline
+ builder = ShapeBuilder.newPolygon()
+ .point(180, 0)
+ .point(-176, 4)
+ .point(176, 4)
+ .point(180, 0);
+
+ shape = builder.close().build();
+
+ assertMultiPolygon(shape);
+ }
+
+ @Test(expected = InvalidShapeException.class)
+ public void testInvalidShapeWithConsecutiveDuplicatePoints() {
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(180, 0)
+ .point(176, 4)
+ .point(176, 4)
+ .point(-176, 4)
+ .point(180, 0);
+ Shape shape = builder.close().build();
+ assertPolygon(shape);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/hashing/MurmurHash3Tests.java b/core/src/test/java/org/elasticsearch/common/hashing/MurmurHash3Tests.java
new file mode 100644
index 0000000000..6d3f5c8d05
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/hashing/MurmurHash3Tests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.hashing;
+
+import com.google.common.hash.HashCode;
+import com.google.common.hash.Hashing;
+import org.elasticsearch.common.hash.MurmurHash3;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.LongBuffer;
+
+public class MurmurHash3Tests extends ElasticsearchTestCase {
+
+ public void testHash128() {
+ final int iters = scaledRandomIntBetween(100, 5000);
+ for (int i = 0; i < iters; ++i) {
+ final int seed = randomInt();
+ final int offset = randomInt(20);
+ final int len = randomInt(randomBoolean() ? 20 : 200);
+ final byte[] bytes = new byte[len + offset + randomInt(3)];
+ getRandom().nextBytes(bytes);
+ HashCode h1 = Hashing.murmur3_128(seed).hashBytes(bytes, offset, len);
+ MurmurHash3.Hash128 h2 = MurmurHash3.hash128(bytes, offset, len, seed, new MurmurHash3.Hash128());
+ assertEquals(h1, h2);
+ }
+ }
+
+ private void assertEquals(HashCode h1, MurmurHash3.Hash128 h2) {
+ final LongBuffer longs = ByteBuffer.wrap(h1.asBytes()).order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
+ assertEquals(2, longs.limit());
+ assertEquals(h1.asLong(), h2.h1);
+ assertEquals(longs.get(), h2.h1);
+ assertEquals(longs.get(), h2.h2);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java b/core/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
new file mode 100644
index 0000000000..f3534da9f3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/hppc/HppcMapsTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.hppc;
+
+import com.carrotsearch.hppc.ObjectHashSet;
+import org.elasticsearch.common.collect.HppcMaps;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class HppcMapsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testIntersection() throws Exception {
+ boolean enabled = false;
+ assert enabled = true;
+ assumeTrue("assertions enabled", enabled);
+ ObjectHashSet<String> set1 = ObjectHashSet.from("1", "2", "3");
+ ObjectHashSet<String> set2 = ObjectHashSet.from("1", "2", "3");
+ List<String> values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(3));
+ assertThat(values.contains("1"), equalTo(true));
+ assertThat(values.contains("2"), equalTo(true));
+ assertThat(values.contains("3"), equalTo(true));
+
+ set1 = ObjectHashSet.from("1", "2", "3");
+ set2 = ObjectHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(1));
+ assertThat(values.get(0), equalTo("3"));
+
+ set1 = ObjectHashSet.from("1", "2", "3");
+ set2 = ObjectHashSet.from("4", "5", "6");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectHashSet.from();
+ set2 = ObjectHashSet.from("3", "4", "5");
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectHashSet.from("1", "2", "3");
+ set2 = ObjectHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = ObjectHashSet.from();
+ set2 = ObjectHashSet.from();
+ values = toList(HppcMaps.intersection(set1, set2));
+ assertThat(values.size(), equalTo(0));
+
+ set1 = null;
+ set2 = ObjectHashSet.from();
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = ObjectHashSet.from();
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+
+ set1 = null;
+ set2 = null;
+ try {
+ toList(HppcMaps.intersection(set1, set2));
+ fail();
+ } catch (AssertionError e) {}
+ }
+
+ private List<String> toList(Iterable<String> iterable) {
+ List<String> list = new ArrayList<>();
+ for (String s : iterable) {
+ list.add(s);
+ }
+ return list;
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java b/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java
new file mode 100644
index 0000000000..aa8c56cc97
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/io/FileSystemUtilsTests.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import com.google.common.base.Charsets;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressFileSystems;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/**
+ * Unit tests for {@link org.elasticsearch.common.io.FileSystemUtils}.
+ */
+@SuppressFileSystems("WindowsFS") // tries to move away open file handles
+public class FileSystemUtilsTests extends ElasticsearchTestCase {
+
+ private Path src;
+ private Path dst;
+
+ @Before
+ public void copySourceFilesToTarget() throws IOException, URISyntaxException {
+ src = createTempDir();
+ dst = createTempDir();
+ Files.createDirectories(src);
+ Files.createDirectories(dst);
+
+        // We first copy the source test files from src/test/resources,
+        // because when the test runs, the src files are moved to their destination
+ final Path path = getDataPath("/org/elasticsearch/common/io/copyappend");
+ FileSystemUtils.copyDirectoryRecursively(path, src);
+ }
+
+ @Test
+ public void testMoveOverExistingFileAndAppend() throws IOException {
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dst, ".new");
+ assertFileContent(dst, "file1.txt", "version1");
+ assertFileContent(dst, "dir/file2.txt", "version1");
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dst, ".new");
+ assertFileContent(dst, "file1.txt", "version1");
+ assertFileContent(dst, "dir/file2.txt", "version1");
+ assertFileContent(dst, "file1.txt.new", "version2");
+ assertFileContent(dst, "dir/file2.txt.new", "version2");
+ assertFileContent(dst, "file3.txt", "version1");
+ assertFileContent(dst, "dir/subdir/file4.txt", "version1");
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dst, ".new");
+ assertFileContent(dst, "file1.txt", "version1");
+ assertFileContent(dst, "dir/file2.txt", "version1");
+ assertFileContent(dst, "file1.txt.new", "version3");
+ assertFileContent(dst, "dir/file2.txt.new", "version3");
+ assertFileContent(dst, "file3.txt", "version1");
+ assertFileContent(dst, "dir/subdir/file4.txt", "version1");
+ assertFileContent(dst, "file3.txt.new", "version2");
+ assertFileContent(dst, "dir/subdir/file4.txt.new", "version2");
+ assertFileContent(dst, "dir/subdir/file5.txt", "version1");
+ }
+
+ @Test
+ public void testMoveOverExistingFileAndIgnore() throws IOException {
+ Path dest = createTempDir();
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dest, null);
+ assertFileContent(dest, "file1.txt", "version1");
+ assertFileContent(dest, "dir/file2.txt", "version1");
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dest, null);
+ assertFileContent(dest, "file1.txt", "version1");
+ assertFileContent(dest, "dir/file2.txt", "version1");
+ assertFileContent(dest, "file1.txt.new", null);
+ assertFileContent(dest, "dir/file2.txt.new", null);
+ assertFileContent(dest, "file3.txt", "version1");
+ assertFileContent(dest, "dir/subdir/file4.txt", "version1");
+
+ FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dest, null);
+ assertFileContent(dest, "file1.txt", "version1");
+ assertFileContent(dest, "dir/file2.txt", "version1");
+ assertFileContent(dest, "file1.txt.new", null);
+ assertFileContent(dest, "dir/file2.txt.new", null);
+ assertFileContent(dest, "file3.txt", "version1");
+ assertFileContent(dest, "dir/subdir/file4.txt", "version1");
+ assertFileContent(dest, "file3.txt.new", null);
+ assertFileContent(dest, "dir/subdir/file4.txt.new", null);
+ assertFileContent(dest, "dir/subdir/file5.txt", "version1");
+ }
+
+ @Test
+ public void testMoveFilesDoesNotCreateSameFileWithSuffix() throws Exception {
+ Path[] dirs = new Path[] { createTempDir(), createTempDir(), createTempDir()};
+ for (Path dir : dirs) {
+ Files.write(dir.resolve("file1.txt"), "file1".getBytes(Charsets.UTF_8));
+ Files.createDirectory(dir.resolve("dir"));
+ Files.write(dir.resolve("dir").resolve("file2.txt"), "file2".getBytes(Charsets.UTF_8));
+ }
+
+ FileSystemUtils.moveFilesWithoutOverwriting(dirs[0], dst, ".new");
+ assertFileContent(dst, "file1.txt", "file1");
+ assertFileContent(dst, "dir/file2.txt", "file2");
+
+ // do the same operation again, make sure, no .new files have been added
+ FileSystemUtils.moveFilesWithoutOverwriting(dirs[1], dst, ".new");
+ assertFileContent(dst, "file1.txt", "file1");
+ assertFileContent(dst, "dir/file2.txt", "file2");
+ assertFileNotExists(dst.resolve("file1.txt.new"));
+ assertFileNotExists(dst.resolve("dir").resolve("file2.txt.new"));
+
+ // change file content, make sure it gets updated
+ Files.write(dirs[2].resolve("dir").resolve("file2.txt"), "UPDATED".getBytes(Charsets.UTF_8));
+ FileSystemUtils.moveFilesWithoutOverwriting(dirs[2], dst, ".new");
+ assertFileContent(dst, "file1.txt", "file1");
+ assertFileContent(dst, "dir/file2.txt", "file2");
+ assertFileContent(dst, "dir/file2.txt.new", "UPDATED");
+ }
+
+ /**
+ * Check that a file contains a given String
+ * @param dir root dir for file
+ * @param filename relative path from root dir to file
+ * @param expected expected content (if null, we don't expect any file)
+ */
+ public static void assertFileContent(Path dir, String filename, String expected) throws IOException {
+ Assert.assertThat(Files.exists(dir), is(true));
+ Path file = dir.resolve(filename);
+ if (expected == null) {
+ Assert.assertThat("file [" + file + "] should not exist.", Files.exists(file), is(false));
+ } else {
+ assertFileExists(file);
+ String fileContent = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
+ // trim the string content to prevent different handling on windows vs. unix and CR chars...
+ Assert.assertThat(fileContent.trim(), equalTo(expected.trim()));
+ }
+ }
+
+ @Test
+ public void testAppend() {
+ assertEquals(FileSystemUtils.append(PathUtils.get("/foo/bar"), PathUtils.get("/hello/world/this_is/awesome"), 0),
+ PathUtils.get("/foo/bar/hello/world/this_is/awesome"));
+
+ assertEquals(FileSystemUtils.append(PathUtils.get("/foo/bar"), PathUtils.get("/hello/world/this_is/awesome"), 2),
+ PathUtils.get("/foo/bar/this_is/awesome"));
+
+ assertEquals(FileSystemUtils.append(PathUtils.get("/foo/bar"), PathUtils.get("/hello/world/this_is/awesome"), 1),
+ PathUtils.get("/foo/bar/world/this_is/awesome"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/io/StreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/StreamsTests.java
new file mode 100644
index 0000000000..2528b25a92
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/io/StreamsTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Unit tests for {@link org.elasticsearch.common.io.Streams}.
+ */
+public class StreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCopyFromInputStream() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayInputStream in = new ByteArrayInputStream(content);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ long count = copy(in, out);
+
+ assertThat(count, equalTo((long) content.length));
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromByteArray() throws IOException {
+ byte[] content = "content".getBytes(Charsets.UTF_8);
+ ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
+ copy(content, out);
+ assertThat(Arrays.equals(content, out.toByteArray()), equalTo(true));
+ }
+
+ @Test
+ public void testCopyFromReader() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ StringWriter out = new StringWriter();
+ int count = copy(in, out);
+ assertThat(content.length(), equalTo(count));
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyFromString() throws IOException {
+ String content = "content";
+ StringWriter out = new StringWriter();
+ copy(content, out);
+ assertThat(out.toString(), equalTo(content));
+ }
+
+ @Test
+ public void testCopyToString() throws IOException {
+ String content = "content";
+ StringReader in = new StringReader(content);
+ String result = copyToString(in);
+ assertThat(result, equalTo(content));
+ }
+
+ @Test
+ public void testBytesStreamInput() throws IOException {
+ byte stuff[] = new byte[] { 0, 1, 2, 3 };
+ BytesRef stuffRef = new BytesRef(stuff, 2, 2);
+ BytesArray stuffArray = new BytesArray(stuffRef);
+ StreamInput input = StreamInput.wrap(stuffArray);
+ assertEquals(2, input.read());
+ assertEquals(3, input.read());
+ assertEquals(-1, input.read());
+ input.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/io/rootdir.properties b/core/src/test/java/org/elasticsearch/common/io/rootdir.properties
new file mode 100644
index 0000000000..d816feb074
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/io/rootdir.properties
@@ -0,0 +1 @@
+copyappend.root.dir=${basedir}/src/test/resources/org/elasticsearch/common/io/copyappend
diff --git a/core/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
new file mode 100644
index 0000000000..74a97ecc5f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.streams;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.lucene.BytesRefs;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for {@link BytesStreamOutput} paging behaviour.
+ */
+public class BytesStreamsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testEmpty() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ // test empty stream to array
+ assertEquals(0, out.size());
+ assertEquals(0, out.bytes().toBytes().length);
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleByte() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+ assertEquals(0, out.size());
+
+ int expectedSize = 1;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write single byte
+ out.writeByte(expectedData[0]);
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleShortPage() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int expectedSize = 10;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write byte-by-byte
+ for (int i = 0; i < expectedSize; i++) {
+ out.writeByte(expectedData[i]);
+ }
+
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testIllegalBulkWrite() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ // bulk-write with wrong args
+ try {
+ out.writeBytes(new byte[]{}, 0, 1);
+ fail("expected IllegalArgumentException: length > (size-offset)");
+ }
+ catch (IllegalArgumentException iax1) {
+ // expected
+ }
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleShortPageBulkWrite() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ // first bulk-write empty array: should not change anything
+ int expectedSize = 0;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+ out.writeBytes(expectedData);
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ // bulk-write again with actual bytes
+ expectedSize = 10;
+ expectedData = randomizedByteArrayWithSize(expectedSize);
+ out.writeBytes(expectedData);
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleFullPageBulkWrite() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int expectedSize = BigArrays.BYTE_PAGE_SIZE;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write in bulk
+ out.writeBytes(expectedData);
+
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleFullPageBulkWriteWithOffset() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int initialOffset = 10;
+ int additionalLength = BigArrays.BYTE_PAGE_SIZE;
+ byte[] expectedData = randomizedByteArrayWithSize(initialOffset + additionalLength);
+
+ // first create initial offset
+ out.writeBytes(expectedData, 0, initialOffset);
+ assertEquals(initialOffset, out.size());
+
+ // now write the rest - more than fits into the remaining first page
+ out.writeBytes(expectedData, initialOffset, additionalLength);
+ assertEquals(expectedData.length, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleFullPageBulkWriteWithOffsetCrossover() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int initialOffset = 10;
+ int additionalLength = BigArrays.BYTE_PAGE_SIZE * 2;
+ byte[] expectedData = randomizedByteArrayWithSize(initialOffset + additionalLength);
+ out.writeBytes(expectedData, 0, initialOffset);
+ assertEquals(initialOffset, out.size());
+
+ // now write the rest - more than fits into the remaining page + a full page after
+ // that,
+ // ie. we cross over into a third
+ out.writeBytes(expectedData, initialOffset, additionalLength);
+ assertEquals(expectedData.length, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSingleFullPage() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int expectedSize = BigArrays.BYTE_PAGE_SIZE;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write byte-by-byte
+ for (int i = 0; i < expectedSize; i++) {
+ out.writeByte(expectedData[i]);
+ }
+
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testOneFullOneShortPage() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int expectedSize = BigArrays.BYTE_PAGE_SIZE + 10;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write byte-by-byte
+ for (int i = 0; i < expectedSize; i++) {
+ out.writeByte(expectedData[i]);
+ }
+
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testTwoFullOneShortPage() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int expectedSize = (BigArrays.BYTE_PAGE_SIZE * 2) + 1;
+ byte[] expectedData = randomizedByteArrayWithSize(expectedSize);
+
+ // write byte-by-byte
+ for (int i = 0; i < expectedSize; i++) {
+ out.writeByte(expectedData[i]);
+ }
+
+ assertEquals(expectedSize, out.size());
+ assertArrayEquals(expectedData, out.bytes().toBytes());
+
+ out.close();
+ }
+
+ @Test
+ public void testSeek() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int position = 0;
+ assertEquals(position, out.position());
+
+ out.seek(position += 10);
+ out.seek(position += BigArrays.BYTE_PAGE_SIZE);
+ out.seek(position += BigArrays.BYTE_PAGE_SIZE + 10);
+ out.seek(position += BigArrays.BYTE_PAGE_SIZE * 2);
+ assertEquals(position, out.position());
+ assertEquals(position, out.bytes().toBytes().length);
+
+ out.close();
+ }
+
+ @Test
+ public void testSkip() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ int position = 0;
+ assertEquals(position, out.position());
+
+ int forward = 100;
+ out.skip(forward);
+ assertEquals(position + forward, out.position());
+
+ out.close();
+ }
+
+ @Test
+ public void testSimpleStreams() throws Exception {
+ assumeTrue("requires a 64-bit JRE ... ?!", Constants.JRE_IS_64BIT);
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.writeBoolean(false);
+ out.writeByte((byte) 1);
+ out.writeShort((short) -1);
+ out.writeInt(-1);
+ out.writeVInt(2);
+ out.writeLong(-3);
+ out.writeVLong(4);
+ out.writeFloat(1.1f);
+ out.writeDouble(2.2);
+ int[] intArray = {1, 2, 3};
+ out.writeGenericValue(intArray);
+ long[] longArray = {1, 2, 3};
+ out.writeGenericValue(longArray);
+ float[] floatArray = {1.1f, 2.2f, 3.3f};
+ out.writeGenericValue(floatArray);
+ double[] doubleArray = {1.1, 2.2, 3.3};
+ out.writeGenericValue(doubleArray);
+ out.writeString("hello");
+ out.writeString("goodbye");
+ out.writeGenericValue(BytesRefs.toBytesRef("bytesref"));
+ StreamInput in = StreamInput.wrap(out.bytes().toBytes());
+ assertThat(in.readBoolean(), equalTo(false));
+ assertThat(in.readByte(), equalTo((byte)1));
+ assertThat(in.readShort(), equalTo((short)-1));
+ assertThat(in.readInt(), equalTo(-1));
+ assertThat(in.readVInt(), equalTo(2));
+ assertThat(in.readLong(), equalTo((long)-3));
+ assertThat(in.readVLong(), equalTo((long)4));
+ assertThat((double)in.readFloat(), closeTo(1.1, 0.0001));
+ assertThat(in.readDouble(), closeTo(2.2, 0.0001));
+ assertThat(in.readGenericValue(), equalTo((Object) intArray));
+ assertThat(in.readGenericValue(), equalTo((Object)longArray));
+ assertThat(in.readGenericValue(), equalTo((Object)floatArray));
+ assertThat(in.readGenericValue(), equalTo((Object)doubleArray));
+ assertThat(in.readString(), equalTo("hello"));
+ assertThat(in.readString(), equalTo("goodbye"));
+ assertThat(in.readGenericValue(), equalTo((Object)BytesRefs.toBytesRef("bytesref")));
+ in.close();
+ out.close();
+ }
+
+ // we ignore this test for now since all existing callers of BytesStreamOutput happily
+ // call bytes() after close().
+ @Ignore
+ @Test
+ public void testAccessAfterClose() throws Exception {
+ BytesStreamOutput out = new BytesStreamOutput();
+
+ // immediately close
+ out.close();
+
+ assertEquals(-1, out.size());
+ assertEquals(-1, out.position());
+
+ // writing a single byte must fail
+ try {
+ out.writeByte((byte)0);
+ fail("expected IllegalStateException: stream closed");
+ }
+ catch (IllegalStateException iex1) {
+ // expected
+ }
+
+ // writing in bulk must fail
+ try {
+ out.writeBytes(new byte[0], 0, 0);
+ fail("expected IllegalStateException: stream closed");
+ }
+ catch (IllegalStateException iex1) {
+ // expected
+ }
+
+ // toByteArray() must fail
+ try {
+ out.bytes().toBytes();
+ fail("expected IllegalStateException: stream closed");
+ }
+ catch (IllegalStateException iex1) {
+ // expected
+ }
+
+ }
+
+ // create & fill byte[] with randomized data
+ protected byte[] randomizedByteArrayWithSize(int size) {
+ byte[] data = new byte[size];
+ getRandom().nextBytes(data);
+ return data;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
new file mode 100644
index 0000000000..359e418bb6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/joda/DateMathParserTests.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.joda;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.TimeZone;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class DateMathParserTests extends ElasticsearchTestCase {
+
+ FormatDateTimeFormatter formatter = Joda.forPattern("dateOptionalTime||epoch_millis");
+ DateMathParser parser = new DateMathParser(formatter);
+
+ private static Callable<Long> callable(final long value) {
+ return new Callable<Long>() {
+ @Override
+ public Long call() throws Exception {
+ return value;
+ }
+ };
+ }
+
+ void assertDateMathEquals(String toTest, String expected) {
+ assertDateMathEquals(toTest, expected, 0, false, null);
+ }
+
+ void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, DateTimeZone timeZone) {
+ long gotMillis = parser.parse(toTest, callable(now), roundUp, timeZone);
+ assertDateEquals(gotMillis, toTest, expected);
+ }
+
+ void assertDateEquals(long gotMillis, String original, String expected) {
+ long expectedMillis = parser.parse(expected, callable(0));
+ if (gotMillis != expectedMillis) {
+ fail("Date math not equal\n" +
+ "Original : " + original + "\n" +
+ "Parsed : " + formatter.printer().print(gotMillis) + "\n" +
+ "Expected : " + expected + "\n" +
+ "Expected milliseconds : " + expectedMillis + "\n" +
+ "Actual milliseconds : " + gotMillis + "\n");
+ }
+ }
+
+ public void testBasicDates() {
+ assertDateMathEquals("2014", "2014-01-01T00:00:00.000");
+ assertDateMathEquals("2014-05", "2014-05-01T00:00:00.000");
+ assertDateMathEquals("2014-05-30", "2014-05-30T00:00:00.000");
+ assertDateMathEquals("2014-05-30T20", "2014-05-30T20:00:00.000");
+ assertDateMathEquals("2014-05-30T20:21", "2014-05-30T20:21:00.000");
+ assertDateMathEquals("2014-05-30T20:21:35", "2014-05-30T20:21:35.000");
+ assertDateMathEquals("2014-05-30T20:21:35.123", "2014-05-30T20:21:35.123");
+ }
+
+ public void testRoundingDoesNotAffectExactDate() {
+ assertDateMathEquals("2014-11-12T22:55:00Z", "2014-11-12T22:55:00Z", 0, true, null);
+ assertDateMathEquals("2014-11-12T22:55:00Z", "2014-11-12T22:55:00Z", 0, false, null);
+ }
+
+ public void testTimezone() {
+ // timezone works within date format
+ assertDateMathEquals("2014-05-30T20:21+02:00", "2014-05-30T18:21:00.000");
+
+ // but also externally
+ assertDateMathEquals("2014-05-30T20:21", "2014-05-30T18:21:00.000", 0, false, DateTimeZone.forID("+02:00"));
+
+ // and timezone in the date has priority
+ assertDateMathEquals("2014-05-30T20:21+03:00", "2014-05-30T17:21:00.000", 0, false, DateTimeZone.forID("-08:00"));
+ assertDateMathEquals("2014-05-30T20:21Z", "2014-05-30T20:21:00.000", 0, false, DateTimeZone.forID("-08:00"));
+ }
+
+ public void testBasicMath() {
+ assertDateMathEquals("2014-11-18||+y", "2015-11-18");
+ assertDateMathEquals("2014-11-18||-2y", "2012-11-18");
+
+ assertDateMathEquals("2014-11-18||+3M", "2015-02-18");
+ assertDateMathEquals("2014-11-18||-M", "2014-10-18");
+
+ assertDateMathEquals("2014-11-18||+1w", "2014-11-25");
+ assertDateMathEquals("2014-11-18||-3w", "2014-10-28");
+
+ assertDateMathEquals("2014-11-18||+22d", "2014-12-10");
+ assertDateMathEquals("2014-11-18||-423d", "2013-09-21");
+
+ assertDateMathEquals("2014-11-18T14||+13h", "2014-11-19T03");
+ assertDateMathEquals("2014-11-18T14||-1h", "2014-11-18T13");
+ assertDateMathEquals("2014-11-18T14||+13H", "2014-11-19T03");
+ assertDateMathEquals("2014-11-18T14||-1H", "2014-11-18T13");
+
+ assertDateMathEquals("2014-11-18T14:27||+10240m", "2014-11-25T17:07");
+ assertDateMathEquals("2014-11-18T14:27||-10m", "2014-11-18T14:17");
+
+ assertDateMathEquals("2014-11-18T14:27:32||+60s", "2014-11-18T14:28:32");
+ assertDateMathEquals("2014-11-18T14:27:32||-3600s", "2014-11-18T13:27:32");
+ }
+
+ public void testLenientEmptyMath() {
+ assertDateMathEquals("2014-05-30T20:21||", "2014-05-30T20:21:00.000");
+ }
+
+ public void testMultipleAdjustments() {
+ assertDateMathEquals("2014-11-18||+1M-1M", "2014-11-18");
+ assertDateMathEquals("2014-11-18||+1M-1m", "2014-12-17T23:59");
+ assertDateMathEquals("2014-11-18||-1m+1M", "2014-12-17T23:59");
+ assertDateMathEquals("2014-11-18||+1M/M", "2014-12-01");
+ assertDateMathEquals("2014-11-18||+1M/M+1h", "2014-12-01T01");
+ }
+
+
+ public void testNow() {
+ final long now = parser.parse("2014-11-18T14:27:32", callable(0), false, null);
+
+ assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null);
+ assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null);
+ assertDateMathEquals("now-2d", "2014-11-16T14:27:32", now, false, null);
+ assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, null);
+
+ // timezone does not affect now
+ assertDateMathEquals("now/m", "2014-11-18T14:27", now, false, DateTimeZone.forID("+02:00"));
+ }
+
+ public void testRounding() {
+ assertDateMathEquals("2014-11-18||/y", "2014-01-01", 0, false, null);
+ assertDateMathEquals("2014-11-18||/y", "2014-12-31T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014||/y", "2014-01-01", 0, false, null);
+ assertDateMathEquals("2014-01-01T00:00:00.001||/y", "2014-12-31T23:59:59.999", 0, true, null);
+ // rounding should also take into account time zone
+ assertDateMathEquals("2014-11-18||/y", "2013-12-31T23:00:00.000Z", 0, false, DateTimeZone.forID("CET"));
+ assertDateMathEquals("2014-11-18||/y", "2014-12-31T22:59:59.999Z", 0, true, DateTimeZone.forID("CET"));
+
+ assertDateMathEquals("2014-11-18||/M", "2014-11-01", 0, false, null);
+ assertDateMathEquals("2014-11-18||/M", "2014-11-30T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11||/M", "2014-11-01", 0, false, null);
+ assertDateMathEquals("2014-11||/M", "2014-11-30T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18||/M", "2014-10-31T23:00:00.000Z", 0, false, DateTimeZone.forID("CET"));
+ assertDateMathEquals("2014-11-18||/M", "2014-11-30T22:59:59.999Z", 0, true, DateTimeZone.forID("CET"));
+
+ assertDateMathEquals("2014-11-18T14||/w", "2014-11-17", 0, false, null);
+ assertDateMathEquals("2014-11-18T14||/w", "2014-11-23T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18||/w", "2014-11-17", 0, false, null);
+ assertDateMathEquals("2014-11-18||/w", "2014-11-23T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18||/w", "2014-11-16T23:00:00.000Z", 0, false, DateTimeZone.forID("+01:00"));
+ assertDateMathEquals("2014-11-18||/w", "2014-11-17T01:00:00.000Z", 0, false, DateTimeZone.forID("-01:00"));
+ assertDateMathEquals("2014-11-18||/w", "2014-11-16T23:00:00.000Z", 0, false, DateTimeZone.forID("CET"));
+ assertDateMathEquals("2014-11-18||/w", "2014-11-23T22:59:59.999Z", 0, true, DateTimeZone.forID("CET"));
+ assertDateMathEquals("2014-07-22||/w", "2014-07-20T22:00:00.000Z", 0, false, DateTimeZone.forID("CET")); // with DST
+
+ assertDateMathEquals("2014-11-18T14||/d", "2014-11-18", 0, false, null);
+ assertDateMathEquals("2014-11-18T14||/d", "2014-11-18T23:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18||/d", "2014-11-18", 0, false, null);
+ assertDateMathEquals("2014-11-18||/d", "2014-11-18T23:59:59.999", 0, true, null);
+
+ assertDateMathEquals("2014-11-18T14:27||/h", "2014-11-18T14", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27||/h", "2014-11-18T14:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18T14||/H", "2014-11-18T14", 0, false, null);
+ assertDateMathEquals("2014-11-18T14||/H", "2014-11-18T14:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18T14:27||/h", "2014-11-18T14", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27||/h", "2014-11-18T14:59:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18T14||/H", "2014-11-18T14", 0, false, null);
+ assertDateMathEquals("2014-11-18T14||/H", "2014-11-18T14:59:59.999", 0, true, null);
+
+ assertDateMathEquals("2014-11-18T14:27:32||/m", "2014-11-18T14:27", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27:32||/m", "2014-11-18T14:27:59.999", 0, true, null);
+ assertDateMathEquals("2014-11-18T14:27||/m", "2014-11-18T14:27", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27||/m", "2014-11-18T14:27:59.999", 0, true, null);
+
+ assertDateMathEquals("2014-11-18T14:27:32.123||/s", "2014-11-18T14:27:32", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27:32.123||/s", "2014-11-18T14:27:32.999", 0, true, null);
+ assertDateMathEquals("2014-11-18T14:27:32||/s", "2014-11-18T14:27:32", 0, false, null);
+ assertDateMathEquals("2014-11-18T14:27:32||/s", "2014-11-18T14:27:32.999", 0, true, null);
+ }
+
+ public void testTimestamps() {
+ assertDateMathEquals("1418248078000", "2014-12-10T21:47:58.000");
+
+ // datemath still works on timestamps
+ assertDateMathEquals("1418248078000||/m", "2014-12-10T21:47:00.000");
+
+ // also check other time units
+ DateMathParser parser = new DateMathParser(Joda.forPattern("epoch_second||dateOptionalTime"));
+ long datetime = parser.parse("1418248078", callable(0));
+ assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000");
+
+ // a timestamp before 10000 is a year
+ assertDateMathEquals("9999", "9999-01-01T00:00:00.000");
+ // 10000 is also a year, breaking bwc, used to be a timestamp
+ assertDateMathEquals("10000", "10000-01-01T00:00:00.000");
+ // but 10000 with T is still a date format
+ assertDateMathEquals("10000T", "10000-01-01T00:00:00.000");
+ }
+
+ void assertParseException(String msg, String date, String exc) {
+ try {
+ parser.parse(date, callable(0));
+ fail("Date: " + date + "\n" + msg);
+ } catch (ElasticsearchParseException e) {
+ assertThat(ExceptionsHelper.detailedMessage(e).contains(exc), equalTo(true));
+ }
+ }
+
+ public void testIllegalMathFormat() {
+ assertParseException("Expected date math unsupported operator exception", "2014-11-18||*5", "operator not supported");
+ assertParseException("Expected date math incompatible rounding exception", "2014-11-18||/2m", "rounding");
+ assertParseException("Expected date math illegal unit type exception", "2014-11-18||+2a", "unit [a] not supported");
+ assertParseException("Expected date math truncation exception", "2014-11-18||+12", "truncated");
+ assertParseException("Expected date math truncation exception", "2014-11-18||-", "truncated");
+ }
+
+ public void testIllegalDateFormat() {
+ assertParseException("Expected bad timestamp exception", Long.toString(Long.MAX_VALUE) + "0", "failed to parse date field");
+ assertParseException("Expected bad date format exception", "123bogus", "with format");
+ }
+
+ public void testOnlyCallsNowIfNecessary() {
+ final AtomicBoolean called = new AtomicBoolean();
+ final Callable<Long> now = new Callable<Long>() {
+ @Override
+ public Long call() throws Exception {
+ called.set(true);
+ return 42L;
+ }
+ };
+ parser.parse("2014-11-18T14:27:32", now, false, null);
+ assertFalse(called.get());
+ parser.parse("now/d", now, false, null);
+ assertTrue(called.get());
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testThatUnixTimestampMayNotHaveTimeZone() {
+ DateMathParser parser = new DateMathParser(Joda.forPattern("epoch_millis"));
+ parser.parse("1234567890123", callable(42), false, DateTimeZone.forTimeZone(TimeZone.getTimeZone("CET")));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/logging/jdk/JDKESLoggerTests.java b/core/src/test/java/org/elasticsearch/common/logging/jdk/JDKESLoggerTests.java
new file mode 100644
index 0000000000..d236ad5ecf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/logging/jdk/JDKESLoggerTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.jdk;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.logging.Handler;
+import java.util.logging.Level;
+import java.util.logging.LogRecord;
+import java.util.logging.Logger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class JDKESLoggerTests extends ElasticsearchTestCase {
+
+ private ESLogger esTestLogger;
+ private TestHandler testHandler;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+
+ JdkESLoggerFactory esTestLoggerFactory = new JdkESLoggerFactory();
+ esTestLogger = esTestLoggerFactory.newInstance("test");
+ Logger testLogger = ((JdkESLogger) esTestLogger).logger();
+ testLogger.setLevel(Level.FINEST);
+ assertThat(testLogger.getLevel(), equalTo(Level.FINEST));
+ testHandler = new TestHandler();
+ testLogger.addHandler(testHandler);
+ }
+
+ @Test
+ public void locationInfoTest() {
+ esTestLogger.error("This is an error");
+ esTestLogger.warn("This is a warning");
+ esTestLogger.info("This is an info");
+ esTestLogger.debug("This is a debug");
+ esTestLogger.trace("This is a trace");
+ List<LogRecord> records = testHandler.getEvents();
+ assertThat(records, notNullValue());
+ assertThat(records.size(), equalTo(5));
+ LogRecord record = records.get(0);
+ assertThat(record, notNullValue());
+ assertThat(record.getLevel(), equalTo(Level.SEVERE));
+ assertThat(record.getMessage(), equalTo("This is an error"));
+ assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
+ assertThat(record.getSourceMethodName(), equalTo("locationInfoTest"));
+ record = records.get(1);
+ assertThat(record, notNullValue());
+ assertThat(record.getLevel(), equalTo(Level.WARNING));
+ assertThat(record.getMessage(), equalTo("This is a warning"));
+ assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
+ assertThat(record.getSourceMethodName(), equalTo("locationInfoTest"));
+ record = records.get(2);
+ assertThat(record, notNullValue());
+ assertThat(record.getLevel(), equalTo(Level.INFO));
+ assertThat(record.getMessage(), equalTo("This is an info"));
+ assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
+ assertThat(record.getSourceMethodName(), equalTo("locationInfoTest"));
+ record = records.get(3);
+ assertThat(record, notNullValue());
+ assertThat(record.getLevel(), equalTo(Level.FINE));
+ assertThat(record.getMessage(), equalTo("This is a debug"));
+ assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
+ assertThat(record.getSourceMethodName(), equalTo("locationInfoTest"));
+ record = records.get(4);
+ assertThat(record, notNullValue());
+ assertThat(record.getLevel(), equalTo(Level.FINEST));
+ assertThat(record.getMessage(), equalTo("This is a trace"));
+ assertThat(record.getSourceClassName(), equalTo(JDKESLoggerTests.class.getCanonicalName()));
+ assertThat(record.getSourceMethodName(), equalTo("locationInfoTest"));
+ }
+
+ private static class TestHandler extends Handler {
+
+ private List<LogRecord> records = new ArrayList<>();
+
+ @Override
+ public void close() {
+ }
+
+ public List<LogRecord> getEvents() {
+ return records;
+ }
+
+ @Override
+ public void publish(LogRecord record) {
+ // Forces it to generate the location information
+ record.getSourceClassName();
+ records.add(record);
+ }
+
+ @Override
+ public void flush() {
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java b/core/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java
new file mode 100644
index 0000000000..f0d0c076eb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/logging/log4j/Log4jESLoggerTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LocationInfo;
+import org.apache.log4j.spi.LoggingEvent;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Test;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class Log4jESLoggerTests extends ElasticsearchTestCase {
+
+ private ESLogger esTestLogger;
+ private TestAppender testAppender;
+ private String testLevel;
+ private DeprecationLogger deprecationLogger;
+ private TestAppender deprecationAppender;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ this.testLevel = Log4jESLoggerFactory.getLogger("test").getLevel();
+ LogConfigurator.reset();
+ Path configDir = getDataPath("config");
+ // Need to set custom path.conf so we can use a custom logging.yml file for the test
+ Settings settings = Settings.builder()
+ .put("path.conf", configDir.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build();
+ LogConfigurator.configure(settings);
+
+ esTestLogger = Log4jESLoggerFactory.getLogger("test");
+ Logger testLogger = ((Log4jESLogger) esTestLogger).logger();
+ assertThat(testLogger.getLevel(), equalTo(Level.TRACE));
+ testAppender = new TestAppender();
+ testLogger.addAppender(testAppender);
+
+ // deprecation setup, needs to be set to debug to log
+ deprecationLogger = Log4jESLoggerFactory.getDeprecationLogger("test");
+ deprecationAppender = new TestAppender();
+ ESLogger logger = Log4jESLoggerFactory.getLogger("deprecation.test");
+ logger.setLevel("DEBUG");
+ (((Log4jESLogger) logger).logger()).addAppender(deprecationAppender);
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ esTestLogger.setLevel(testLevel);
+ Logger testLogger = ((Log4jESLogger) esTestLogger).logger();
+ testLogger.removeAppender(testAppender);
+ Logger deprecationLogger = ((Log4jESLogger) Log4jESLoggerFactory.getLogger("deprecation.test")).logger();
+ deprecationLogger.removeAppender(deprecationAppender);
+ }
+
+ @Test
+ public void locationInfoTest() {
+ esTestLogger.error("This is an error");
+ esTestLogger.warn("This is a warning");
+ esTestLogger.info("This is an info");
+ esTestLogger.debug("This is a debug");
+ esTestLogger.trace("This is a trace");
+ List<LoggingEvent> events = testAppender.getEvents();
+ assertThat(events, notNullValue());
+ assertThat(events.size(), equalTo(5));
+ LoggingEvent event = events.get(0);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.ERROR));
+ assertThat(event.getRenderedMessage(), equalTo("This is an error"));
+ LocationInfo locationInfo = event.getLocationInformation();
+ assertThat(locationInfo, notNullValue());
+ assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
+ assertThat(locationInfo.getMethodName(), equalTo("locationInfoTest"));
+ event = events.get(1);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.WARN));
+ assertThat(event.getRenderedMessage(), equalTo("This is a warning"));
+ locationInfo = event.getLocationInformation();
+ assertThat(locationInfo, notNullValue());
+ assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
+ assertThat(locationInfo.getMethodName(), equalTo("locationInfoTest"));
+ event = events.get(2);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.INFO));
+ assertThat(event.getRenderedMessage(), equalTo("This is an info"));
+ locationInfo = event.getLocationInformation();
+ assertThat(locationInfo, notNullValue());
+ assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
+ assertThat(locationInfo.getMethodName(), equalTo("locationInfoTest"));
+ event = events.get(3);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.DEBUG));
+ assertThat(event.getRenderedMessage(), equalTo("This is a debug"));
+ locationInfo = event.getLocationInformation();
+ assertThat(locationInfo, notNullValue());
+ assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
+ assertThat(locationInfo.getMethodName(), equalTo("locationInfoTest"));
+ event = events.get(4);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.TRACE));
+ assertThat(event.getRenderedMessage(), equalTo("This is a trace"));
+ locationInfo = event.getLocationInformation();
+ assertThat(locationInfo, notNullValue());
+ assertThat(locationInfo.getClassName(), equalTo(Log4jESLoggerTests.class.getCanonicalName()));
+ assertThat(locationInfo.getMethodName(), equalTo("locationInfoTest"));
+ }
+
+ @Test
+ public void testDeprecationLogger() {
+ deprecationLogger.deprecated("This is a deprecation message");
+ List<LoggingEvent> deprecationEvents = deprecationAppender.getEvents();
+ LoggingEvent event = deprecationEvents.get(0);
+ assertThat(event, notNullValue());
+ assertThat(event.getLevel(), equalTo(Level.DEBUG));
+ assertThat(event.getRenderedMessage(), equalTo("This is a deprecation message"));
+ }
+
+ private static class TestAppender extends AppenderSkeleton {
+
+ private List<LoggingEvent> events = new ArrayList<>();
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(LoggingEvent event) {
+ // Forces it to generate the location information
+ event.getLocationInformation();
+ events.add(event);
+ }
+
+ public List<LoggingEvent> getEvents() {
+ return events;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java b/core/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java
new file mode 100644
index 0000000000..4997ea6974
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/logging/log4j/LoggingConfigurationTests.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.logging.log4j;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.Logger;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests resolution of logging configuration files (yml/yaml/json/properties).
+ */
+public class LoggingConfigurationTests extends ElasticsearchTestCase {
+
+ @Before
+ public void before() throws Exception {
+ LogConfigurator.reset();
+ }
+
+ @Test
+ public void testResolveMultipleConfigs() throws Exception {
+ String level = Log4jESLoggerFactory.getLogger("test").getLevel();
+ try {
+ Path configDir = getDataPath("config");
+ Settings settings = Settings.builder()
+ .put("path.conf", configDir.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build();
+ LogConfigurator.configure(settings);
+
+ ESLogger esLogger = Log4jESLoggerFactory.getLogger("test");
+ Logger logger = ((Log4jESLogger) esLogger).logger();
+ Appender appender = logger.getAppender("console");
+ assertThat(appender, notNullValue());
+
+ esLogger = Log4jESLoggerFactory.getLogger("second");
+ logger = ((Log4jESLogger) esLogger).logger();
+ appender = logger.getAppender("console2");
+ assertThat(appender, notNullValue());
+
+ esLogger = Log4jESLoggerFactory.getLogger("third");
+ logger = ((Log4jESLogger) esLogger).logger();
+ appender = logger.getAppender("console3");
+ assertThat(appender, notNullValue());
+ } finally {
+ Log4jESLoggerFactory.getLogger("test").setLevel(level);
+ }
+ }
+
+ @Test
+ public void testResolveJsonLoggingConfig() throws Exception {
+ Path tmpDir = createTempDir();
+ Path loggingConf = tmpDir.resolve(loggingConfiguration("json"));
+ Files.write(loggingConf, "{\"json\": \"foo\"}".getBytes(StandardCharsets.UTF_8));
+ Environment environment = new Environment(
+ Settings.builder()
+ .put("path.conf", tmpDir.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build());
+
+ Settings.Builder builder = Settings.builder();
+ LogConfigurator.resolveConfig(environment, builder);
+
+ Settings logSettings = builder.build();
+ assertThat(logSettings.get("json"), is("foo"));
+ }
+
+ @Test
+ public void testResolvePropertiesLoggingConfig() throws Exception {
+ Path tmpDir = createTempDir();
+ Path loggingConf = tmpDir.resolve(loggingConfiguration("properties"));
+ Files.write(loggingConf, "key: value".getBytes(StandardCharsets.UTF_8));
+ Environment environment = new Environment(
+ Settings.builder()
+ .put("path.conf", tmpDir.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build());
+
+ Settings.Builder builder = Settings.builder();
+ LogConfigurator.resolveConfig(environment, builder);
+
+ Settings logSettings = builder.build();
+ assertThat(logSettings.get("key"), is("value"));
+ }
+
+ @Test
+ public void testResolveYamlLoggingConfig() throws Exception {
+ Path tmpDir = createTempDir();
+ Path loggingConf1 = tmpDir.resolve(loggingConfiguration("yml"));
+ Path loggingConf2 = tmpDir.resolve(loggingConfiguration("yaml"));
+ Files.write(loggingConf1, "yml: bar".getBytes(StandardCharsets.UTF_8));
+ Files.write(loggingConf2, "yaml: bar".getBytes(StandardCharsets.UTF_8));
+ Environment environment = new Environment(
+ Settings.builder()
+ .put("path.conf", tmpDir.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build());
+
+ Settings.Builder builder = Settings.builder();
+ LogConfigurator.resolveConfig(environment, builder);
+
+ Settings logSettings = builder.build();
+ assertThat(logSettings.get("yml"), is("bar"));
+ assertThat(logSettings.get("yaml"), is("bar"));
+ }
+
+ @Test
+ public void testResolveConfigInvalidFilename() throws Exception {
+ Path tmpDir = createTempDir();
+ Path invalidSuffix = tmpDir.resolve(loggingConfiguration(randomFrom(LogConfigurator.ALLOWED_SUFFIXES)) + randomInvalidSuffix());
+ Files.write(invalidSuffix, "yml: bar".getBytes(StandardCharsets.UTF_8));
+ Environment environment = new Environment(
+ Settings.builder()
+ .put("path.conf", invalidSuffix.toAbsolutePath())
+ .put("path.home", createTempDir().toString())
+ .build());
+
+ Settings.Builder builder = Settings.builder();
+ LogConfigurator.resolveConfig(environment, builder);
+
+ Settings logSettings = builder.build();
+ assertThat(logSettings.get("yml"), Matchers.nullValue());
+ }
+
+ private static String loggingConfiguration(String suffix) {
+ return "logging." + randomAsciiOfLength(randomIntBetween(0, 10)) + "." + suffix;
+ }
+
+ private static String randomInvalidSuffix() {
+ String randomSuffix;
+ do {
+ randomSuffix = randomAsciiOfLength(randomIntBetween(1, 5));
+ } while (LogConfigurator.ALLOWED_SUFFIXES.contains(randomSuffix));
+ return randomSuffix;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java
new file mode 100644
index 0000000000..c96134d922
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LRUQueryCache;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.Set;
+
+public class IndexCacheableQueryTests extends ElasticsearchTestCase {
+
+ static class DummyIndexCacheableQuery extends IndexCacheableQuery {
+ @Override
+ public String toString(String field) {
+ return "DummyIndexCacheableQuery";
+ }
+
+ @Override
+ public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
+ return new Weight(this) {
+
+ @Override
+ public void extractTerms(Set<Term> terms) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ }
+
+ @Override
+ public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ return null;
+ }
+
+ };
+ }
+ }
+
+ public void testBasics() throws IOException {
+ DummyIndexCacheableQuery query = new DummyIndexCacheableQuery();
+ QueryUtils.check(query);
+
+ Query rewritten = query.rewrite(new MultiReader(new IndexReader[0]));
+ QueryUtils.check(rewritten);
+ QueryUtils.checkUnequal(query, rewritten);
+
+ Query rewritten2 = query.rewrite(new MultiReader(new IndexReader[0]));
+ QueryUtils.check(rewritten2);
+ QueryUtils.checkUnequal(rewritten, rewritten2);
+ }
+
+ public void testCache() throws IOException {
+ Directory dir = newDirectory();
+ LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE);
+ QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE;
+ RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
+ for (int i = 0; i < 10; ++i) {
+ writer.addDocument(new Document());
+ }
+
+ IndexReader reader = writer.getReader();
+ // IndexReader wrapping is disabled because of LUCENE-6500.
+ // Add it back when we are on 5.3
+ assert Version.LATEST == Version.LUCENE_5_2_0;
+ IndexSearcher searcher = newSearcher(reader, false);
+ reader = searcher.getIndexReader(); // reader might be wrapped
+ searcher.setQueryCache(cache);
+ searcher.setQueryCachingPolicy(policy);
+
+ assertEquals(0, cache.getCacheSize());
+ DummyIndexCacheableQuery query = new DummyIndexCacheableQuery();
+ searcher.count(query);
+ int expectedCacheSize = reader.leaves().size();
+ assertEquals(expectedCacheSize, cache.getCacheSize());
+ searcher.count(query);
+ assertEquals(expectedCacheSize, cache.getCacheSize());
+
+ writer.addDocument(new Document());
+
+ IndexReader reader2 = writer.getReader();
+ // IndexReader wrapping is disabled because of LUCENE-6500.
+ // Add it back when we are on 5.3
+ assert Version.LATEST == Version.LUCENE_5_2_0;
+ searcher = newSearcher(reader2, false);
+ reader2 = searcher.getIndexReader(); // reader might be wrapped
+ searcher.setQueryCache(cache);
+ searcher.setQueryCachingPolicy(policy);
+
+ // since the query is only cacheable at the index level, it has to be recomputed on all leaves
+ expectedCacheSize += reader2.leaves().size();
+ searcher.count(query);
+ assertEquals(expectedCacheSize, cache.getCacheSize());
+ searcher.count(query);
+ assertEquals(expectedCacheSize, cache.getCacheSize());
+
+ reader.close();
+ reader2.close();
+ writer.close();
+ assertEquals(0, cache.getCacheSize());
+ dir.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
new file mode 100644
index 0000000000..85852cf9f9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Tests for the {@code Lucene} utility class helpers.
+ */
+public class LuceneTest extends ElasticsearchTestCase {
+
+
+ /*
+ * simple test that ensures that we bump the version on Upgrade
+ */
+ @Test
+ public void testVersion() {
+ // note this is just a silly sanity check, we test it in lucene, and we point to it this way
+ assertEquals(Lucene.VERSION, Version.LATEST);
+ }
+
+ public void testWaitForIndex() throws Exception {
+ final MockDirectoryWrapper dir = newMockDirectory();
+
+ final AtomicBoolean succeeded = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ // Start a background thread that blocks in Lucene.waitForIndex; it can
+ // only succeed once the writer below commits the first segment
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ if (Lucene.waitForIndex(dir, 5000)) {
+ succeeded.set(true);
+ } else {
+ fail("index should have eventually existed!");
+ }
+ } catch (InterruptedException e) {
+ // ignore interruptions
+ } catch (Exception e) {
+ fail("should have been able to create the engine! " + e.getMessage());
+ }
+ }
+ });
+ t.start();
+
+ // count down latch
+ // now the waiting thread starts polling for the index to appear
+ latch.countDown();
+
+ dir.setEnableVirusScanner(false);
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setMaxBufferedDocs(2);
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+
+ t.join();
+
+ writer.close();
+ dir.close();
+ assertTrue("index should have eventually existed", succeeded.get());
+ }
+
+ public void testCleanIndex() throws IOException {
+ MockDirectoryWrapper dir = newMockDirectory();
+ dir.setEnableVirusScanner(false);
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setMaxBufferedDocs(2);
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+
+ doc = new Document();
+ doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ writer.commit();
+ doc = new Document();
+ doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ writer.deleteDocuments(new Term("id", "2"));
+ writer.commit();
+ try (DirectoryReader open = DirectoryReader.open(writer, true)) {
+ assertEquals(3, open.numDocs());
+ assertEquals(1, open.numDeletedDocs());
+ assertEquals(4, open.maxDoc());
+ }
+ writer.close();
+ if (random().nextBoolean()) {
+ for (String file : dir.listAll()) {
+ if (file.startsWith("_1")) {
+ // delete a random file
+ dir.deleteFile(file);
+ break;
+ }
+ }
+ }
+ Lucene.cleanLuceneIndex(dir);
+ if (dir.listAll().length > 0) {
+ for (String file : dir.listAll()) {
+ if (file.startsWith("extra") == false) {
+ assertEquals(file, "write.lock");
+ }
+ }
+ }
+ dir.close();
+ }
+
+ public void testPruneUnreferencedFiles() throws IOException {
+ MockDirectoryWrapper dir = newMockDirectory();
+ dir.setEnableVirusScanner(false);
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setMaxBufferedDocs(2);
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+
+ doc = new Document();
+ doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ writer.commit();
+ SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
+
+ doc = new Document();
+ doc.add(new TextField("id", "4", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ writer.deleteDocuments(new Term("id", "2"));
+ writer.commit();
+ DirectoryReader open = DirectoryReader.open(writer, true);
+ assertEquals(3, open.numDocs());
+ assertEquals(1, open.numDeletedDocs());
+ assertEquals(4, open.maxDoc());
+ open.close();
+ writer.close();
+ SegmentInfos si = Lucene.pruneUnreferencedFiles(segmentCommitInfos.getSegmentsFileName(), dir);
+ assertEquals(si.getSegmentsFileName(), segmentCommitInfos.getSegmentsFileName());
+ open = DirectoryReader.open(dir);
+ assertEquals(3, open.numDocs());
+ assertEquals(0, open.numDeletedDocs());
+ assertEquals(3, open.maxDoc());
+
+ IndexSearcher s = new IndexSearcher(open);
+ assertEquals(s.search(new TermQuery(new Term("id", "1")), 1).totalHits, 1);
+ assertEquals(s.search(new TermQuery(new Term("id", "2")), 1).totalHits, 1);
+ assertEquals(s.search(new TermQuery(new Term("id", "3")), 1).totalHits, 1);
+ assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0);
+
+ for (String file : dir.listAll()) {
+ assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2"));
+ }
+ open.close();
+ dir.close();
+
+ }
+
+ public void testFiles() throws IOException {
+ MockDirectoryWrapper dir = newMockDirectory();
+ dir.setEnableVirusScanner(false);
+ IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setMaxBufferedDocs(2);
+ iwc.setUseCompoundFile(true);
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+ Set<String> files = new HashSet<>();
+ for (String f : Lucene.files(Lucene.readSegmentInfos(dir))) {
+ files.add(f);
+ }
+ final boolean simpleTextCFS = files.contains("_0.scf");
+ assertTrue(files.toString(), files.contains("segments_1"));
+ if (simpleTextCFS) {
+ assertFalse(files.toString(), files.contains("_0.cfs"));
+ assertFalse(files.toString(), files.contains("_0.cfe"));
+ } else {
+ assertTrue(files.toString(), files.contains("_0.cfs"));
+ assertTrue(files.toString(), files.contains("_0.cfe"));
+ }
+ assertTrue(files.toString(), files.contains("_0.si"));
+
+ doc = new Document();
+ doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+
+ files.clear();
+ for (String f : Lucene.files(Lucene.readSegmentInfos(dir))) {
+ files.add(f);
+ }
+ assertFalse(files.toString(), files.contains("segments_1"));
+ assertTrue(files.toString(), files.contains("segments_2"));
+ if (simpleTextCFS) {
+ assertFalse(files.toString(), files.contains("_0.cfs"));
+ assertFalse(files.toString(), files.contains("_0.cfe"));
+ } else {
+ assertTrue(files.toString(), files.contains("_0.cfs"));
+ assertTrue(files.toString(), files.contains("_0.cfe"));
+ }
+ assertTrue(files.toString(), files.contains("_0.si"));
+
+
+ if (simpleTextCFS) {
+ assertFalse(files.toString(), files.contains("_1.cfs"));
+ assertFalse(files.toString(), files.contains("_1.cfe"));
+ } else {
+ assertTrue(files.toString(), files.contains("_1.cfs"));
+ assertTrue(files.toString(), files.contains("_1.cfe"));
+ }
+ assertTrue(files.toString(), files.contains("_1.si"));
+ writer.close();
+ dir.close();
+ }
+
+ public void testNumDocs() throws IOException {
+ MockDirectoryWrapper dir = newMockDirectory();
+ dir.setEnableVirusScanner(false);
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+ SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
+ assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
+
+ doc = new Document();
+ doc.add(new TextField("id", "2", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+
+ doc = new Document();
+ doc.add(new TextField("id", "3", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ segmentCommitInfos = Lucene.readSegmentInfos(dir);
+ assertEquals(1, Lucene.getNumDocs(segmentCommitInfos));
+ writer.commit();
+ segmentCommitInfos = Lucene.readSegmentInfos(dir);
+ assertEquals(3, Lucene.getNumDocs(segmentCommitInfos));
+ writer.deleteDocuments(new Term("id", "2"));
+ writer.commit();
+ segmentCommitInfos = Lucene.readSegmentInfos(dir);
+ assertEquals(2, Lucene.getNumDocs(segmentCommitInfos));
+
+ int numDocsToIndex = randomIntBetween(10, 50);
+ List<Term> deleteTerms = new ArrayList<>();
+ for (int i = 0; i < numDocsToIndex; i++) {
+ doc = new Document();
+ doc.add(new TextField("id", "extra_" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ deleteTerms.add(new Term("id", "extra_" + i));
+ writer.addDocument(doc);
+ }
+ int numDocsToDelete = randomIntBetween(0, numDocsToIndex);
+ Collections.shuffle(deleteTerms, random());
+ for (int i = 0; i < numDocsToDelete; i++) {
+ Term remove = deleteTerms.remove(0);
+ writer.deleteDocuments(remove);
+ }
+ writer.commit();
+ segmentCommitInfos = Lucene.readSegmentInfos(dir);
+ assertEquals(2 + deleteTerms.size(), Lucene.getNumDocs(segmentCommitInfos));
+ writer.close();
+ dir.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java b/core/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java
new file mode 100644
index 0000000000..f6873d0c07
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Tests for {@code ShardCoreKeyMap}, which records which shard each segment
+ * core cache key belongs to and drops entries once the segment core is closed.
+ */
+public class ShardCoreKeyMapTests extends ElasticsearchTestCase {
+
+    // A plain reader is not wrapped in an ElasticsearchDirectoryReader and so
+    // carries no ShardId: registering its leaves must be rejected.
+    public void testMissingShard() throws IOException {
+        try (Directory dir = newDirectory();
+                RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {
+            w.addDocument(new Document());
+            try (IndexReader reader = w.getReader()) {
+                ShardCoreKeyMap map = new ShardCoreKeyMap();
+                for (LeafReaderContext ctx : reader.leaves()) {
+                    try {
+                        map.add(ctx.reader());
+                        fail();
+                    } catch (IllegalArgumentException expected) {
+                        // ok
+                    }
+                }
+            }
+        }
+    }
+
+    // End-to-end check: add/lookup/eviction across three single-segment readers
+    // that belong to three different shards spread over two indices.
+    public void testBasics() throws IOException {
+        Directory dir1 = newDirectory();
+        RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1);
+        w1.addDocument(new Document());
+
+        Directory dir2 = newDirectory();
+        RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2);
+        w2.addDocument(new Document());
+
+        Directory dir3 = newDirectory();
+        RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3);
+        w3.addDocument(new Document());
+
+        ShardId shardId1 = new ShardId("index1", 1);
+        ShardId shardId2 = new ShardId("index1", 3);
+        ShardId shardId3 = new ShardId("index2", 2);
+
+        ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
+        ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
+        ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3);
+
+        ShardCoreKeyMap map = new ShardCoreKeyMap();
+        for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
+            for (LeafReaderContext ctx : reader.leaves()) {
+                map.add(ctx.reader());
+            }
+        }
+        // one entry per distinct segment core
+        assertEquals(3, map.size());
+
+        // Adding them back is a no-op
+        for (LeafReaderContext ctx : reader1.leaves()) {
+            map.add(ctx.reader());
+        }
+        assertEquals(3, map.size());
+
+        for (LeafReaderContext ctx : reader2.leaves()) {
+            assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey()));
+        }
+
+        // reopen reader1 after an extra doc: existing cores are shared with the
+        // new reader, so the map should keep working after re-registration
+        w1.addDocument(new Document());
+        ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1);
+        reader1.close();
+        reader1 = newReader1;
+
+        // same for reader2, but with a force merge to trigger evictions
+        w2.addDocument(new Document());
+        w2.forceMerge(1);
+        ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2);
+        reader2.close();
+        reader2 = newReader2;
+
+        for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) {
+            for (LeafReaderContext ctx : reader.leaves()) {
+                map.add(ctx.reader());
+            }
+        }
+
+        // every live core key of index1's readers must be reported by
+        // getCoreKeysForIndex("index1")
+        final Set<Object> index1Keys = new HashSet<>();
+        for (DirectoryReader reader : Arrays.asList(reader1, reader2)) {
+            for (LeafReaderContext ctx : reader.leaves()) {
+                index1Keys.add(ctx.reader().getCoreCacheKey());
+            }
+        }
+        index1Keys.removeAll(map.getCoreKeysForIndex("index1"));
+        assertEquals(Collections.emptySet(), index1Keys);
+
+        // closing all readers/writers closes the segment cores, which should
+        // evict every entry from the map
+        reader1.close();
+        w1.close();
+        reader2.close();
+        w2.close();
+        reader3.close();
+        w3.close();
+        assertEquals(0, map.size());
+
+        dir1.close();
+        dir2.close();
+        dir3.close();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
new file mode 100644
index 0000000000..10661a5ec0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.all;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.payloads.PayloadHelper;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@code _all} field machinery: {@link AllEntries},
+ * {@link AllTokenStream} (which encodes per-field boosts as payloads) and
+ * {@link AllTermQuery} (which folds those payloads back into scoring).
+ *
+ * Fix over the previous revision: every test now closes its IndexReader and
+ * Directory (and testNoTokensWithKeywordAnalyzer its IndexWriter too) instead
+ * of leaking them.
+ */
+public class SimpleAllTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testBoostOnEagerTokenizer() throws Exception {
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "all", 2.0f);
+        allEntries.addText("field2", "your", 1.0f);
+        allEntries.addText("field1", "boosts", 0.5f);
+        allEntries.reset();
+        // whitespace analyzer's tokenizer reads characters eagerly on the contrary to the standard tokenizer
+        final TokenStream ts = AllTokenStream.allTokenStream("any", allEntries, new WhitespaceAnalyzer());
+        final CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
+        final PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
+        ts.reset();
+        for (int i = 0; i < 3; ++i) {
+            assertTrue(ts.incrementToken());
+            final String term;
+            final float boost;
+            switch (i) {
+            case 0:
+                term = "all";
+                boost = 2;
+                break;
+            case 1:
+                term = "your";
+                boost = 1;
+                break;
+            case 2:
+                term = "boosts";
+                boost = 0.5f;
+                break;
+            default:
+                throw new AssertionError();
+            }
+            assertEquals(term, termAtt.toString());
+            // a missing/empty payload means the default boost of 1
+            final BytesRef payload = payloadAtt.getPayload();
+            if (payload == null || payload.length == 0) {
+                assertEquals(boost, 1f, 0.001f);
+            } else {
+                assertEquals(4, payload.length);
+                final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
+                assertEquals(boost, b, 0.001f);
+            }
+        }
+        assertFalse(ts.incrementToken());
+    }
+
+    @Test
+    public void testAllEntriesRead() throws Exception {
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "something", 1.0f);
+        allEntries.addText("field2", "else", 1.0f);
+
+        // read with every buffer size from 1 to 29 to exercise chunking
+        for (int i = 1; i < 30; i++) {
+            allEntries.reset();
+            char[] data = new char[i];
+            String value = slurpToString(allEntries, data);
+            assertThat("failed for " + i, value, equalTo("something else"));
+        }
+    }
+
+    // Drains the AllEntries reader through the given buffer into a String.
+    private String slurpToString(AllEntries allEntries, char[] data) throws IOException {
+        StringBuilder sb = new StringBuilder();
+        while (true) {
+            int read = allEntries.read(data, 0, data.length);
+            if (read == -1) {
+                break;
+            }
+            sb.append(data, 0, read);
+        }
+        return sb.toString();
+    }
+
+    // The score reported by explain() must match the score from search().
+    private void assertExplanationScore(IndexSearcher searcher, Query query, ScoreDoc scoreDoc) throws IOException {
+        final Explanation expl = searcher.explain(query, scoreDoc.doc);
+        assertEquals(scoreDoc.score, expl.getValue(), 0.00001f);
+    }
+
+    @Test
+    public void testSimpleAllNoBoost() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "something", 1.0f);
+        allEntries.addText("field2", "else", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        doc = new Document();
+        doc.add(new Field("_id", "2", StoredField.TYPE));
+        allEntries = new AllEntries();
+        allEntries.addText("field1", "else", 1.0f);
+        allEntries.addText("field2", "something", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // no boosts anywhere: doc order should be index order for both terms
+        Query query = new AllTermQuery(new Term("_all", "else"));
+        TopDocs docs = searcher.search(query, 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+        assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+        query = new AllTermQuery(new Term("_all", "something"));
+        docs = searcher.search(query, 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+        assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+        reader.close();
+        indexWriter.close();
+        dir.close();
+    }
+
+    @Test
+    public void testSimpleAllWithBoost() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "something", 1.0f);
+        allEntries.addText("field2", "else", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        doc = new Document();
+        doc.add(new Field("_id", "2", StoredField.TYPE));
+        allEntries = new AllEntries();
+        allEntries.addText("field1", "else", 2.0f);
+        allEntries.addText("field2", "something", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // this one is boosted. so the second doc is more relevant
+        Query query = new AllTermQuery(new Term("_all", "else"));
+        TopDocs docs = searcher.search(query, 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(1));
+        assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+        assertThat(docs.scoreDocs[1].doc, equalTo(0));
+        assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+        // "something" is unboosted in both docs: index order again
+        query = new AllTermQuery(new Term("_all", "something"));
+        docs = searcher.search(query, 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertExplanationScore(searcher, query, docs.scoreDocs[0]);
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+        assertExplanationScore(searcher, query, docs.scoreDocs[1]);
+
+        reader.close();
+        indexWriter.close();
+        dir.close();
+    }
+
+    @Test
+    public void testMultipleTokensAllNoBoost() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "something moo", 1.0f);
+        allEntries.addText("field2", "else koo", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        doc = new Document();
+        doc.add(new Field("_id", "2", StoredField.TYPE));
+        allEntries = new AllEntries();
+        allEntries.addText("field1", "else koo", 1.0f);
+        allEntries.addText("field2", "something moo", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // all four terms appear once per doc with no boost: index order expected
+        TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        reader.close();
+        indexWriter.close();
+        dir.close();
+    }
+
+    @Test
+    public void testMultipleTokensAllWithBoost() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", "something moo", 1.0f);
+        allEntries.addText("field2", "else koo", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        doc = new Document();
+        doc.add(new Field("_id", "2", StoredField.TYPE));
+        allEntries = new AllEntries();
+        allEntries.addText("field1", "else koo", 2.0f);
+        allEntries.addText("field2", "something moo", 1.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // "else" and "koo" carry a boost of 2 in doc 2, which should rank first
+        TopDocs docs = searcher.search(new AllTermQuery(new Term("_all", "else")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(1));
+        assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+        docs = searcher.search(new AllTermQuery(new Term("_all", "koo")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(1));
+        assertThat(docs.scoreDocs[1].doc, equalTo(0));
+
+        // "something" and "moo" are unboosted: index order
+        docs = searcher.search(new AllTermQuery(new Term("_all", "something")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        docs = searcher.search(new AllTermQuery(new Term("_all", "moo")), 10);
+        assertThat(docs.totalHits, equalTo(2));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+        assertThat(docs.scoreDocs[1].doc, equalTo(1));
+
+        reader.close();
+        indexWriter.close();
+        dir.close();
+    }
+
+    @Test
+    public void testNoTokensWithKeywordAnalyzer() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));
+
+        // an empty AllEntries produces no tokens, but the document itself must
+        // still be indexed and visible to a match-all query
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.KEYWORD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+
+        IndexReader reader = DirectoryReader.open(indexWriter, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
+        assertThat(docs.totalHits, equalTo(1));
+        assertThat(docs.scoreDocs[0].doc, equalTo(0));
+
+        reader.close();
+        indexWriter.close();
+        dir.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/ElasticsearchDirectoryReaderTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/ElasticsearchDirectoryReaderTests.java
new file mode 100644
index 0000000000..934dd8d6c4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/index/ElasticsearchDirectoryReaderTests.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.index;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+/**
+ * Simple tests for {@code ElasticsearchDirectoryReader}, the filter reader
+ * that associates a DirectoryReader with a ShardId.
+ */
+public class ElasticsearchDirectoryReaderTests extends ElasticsearchTestCase {
+
+    /** Test that core cache key (needed for NRT) is working */
+    public void testCoreCacheKey() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriterConfig iwc = new IndexWriterConfig(null);
+        iwc.setMaxBufferedDocs(100);
+        // no merges, so the single segment survives the delete+reopen below
+        iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+        IndexWriter iw = new IndexWriter(dir, iwc);
+
+        // add two docs, id:0 and id:1
+        Document doc = new Document();
+        Field idField = new StringField("id", "", Field.Store.NO);
+        doc.add(idField);
+        idField.setStringValue("0");
+        iw.addDocument(doc);
+        idField.setStringValue("1");
+        iw.addDocument(doc);
+
+        // open reader
+        ShardId shardId = new ShardId(new Index("fake"), 1);
+        DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw, true), shardId);
+        assertEquals(2, ir.numDocs());
+        assertEquals(1, ir.leaves().size());
+
+        // delete id:0 and reopen
+        iw.deleteDocuments(new Term("id", "0"));
+        DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);
+
+        // we should have the same cache key as before
+        assertEquals(1, ir2.numDocs());
+        assertEquals(1, ir2.leaves().size());
+        assertSame(ir.leaves().get(0).reader().getCoreCacheKey(), ir2.leaves().get(0).reader().getCoreCacheKey());
+
+        // this is kind of stupid, but for now its here
+        assertNotSame(ir.leaves().get(0).reader().getCombinedCoreAndDeletesKey(), ir2.leaves().get(0).reader().getCombinedCoreAndDeletesKey());
+
+        IOUtils.close(ir, ir2, iw, dir);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java
new file mode 100644
index 0000000000..02468f98ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.index;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.frequently;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomInt;
+import static com.carrotsearch.randomizedtesting.RandomizedTest.randomIntBetween;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests for {@code FreqTermsEnum} by comparing its doc frequencies and total
+ * term frequencies against reference maps computed directly from the stored
+ * documents of a randomly built index (with random deletes and an optional
+ * filter).
+ */
+public class FreqTermsEnumTests extends ElasticsearchTestCase {
+
+    private String[] terms;
+    private IndexWriter iw;
+    private IndexReader reader;
+    // expected freqs over all docs, including deleted ones
+    private Map<String, FreqHolder> referenceAll;
+    // expected freqs over live (not deleted) docs only
+    private Map<String, FreqHolder> referenceNotDeleted;
+    // expected freqs over the randomly chosen filtered subset of live docs
+    private Map<String, FreqHolder> referenceFilter;
+    private Filter filter;
+
+    // expected doc frequency / total term frequency for one term
+    static class FreqHolder {
+        int docFreq;
+        long totalTermFreq;
+    }
+
+
+    @Before
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        referenceAll = Maps.newHashMap();
+        referenceNotDeleted = Maps.newHashMap();
+        referenceFilter = Maps.newHashMap();
+
+        Directory dir = newDirectory();
+        IndexWriterConfig conf = newIndexWriterConfig(new KeywordAnalyzer()); // use keyword analyzer we rely on the stored field holding the exact term.
+        if (frequently()) {
+            // we don't want to do any merges, so we won't expunge deletes
+            conf.setMergePolicy(NoMergePolicy.INSTANCE);
+        }
+
+        iw = new IndexWriter(dir, conf);
+        terms = new String[scaledRandomIntBetween(10, 300)];
+        for (int i = 0; i < terms.length; i++) {
+            terms[i] = randomAsciiOfLength(5);
+        }
+
+        // each doc gets a random subset of terms, each with freq 1..3
+        int numberOfDocs = scaledRandomIntBetween(30, 300);
+        Document[] docs = new Document[numberOfDocs];
+        for (int i = 0; i < numberOfDocs; i++) {
+            Document doc = new Document();
+            doc.add(new StringField("id", Integer.toString(i), Field.Store.YES));
+            docs[i] = doc;
+            for (String term : terms) {
+                if (randomBoolean()) {
+                    continue;
+                }
+                int freq = randomIntBetween(1, 3);
+                for (int j = 0; j < freq; j++) {
+                    doc.add(new TextField("field", term, Field.Store.YES));
+                }
+            }
+        }
+
+        // add all docs
+
+        for (int i = 0; i < docs.length; i++) {
+            Document doc = docs[i];
+            iw.addDocument(doc);
+            if (rarely()) {
+                iw.commit();
+            }
+        }
+
+        // randomly delete ~1/6 of the docs; remember which ids were deleted
+        Set<String> deletedIds = Sets.newHashSet();
+        for (int i = 0; i < docs.length; i++) {
+            Document doc = docs[i];
+            if (randomInt(5) == 2) {
+                Term idTerm = new Term("id", doc.getField("id").stringValue());
+                deletedIds.add(idTerm.text());
+                iw.deleteDocuments(idTerm);
+            }
+        }
+
+        for (String term : terms) {
+            referenceAll.put(term, new FreqHolder());
+            referenceFilter.put(term, new FreqHolder());
+            referenceNotDeleted.put(term, new FreqHolder());
+        }
+
+        // now go over each doc, build the relevant references and filter
+        reader = DirectoryReader.open(iw, true);
+        List<Term> filterTerms = Lists.newArrayList();
+        for (int docId = 0; docId < reader.maxDoc(); docId++) {
+            Document doc = reader.document(docId);
+            addFreqs(doc, referenceAll);
+            if (!deletedIds.contains(doc.getField("id").stringValue())) {
+                addFreqs(doc, referenceNotDeleted);
+                if (randomBoolean()) {
+                    filterTerms.add(new Term("id", doc.getField("id").stringValue()));
+                    addFreqs(doc, referenceFilter);
+                }
+            }
+        }
+        filter = new QueryWrapperFilter(new TermsQuery(filterTerms));
+    }
+
+    // Accumulates one doc's stored "field" values into the reference map:
+    // docFreq is incremented once per distinct term, totalTermFreq per occurrence.
+    private void addFreqs(Document doc, Map<String, FreqHolder> reference) {
+        Set<String> addedDocFreq = Sets.newHashSet();
+        for (IndexableField field : doc.getFields("field")) {
+            String term = field.stringValue();
+            FreqHolder freqHolder = reference.get(term);
+            if (!addedDocFreq.contains(term)) {
+                freqHolder.docFreq++;
+                addedDocFreq.add(term);
+            }
+            freqHolder.totalTermFreq++;
+        }
+    }
+
+    @After
+    @Override
+    public void tearDown() throws Exception {
+        IOUtils.close(reader, iw, iw.getDirectory());
+        super.tearDown();
+    }
+
+    @Test
+    public void testAllFreqs() throws Exception {
+        // null filter: deleted docs are included in the stats
+        assertAgainstReference(true, true, null, referenceAll);
+        assertAgainstReference(true, false, null, referenceAll);
+        assertAgainstReference(false, true, null, referenceAll);
+    }
+
+    @Test
+    public void testNonDeletedFreqs() throws Exception {
+        // match-all query: live docs only
+        assertAgainstReference(true, true, Queries.newMatchAllQuery(), referenceNotDeleted);
+        assertAgainstReference(true, false, Queries.newMatchAllQuery(), referenceNotDeleted);
+        assertAgainstReference(false, true, Queries.newMatchAllQuery(), referenceNotDeleted);
+    }
+
+    @Test
+    public void testFilterFreqs() throws Exception {
+        assertAgainstReference(true, true, filter, referenceFilter);
+        assertAgainstReference(true, false, filter, referenceFilter);
+        assertAgainstReference(false, true, filter, referenceFilter);
+    }
+
+    private void assertAgainstReference(boolean docFreq, boolean totalTermFreq, Query filter, Map<String, FreqHolder> reference) throws Exception {
+        FreqTermsEnum freqTermsEnum = new FreqTermsEnum(reader, "field", docFreq, totalTermFreq, filter, BigArrays.NON_RECYCLING_INSTANCE);
+        assertAgainstReference(freqTermsEnum, reference, docFreq, totalTermFreq);
+    }
+
+    // Seeks every term in random order (several cycles, to exercise the enum's
+    // internal caching) and compares against the reference freqs.
+    private void assertAgainstReference(FreqTermsEnum termsEnum, Map<String, FreqHolder> reference, boolean docFreq, boolean totalTermFreq) throws Exception {
+        int cycles = randomIntBetween(1, 5);
+        for (int i = 0; i < cycles; i++) {
+            List<String> terms = Lists.newArrayList(Arrays.asList(this.terms));
+
+            Collections.shuffle(terms, getRandom());
+            for (String term : terms) {
+                if (!termsEnum.seekExact(new BytesRef(term))) {
+                    assertThat("term : " + term, reference.get(term).docFreq, is(0));
+                    continue;
+                }
+                if (docFreq) {
+                    assertThat("cycle " + i + ", term " + term + ", docFreq", termsEnum.docFreq(), equalTo(reference.get(term).docFreq));
+                }
+                if (totalTermFreq) {
+                    assertThat("cycle " + i + ", term " + term + ", totalTermFreq", termsEnum.totalTermFreq(), equalTo(reference.get(term).totalTermFreq));
+                }
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
new file mode 100644
index 0000000000..14e688d574
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Basic sanity tests for {@code MultiPhrasePrefixQuery}: prefix matching on
+ * the last position, phrase ordering, and slop handling.
+ *
+ * Fix over the previous revision: the IndexWriter, IndexReader and Directory
+ * are now closed at the end of the test instead of being leaked, and the file
+ * ends with a newline.
+ */
+public class MultiPhrasePrefixQueryTests extends ElasticsearchTestCase {
+
+    @Test
+    public void simpleTests() throws Exception {
+        // single doc "aaa bbb ccc ddd"; each query below matches it or not
+        RAMDirectory dir = new RAMDirectory();
+        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+        Document doc = new Document();
+        doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
+        writer.addDocument(doc);
+        IndexReader reader = DirectoryReader.open(writer, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // prefix match on a single term
+        MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery();
+        query.add(new Term("field", "aa"));
+        assertThat(Lucene.count(searcher, query), equalTo(1l));
+
+        // exact term followed by a prefix, adjacent positions
+        query = new MultiPhrasePrefixQuery();
+        query.add(new Term("field", "aaa"));
+        query.add(new Term("field", "bb"));
+        assertThat(Lucene.count(searcher, query), equalTo(1l));
+
+        // "aaa" and "cc*" are two positions apart, so slop 1 is required
+        query = new MultiPhrasePrefixQuery();
+        query.setSlop(1);
+        query.add(new Term("field", "aaa"));
+        query.add(new Term("field", "cc"));
+        assertThat(Lucene.count(searcher, query), equalTo(1l));
+
+        // no term starts with "xxx"
+        query = new MultiPhrasePrefixQuery();
+        query.setSlop(1);
+        query.add(new Term("field", "xxx"));
+        assertThat(Lucene.count(searcher, query), equalTo(0l));
+
+        reader.close();
+        writer.close();
+        dir.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java
new file mode 100644
index 0000000000..2ae7df16bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunctionTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.function;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.elasticsearch.script.AbstractFloatSearchScript;
+import org.elasticsearch.script.LeafSearchScript;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptException;
+import org.elasticsearch.script.SearchScript;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ScriptScoreFunctionTests extends ElasticsearchTestCase {
+
+ /**
+ * Tests https://github.com/elasticsearch/elasticsearch/issues/2426
+ */
+ @Test
+ public void testScriptScoresReturnsNaN() throws IOException {
+ ScoreFunction scoreFunction = new ScriptScoreFunction(new Script("Float.NaN"), new FloatValueScript(Float.NaN));
+ LeafScoreFunction leafScoreFunction = scoreFunction.getLeafScoreFunction(null);
+ try {
+ leafScoreFunction.score(randomInt(), randomFloat());
+ fail("should have thrown an exception about the script_score returning NaN");
+ } catch (ScriptException e) {
+ assertThat("message contains error about script_score returning NaN: " + e.getMessage(),
+ e.getMessage().contains("NaN"), equalTo(true));
+ }
+ }
+
+ static class FloatValueScript implements SearchScript {
+
+ private final float value;
+
+ FloatValueScript(float value) {
+ this.value = value;
+ }
+
+ @Override
+ public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
+ return new AbstractFloatSearchScript() {
+
+ @Override
+ public float runAsFloat() {
+ return value;
+ }
+
+ @Override
+ public void setDocument(int doc) {
+ // nothing here
+ }
+ };
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java
new file mode 100644
index 0000000000..d18d7ff349
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/MoreLikeThisQueryTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.morelikethis;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class MoreLikeThisQueryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+ indexWriter.commit();
+
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("text", "lucene", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ document.add(new TextField("text", "lucene release", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ MoreLikeThisQuery mltQuery = new MoreLikeThisQuery("lucene", new String[]{"text"}, Lucene.STANDARD_ANALYZER);
+ mltQuery.setLikeText("lucene");
+ mltQuery.setMinTermFrequency(1);
+ mltQuery.setMinDocFreq(1);
+ long count = Lucene.count(searcher, mltQuery);
+ assertThat(count, equalTo(2l));
+
+ reader.close();
+ indexWriter.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java
new file mode 100644
index 0000000000..cafa2ef9ee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/search/morelikethis/XMoreLikeThisTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.search.morelikethis;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.mlt.MoreLikeThis;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.Arrays;
+import java.util.List;
+
+public class XMoreLikeThisTests extends ElasticsearchTestCase {
+
+ private void addDoc(RandomIndexWriter writer, String[] texts) throws IOException {
+ Document doc = new Document();
+ for (String text : texts) {
+ doc.add(newTextField("text", text, Field.Store.YES));
+ }
+ writer.addDocument(doc);
+ }
+
+ @Test
+ public void testTopN() throws Exception {
+ int numDocs = 100;
+ int topN = 25;
+
+ // add series of docs with terms of decreasing df
+ Directory dir = newDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+ for (int i = 0; i < numDocs; i++) {
+ addDoc(writer, generateStrSeq(0, i + 1));
+ }
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ // setup MLT query
+ MoreLikeThis mlt = new MoreLikeThis(reader);
+ mlt.setAnalyzer(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false));
+ mlt.setMaxQueryTerms(topN);
+ mlt.setMinDocFreq(1);
+ mlt.setMinTermFreq(1);
+ mlt.setMinWordLen(1);
+ mlt.setFieldNames(new String[]{"text"});
+
+ // perform MLT query
+ String likeText = "";
+ for (String text : generateStrSeq(0, numDocs)) {
+ likeText += text + " ";
+ }
+ BooleanQuery query = (BooleanQuery) mlt.like("text", new StringReader(likeText));
+
+ // check best terms are topN of highest idf
+ List<BooleanClause> clauses = query.clauses();
+ assertEquals("Expected" + topN + "clauses only!", topN, clauses.size());
+
+ Term[] expectedTerms = new Term[topN];
+ int idx = 0;
+ for (String text : generateStrSeq(numDocs - topN, topN)) {
+ expectedTerms[idx++] = new Term("text", text);
+ }
+ for (BooleanClause clause : clauses) {
+ Term term = ((TermQuery) clause.getQuery()).getTerm();
+ assertTrue(Arrays.asList(expectedTerms).contains(term));
+ }
+
+ // clean up
+ reader.close();
+ dir.close();
+ }
+
+ private String[] generateStrSeq(int from, int size) {
+ String[] generatedStrings = new String[size];
+ for (int i = 0; i < generatedStrings.length; i++) {
+ generatedStrings[i] = String.valueOf(from + i);
+ }
+ return generatedStrings;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java b/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
new file mode 100644
index 0000000000..315b93edd2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/store/InputStreamIndexInputTests.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.lucene.store;
+
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ *
+ */
+public class InputStreamIndexInputTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSingleReadSingleByteLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiSingleByteLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 1));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(1l));
+ assertThat(is.read(read), equalTo(1));
+ assertThat(read[0], equalTo((byte) 2));
+ }
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testSingleReadTwoBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(2));
+ assertThat(is.read(), equalTo(-1));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiTwoBytesLimit1() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[2];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 2);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testReadMultiFourBytesLimit() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+
+ byte[] read = new byte[4];
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(4l));
+ assertThat(is.read(read), equalTo(4));
+ assertThat(read[0], equalTo((byte) 1));
+ assertThat(read[1], equalTo((byte) 1));
+ assertThat(read[2], equalTo((byte) 1));
+ assertThat(read[3], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), lessThan(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(2l));
+ assertThat(is.read(read), equalTo(2));
+ assertThat(read[0], equalTo((byte) 2));
+ assertThat(read[1], equalTo((byte) 2));
+
+ assertThat(input.getFilePointer(), equalTo(input.length()));
+ is = new InputStreamIndexInput(input, 4);
+ assertThat(is.actualSizeToRead(), equalTo(0l));
+ assertThat(is.read(read), equalTo(-1));
+ }
+
+ @Test
+ public void testMarkRest() throws Exception {
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("test", IOContext.DEFAULT);
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 1);
+ }
+ for (int i = 0; i < 3; i++) {
+ output.writeByte((byte) 2);
+ }
+
+ output.close();
+
+ IndexInput input = dir.openInput("test", IOContext.DEFAULT);
+ InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
+ assertThat(is.markSupported(), equalTo(true));
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(1));
+ is.mark(0);
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ is.reset();
+ assertThat(is.read(), equalTo(1));
+ assertThat(is.read(), equalTo(2));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
new file mode 100644
index 0000000000..fc055d243c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.lucene.uid;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.*;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.index.merge.policy.ElasticsearchMergePolicy;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class VersionsTests extends ElasticsearchTestCase {
+
+ public static DirectoryReader reopen(DirectoryReader reader) throws IOException {
+ return reopen(reader, true);
+ }
+
+ public static DirectoryReader reopen(DirectoryReader reader, boolean newReaderExpected) throws IOException {
+ DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
+ if (newReader != null) {
+ reader.close();
+ } else {
+ assertFalse(newReaderExpected);
+ }
+ return newReader;
+ }
+ @Test
+ public void testVersions() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ writer.addDocument(doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(Versions.NOT_SET));
+
+ doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1l));
+
+ doc = new Document();
+ Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 2);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2l));
+
+ // test reuse of uid field
+ doc = new Document();
+ version.setLongValue(3);
+ doc.add(uid);
+ doc.add(version);
+ writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testNestedDocuments() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ List<Document> docs = new ArrayList<>();
+ for (int i = 0; i < 4; ++i) {
+ // Nested
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ docs.add(doc);
+ }
+ // Root
+ Document doc = new Document();
+ doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
+ NumericDocValuesField version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ doc.add(version);
+ docs.add(doc);
+
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5l));
+
+ version.setLongValue(6L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ version.setLongValue(7L);
+ writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7l));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7l));
+
+ writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+ assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ @Test
+ public void testBackwardCompatibility() throws IOException {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ DirectoryReader directoryReader = DirectoryReader.open(writer, true);
+ MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
+
+ Document doc = new Document();
+ UidField uidAndVersion = new UidField("1", 1L);
+ doc.add(uidAndVersion);
+ writer.addDocument(doc);
+
+ uidAndVersion.uid = "2";
+ uidAndVersion.version = 2;
+ writer.addDocument(doc);
+ writer.commit();
+
+ directoryReader = reopen(directoryReader);
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2l));
+ assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
+ directoryReader.close();
+ writer.close();
+ dir.close();
+ }
+
+ // This is how versions used to be encoded
+ private static class UidField extends Field {
+ private static final FieldType FIELD_TYPE = new FieldType();
+ static {
+ FIELD_TYPE.setTokenized(true);
+ FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+ FIELD_TYPE.setStored(true);
+ FIELD_TYPE.freeze();
+ }
+ String uid;
+ long version;
+ UidField(String uid, long version) {
+ super(UidFieldMapper.NAME, uid, FIELD_TYPE);
+ this.uid = uid;
+ this.version = version;
+ }
+ @Override
+ public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) throws IOException {
+ return new TokenStream() {
+ boolean finished = true;
+ final CharTermAttribute term = addAttribute(CharTermAttribute.class);
+ final PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (finished) {
+ return false;
+ }
+ term.setEmpty().append(uid);
+ payload.setPayload(new BytesRef(Numbers.longToBytes(version)));
+ finished = true;
+ return true;
+ }
+ @Override
+ public void reset() throws IOException {
+ finished = false;
+ }
+ };
+ }
+ }
+
+ @Test
+ public void testMergingOldIndices() throws Exception {
+ final IndexWriterConfig iwConf = new IndexWriterConfig(new KeywordAnalyzer());
+ iwConf.setMergePolicy(new ElasticsearchMergePolicy(iwConf.getMergePolicy()));
+ final Directory dir = newDirectory();
+ final IndexWriter iw = new IndexWriter(dir, iwConf);
+
+ // 1st segment, no _version
+ Document document = new Document();
+ // Add a dummy field (enough to trigger #3237)
+ document.add(new StringField("a", "b", Store.NO));
+ StringField uid = new StringField(UidFieldMapper.NAME, "1", Store.YES);
+ document.add(uid);
+ iw.addDocument(document);
+ uid.setStringValue("2");
+ iw.addDocument(document);
+ iw.commit();
+
+ // 2nd segment, old layout
+ document = new Document();
+ UidField uidAndVersion = new UidField("3", 3L);
+ document.add(uidAndVersion);
+ iw.addDocument(document);
+ uidAndVersion.uid = "4";
+ uidAndVersion.version = 4L;
+ iw.addDocument(document);
+ iw.commit();
+
+ // 3rd segment new layout
+ document = new Document();
+ uid.setStringValue("5");
+ Field version = new NumericDocValuesField(VersionFieldMapper.NAME, 5L);
+ document.add(uid);
+ document.add(version);
+ iw.addDocument(document);
+ uid.setStringValue("6");
+ version.setLongValue(6L);
+ iw.addDocument(document);
+ iw.commit();
+
+ final Map<String, Long> expectedVersions = ImmutableMap.<String, Long>builder()
+ .put("1", 0L).put("2", 0L).put("3", 0L).put("4", 4L).put("5", 5L).put("6", 6L).build();
+
+ // Force merge and check versions
+ iw.forceMerge(1, true);
+ final LeafReader ir = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(iw.getDirectory()));
+ final NumericDocValues versions = ir.getNumericDocValues(VersionFieldMapper.NAME);
+ assertThat(versions, notNullValue());
+ for (int i = 0; i < ir.maxDoc(); ++i) {
+ final String uidValue = ir.document(i).get(UidFieldMapper.NAME);
+ final long expectedVersion = expectedVersions.get(uidValue);
+ assertThat(versions.get(i), equalTo(expectedVersion));
+ }
+
+ iw.close();
+ assertThat(IndexWriter.isLocked(iw.getDirectory()), is(false));
+ ir.close();
+ dir.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/math/MathUtilsTests.java b/core/src/test/java/org/elasticsearch/common/math/MathUtilsTests.java
new file mode 100644
index 0000000000..f0daca4b17
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/math/MathUtilsTests.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.math;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+public class MathUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void mod() {
+ final int iters = scaledRandomIntBetween(1000, 10000);
+ for (int i = 0; i < iters; ++i) {
+ final int v = rarely() ? Integer.MIN_VALUE : rarely() ? Integer.MAX_VALUE : randomInt();
+ final int m = rarely() ? Integer.MAX_VALUE : randomIntBetween(1, Integer.MAX_VALUE);
+ final int mod = MathUtils.mod(v, m);
+ assertTrue(mod >= 0);
+ assertTrue(mod < m);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java b/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
new file mode 100644
index 0000000000..3151352c1e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/path/PathTrieTests.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.path;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PathTrieTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPath() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("/a/b/c", "walla");
+ trie.insert("a/d/g", "kuku");
+ trie.insert("x/b/c", "lala");
+ trie.insert("a/x/*", "one");
+ trie.insert("a/b/*", "two");
+ trie.insert("*/*/x", "three");
+ trie.insert("{index}/insert/{docId}", "bingo");
+
+ assertThat(trie.retrieve("a/b/c"), equalTo("walla"));
+ assertThat(trie.retrieve("a/d/g"), equalTo("kuku"));
+ assertThat(trie.retrieve("x/b/c"), equalTo("lala"));
+ assertThat(trie.retrieve("a/x/b"), equalTo("one"));
+ assertThat(trie.retrieve("a/b/d"), equalTo("two"));
+
+ assertThat(trie.retrieve("a/b"), nullValue());
+ assertThat(trie.retrieve("a/b/c/d"), nullValue());
+ assertThat(trie.retrieve("g/t/x"), equalTo("three"));
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("index1/insert/12", params), equalTo("bingo"));
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("index"), equalTo("index1"));
+ assertThat(params.get("docId"), equalTo("12"));
+ }
+
+ @Test
+ public void testEmptyPath() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("/", "walla");
+ assertThat(trie.retrieve(""), equalTo("walla"));
+ }
+
+ @Test
+ public void testDifferentNamesOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("/a/{type}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/test", params), equalTo("test1"));
+ assertThat(params.get("type"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testSameNameOnDifferentPath() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("/a/c/{name}", "test1");
+ trie.insert("/b/{name}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/c/test", params), equalTo("test1"));
+ assertThat(params.get("name"), equalTo("test"));
+
+ params.clear();
+ assertThat(trie.retrieve("/b/testX", params), equalTo("test2"));
+ assertThat(params.get("name"), equalTo("testX"));
+ }
+
+ @Test
+ public void testPreferNonWildcardExecution() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("{test}", "test1");
+ trie.insert("b", "test2");
+ trie.insert("{test}/a", "test3");
+ trie.insert("b/a", "test4");
+ trie.insert("{test}/{testB}", "test5");
+ trie.insert("{test}/x/{testC}", "test6");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/b", params), equalTo("test2"));
+ assertThat(trie.retrieve("/b/a", params), equalTo("test4"));
+ assertThat(trie.retrieve("/v/x", params), equalTo("test5"));
+ assertThat(trie.retrieve("/v/x/c", params), equalTo("test6"));
+ }
+
+ @Test
+ public void testSamePathConcreteResolution() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("{x}/{y}/{z}", "test1");
+ trie.insert("{x}/_y/{k}", "test2");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/a/b/c", params), equalTo("test1"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("y"), equalTo("b"));
+ assertThat(params.get("z"), equalTo("c"));
+ params.clear();
+ assertThat(trie.retrieve("/a/_y/c", params), equalTo("test2"));
+ assertThat(params.get("x"), equalTo("a"));
+ assertThat(params.get("k"), equalTo("c"));
+ }
+
+ @Test
+ public void testNamedWildcardAndLookupWithWildcard() {
+ PathTrie<String> trie = new PathTrie<>();
+ trie.insert("x/{test}", "test1");
+ trie.insert("{test}/a", "test2");
+ trie.insert("/{test}", "test3");
+ trie.insert("/{test}/_endpoint", "test4");
+ trie.insert("/*/{test}/_endpoint", "test5");
+
+ Map<String, String> params = newHashMap();
+ assertThat(trie.retrieve("/x/*", params), equalTo("test1"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/b/a", params), equalTo("test2"));
+ assertThat(params.get("test"), equalTo("b"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*", params), equalTo("test3"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("/*/_endpoint", params), equalTo("test4"));
+ assertThat(params.get("test"), equalTo("*"));
+
+ params = newHashMap();
+ assertThat(trie.retrieve("a/*/_endpoint", params), equalTo("test5"));
+ assertThat(params.get("test"), equalTo("*"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTest.java b/core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTest.java
new file mode 100644
index 0000000000..c79d0917c1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/property/PropertyPlaceholderTest.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.property;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class PropertyPlaceholderTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("{", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo1", "bar1");
+ map.put("foo2", "bar2");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("bar1", propertyPlaceholder.replacePlaceholders("{foo1}", placeholderResolver));
+ assertEquals("a bar1b", propertyPlaceholder.replacePlaceholders("a {foo1}b", placeholderResolver));
+ assertEquals("bar1bar2", propertyPlaceholder.replacePlaceholders("{foo1}{foo2}", placeholderResolver));
+ assertEquals("a bar1 b bar2 c", propertyPlaceholder.replacePlaceholders("a {foo1} b {foo2} c", placeholderResolver));
+ }
+
+ @Test
+ public void testVariousPrefixSuffix() {
+ // Test various prefix/suffix lengths
+ PropertyPlaceholder ppEqualsPrefix = new PropertyPlaceholder("{", "}", false);
+ PropertyPlaceholder ppLongerPrefix = new PropertyPlaceholder("${", "}", false);
+ PropertyPlaceholder ppShorterPrefix = new PropertyPlaceholder("{", "}}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "bar");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("bar", ppEqualsPrefix.replacePlaceholders("{foo}", placeholderResolver));
+ assertEquals("bar", ppLongerPrefix.replacePlaceholders("${foo}", placeholderResolver));
+ assertEquals("bar", ppShorterPrefix.replacePlaceholders("{foo}}", placeholderResolver));
+ }
+
+ @Test
+ public void testDefaultValue() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("bar", propertyPlaceholder.replacePlaceholders("${foo:bar}", placeholderResolver));
+ assertEquals("", propertyPlaceholder.replacePlaceholders("${foo:}", placeholderResolver));
+ }
+
+ @Test
+ public void testIgnoredUnresolvedPlaceholder() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", true);
+ Map<String, String> map = new LinkedHashMap<>();
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("${foo}", propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testNotIgnoredUnresolvedPlaceholder() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver);
+ }
+
+ @Test
+ public void testShouldIgnoreMissing() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, true);
+ assertEquals("bar", propertyPlaceholder.replacePlaceholders("bar${foo}", placeholderResolver));
+ }
+
+ @Test
+ public void testRecursive() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "${foo1}");
+ map.put("foo1", "${foo2}");
+ map.put("foo2", "bar");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("bar", propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver));
+ assertEquals("abarb", propertyPlaceholder.replacePlaceholders("a${foo}b", placeholderResolver));
+ }
+
+ @Test
+ public void testNestedLongerPrefix() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "${foo1}");
+ map.put("foo1", "${foo2}");
+ map.put("foo2", "bar");
+ map.put("barbar", "baz");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("baz", propertyPlaceholder.replacePlaceholders("${bar${foo}}", placeholderResolver));
+ }
+
+ @Test
+ public void testNestedSameLengthPrefixSuffix() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("{", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "{foo1}");
+ map.put("foo1", "{foo2}");
+ map.put("foo2", "bar");
+ map.put("barbar", "baz");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("baz", propertyPlaceholder.replacePlaceholders("{bar{foo}}", placeholderResolver));
+ }
+
+ @Test
+ public void testNestedShorterPrefix() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("{", "}}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "{foo1}}");
+ map.put("foo1", "{foo2}}");
+ map.put("foo2", "bar");
+ map.put("barbar", "baz");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ assertEquals("baz", propertyPlaceholder.replacePlaceholders("{bar{foo}}}}", placeholderResolver));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testCircularReference() {
+ PropertyPlaceholder propertyPlaceholder = new PropertyPlaceholder("${", "}", false);
+ Map<String, String> map = new LinkedHashMap<>();
+ map.put("foo", "${bar}");
+ map.put("bar", "${foo}");
+ PropertyPlaceholder.PlaceholderResolver placeholderResolver = new SimplePlaceholderResolver(map, false);
+ propertyPlaceholder.replacePlaceholders("${foo}", placeholderResolver);
+ }
+
+ private class SimplePlaceholderResolver implements PropertyPlaceholder.PlaceholderResolver {
+ private Map<String, String> map;
+ private boolean shouldIgnoreMissing;
+
+ SimplePlaceholderResolver(Map<String, String> map, boolean shouldIgnoreMissing) {
+ this.map = map;
+ this.shouldIgnoreMissing = shouldIgnoreMissing;
+ }
+
+ @Override
+ public String resolvePlaceholder(String placeholderName) {
+ return map.get(placeholderName);
+ }
+
+ @Override
+ public boolean shouldIgnoreMissing(String placeholderName) {
+ return shouldIgnoreMissing;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/core/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
new file mode 100644
index 0000000000..60d161b412
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+import org.elasticsearch.common.recycler.Recycler.V;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public abstract class AbstractRecyclerTests extends ElasticsearchTestCase {
+
+ // marker states for data
+ protected static final byte FRESH = 1;
+ protected static final byte RECYCLED = 2;
+ protected static final byte DEAD = 42;
+
+ protected static final Recycler.C<byte[]> RECYCLER_C = new AbstractRecyclerC<byte[]>() {
+
+ @Override
+ public byte[] newInstance(int sizing) {
+ byte[] value = new byte[10];
+ // "fresh" is intentionally not 0 to ensure we covered this code path
+ Arrays.fill(value, FRESH);
+ return value;
+ }
+
+ @Override
+ public void recycle(byte[] value) {
+ Arrays.fill(value, RECYCLED);
+ }
+
+ @Override
+ public void destroy(byte[] value) {
+ // we cannot really free the internals of a byte[], so mark it for verification
+ Arrays.fill(value, DEAD);
+ }
+
+ };
+
+ protected void assertFresh(byte[] data) {
+ assertNotNull(data);
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(FRESH, data[i]);
+ }
+ }
+
+ protected void assertRecycled(byte[] data) {
+ assertNotNull(data);
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(RECYCLED, data[i]);
+ }
+ }
+
+ protected void assertDead(byte[] data) {
+ assertNotNull(data);
+ for (int i = 0; i < data.length; ++i) {
+ assertEquals(DEAD, data[i]);
+ }
+ }
+
+ protected abstract Recycler<byte[]> newRecycler(int limit);
+
+ protected int limit = randomIntBetween(5, 10);
+
+ public void testReuse() {
+ Recycler<byte[]> r = newRecycler(limit);
+ Recycler.V<byte[]> o = r.obtain();
+ assertFalse(o.isRecycled());
+ final byte[] b1 = o.v();
+ assertFresh(b1);
+ o.close();
+ assertRecycled(b1);
+ o = r.obtain();
+ final byte[] b2 = o.v();
+ if (o.isRecycled()) {
+ assertRecycled(b2);
+ assertSame(b1, b2);
+ } else {
+ assertFresh(b2);
+ assertNotSame(b1, b2);
+ }
+ o.close();
+ r.close();
+ }
+
+ public void testRecycle() {
+ Recycler<byte[]> r = newRecycler(limit);
+ Recycler.V<byte[]> o = r.obtain();
+ assertFresh(o.v());
+ getRandom().nextBytes(o.v());
+ o.close();
+ o = r.obtain();
+ assertRecycled(o.v());
+ o.close();
+ r.close();
+ }
+
+ public void testDoubleRelease() {
+ final Recycler<byte[]> r = newRecycler(limit);
+ final Recycler.V<byte[]> v1 = r.obtain();
+ v1.close();
+ try {
+ v1.close();
+ } catch (IllegalStateException e) {
+ // impl has protection against double release: ok
+ return;
+ }
+ // otherwise ensure that the impl may not be returned twice
+ final Recycler.V<byte[]> v2 = r.obtain();
+ final Recycler.V<byte[]> v3 = r.obtain();
+ assertNotSame(v2.v(), v3.v());
+ r.close();
+ }
+
+ public void testDestroyWhenOverCapacity() {
+ Recycler<byte[]> r = newRecycler(limit);
+
+ // get & keep reference to new/recycled data
+ Recycler.V<byte[]> o = r.obtain();
+ byte[] data = o.v();
+ assertFresh(data);
+
+ // now exhaust the recycler
+ List<V<byte[]>> vals = new ArrayList<>(limit);
+ for (int i = 0; i < limit ; ++i) {
+ vals.add(r.obtain());
+ }
+ // Recycler size increases on release, not on obtain!
+ for (V<byte[]> v: vals) {
+ v.close();
+ }
+
+ // release first ref, verify for destruction
+ o.close();
+ assertDead(data);
+
+ // close the rest
+ r.close();
+ }
+
+ public void testClose() {
+ Recycler<byte[]> r = newRecycler(limit);
+
+ // get & keep reference to pooled data
+ Recycler.V<byte[]> o = r.obtain();
+ byte[] data = o.v();
+ assertFresh(data);
+
+ // randomize & return to pool
+ getRandom().nextBytes(data);
+ o.close();
+
+ // verify that recycle() ran
+ assertRecycled(data);
+
+ // closing the recycler should mark recycled instances via destroy()
+ r.close();
+ assertDead(data);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java b/core/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
new file mode 100644
index 0000000000..c8c4c2e8e7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/recycler/ConcurrentRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class ConcurrentRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler(int limit) {
+ return Recyclers.concurrent(Recyclers.dequeFactory(RECYCLER_C, limit), randomIntBetween(1,5));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java b/core/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
new file mode 100644
index 0000000000..7d56dffce1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/recycler/LockedRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class LockedRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler(int limit) {
+ return Recyclers.locked(Recyclers.deque(RECYCLER_C, limit));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java b/core/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
new file mode 100644
index 0000000000..d4acb54661
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/recycler/NoneRecyclerTests.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class NoneRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler(int limit) {
+ return Recyclers.none(RECYCLER_C);
+ }
+
+ @Override
+ protected void assertRecycled(byte[] data) {
+ // will never match
+ }
+
+ @Override
+ protected void assertDead(byte[] data) {
+ // will never match
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java b/core/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
new file mode 100644
index 0000000000..20e229a65b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/recycler/QueueRecyclerTests.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.recycler;
+
+public class QueueRecyclerTests extends AbstractRecyclerTests {
+
+ @Override
+ protected Recycler<byte[]> newRecycler(int limit) {
+ return Recyclers.concurrentDeque(RECYCLER_C, limit);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/regex/RegexTests.java b/core/src/test/java/org/elasticsearch/common/regex/RegexTests.java
new file mode 100644
index 0000000000..380bf90ad5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/regex/RegexTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.regex;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class RegexTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testFlags() {
+ String[] supportedFlags = new String[]{"CASE_INSENSITIVE", "MULTILINE", "DOTALL", "UNICODE_CASE", "CANON_EQ", "UNIX_LINES",
+ "LITERAL", "COMMENTS", "UNICODE_CHAR_CLASS"};
+ int[] flags = new int[]{Pattern.CASE_INSENSITIVE, Pattern.MULTILINE, Pattern.DOTALL, Pattern.UNICODE_CASE, Pattern.CANON_EQ,
+ Pattern.UNIX_LINES, Pattern.LITERAL, Pattern.COMMENTS, Regex.UNICODE_CHARACTER_CLASS};
+ Random random = getRandom();
+ int num = 10 + random.nextInt(100);
+ for (int i = 0; i < num; i++) {
+ int numFlags = random.nextInt(flags.length + 1);
+ int current = 0;
+ StringBuilder builder = new StringBuilder();
+ for (int j = 0; j < numFlags; j++) {
+ int index = random.nextInt(flags.length);
+ current |= flags[index];
+ builder.append(supportedFlags[index]);
+ if (j < numFlags - 1) {
+ builder.append("|");
+ }
+ }
+ String flagsToString = Regex.flagsToString(current);
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(current));
+ assertThat(Regex.flagsFromString(builder.toString()), equalTo(Regex.flagsFromString(flagsToString)));
+ Pattern.compile("\\w\\d{1,2}", current); // accepts the flags?
+ }
+ }
+
+ @Test(timeout = 1000)
+ public void testDoubleWildcardMatch() {
+ assertTrue(Regex.simpleMatch("ddd", "ddd"));
+ assertTrue(Regex.simpleMatch("d*d*d", "dadd"));
+ assertTrue(Regex.simpleMatch("**ddd", "dddd"));
+ assertFalse(Regex.simpleMatch("**ddd", "fff"));
+ assertTrue(Regex.simpleMatch("fff*ddd", "fffabcddd"));
+ assertTrue(Regex.simpleMatch("fff**ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff**ddd", "fffabcdd"));
+ assertTrue(Regex.simpleMatch("fff*******ddd", "fffabcddd"));
+ assertFalse(Regex.simpleMatch("fff******ddd", "fffabcdd"));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
new file mode 100644
index 0000000000..6846c408cb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class RoundingTests extends ElasticsearchTestCase {
+
+ /**
+ * simple test case to illustrate how Rounding.Interval works on readable input
+ */
+ @Test
+ public void testInterval() {
+ int interval = 10;
+ Rounding.Interval rounding = new Rounding.Interval(interval);
+ int value = 24;
+ final long key = rounding.roundKey(24);
+ final long r = rounding.round(24);
+ String message = "round(" + value + ", interval=" + interval + ") = " + r;
+ assertEquals(value/interval, key);
+ assertEquals(value/interval * interval, r);
+ assertEquals(message, 0, r % interval);
+ }
+
+ @Test
+ public void testIntervalRandom() {
+ final long interval = randomIntBetween(1, 100);
+ Rounding.Interval rounding = new Rounding.Interval(interval);
+ for (int i = 0; i < 1000; ++i) {
+ long l = Math.max(randomLong(), Long.MIN_VALUE + interval);
+ final long key = rounding.roundKey(l);
+ final long r = rounding.round(l);
+ String message = "round(" + l + ", interval=" + interval + ") = " + r;
+ assertEquals(message, 0, r % interval);
+ assertThat(message, r, lessThanOrEqualTo(l));
+ assertThat(message, r + interval, greaterThan(l));
+ assertEquals(message, r, key*interval);
+ }
+ }
+
+ /**
+ * Simple test case to illustrate how Rounding.Offset works on readable input.
+ * offset shifts input value back before rounding (so here 6 - 7 -> -1)
+ * then shifts rounded Value back (here -10 -> -3)
+ */
+ @Test
+ public void testOffsetRounding() {
+ final long interval = 10;
+ final long offset = 7;
+ Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset);
+ assertEquals(-1, rounding.roundKey(6));
+ assertEquals(-3, rounding.round(6));
+ assertEquals(7, rounding.nextRoundingValue(-3));
+ assertEquals(0, rounding.roundKey(7));
+ assertEquals(7, rounding.round(7));
+ assertEquals(17, rounding.nextRoundingValue(7));
+ assertEquals(0, rounding.roundKey(16));
+ assertEquals(7, rounding.round(16));
+ assertEquals(1, rounding.roundKey(17));
+ assertEquals(17, rounding.round(17));
+ assertEquals(27, rounding.nextRoundingValue(17));
+ }
+
+ /**
+ * test OffsetRounding with an internal interval rounding on random inputs
+ */
+ @Test
+ public void testOffsetRoundingRandom() {
+ for (int i = 0; i < 1000; ++i) {
+ final long interval = randomIntBetween(1, 100);
+ Rounding.Interval internalRounding = new Rounding.Interval(interval);
+ final long offset = randomIntBetween(-100, 100);
+ Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset);
+ long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow
+ long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin);
+ final long key = rounding.roundKey(value);
+ final long key_next = rounding.roundKey(value + interval);
+ final long r_value = rounding.round(value);
+ final long nextRoundingValue = rounding.nextRoundingValue(r_value);
+ assertThat("Rounding should be idempotent", r_value, equalTo(rounding.round(r_value)));
+ assertThat("Rounded value smaller than unrounded, regardless of offset", r_value - offset, lessThanOrEqualTo(value - offset));
+ assertThat("Key and next_key should differ by one", key_next - key, equalTo(1L));
+ assertThat("Rounded value <= value < next interval start", r_value + interval, greaterThan(value));
+ assertThat("NextRounding value should be interval from rounded value", r_value + interval, equalTo(nextRoundingValue));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
new file mode 100644
index 0000000000..0a6d8f980d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java
@@ -0,0 +1,316 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.rounding;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ */
+public class TimeZoneRoundingTests extends ElasticsearchTestCase {
+
+    // Time zone with a DST transition, used by the DST-specific tests below.
+    final static DateTimeZone JERUSALEM_TIMEZONE = DateTimeZone.forID("Asia/Jerusalem");
+
+    /**
+     * Unit-based rounding (month, week) in UTC, including a week rounding
+     * shifted back 24h via an explicit offset.
+     */
+    @Test
+    public void testUTCTimeUnitRounding() {
+        Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build();
+        assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-01T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-01T00:00:00.000Z")), equalTo(utc("2009-03-01T00:00:00.000Z")));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build();
+        assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-09T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2012-01-09T00:00:00.000Z")), equalTo(utc("2012-01-16T00:00:00.000Z")));
+
+        // same week rounding, but with a -24h offset: boundaries move to Sunday
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build();
+        assertThat(tzRounding.round(utc("2012-01-10T01:01:01")), equalTo(utc("2012-01-08T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2012-01-08T00:00:00.000Z")), equalTo(utc("2012-01-15T00:00:00.000Z")));
+    }
+
+    /**
+     * Fixed-interval rounding (12h and 48h) in UTC, also checking the
+     * roundKey/valueForKey round trip.
+     */
+    @Test
+    public void testUTCIntervalRounding() {
+        Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build();
+        assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T00:00:00.000Z")));
+        long roundKey = tzRounding.roundKey(utc("2009-02-03T01:01:01"));
+        assertThat(roundKey, equalTo(tzRounding.roundKey(utc("2009-02-03T00:00:00.000Z"))));
+        assertThat(tzRounding.valueForKey(roundKey), equalTo(utc("2009-02-03T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T00:00:00.000Z")), equalTo(utc("2009-02-03T12:00:00.000Z")));
+        assertThat(tzRounding.round(utc("2009-02-03T13:01:01")), equalTo(utc("2009-02-03T12:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T12:00:00.000Z")), equalTo(utc("2009-02-04T00:00:00.000Z")));
+
+        tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(48)).build();
+        assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T00:00:00.000Z")), equalTo(utc("2009-02-05T00:00:00.000Z")));
+        assertThat(tzRounding.round(utc("2009-02-05T13:01:01")), equalTo(utc("2009-02-05T00:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-05T00:00:00.000Z")), equalTo(utc("2009-02-07T00:00:00.000Z")));
+    }
+
+    /**
+     * test TimeIntervalTimeZoneRounding, (interval < 12h) with time zone shift.
+     * With a -01:00 zone the 6h boundaries fall at 01/07/13/19h UTC.
+     */
+    @Test
+    public void testTimeIntervalTimeZoneRounding() {
+        Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(DateTimeZone.forOffsetHours(-1)).build();
+        assertThat(tzRounding.round(utc("2009-02-03T00:01:01")), equalTo(utc("2009-02-02T19:00:00.000Z")));
+        long roundKey = tzRounding.roundKey(utc("2009-02-03T00:01:01"));
+        assertThat(roundKey, equalTo(tzRounding.roundKey(utc("2009-02-02T19:00:00.000Z"))));
+        assertThat(tzRounding.valueForKey(roundKey), equalTo(utc("2009-02-02T19:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-02T19:00:00.000Z")), equalTo(utc("2009-02-03T01:00:00.000Z")));
+
+        assertThat(tzRounding.round(utc("2009-02-03T13:01:01")), equalTo(utc("2009-02-03T13:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T13:00:00.000Z")), equalTo(utc("2009-02-03T19:00:00.000Z")));
+    }
+
+    /**
+     * test DayIntervalTimeZoneRounding, (interval >= 12h) with time zone shift.
+     * With a -08:00 zone the 12h boundaries fall at 08:00/20:00 UTC.
+     */
+    @Test
+    public void testDayIntervalTimeZoneRounding() {
+        Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(DateTimeZone.forOffsetHours(-8)).build();
+        assertThat(tzRounding.round(utc("2009-02-03T00:01:01")), equalTo(utc("2009-02-02T20:00:00.000Z")));
+        long roundKey = tzRounding.roundKey(utc("2009-02-03T00:01:01"));
+        assertThat(roundKey, equalTo(tzRounding.roundKey(utc("2009-02-02T20:00:00.000Z"))));
+        assertThat(tzRounding.valueForKey(roundKey), equalTo(utc("2009-02-02T20:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-02T20:00:00.000Z")), equalTo(utc("2009-02-03T08:00:00.000Z")));
+
+        assertThat(tzRounding.round(utc("2009-02-03T13:01:01")), equalTo(utc("2009-02-03T08:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T08:00:00.000Z")), equalTo(utc("2009-02-03T20:00:00.000Z")));
+    }
+
+    /**
+     * Day- and month-unit rounding under fixed-offset time zones; the
+     * expected UTC instants are shifted by the zone offset.
+     */
+    @Test
+    public void testDayTimeZoneRounding() {
+        int timezoneOffset = -2;
+        Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset))
+                .build();
+        assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(24 + timezoneOffset).millis()));
+        assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0l - TimeValue
+                .timeValueHours(timezoneOffset).millis()));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forID("-08:00")).build();
+        assertThat(tzRounding.round(utc("2012-04-01T04:15:30Z")), equalTo(utc("2012-03-31T08:00:00Z")));
+        // fixed malformed ISO timestamp "08:0:00" -> "08:00:00" (minutes must be two digits)
+        assertThat(toUTCDateString(tzRounding.nextRoundingValue(utc("2012-03-31T08:00:00Z"))),
+                equalTo(toUTCDateString(utc("2012-04-01T08:00:00Z"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(DateTimeZone.forID("-08:00")).build();
+        assertThat(tzRounding.round(utc("2012-04-01T04:15:30Z")), equalTo(utc("2012-03-01T08:00:00Z")));
+        // fixed malformed ISO timestamp "08:0:00" -> "08:00:00" (minutes must be two digits)
+        assertThat(toUTCDateString(tzRounding.nextRoundingValue(utc("2012-03-01T08:00:00Z"))),
+                equalTo(toUTCDateString(utc("2012-04-01T08:00:00Z"))));
+
+        // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forID("-02:00")).build();
+        assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-02T02:00:00")));
+        long roundKey = tzRounding.roundKey(utc("2009-02-03T01:01:01"));
+        assertThat(roundKey, equalTo(tzRounding.roundKey(utc("2009-02-02T02:00:00.000Z"))));
+        assertThat(tzRounding.valueForKey(roundKey), equalTo(utc("2009-02-02T02:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-02T02:00:00")), equalTo(utc("2009-02-03T02:00:00")));
+
+        // date in Feb-3rd, also in -02:00 timezone
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forID("-02:00")).build();
+        assertThat(tzRounding.round(utc("2009-02-03T02:01:01")), equalTo(utc("2009-02-03T02:00:00")));
+        roundKey = tzRounding.roundKey(utc("2009-02-03T02:01:01"));
+        assertThat(roundKey, equalTo(tzRounding.roundKey(utc("2009-02-03T02:00:00.000Z"))));
+        assertThat(tzRounding.valueForKey(roundKey), equalTo(utc("2009-02-03T02:00:00.000Z")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T02:00:00")), equalTo(utc("2009-02-04T02:00:00")));
+    }
+
+    /**
+     * Hour-unit rounding with a fixed -02:00 zone; hour boundaries coincide
+     * with UTC hour boundaries, so rounding leaves whole hours unchanged.
+     */
+    @Test
+    public void testTimeTimeZoneRounding() {
+        // hour unit
+        Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forOffsetHours(-2)).build();
+        assertThat(tzRounding.round(0), equalTo(0l));
+        assertThat(tzRounding.nextRoundingValue(0l), equalTo(TimeValue.timeValueHours(1l).getMillis()));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forOffsetHours(-2)).build();
+        assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T01:00:00")));
+        assertThat(tzRounding.nextRoundingValue(utc("2009-02-03T01:00:00")), equalTo(utc("2009-02-03T02:00:00")));
+    }
+
+    /**
+     * Hour-unit rounding around DST transitions (CET and America/Chicago),
+     * checking that rounding in UTC and in the DST zone itself agree on both
+     * sides of the savings switch.
+     */
+    @Test
+    public void testTimeUnitRoundingDST() {
+        Rounding tzRounding;
+        // testing savings to non savings switch
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
+        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))),
+                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build();
+        assertThat(tzRounding.round(time("2014-10-26T01:01:01", DateTimeZone.forID("CET"))),
+                equalTo(time("2014-10-26T01:00:00", DateTimeZone.forID("CET"))));
+
+        // testing non savings to savings switch
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
+        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))),
+                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("CET")).build();
+        assertThat(tzRounding.round(time("2014-03-30T01:01:01", DateTimeZone.forID("CET"))),
+                equalTo(time("2014-03-30T01:00:00", DateTimeZone.forID("CET"))));
+
+        // testing non savings to savings switch (America/Chicago)
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
+        assertThat(tzRounding.round(time("2014-03-09T03:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2014-03-09T03:00:00", DateTimeZone.forID("America/Chicago"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("America/Chicago")).build();
+        assertThat(tzRounding.round(time("2014-03-09T03:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2014-03-09T03:00:00", DateTimeZone.forID("America/Chicago"))));
+
+        // testing savings to non savings switch 2013 (America/Chicago)
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
+        assertThat(tzRounding.round(time("2013-11-03T06:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2013-11-03T06:00:00", DateTimeZone.forID("America/Chicago"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("America/Chicago")).build();
+        assertThat(tzRounding.round(time("2013-11-03T06:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2013-11-03T06:00:00", DateTimeZone.forID("America/Chicago"))));
+
+        // testing savings to non savings switch 2014 (America/Chicago)
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("UTC")).build();
+        assertThat(tzRounding.round(time("2014-11-02T06:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2014-11-02T06:00:00", DateTimeZone.forID("America/Chicago"))));
+
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forID("America/Chicago")).build();
+        assertThat(tzRounding.round(time("2014-11-02T06:01:01", DateTimeZone.forID("America/Chicago"))),
+                equalTo(time("2014-11-02T06:00:00", DateTimeZone.forID("America/Chicago"))));
+    }
+
+    /**
+     * randomized test on TimeUnitRounding with random time units and time zone offsets:
+     * rounding must be idempotent, never increase the value, and
+     * nextRoundingValue must land on a rounded instant beyond the current one.
+     */
+    @Test
+    public void testTimeZoneRoundingRandom() {
+        for (int i = 0; i < 1000; ++i) {
+            DateTimeUnit timeUnit = randomTimeUnit();
+            TimeZoneRounding rounding;
+            int timezoneOffset = randomIntBetween(-23, 23);
+            rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, DateTimeZone.forOffsetHours(timezoneOffset));
+            long date = Math.abs(randomLong() % ((long) 10e11)); // restrict to reasonable epoch-millis range
+            final long roundedDate = rounding.round(date);
+            final long nextRoundingValue = rounding.nextRoundingValue(roundedDate);
+            assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate)));
+            assertThat("Rounded value smaller or equal than unrounded, regardless of timezone", roundedDate, lessThanOrEqualTo(date));
+            assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate));
+            assertThat("NextRounding value should be a rounded date", nextRoundingValue, equalTo(rounding.round(nextRoundingValue)));
+        }
+    }
+
+    /**
+     * randomized test on TimeIntervalRounding with random interval and time zone offsets.
+     * Invariants: idempotence, rounding never increases the value, and the
+     * next rounding value is exactly one interval ahead.
+     */
+    @Test
+    public void testIntervalRoundingRandom() {
+        for (int i = 0; i < 1000; ++i) {
+            // max random interval is a year, can be negative
+            // NOTE(review): randomLong() % ... may yield 0 here; presumably
+            // TimeIntervalRounding tolerates a zero interval — verify.
+            long interval = Math.abs(randomLong() % (TimeUnit.DAYS.toMillis(365)));
+            TimeZoneRounding rounding;
+            int timezoneOffset = randomIntBetween(-23, 23);
+            rounding = new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.forOffsetHours(timezoneOffset));
+            long date = Math.abs(randomLong() % ((long) 10e11));
+            final long roundedDate = rounding.round(date);
+            final long nextRoundingValue = rounding.nextRoundingValue(roundedDate);
+            assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate)));
+            assertThat("Rounded value smaller or equal than unrounded, regardless of timezone", roundedDate, lessThanOrEqualTo(date));
+            assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate));
+            assertThat("NextRounding value should be interval from rounded value", nextRoundingValue - roundedDate, equalTo(interval));
+            assertThat("NextRounding value should be a rounded date", nextRoundingValue, equalTo(rounding.round(nextRoundingValue)));
+        }
+    }
+
+    /**
+     * special test for DST switch from #9491: two distinct UTC instants that
+     * map to the same ambiguous local time must round back to distinct,
+     * correct UTC instants, and year rounding must not produce double buckets.
+     */
+    @Test
+    public void testAmbiguousHoursAfterDSTSwitch() {
+        Rounding tzRounding;
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(JERUSALEM_TIMEZONE).build();
+        // Both timestamps "2014-10-25T22:30:00Z" and "2014-10-25T23:30:00Z" are "2014-10-26T01:30:00" in local time because
+        // of DST switch between them. This test checks that they are both returned to their correct UTC time after rounding.
+        assertThat(tzRounding.round(time("2014-10-25T22:30:00", DateTimeZone.UTC)), equalTo(time("2014-10-25T22:00:00", DateTimeZone.UTC)));
+        assertThat(tzRounding.round(time("2014-10-25T23:30:00", DateTimeZone.UTC)), equalTo(time("2014-10-25T23:00:00", DateTimeZone.UTC)));
+
+        // Day interval
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(JERUSALEM_TIMEZONE).build();
+        assertThat(tzRounding.round(time("2014-11-11T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-11-11T00:00:00", JERUSALEM_TIMEZONE)));
+        // DST on
+        assertThat(tzRounding.round(time("2014-08-11T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-08-11T00:00:00", JERUSALEM_TIMEZONE)));
+        // Day of switching DST on -> off
+        assertThat(tzRounding.round(time("2014-10-26T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-10-26T00:00:00", JERUSALEM_TIMEZONE)));
+        // Day of switching DST off -> on
+        assertThat(tzRounding.round(time("2015-03-27T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2015-03-27T00:00:00", JERUSALEM_TIMEZONE)));
+
+        // Month interval
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(JERUSALEM_TIMEZONE).build();
+        assertThat(tzRounding.round(time("2014-11-11T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-11-01T00:00:00", JERUSALEM_TIMEZONE)));
+        // DST on
+        assertThat(tzRounding.round(time("2014-10-10T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-10-01T00:00:00", JERUSALEM_TIMEZONE)));
+
+        // Year interval
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(JERUSALEM_TIMEZONE).build();
+        assertThat(tzRounding.round(time("2014-11-11T17:00:00", JERUSALEM_TIMEZONE)), equalTo(time("2014-01-01T00:00:00", JERUSALEM_TIMEZONE)));
+
+        // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491)
+        tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(JERUSALEM_TIMEZONE).build();
+        assertThat(tzRounding.round(time("2014-11-11T17:00:00", JERUSALEM_TIMEZONE)),
+                equalTo(tzRounding.round(time("2014-08-11T17:00:00", JERUSALEM_TIMEZONE))));
+    }
+
+    /**
+     * test for #10025, strict local to UTC conversion can cause joda exceptions
+     * on DST start. Walks minute-by-minute across the Sao Paulo DST start and
+     * asserts nextRoundingValue keeps advancing instead of throwing.
+     */
+    @Test
+    public void testLenientConversionDST() {
+        DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo");
+        long start = time("2014-10-18T20:50:00.000", tz);
+        long end = time("2014-10-19T01:00:00.000", tz);
+        Rounding tzRounding = new TimeZoneRounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz);
+        Rounding dayTzRounding = new TimeZoneRounding.TimeIntervalRounding(60000, tz);
+        for (long time = start; time < end; time = time + 60000) { // step one minute
+            assertThat(tzRounding.nextRoundingValue(time), greaterThan(time));
+            assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time));
+        }
+    }
+
+    // Picks a random DateTimeUnit by its serialized id (ids 1..8).
+    private DateTimeUnit randomTimeUnit() {
+        byte id = (byte) randomIntBetween(1, 8);
+        return DateTimeUnit.resolve(id);
+    }
+
+    // Formats an epoch-millis instant as its default Joda string in UTC.
+    private String toUTCDateString(long time) {
+        return new DateTime(time, DateTimeZone.UTC).toString();
+    }
+
+    // Parses an ISO date(-time) string as UTC and returns epoch millis.
+    private long utc(String time) {
+        return time(time, DateTimeZone.UTC);
+    }
+
+    // Parses an ISO date(-time) string in the given zone and returns epoch millis.
+    private long time(String time, DateTimeZone zone) {
+        return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
new file mode 100644
index 0000000000..7c13810297
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class SettingsFilterTests extends ElasticsearchTestCase {
+
+    /**
+     * Filters can be added and removed; getPatterns() reflects the current
+     * set, and removing a pattern that is absent is a no-op.
+     */
+    @Test
+    public void testAddingAndRemovingFilters() {
+        SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY);
+        settingsFilter.addFilter("foo");
+        settingsFilter.addFilter("bar");
+        settingsFilter.addFilter("baz");
+        assertThat(settingsFilter.getPatterns(), equalTo("foo,bar,baz"));
+
+        settingsFilter.removeFilter("bar");
+        assertThat(settingsFilter.getPatterns(), equalTo("foo,baz"));
+
+        settingsFilter.removeFilter("bar"); // removing again is a no-op
+        settingsFilter.removeFilter("foo");
+        settingsFilter.removeFilter("baz");
+
+        assertThat(settingsFilter.getPatterns(), equalTo(""));
+    }
+
+    /**
+     * Exercises setting filtering with exact names and wildcard patterns,
+     * including the no-pattern case where everything passes through.
+     * Each call gives: source settings, expected filtered result, patterns.
+     */
+    @Test
+    public void testSettingsFiltering() throws IOException {
+
+        // exact name "foo" plus wildcard "bar*" removes all but foo1
+        testFiltering(Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("foo1", "foo1_test")
+                        .put("bar", "bar_test")
+                        .put("bar1", "bar1_test")
+                        .put("bar.2", "bar2_test")
+                        .build(),
+                Settings.builder()
+                        .put("foo1", "foo1_test")
+                        .build(),
+                "foo,bar*"
+        );
+
+        // wildcard only: all bar* settings removed, foo settings remain
+        testFiltering(Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("foo1", "foo1_test")
+                        .put("bar", "bar_test")
+                        .put("bar1", "bar1_test")
+                        .put("bar.2", "bar2_test")
+                        .build(),
+                Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("foo1", "foo1_test")
+                        .build(),
+                "bar*"
+        );
+
+        // patterns covering everything: result is empty
+        testFiltering(Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("foo1", "foo1_test")
+                        .put("bar", "bar_test")
+                        .put("bar1", "bar1_test")
+                        .put("bar.2", "bar2_test")
+                        .build(),
+                Settings.builder()
+                        .build(),
+                "foo,bar*,foo*"
+        );
+
+        // no patterns: everything passes through unchanged
+        testFiltering(Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("bar", "bar_test")
+                        .put("baz", "baz_test")
+                        .build(),
+                Settings.builder()
+                        .put("foo", "foo_test")
+                        .put("bar", "bar_test")
+                        .put("baz", "baz_test")
+                        .build()
+        );
+    }
+
+    /**
+     * Applies the given filter patterns to {@code source} and asserts the
+     * result equals {@code filtered}, via both the direct
+     * SettingsFilter.filterSettings path and the toXContent rendering path.
+     */
+    private void testFiltering(Settings source, Settings filtered, String... patterns) throws IOException {
+        SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY);
+        for (String pattern : patterns) {
+            settingsFilter.addFilter(pattern);
+        }
+
+        // Test using direct filtering
+        Settings filteredSettings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), source);
+        assertThat(filteredSettings.getAsMap().entrySet(), equalTo(filtered.getAsMap().entrySet()));
+
+        // Test using toXContent filtering
+        RestRequest request = new FakeRestRequest();
+        settingsFilter.addFilterSettingParams(request);
+        XContentBuilder xContentBuilder = XContentBuilder.builder(JsonXContent.jsonXContent);
+        xContentBuilder.startObject();
+        source.toXContent(xContentBuilder, request);
+        xContentBuilder.endObject();
+        String filteredSettingsString = xContentBuilder.string();
+        // round-trip the rendered JSON back into Settings for comparison
+        filteredSettings = Settings.builder().loadFromSource(filteredSettingsString).build();
+        assertThat(filteredSettings.getAsMap().entrySet(), equalTo(filtered.getAsMap().entrySet()));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java
new file mode 100644
index 0000000000..1dbaf32f06
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingsTests.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.common.settings.bar.BarTestClass;
+import org.elasticsearch.common.settings.foo.FooTestClass;
+import org.elasticsearch.common.settings.loader.YamlSettingsLoader;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SettingsTests extends ElasticsearchTestCase {
+
+    /**
+     * A camelCase key is retrievable both by its original name and by the
+     * equivalent snake_case name.
+     */
+    @Test
+    public void testCamelCaseSupport() {
+        Settings settings = settingsBuilder()
+                .put("test.camelCase", "bar")
+                .build();
+        assertThat(settings.get("test.camelCase"), equalTo("bar"));
+        assertThat(settings.get("test.camel_case"), equalTo("bar"));
+    }
+
+    /**
+     * getAsClass resolution: default class when the setting is absent,
+     * prefix+suffix resolution for short names, failure on a wrong package
+     * prefix, and an explicit ".package" setting overriding the prefix.
+     */
+    @Test
+    public void testGetAsClass() {
+        Settings settings = settingsBuilder()
+                .put("test.class", "bar")
+                .put("test.class.package", "org.elasticsearch.common.settings.bar")
+                .build();
+
+        // Assert that defaultClazz is loaded if setting is not specified
+        assertThat(settings.getAsClass("no.settings", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+                equalTo(FooTestClass.class.getName()));
+
+        // Assert that correct class is loaded if setting contain name without package
+        assertThat(settings.getAsClass("test.class", FooTestClass.class, "org.elasticsearch.common.settings.", "TestClass").getName(),
+                equalTo(BarTestClass.class.getName()));
+
+        // Assert that class cannot be loaded if wrong packagePrefix is specified
+        try {
+            settings.getAsClass("test.class", FooTestClass.class, "com.example.elasticsearch.test.unit..common.settings.", "TestClass");
+            fail("Class with wrong package name shouldn't be loaded");
+        } catch (NoClassSettingsException ex) {
+            // Ignore - expected failure path
+        }
+
+        // Assert that package name in settings is getting correctly applied
+        assertThat(settings.getAsClass("test.class.package", FooTestClass.class, "com.example.elasticsearch.test.unit.common.settings.", "TestClass").getName(),
+                equalTo(BarTestClass.class.getName()));
+
+    }
+
+    /**
+     * loadFromDelimitedString parses key=value pairs with and without a
+     * trailing delimiter; toDelimitedString always emits the trailing one.
+     */
+    @Test
+    public void testLoadFromDelimitedString() {
+        Settings settings = settingsBuilder()
+                .loadFromDelimitedString("key1=value1;key2=value2", ';')
+                .build();
+        assertThat(settings.get("key1"), equalTo("value1"));
+        assertThat(settings.get("key2"), equalTo("value2"));
+        assertThat(settings.getAsMap().size(), equalTo(2));
+        assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+
+        settings = settingsBuilder()
+                .loadFromDelimitedString("key1=value1;key2=value2;", ';')
+                .build();
+        assertThat(settings.get("key1"), equalTo("value1"));
+        assertThat(settings.get("key2"), equalTo("value2"));
+        assertThat(settings.getAsMap().size(), equalTo(2));
+        assertThat(settings.toDelimitedString(';'), equalTo("key1=value1;key2=value2;"));
+    }
+
+    /**
+     * A class-name mismatch must surface as NoClassSettingsException,
+     * never as a raw NoClassDefFoundError.
+     */
+    @Test(expected = NoClassSettingsException.class)
+    public void testThatAllClassNotFoundExceptionsAreCaught() {
+        // this should be nGram in order to really work, but it must not throw a NoClassDefFoundError
+        Settings settings = settingsBuilder().put("type", "ngram").build();
+        settings.getAsClass("type", null, "org.elasticsearch.index.analysis.", "TokenFilterFactory");
+    }
+
+    /**
+     * ${sysProp} placeholders resolve against system properties; the
+     * ":default" form supplies a fallback, and an empty default yields null.
+     */
+    @Test
+    public void testReplacePropertiesPlaceholderSystemProperty() {
+        System.setProperty("sysProp1", "sysVal1");
+        try {
+            Settings settings = settingsBuilder()
+                    .put("setting1", "${sysProp1}")
+                    .replacePropertyPlaceholders()
+                    .build();
+            assertThat(settings.get("setting1"), equalTo("sysVal1"));
+        } finally {
+            // always restore global JVM state for other tests
+            System.clearProperty("sysProp1");
+        }
+
+        // property now unset: the default after ':' is used
+        Settings settings = settingsBuilder()
+                .put("setting1", "${sysProp1:defaultVal1}")
+                .replacePropertyPlaceholders()
+                .build();
+        assertThat(settings.get("setting1"), equalTo("defaultVal1"));
+
+        // empty default: the setting resolves to null
+        settings = settingsBuilder()
+                .put("setting1", "${sysProp1:}")
+                .replacePropertyPlaceholders()
+                .build();
+        assertThat(settings.get("setting1"), is(nullValue()));
+    }
+
+    /**
+     * An ${env.*} placeholder for an unset environment variable resolves to
+     * null instead of failing.
+     */
+    @Test
+    public void testReplacePropertiesPlaceholderIgnoreEnvUnset() {
+        Settings settings = settingsBuilder()
+                .put("setting1", "${env.UNSET_ENV_VAR}")
+                .replacePropertyPlaceholders()
+                .build();
+        assertThat(settings.get("setting1"), is(nullValue()));
+    }
+
+    /**
+     * Placeholders listed as ignored are left verbatim in the values.
+     */
+    @Test
+    public void testReplacePropertiesPlaceholderIgnores() {
+        Settings settings = settingsBuilder()
+                .put("setting1", "${foo.bar}")
+                .put("setting2", "${foo.bar1}")
+                .replacePropertyPlaceholders("${foo.bar}", "${foo.bar1}")
+                .build();
+        assertThat(settings.get("setting1"), is("${foo.bar}"));
+        assertThat(settings.get("setting2"), is("${foo.bar1}"));
+    }
+
+    /**
+     * getAsStructuredMap turns dotted keys into nested maps and array
+     * settings into lists.
+     */
+    @Test
+    public void testUnFlattenedSettings() {
+        Settings settings = settingsBuilder()
+                .put("foo", "abc")
+                .put("bar", "def")
+                .put("baz.foo", "ghi")
+                .put("baz.bar", "jkl")
+                .putArray("baz.arr", "a", "b", "c")
+                .build();
+        Map<String, Object> map = settings.getAsStructuredMap();
+        assertThat(map.keySet(), Matchers.<String>hasSize(3)); // foo, bar, baz
+        assertThat(map, allOf(
+                Matchers.<String, Object>hasEntry("foo", "abc"),
+                Matchers.<String, Object>hasEntry("bar", "def")));
+
+        @SuppressWarnings("unchecked") Map<String, Object> bazMap = (Map<String, Object>) map.get("baz");
+        assertThat(bazMap.keySet(), Matchers.<String>hasSize(3)); // foo, bar, arr
+        assertThat(bazMap, allOf(
+                Matchers.<String, Object>hasEntry("foo", "ghi"),
+                Matchers.<String, Object>hasEntry("bar", "jkl")));
+        @SuppressWarnings("unchecked") List<String> bazArr = (List<String>) bazMap.get("arr");
+        assertThat(bazArr, contains("a", "b", "c"));
+
+    }
+
+    /**
+     * When a key ("foo") collides with a prefix of other keys ("foo.bar"),
+     * getAsStructuredMap keeps the keys flat instead of nesting — regardless
+     * of insertion order.
+     */
+    @Test
+    public void testFallbackToFlattenedSettings() {
+        Settings settings = settingsBuilder()
+                .put("foo", "abc")
+                .put("foo.bar", "def")
+                .put("foo.baz", "ghi").build();
+        Map<String, Object> map = settings.getAsStructuredMap();
+        assertThat(map.keySet(), Matchers.<String>hasSize(3));
+        assertThat(map, allOf(
+                Matchers.<String, Object>hasEntry("foo", "abc"),
+                Matchers.<String, Object>hasEntry("foo.bar", "def"),
+                Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+
+        // same settings, different insertion order: result must be identical
+        settings = settingsBuilder()
+                .put("foo.bar", "def")
+                .put("foo", "abc")
+                .put("foo.baz", "ghi")
+                .build();
+        map = settings.getAsStructuredMap();
+        assertThat(map.keySet(), Matchers.<String>hasSize(3));
+        assertThat(map, allOf(
+                Matchers.<String, Object>hasEntry("foo", "abc"),
+                Matchers.<String, Object>hasEntry("foo.bar", "def"),
+                Matchers.<String, Object>hasEntry("foo.baz", "ghi")));
+    }
+
+    /**
+     * getAsSettings("foo") extracts the "foo."-prefixed keys as a sub-settings
+     * object with the prefix stripped.
+     */
+    @Test
+    public void testGetAsSettings() {
+        Settings settings = settingsBuilder()
+                .put("foo", "abc")
+                .put("foo.bar", "def")
+                .put("foo.baz", "ghi").build();
+
+        Settings fooSettings = settings.getAsSettings("foo");
+        assertThat(fooSettings.get("bar"), equalTo("def"));
+        assertThat(fooSettings.get("baz"), equalTo("ghi"));
+    }
+
+    /**
+     * names() returns only the first key segment of each setting, and is
+     * relative when called on a sub-settings object.
+     */
+    @Test
+    public void testNames() {
+        Settings settings = settingsBuilder()
+                .put("bar", "baz")
+                .put("foo", "abc")
+                .put("foo.bar", "def")
+                .put("foo.baz", "ghi").build();
+
+        Set<String> names = settings.names();
+        assertThat(names.size(), equalTo(2)); // "bar" and "foo" (foo.* collapses)
+        assertTrue(names.contains("bar"));
+        assertTrue(names.contains("foo"));
+
+        Settings fooSettings = settings.getAsSettings("foo");
+        names = fooSettings.names();
+        assertThat(names.size(), equalTo(2));
+        assertTrue(names.contains("bar"));
+        assertTrue(names.contains("baz"));
+    }
+
+    // Verifies that when two Settings objects are merged via builder.put(Settings),
+    // an array key in the later source completely replaces whatever the earlier
+    // source held under the same key (scalar, longer array, shorter array, or a
+    // deeper structure) - no element-wise merging takes place.
+    @Test
+    public void testThatArraysAreOverriddenCorrectly() throws IOException {
+        // overriding a single value with an array
+        Settings settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1").build())
+                .put(settingsBuilder().putArray("value", "2", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("2", "3"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().put("value", "1").build())
+                .put(settingsBuilder().putArray("value", "2", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("2", "3"));
+
+        // same override semantics when both sources come from the YAML loader
+        settings = settingsBuilder()
+                .put(new YamlSettingsLoader().load("value: 1"))
+                .put(new YamlSettingsLoader().load("value: [ 2, 3 ]"))
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("2", "3"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().put("value.with.deep.key", "1").build())
+                .put(settingsBuilder().putArray("value.with.deep.key", "2", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value.with.deep.key"), arrayContaining("2", "3"));
+
+        // overriding an array with a shorter array
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1", "2").build())
+                .put(settingsBuilder().putArray("value", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("3"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1", "2", "3").build())
+                .put(settingsBuilder().putArray("value", "4", "5").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("4", "5"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value.deep.key", "1", "2", "3").build())
+                .put(settingsBuilder().putArray("value.deep.key", "4", "5").build())
+                .build();
+        assertThat(settings.getAsArray("value.deep.key"), arrayContaining("4", "5"));
+
+        // overriding an array with a longer array
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1", "2").build())
+                .put(settingsBuilder().putArray("value", "3", "4", "5").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("3", "4", "5"));
+
+        // NOTE(review): this repeats the shorter-array deep-key case above
+        // (4,5 over 1,2,3 is shorter, not longer) - likely a copy/paste slip
+        // in the scenario grouping; the assertion itself is still valid.
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value.deep.key", "1", "2", "3").build())
+                .put(settingsBuilder().putArray("value.deep.key", "4", "5").build())
+                .build();
+        assertThat(settings.getAsArray("value.deep.key"), arrayContaining("4", "5"));
+
+        // overriding an array with a single value
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1", "2").build())
+                .put(settingsBuilder().put("value", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("3"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value.deep.key", "1", "2").build())
+                .put(settingsBuilder().put("value.deep.key", "3").build())
+                .build();
+        assertThat(settings.getAsArray("value.deep.key"), arrayContaining("3"));
+
+        // test that other arrays are not overridden
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "1", "2", "3").putArray("a", "b", "c").build())
+                .put(settingsBuilder().putArray("value", "4", "5").putArray("d", "e", "f").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("4", "5"));
+        assertThat(settings.getAsArray("a"), arrayContaining("b", "c"));
+        assertThat(settings.getAsArray("d"), arrayContaining("e", "f"));
+
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value.deep.key", "1", "2", "3").putArray("a", "b", "c").build())
+                .put(settingsBuilder().putArray("value.deep.key", "4", "5").putArray("d", "e", "f").build())
+                .build();
+        assertThat(settings.getAsArray("value.deep.key"), arrayContaining("4", "5"));
+        assertThat(settings.getAsArray("a"), notNullValue());
+        assertThat(settings.getAsArray("d"), notNullValue());
+
+        // overriding a deeper structure with an array
+        settings = settingsBuilder()
+                .put(settingsBuilder().put("value.data", "1").build())
+                .put(settingsBuilder().putArray("value", "4", "5").build())
+                .build();
+        assertThat(settings.getAsArray("value"), arrayContaining("4", "5"));
+
+        // overriding an array with a deeper structure
+        settings = settingsBuilder()
+                .put(settingsBuilder().putArray("value", "4", "5").build())
+                .put(settingsBuilder().put("value.data", "1").build())
+                .build();
+        assertThat(settings.get("value.data"), is("1"));
+        assertThat(settings.get("value"), is(nullValue()));
+    }
+
+    @Test
+    public void testPrefixNormalization() {
+        // An empty builder stays empty after prefix normalization.
+        Settings result = settingsBuilder().normalizePrefix("foo.").build();
+        assertThat(result.names().size(), equalTo(0));
+
+        // A key lacking the prefix gets it prepended; the bare key disappears.
+        result = settingsBuilder()
+                .put("bar", "baz")
+                .normalizePrefix("foo.")
+                .build();
+        assertThat(result.getAsMap().size(), equalTo(1));
+        assertThat(result.get("bar"), nullValue());
+        assertThat(result.get("foo.bar"), equalTo("baz"));
+
+        // Mixed case: unprefixed keys are rewritten, already-prefixed keys are kept.
+        result = settingsBuilder()
+                .put("bar", "baz")
+                .put("foo.test", "test")
+                .normalizePrefix("foo.")
+                .build();
+        assertThat(result.getAsMap().size(), equalTo(2));
+        assertThat(result.get("bar"), nullValue());
+        assertThat(result.get("foo.bar"), equalTo("baz"));
+        assertThat(result.get("foo.test"), equalTo("test"));
+
+        // A key that already carries the prefix is left completely untouched.
+        result = settingsBuilder()
+                .put("foo.test", "test")
+                .normalizePrefix("foo.")
+                .build();
+        assertThat(result.getAsMap().size(), equalTo(1));
+        assertThat(result.get("foo.test"), equalTo("test"));
+    }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java b/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
new file mode 100644
index 0000000000..8c7b0c1f25
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/bar/BarTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.bar;
+
+// Empty marker class used in SettingsTest; exists only so this package
+// contains a loadable class.
+public class BarTestClass {
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java b/core/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
new file mode 100644
index 0000000000..6d8ca4a798
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/foo/FooTestClass.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.foo;
+
+// Empty marker class used in SettingsTest; exists only so this package
+// contains a loadable class.
+public class FooTestClass {
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
new file mode 100644
index 0000000000..5d492a6d6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/JsonSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JsonSettingsLoaderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleJsonSettings() throws Exception {
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.json")
+ .build();
+
+ assertThat(settings.get("test1.value1"), equalTo("value1"));
+ assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+ assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+ // check array
+ assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+ assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+ assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+ assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+ assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
new file mode 100644
index 0000000000..a9c77e9b31
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/YamlSettingsLoaderTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings.loader;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+// Checks that the YAML settings loader flattens a nested document from the
+// test classpath into dotted keys and indexed array entries (mirrors
+// JsonSettingsLoaderTests for the YAML format).
+public class YamlSettingsLoaderTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testSimpleYamlSettings() throws Exception {
+        Settings settings = settingsBuilder()
+                .loadFromClasspath("org/elasticsearch/common/settings/loader/test-settings.yml")
+                .build();
+
+        // Nested YAML mappings are flattened into dotted keys.
+        assertThat(settings.get("test1.value1"), equalTo("value1"));
+        assertThat(settings.get("test1.test2.value2"), equalTo("value2"));
+        assertThat(settings.getAsInt("test1.test2.value3", -1), equalTo(2));
+
+        // check array: list elements are exposed by numeric index and via getAsArray()
+        assertThat(settings.get("test1.test3.0"), equalTo("test3-1"));
+        assertThat(settings.get("test1.test3.1"), equalTo("test3-2"));
+        assertThat(settings.getAsArray("test1.test3").length, equalTo(2));
+        assertThat(settings.getAsArray("test1.test3")[0], equalTo("test3-1"));
+        assertThat(settings.getAsArray("test1.test3")[1], equalTo("test3-2"));
+    }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json b/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
new file mode 100644
index 0000000000..7190648d59
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.json
@@ -0,0 +1,10 @@
+{
+ test1:{
+ value1:"value1",
+ test2:{
+ value2:"value2",
+ value3:2
+ },
+ test3:["test3-1", "test3-2"]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml b/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
new file mode 100644
index 0000000000..b533ae036e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/loader/test-settings.yml
@@ -0,0 +1,8 @@
+test1:
+ value1: value1
+ test2:
+ value2: value2
+ value3: 2
+ test3:
+ - test3-1
+ - test3-2
diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
new file mode 100644
index 0000000000..8b39e4eecb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.ByteSizeUnit.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+// Pins the 1024-based conversion results between ByteSizeUnit values.
+// Long literals use an uppercase 'L' suffix: the lowercase 'l' in the
+// original is easily misread as the digit '1'.
+public class ByteSizeUnitTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testBytes() {
+        assertThat(BYTES.toBytes(1), equalTo(1L));
+        assertThat(BYTES.toKB(1024), equalTo(1L));
+        assertThat(BYTES.toMB(1024 * 1024), equalTo(1L));
+        assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1L));
+    }
+
+    @Test
+    public void testKB() {
+        assertThat(KB.toBytes(1), equalTo(1024L));
+        assertThat(KB.toKB(1), equalTo(1L));
+        assertThat(KB.toMB(1024), equalTo(1L));
+        assertThat(KB.toGB(1024 * 1024), equalTo(1L));
+    }
+
+    @Test
+    public void testMB() {
+        assertThat(MB.toBytes(1), equalTo(1024L * 1024));
+        assertThat(MB.toKB(1), equalTo(1024L));
+        assertThat(MB.toMB(1), equalTo(1L));
+        assertThat(MB.toGB(1024), equalTo(1L));
+    }
+
+    @Test
+    public void testGB() {
+        assertThat(GB.toBytes(1), equalTo(1024L * 1024 * 1024));
+        assertThat(GB.toKB(1), equalTo(1024L * 1024));
+        assertThat(GB.toMB(1), equalTo(1024L));
+        assertThat(GB.toGB(1), equalTo(1L));
+    }
+
+    @Test
+    public void testTB() {
+        // The long first operand keeps the chained multiplications in long
+        // arithmetic, so they cannot overflow int.
+        assertThat(TB.toBytes(1), equalTo(1024L * 1024 * 1024 * 1024));
+        assertThat(TB.toKB(1), equalTo(1024L * 1024 * 1024));
+        assertThat(TB.toMB(1), equalTo(1024L * 1024));
+        assertThat(TB.toGB(1), equalTo(1024L));
+        assertThat(TB.toTB(1), equalTo(1L));
+    }
+
+    @Test
+    public void testPB() {
+        assertThat(PB.toBytes(1), equalTo(1024L * 1024 * 1024 * 1024 * 1024));
+        assertThat(PB.toKB(1), equalTo(1024L * 1024 * 1024 * 1024));
+        assertThat(PB.toMB(1), equalTo(1024L * 1024 * 1024));
+        assertThat(PB.toGB(1), equalTo(1024L * 1024));
+        assertThat(PB.toTB(1), equalTo(1024L));
+        assertThat(PB.toPB(1), equalTo(1L));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
new file mode 100644
index 0000000000..ebbd71132e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/ByteSizeValueTests.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+// Tests ByteSizeValue construction, string rendering and parsing.
+// Fixes applied: the assertThat(...) wrappers inside the expected-exception
+// tests were dead code (parseBytesSizeValue throws before the assertion runs,
+// and the expected strings were wrong anyway) and are removed; lowercase 'l'
+// long-literal suffixes are replaced with 'L'; the repetitive parsing
+// assertions are deduplicated through a helper; megabyte parsing now also
+// covers the "MB"/"mb" spellings, matching every other unit.
+public class ByteSizeValueTests extends ElasticsearchTestCase {
+
+    // Parses input and asserts the parsed value round-trips to the expected string.
+    private static void assertParsesTo(String input, String expected) {
+        assertThat(ByteSizeValue.parseBytesSizeValue(input, "testParsing").toString(), is(expected));
+    }
+
+    @Test
+    public void testActualPeta() {
+        assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496L));
+    }
+
+    @Test
+    public void testActualTera() {
+        assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104L));
+    }
+
+    @Test
+    public void testActual() {
+        assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296L));
+    }
+
+    @Test
+    public void testSimple() {
+        // A value of 10 in unit X must report 10 when read back in unit X.
+        assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).bytes()));
+        assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).kb()));
+        assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).mb()));
+        assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).gb()));
+        assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).tb()));
+        assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).pb()));
+    }
+
+    @Test
+    public void testToString() {
+        // toString() renders in the largest unit that keeps a compact figure;
+        // beyond petabytes there is no larger unit, hence "1536pb".
+        assertThat("10b", is(new ByteSizeValue(10, ByteSizeUnit.BYTES).toString()));
+        assertThat("1.5kb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
+        assertThat("1.5mb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
+        assertThat("1.5gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
+        assertThat("1.5tb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
+        assertThat("1.5pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.TB).toString()));
+        assertThat("1536pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.PB).toString()));
+    }
+
+    @Test
+    public void testParsing() {
+        // Unit suffixes parse case-insensitively, with or without a separating
+        // space, and with or without the trailing 'b'.
+        for (String input : new String[]{"42PB", "42 PB", "42pb", "42 pb", "42P", "42 P", "42p", "42 p"}) {
+            assertParsesTo(input, "42pb");
+        }
+        for (String input : new String[]{"54TB", "54 TB", "54tb", "54 tb", "54T", "54 T", "54t", "54 t"}) {
+            assertParsesTo(input, "54tb");
+        }
+        for (String input : new String[]{"12GB", "12 GB", "12gb", "12 gb", "12G", "12 G", "12g", "12 g"}) {
+            assertParsesTo(input, "12gb");
+        }
+        // "MB"/"mb" spellings added for parity with the other units.
+        for (String input : new String[]{"12MB", "12 MB", "12mb", "12 mb", "12M", "12 M", "12m", "12 m"}) {
+            assertParsesTo(input, "12mb");
+        }
+        for (String input : new String[]{"23KB", "23 KB", "23kb", "23 kb", "23K", "23 K", "23k", "23 k"}) {
+            assertParsesTo(input, "23kb");
+        }
+        for (String input : new String[]{"1B", "1 B", "1b", "1 b"}) {
+            assertParsesTo(input, "1b");
+        }
+    }
+
+    @Test(expected = ElasticsearchParseException.class)
+    public void testFailOnMissingUnits() {
+        ByteSizeValue.parseBytesSizeValue("23", "test");
+    }
+
+    @Test(expected = ElasticsearchParseException.class)
+    public void testFailOnUnknownUnits() {
+        ByteSizeValue.parseBytesSizeValue("23jw", "test");
+    }
+
+    @Test(expected = ElasticsearchParseException.class)
+    public void testFailOnEmptyParsing() {
+        // The call must throw; the original assertThat wrapper never executed.
+        ByteSizeValue.parseBytesSizeValue("", "emptyParsing");
+    }
+
+    @Test(expected = ElasticsearchParseException.class)
+    public void testFailOnEmptyNumberParsing() {
+        // A bare unit with no numeric part must be rejected.
+        ByteSizeValue.parseBytesSizeValue("g", "emptyNumberParsing");
+    }
+
+    @Test(expected = ElasticsearchParseException.class)
+    public void testNoDotsAllowed() {
+        ByteSizeValue.parseBytesSizeValue("42b.", null, "test");
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
new file mode 100644
index 0000000000..f6748da3bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+// Tests DistanceUnit conversion and distance-string parsing.
+// As the asserted values show, unit.convert(v, other) interprets v as a
+// quantity in 'other' and returns it expressed in the receiving unit
+// (e.g. KILOMETERS.convert(10, MILES) == 16.09344 km).
+public class DistanceUnitTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testSimpleDistanceUnit() {
+        assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.MILES), closeTo(16.09344, 0.001));
+        assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.MILES), closeTo(10, 0.001));
+        assertThat(DistanceUnit.MILES.convert(10, DistanceUnit.KILOMETERS), closeTo(6.21371192, 0.001));
+        assertThat(DistanceUnit.NAUTICALMILES.convert(10, DistanceUnit.MILES), closeTo(8.689762, 0.001));
+        assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.KILOMETERS), closeTo(10, 0.001));
+        // 10 meters expressed in kilometers
+        assertThat(DistanceUnit.KILOMETERS.convert(10, DistanceUnit.METERS), closeTo(0.01, 0.00001));
+        assertThat(DistanceUnit.KILOMETERS.convert(1000,DistanceUnit.METERS), closeTo(1, 0.001));
+        // 1 kilometer expressed in meters
+        assertThat(DistanceUnit.METERS.convert(1, DistanceUnit.KILOMETERS), closeTo(1000, 0.001));
+    }
+
+    @Test
+    public void testDistanceUnitParsing() {
+        // Each supported unit suffix maps to its DistanceUnit constant.
+        assertThat(DistanceUnit.Distance.parseDistance("50km").unit, equalTo(DistanceUnit.KILOMETERS));
+        assertThat(DistanceUnit.Distance.parseDistance("500m").unit, equalTo(DistanceUnit.METERS));
+        assertThat(DistanceUnit.Distance.parseDistance("51mi").unit, equalTo(DistanceUnit.MILES));
+        assertThat(DistanceUnit.Distance.parseDistance("53nmi").unit, equalTo(DistanceUnit.NAUTICALMILES));
+        assertThat(DistanceUnit.Distance.parseDistance("53NM").unit, equalTo(DistanceUnit.NAUTICALMILES));
+        assertThat(DistanceUnit.Distance.parseDistance("52yd").unit, equalTo(DistanceUnit.YARD));
+        assertThat(DistanceUnit.Distance.parseDistance("12in").unit, equalTo(DistanceUnit.INCH));
+        assertThat(DistanceUnit.Distance.parseDistance("23mm").unit, equalTo(DistanceUnit.MILLIMETERS));
+        assertThat(DistanceUnit.Distance.parseDistance("23cm").unit, equalTo(DistanceUnit.CENTIMETERS));
+
+        // Round-trip every unit through its string form.
+        double testValue = 12345.678;
+        for (DistanceUnit unit : DistanceUnit.values()) {
+            assertThat("Unit can be parsed from '" + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+            // NOTE(review): the message below mentions testValue, but the call
+            // only parses unit.toString() - message and call disagree; confirm
+            // whether fromString(unit.toString(testValue)) was intended.
+            assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
+            assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
new file mode 100644
index 0000000000..74ed24a5ec
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.unit;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.number.IsCloseTo.closeTo;
+
+public class FuzzinessTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNumerics() {
+ String[] options = new String[]{"1.0", "1", "1.000000"};
+ assertThat(Fuzziness.build(randomFrom(options)).asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.build(randomFrom(options)).asInt(), equalTo(1));
+ assertThat(Fuzziness.build(randomFrom(options)).asFloat(), equalTo(1f));
+ assertThat(Fuzziness.build(randomFrom(options)).asDouble(), equalTo(1d));
+ assertThat(Fuzziness.build(randomFrom(options)).asLong(), equalTo(1l));
+ assertThat(Fuzziness.build(randomFrom(options)).asShort(), equalTo((short) 1));
+ }
+
+ @Test
+ public void testParseFromXContent() throws IOException {
+ final int iters = randomIntBetween(10, 50);
+ for (int i = 0; i < iters; i++) {
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ float floatValue = randomFloat();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, floatValue)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asFloat(), equalTo(floatValue));
+ assertThat(parse.asDouble(), closeTo((double) floatValue, 0.000001));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ Integer intValue = frequently() ? randomIntBetween(0, 2) : randomIntBetween(0, 100);
+ Float floatRep = randomFloat();
+ Number value = intValue;
+ if (randomBoolean()) {
+ value = new Float(floatRep += intValue);
+ }
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER), equalTo(XContentParser.Token.VALUE_STRING)));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asInt(), equalTo(value.intValue()));
+ assertThat((int) parse.asShort(), equalTo(value.intValue()));
+ assertThat((int) parse.asByte(), equalTo(value.intValue()));
+ assertThat(parse.asLong(), equalTo(value.longValue()));
+ if (value.intValue() >= 1) {
+ assertThat(parse.asDistance(), equalTo(Math.min(2, value.intValue())));
+ }
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ if (intValue.equals(value)) {
+ switch (intValue) {
+ case 1:
+ assertThat(parse, sameInstance(Fuzziness.ONE));
+ break;
+ case 2:
+ assertThat(parse, sameInstance(Fuzziness.TWO));
+ break;
+ case 0:
+ assertThat(parse, sameInstance(Fuzziness.ZERO));
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ {
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? "AUTO" : "auto")
+ .endObject().string();
+ if (randomBoolean()) {
+ json = Fuzziness.AUTO.toXContent(jsonBuilder().startObject(), null).endObject().string();
+ }
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse, sameInstance(Fuzziness.AUTO));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+
+ {
+ String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
+ String actual = randomIntBetween(1, 3) + randomFrom(values);
+ XContent xcontent = XContentType.JSON.xContent();
+ String json = jsonBuilder().startObject()
+ .field(Fuzziness.X_FIELD_NAME, actual)
+ .endObject().string();
+ XContentParser parser = xcontent.createParser(json);
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ Fuzziness parse = Fuzziness.parse(parser);
+ assertThat(parse.asTimeValue(), equalTo(TimeValue.parseTimeValue(actual, null, "fuzziness")));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+ }
+
+ }
+
+ @Test
+ public void testAuto() {
+ final int codePoints = randomIntBetween(0, 10);
+ String string = randomRealisticUnicodeOfCodepointLength(codePoints);
+ if (codePoints <= 2) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(0));
+ } else if (codePoints > 5) {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(2));
+ } else {
+ assertThat(Fuzziness.AUTO.asDistance(string), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(Fuzziness.AUTO.asSimilarity(string)).asDistance(string), equalTo(1));
+ }
+ assertThat(Fuzziness.AUTO.asByte(), equalTo((byte) 1));
+ assertThat(Fuzziness.AUTO.asInt(), equalTo(1));
+ assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f));
+ assertThat(Fuzziness.AUTO.asDouble(), equalTo(1d));
+ assertThat(Fuzziness.AUTO.asLong(), equalTo(1l));
+ assertThat(Fuzziness.AUTO.asShort(), equalTo((short) 1));
+ assertThat(Fuzziness.AUTO.asTimeValue(), equalTo(TimeValue.parseTimeValue("1ms", TimeValue.timeValueMillis(1), "fuzziness")));
+
+ }
+
+ @Test
+ public void testAsDistance() {
+ final int iters = randomIntBetween(10, 50);
+ for (int i = 0; i < iters; i++) {
+ Integer integer = Integer.valueOf(randomIntBetween(0, 10));
+ String value = "" + (randomBoolean() ? integer.intValue() : integer.floatValue());
+ assertThat(Fuzziness.build(value).asDistance(), equalTo(Math.min(2, integer.intValue())));
+ }
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10638")
+ public void testSimilarityToDistance() {
+ assertThat(Fuzziness.fromSimilarity(0.5f).asDistance("ab"), equalTo(1));
+ assertThat(Fuzziness.fromSimilarity(0.66f).asDistance("abcefg"), equalTo(2));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("ab"), equalTo(0));
+ assertThat(Fuzziness.fromSimilarity(0.8f).asDistance("abcefg"), equalTo(1));
+ assertThat((double) Fuzziness.ONE.asSimilarity("abcefg"), closeTo(0.8f, 0.05));
+ assertThat((double) Fuzziness.TWO.asSimilarity("abcefg"), closeTo(0.66f, 0.05));
+ assertThat((double) Fuzziness.ONE.asSimilarity("ab"), closeTo(0.5f, 0.05));
+
+ int iters = randomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ Fuzziness fuzziness = Fuzziness.fromEdits(between(1, 2));
+ String string = rarely() ? randomRealisticUnicodeOfLengthBetween(2, 4) :
+ randomRealisticUnicodeOfLengthBetween(4, 10);
+ float similarity = fuzziness.asSimilarity(string);
+ if (similarity != 0.0f) {
+ Fuzziness similarityBased = Fuzziness.build(similarity);
+ assertThat((double) similarityBased.asSimilarity(string), closeTo(similarity, 0.05));
+ assertThat(similarityBased.asDistance(string), equalTo(Math.min(2, fuzziness.asDistance(string))));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/RatioValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/RatioValueTests.java
new file mode 100644
index 0000000000..d9a04726e4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/RatioValueTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests for the {@link RatioValue} class
+ */
+public class RatioValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testParsing() {
+ assertThat(RatioValue.parseRatioValue("100%").toString(), is("100.0%"));
+ assertThat(RatioValue.parseRatioValue("0%").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("-0%").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("15.1%").toString(), is("15.1%"));
+ assertThat(RatioValue.parseRatioValue("0.1%").toString(), is("0.1%"));
+ assertThat(RatioValue.parseRatioValue("1.0").toString(), is("100.0%"));
+ assertThat(RatioValue.parseRatioValue("0").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("-0").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("0.0").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("-0.0").toString(), is("0.0%"));
+ assertThat(RatioValue.parseRatioValue("0.151").toString(), is("15.1%"));
+ assertThat(RatioValue.parseRatioValue("0.001").toString(), is("0.1%"));
+ }
+
+ @Test
+ public void testNegativeCase() {
+ testInvalidRatio("100.0001%");
+ testInvalidRatio("-0.1%");
+ testInvalidRatio("1a0%");
+ testInvalidRatio("2");
+ testInvalidRatio("-0.01");
+ testInvalidRatio("0.1.0");
+ testInvalidRatio("five");
+ testInvalidRatio("1/2");
+ }
+
+ public void testInvalidRatio(String r) {
+ try {
+ RatioValue.parseRatioValue(r);
+ fail("Value: [" + r + "] should be an invalid ratio");
+ } catch (ElasticsearchParseException e) {
+ // success
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java
new file mode 100644
index 0000000000..5e0ab4f3b0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class SizeValueTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatConversionWorks() {
+ SizeValue sizeValue = new SizeValue(1000);
+ assertThat(sizeValue.kilo(), is(1l));
+ assertThat(sizeValue.toString(), is("1k"));
+
+ sizeValue = new SizeValue(1000, SizeUnit.KILO);
+ assertThat(sizeValue.singles(), is(1000000L));
+ assertThat(sizeValue.toString(), is("1m"));
+
+ sizeValue = new SizeValue(1000, SizeUnit.MEGA);
+ assertThat(sizeValue.singles(), is(1000000000L));
+ assertThat(sizeValue.toString(), is("1g"));
+
+ sizeValue = new SizeValue(1000, SizeUnit.GIGA);
+ assertThat(sizeValue.singles(), is(1000000000000L));
+ assertThat(sizeValue.toString(), is("1t"));
+
+ sizeValue = new SizeValue(1000, SizeUnit.TERA);
+ assertThat(sizeValue.singles(), is(1000000000000000L));
+ assertThat(sizeValue.toString(), is("1p"));
+
+ sizeValue = new SizeValue(1000, SizeUnit.PETA);
+ assertThat(sizeValue.singles(), is(1000000000000000000L));
+ assertThat(sizeValue.toString(), is("1000p"));
+ }
+
+ @Test
+ public void testThatParsingWorks() {
+ assertThat(SizeValue.parseSizeValue("1k").toString(), is(new SizeValue(1000).toString()));
+ assertThat(SizeValue.parseSizeValue("1p").toString(), is(new SizeValue(1, SizeUnit.PETA).toString()));
+ assertThat(SizeValue.parseSizeValue("1G").toString(), is(new SizeValue(1, SizeUnit.GIGA).toString()));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testThatNegativeValuesThrowException() {
+ new SizeValue(-1);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
new file mode 100644
index 0000000000..b901f1f393
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.unit;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.PeriodType;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+public class TimeValueTests extends ElasticsearchTestCase {
+
+ public void testSimple() {
+ assertThat(TimeUnit.MILLISECONDS.toMillis(10), equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).millis()));
+ assertThat(TimeUnit.MICROSECONDS.toMicros(10), equalTo(new TimeValue(10, TimeUnit.MICROSECONDS).micros()));
+ assertThat(TimeUnit.SECONDS.toSeconds(10), equalTo(new TimeValue(10, TimeUnit.SECONDS).seconds()));
+ assertThat(TimeUnit.MINUTES.toMinutes(10), equalTo(new TimeValue(10, TimeUnit.MINUTES).minutes()));
+ assertThat(TimeUnit.HOURS.toHours(10), equalTo(new TimeValue(10, TimeUnit.HOURS).hours()));
+ assertThat(TimeUnit.DAYS.toDays(10), equalTo(new TimeValue(10, TimeUnit.DAYS).days()));
+ }
+
+ public void testToString() {
+ assertThat("10ms", equalTo(new TimeValue(10, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5s", equalTo(new TimeValue(1533, TimeUnit.MILLISECONDS).toString()));
+ assertThat("1.5m", equalTo(new TimeValue(90, TimeUnit.SECONDS).toString()));
+ assertThat("1.5h", equalTo(new TimeValue(90, TimeUnit.MINUTES).toString()));
+ assertThat("1.5d", equalTo(new TimeValue(36, TimeUnit.HOURS).toString()));
+ assertThat("1000d", equalTo(new TimeValue(1000, TimeUnit.DAYS).toString()));
+ }
+
+ public void testFormat() {
+ assertThat(new TimeValue(1025, TimeUnit.MILLISECONDS).format(PeriodType.dayTime()), equalTo("1 second and 25 milliseconds"));
+ assertThat(new TimeValue(1, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 minute"));
+ assertThat(new TimeValue(65, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("1 hour and 5 minutes"));
+ assertThat(new TimeValue(24 * 600 + 85, TimeUnit.MINUTES).format(PeriodType.dayTime()), equalTo("241 hours and 25 minutes"));
+ }
+
+ public void testMinusOne() {
+ assertThat(new TimeValue(-1).nanos(), lessThan(0l));
+ }
+
+ public void testParseTimeValue() {
+ // Space is allowed before unit:
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS),
+ TimeValue.parseTimeValue("10 ms", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS),
+ TimeValue.parseTimeValue("10ms", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS),
+ TimeValue.parseTimeValue("10 MS", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MILLISECONDS),
+ TimeValue.parseTimeValue("10MS", null, "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS),
+ TimeValue.parseTimeValue("10 s", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS),
+ TimeValue.parseTimeValue("10s", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS),
+ TimeValue.parseTimeValue("10 S", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.SECONDS),
+ TimeValue.parseTimeValue("10S", null, "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES),
+ TimeValue.parseTimeValue("10 m", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES),
+ TimeValue.parseTimeValue("10m", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES),
+ TimeValue.parseTimeValue("10 M", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.MINUTES),
+ TimeValue.parseTimeValue("10M", null, "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.HOURS),
+ TimeValue.parseTimeValue("10 h", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.HOURS),
+ TimeValue.parseTimeValue("10h", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.HOURS),
+ TimeValue.parseTimeValue("10 H", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.HOURS),
+ TimeValue.parseTimeValue("10H", null, "test"));
+
+ assertEquals(new TimeValue(10, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10 d", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10d", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10 D", null, "test"));
+ assertEquals(new TimeValue(10, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10D", null, "test"));
+
+ assertEquals(new TimeValue(70, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10 w", null, "test"));
+ assertEquals(new TimeValue(70, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10w", null, "test"));
+ assertEquals(new TimeValue(70, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10 W", null, "test"));
+ assertEquals(new TimeValue(70, TimeUnit.DAYS),
+ TimeValue.parseTimeValue("10W", null, "test"));
+ }
+
+ private void assertEqualityAfterSerialize(TimeValue value) throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ value.writeTo(out);
+
+ StreamInput in = StreamInput.wrap(out.bytes());
+ TimeValue inValue = TimeValue.readTimeValue(in);
+
+ assertThat(inValue, equalTo(value));
+ }
+
+ public void testSerialize() throws Exception {
+ assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS));
+ assertEqualityAfterSerialize(new TimeValue(-1));
+ assertEqualityAfterSerialize(new TimeValue(1, TimeUnit.NANOSECONDS));
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnUnknownUnits() {
+ TimeValue.parseTimeValue("23tw", null, "test");
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testFailOnMissingUnits() {
+ TimeValue.parseTimeValue("42", null, "test");
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testNoDotsAllowed() {
+ TimeValue.parseTimeValue("42ms.", null, "test");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java
new file mode 100644
index 0000000000..f85ab89021
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/ArrayUtilsTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.BitSet;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class ArrayUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void binarySearch() throws Exception {
+
+ for (int j = 0; j < 100; j++) {
+
+ int index = Math.min(randomInt(0, 10), 9);
+ double tolerance = Math.random() * 0.01;
+ double lookForValue = randomFreq(0.9) ? -1 : Double.NaN; // sometimes we'll look for NaN
+ double[] array = new double[10];
+ for (int i = 0; i < array.length; i++) {
+ double value;
+ if (randomFreq(0.9)) {
+ value = Math.random() * 10;
+ array[i] = value + ((randomFreq(0.5) ? 1 : -1) * Math.random() * tolerance);
+
+ } else { // sometimes we'll have NaN in the array
+ value = Double.NaN;
+ array[i] = value;
+ }
+ if (i == index && lookForValue < 0) {
+ lookForValue = value;
+ }
+ }
+ Arrays.sort(array);
+
+ // pick up all the indices that fall within the range of [lookForValue - tolerance, lookForValue + tolerance]
+ // we need to do this, since we choose the values randomly and we might end up having multiple values in the
+ // array that will match the looked for value with the random tolerance. In such cases, the binary search will
+ // return the first one that will match.
+ BitSet bitSet = new BitSet(10);
+ for (int i = 0; i < array.length; i++) {
+ if (Double.isNaN(lookForValue) && Double.isNaN(array[i])) {
+ bitSet.set(i);
+ } else if ((array[i] >= lookForValue - tolerance) && (array[i] <= lookForValue + tolerance)) {
+ bitSet.set(i);
+ }
+ }
+
+ int foundIndex = ArrayUtils.binarySearch(array, lookForValue, tolerance);
+
+ if (bitSet.cardinality() == 0) {
+ assertThat(foundIndex, is(-1));
+ } else {
+ assertThat(bitSet.get(foundIndex), is(true));
+ }
+ }
+ }
+
+ private boolean randomFreq(double freq) {
+ return Math.random() < freq;
+ }
+
+ private int randomInt(int min, int max) {
+ int delta = (int) (Math.random() * (max - min));
+ return min + delta;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
new file mode 100644
index 0000000000..3dd8e65cd0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java
@@ -0,0 +1,383 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.cache.recycler.MockBigArrays;
+import org.junit.Before;
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+
+public class BigArraysTests extends ElasticsearchSingleNodeTest {
+
+ public static BigArrays randombigArrays() {
+ final PageCacheRecycler recycler = randomBoolean() ? null : ElasticsearchSingleNodeTest.getInstanceFromNode(PageCacheRecycler.class);
+ return new MockBigArrays(recycler, new NoneCircuitBreakerService());
+ }
+
+ private BigArrays bigArrays;
+
+ @Before
+ public void init() {
+ bigArrays = randombigArrays();
+ }
+
+ public void testByteArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 4000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ByteArray array = bigArrays.newByteArray(startLen, randomBoolean());
+ byte[] ref = new byte[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomByte();
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.close();
+ }
+
+ public void testIntArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ IntArray array = bigArrays.newIntArray(startLen, randomBoolean());
+ int[] ref = new int[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomInt();
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.close();
+ }
+
+ public void testLongArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ LongArray array = bigArrays.newLongArray(startLen, randomBoolean());
+ long[] ref = new long[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomLong();
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i));
+ }
+ array.close();
+ }
+
+ public void testFloatArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ FloatArray array = bigArrays.newFloatArray(startLen, randomBoolean());
+ float[] ref = new float[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomFloat();
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i), 0.001d);
+ }
+ array.close();
+ }
+
+ public void testDoubleArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ DoubleArray array = bigArrays.newDoubleArray(startLen, randomBoolean());
+ double[] ref = new double[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomDouble();
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertEquals(ref[i], array.get(i), 0.001d);
+ }
+ array.close();
+ }
+
+ public void testObjectArrayGrowth() {
+ final int totalLen = randomIntBetween(1, 1000000);
+ final int startLen = randomIntBetween(1, randomBoolean() ? 1000 : totalLen);
+ ObjectArray<Object> array = bigArrays.newObjectArray(startLen);
+ final Object[] pool = new Object[100];
+ for (int i = 0; i < pool.length; ++i) {
+ pool[i] = new Object();
+ }
+ Object[] ref = new Object[totalLen];
+ for (int i = 0; i < totalLen; ++i) {
+ ref[i] = randomFrom(pool);
+ array = bigArrays.grow(array, i + 1);
+ array.set(i, ref[i]);
+ }
+ for (int i = 0; i < totalLen; ++i) {
+ assertSame(ref[i], array.get(i));
+ }
+ array.close();
+ }
+
+ public void testByteArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final ByteArray array2 = bigArrays.newByteArray(len, randomBoolean());
+ final byte[] array1 = new byte[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomByte();
+ array2.set(i, array1[i]);
+ }
+ final byte rand = randomByte();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i), 0.001d);
+ }
+ array2.close();
+ }
+
+ public void testFloatArrayFill() {
+ final int len = randomIntBetween(1, 100000);
+ final int fromIndex = randomIntBetween(0, len - 1);
+ final int toIndex = randomBoolean()
+ ? Math.min(fromIndex + randomInt(100), len) // single page
+ : randomIntBetween(fromIndex, len); // likely multiple pages
+ final FloatArray array2 = bigArrays.newFloatArray(len, randomBoolean());
+ final float[] array1 = new float[len];
+ for (int i = 0; i < len; ++i) {
+ array1[i] = randomFloat();
+ array2.set(i, array1[i]);
+ }
+ final float rand = randomFloat();
+ Arrays.fill(array1, fromIndex, toIndex, rand);
+ array2.fill(fromIndex, toIndex, rand);
+ for (int i = 0; i < len; ++i) {
+ assertEquals(array1[i], array2.get(i), 0.001d);
+ }
+ array2.close();
+ }
+
+    /**
+     * Checks that {@link DoubleArray#fill} behaves like {@link Arrays#fill} on a
+     * plain double[] over a random [fromIndex, toIndex) range, covering both
+     * single-page and (likely) multi-page spans.
+     */
+    public void testDoubleArrayFill() {
+        final int len = randomIntBetween(1, 100000);
+        final int fromIndex = randomIntBetween(0, len - 1);
+        final int toIndex = randomBoolean()
+            ? Math.min(fromIndex + randomInt(100), len) // single page
+            : randomIntBetween(fromIndex, len); // likely multiple pages
+        final DoubleArray array2 = bigArrays.newDoubleArray(len, randomBoolean());
+        final double[] array1 = new double[len];
+        for (int i = 0; i < len; ++i) {
+            array1[i] = randomDouble();
+            array2.set(i, array1[i]);
+        }
+        final double rand = randomDouble();
+        Arrays.fill(array1, fromIndex, toIndex, rand);
+        array2.fill(fromIndex, toIndex, rand);
+        for (int i = 0; i < len; ++i) {
+            // values are stored and filled bit-for-bit, so an exact comparison
+            // (delta 0) is valid and catches more bugs than a loose 0.001 delta
+            assertEquals(array1[i], array2.get(i), 0d);
+        }
+        array2.close();
+    }
+
+    /**
+     * Checks that {@link LongArray#fill} behaves like {@link Arrays#fill} on a
+     * plain long[] over a random [start, end) range, covering both single-page
+     * and (likely) multi-page spans.
+     */
+    public void testLongArrayFill() {
+        final int size = randomIntBetween(1, 100000);
+        final int start = randomIntBetween(0, size - 1);
+        final int end = randomBoolean()
+            ? Math.min(start + randomInt(100), size) // single page
+            : randomIntBetween(start, size); // likely multiple pages
+        final LongArray actual = bigArrays.newLongArray(size, randomBoolean());
+        final long[] expected = new long[size];
+        for (int i = 0; i < size; ++i) {
+            final long value = randomLong();
+            expected[i] = value;
+            actual.set(i, value);
+        }
+        final long fillValue = randomLong();
+        Arrays.fill(expected, start, end, fillValue);
+        actual.fill(start, end, fillValue);
+        for (int i = 0; i < size; ++i) {
+            assertEquals(expected[i], actual.get(i));
+        }
+        actual.close();
+    }
+
+    /**
+     * Random slices obtained via the bulk {@code get(offset, len, BytesRef)}
+     * must match the corresponding slice of the reference byte[].
+     */
+    public void testByteArrayBulkGet() {
+        final byte[] expected = new byte[randomIntBetween(1, 4000000)];
+        getRandom().nextBytes(expected);
+        final ByteArray actual = bigArrays.newByteArray(expected.length, randomBoolean());
+        for (int i = 0; i < expected.length; ++i) {
+            actual.set(i, expected[i]);
+        }
+        final BytesRef slice = new BytesRef();
+        for (int iter = 0; iter < 1000; ++iter) {
+            final int offset = randomInt(expected.length - 1);
+            final int len = randomInt(Math.min(randomBoolean() ? 10 : Integer.MAX_VALUE, expected.length - offset));
+            actual.get(offset, len, slice);
+            assertEquals(new BytesRef(expected, offset, len), slice);
+        }
+        actual.close();
+    }
+
+ public void testByteArrayBulkSet() {
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final ByteArray array2 = bigArrays.newByteArray(array1.length, randomBoolean());
+ for (int i = 0; i < array1.length; ) {
+ final int len = Math.min(array1.length - i, randomBoolean() ? randomInt(10) : randomInt(3 * BigArrays.BYTE_PAGE_SIZE));
+ array2.set(i, array1, i, len);
+ i += len;
+ }
+ for (int i = 0; i < array1.length; ++i) {
+ assertEquals(array1[i], array2.get(i));
+ }
+ array2.close();
+ }
+
+    /**
+     * {@code BigArrays.equals(ByteArray, ByteArray)}: identity equality,
+     * equality of two distinct empty arrays, and inequality for arrays whose
+     * contents differ at the first, middle, or last position.
+     */
+    public void testByteArrayEquals() {
+        final ByteArray firstEmpty = byteArrayWithBytes(BytesRef.EMPTY_BYTES);
+        final ByteArray secondEmpty = byteArrayWithBytes(BytesRef.EMPTY_BYTES);
+
+        // an array always equals itself
+        assertTrue(bigArrays.equals(firstEmpty, firstEmpty));
+        // two distinct empty arrays are equal
+        assertTrue(bigArrays.equals(firstEmpty, secondEmpty));
+        firstEmpty.close();
+        secondEmpty.close();
+
+        // single-element arrays with different contents
+        final ByteArray zero = byteArrayWithBytes(new byte[]{0});
+        final ByteArray one = byteArrayWithBytes(new byte[]{1});
+        assertFalse(bigArrays.equals(zero, one));
+        zero.close();
+        one.close();
+
+        // same length, middle byte differs
+        final ByteArray midA = byteArrayWithBytes(new byte[]{1, 2, 3});
+        final ByteArray midB = byteArrayWithBytes(new byte[]{1, 1, 3});
+        assertFalse(bigArrays.equals(midA, midB));
+        midA.close();
+        midB.close();
+
+        // same length, last byte differs
+        final ByteArray lastA = byteArrayWithBytes(new byte[]{1, 2, 3});
+        final ByteArray lastB = byteArrayWithBytes(new byte[]{1, 2, 4});
+        assertFalse(bigArrays.equals(lastA, lastB));
+        lastA.close();
+        lastB.close();
+    }
+
+ /**
+  * {@code BigArrays.hashCode(ByteArray)} must be consistent with
+  * {@code Arrays.hashCode(byte[])} for an equivalent plain array, and a null
+  * argument must hash to 0.
+  */
+ public void testByteArrayHashCode() {
+ // null arg has hashCode 0
+ assertEquals(0, bigArrays.hashCode(null));
+
+ // empty array should have equal hash
+ final int emptyHash = Arrays.hashCode(BytesRef.EMPTY_BYTES);
+ final ByteArray emptyByteArray = byteArrayWithBytes(BytesRef.EMPTY_BYTES);
+ final int emptyByteArrayHash = bigArrays.hashCode(emptyByteArray);
+ assertEquals(emptyHash, emptyByteArrayHash);
+ emptyByteArray.close();
+
+ // FUN FACT: Arrays.hashCode() and BytesReference.bytesHashCode() are inconsistent for empty byte[]
+ // final int emptyHash3 = new BytesArray(BytesRef.EMPTY_BYTES).hashCode();
+ // assertEquals(emptyHash1, emptyHash3); -> fail (1 vs. 0)
+
+ // a large random array must hash identically to the equivalent byte[]
+ // (this spans several pages, exercising the paged hashing path)
+ final byte[] array1 = new byte[randomIntBetween(1, 4000000)];
+ getRandom().nextBytes(array1);
+ final int array1Hash = Arrays.hashCode(array1);
+ final ByteArray array2 = byteArrayWithBytes(array1);
+ final int array2Hash = bigArrays.hashCode(array2);
+ assertEquals(array1Hash, array2Hash);
+ array2.close();
+ }
+
+ private ByteArray byteArrayWithBytes(byte[] bytes) {
+ ByteArray bytearray = bigArrays.newByteArray(bytes.length);
+ for (int i = 0; i < bytes.length; ++i) {
+ bytearray.set(i, bytes[i]);
+ }
+ return bytearray;
+ }
+
+ /**
+  * Allocating any BigArray type larger than the request circuit breaker
+  * limit must throw {@code CircuitBreakingException} and leave the breaker
+  * with zero bytes accounted (i.e. a failed allocation does not leak).
+  */
+ public void testMaxSizeExceededOnNew() throws Exception {
+ final int size = scaledRandomIntBetween(5, 1 << 22);
+ for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) {
+ // breaker limit is one byte below the requested size, so every
+ // allocation below must trip it
+ HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService(
+ Settings.builder()
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1, ByteSizeUnit.BYTES)
+ .build(),
+ new NodeSettingsService(Settings.EMPTY));
+ BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking();
+ // invoke e.g. newByteArray(long) reflectively so one loop covers all types
+ Method create = BigArrays.class.getMethod("new" + type + "Array", long.class);
+ try {
+ create.invoke(bigArrays, size);
+ fail("expected an exception on " + create);
+ } catch (InvocationTargetException e) {
+ // reflection wraps the breaker exception; unwrap and verify the cause
+ assertTrue(e.getCause() instanceof CircuitBreakingException);
+ }
+ // the failed allocation must not leave bytes registered in the breaker
+ assertEquals(0, hcbs.getBreaker(CircuitBreaker.REQUEST).getUsed());
+ }
+ }
+
+ /**
+  * Repeatedly doubling a BigArray must eventually trip the request circuit
+  * breaker; after the failed resize the breaker must account exactly for the
+  * surviving array, and closing it must return the breaker to zero.
+  */
+ public void testMaxSizeExceededOnResize() throws Exception {
+ for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) {
+ final long maxSize = randomIntBetween(1 << 10, 1 << 22);
+ HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService(
+ Settings.builder()
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize, ByteSizeUnit.BYTES)
+ .build(),
+ new NodeSettingsService(Settings.EMPTY));
+ BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking();
+ Method create = BigArrays.class.getMethod("new" + type + "Array", long.class);
+ final int size = scaledRandomIntBetween(1, 20);
+ BigArray array = (BigArray) create.invoke(bigArrays, size);
+ // resolve e.g. resize(ByteArray, long) via the array's primary interface
+ Method resize = BigArrays.class.getMethod("resize", array.getClass().getInterfaces()[0], long.class);
+ // double until the breaker trips; terminates because the limit is finite
+ while (true) {
+ long newSize = array.size() * 2;
+ try {
+ array = (BigArray) resize.invoke(bigArrays, array, newSize);
+ } catch (InvocationTargetException e) {
+ assertTrue(e.getCause() instanceof CircuitBreakingException);
+ break;
+ }
+ }
+ // the failed resize must not change what the breaker accounts for
+ assertEquals(array.ramBytesUsed(), hcbs.getBreaker(CircuitBreaker.REQUEST).getUsed());
+ array.close();
+ assertEquals(0, hcbs.getBreaker(CircuitBreaker.REQUEST).getUsed());
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
new file mode 100644
index 0000000000..729033bdd6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/ByteUtilsTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+
+/**
+ * Tests for {@code ByteUtils}: zig-zag encoding, little-endian float/double
+ * serialization, and variable-length long (vlong) encoding.
+ */
+public class ByteUtilsTests extends ElasticsearchTestCase {
+
+    /**
+     * Asserts that zig-zag encoding round-trips {@code l}. Private and not
+     * prefixed with "test" so test runners that discover zero-argument
+     * {@code test*} methods by name do not mistake this parameterized helper
+     * for a test case.
+     */
+    private void assertZigZagRoundTrip(long l) {
+        assertEquals(l, ByteUtils.zigZagDecode(ByteUtils.zigZagEncode(l)));
+    }
+
+    public void testZigZag() {
+        // boundary values first
+        assertZigZagRoundTrip(0);
+        assertZigZagRoundTrip(1);
+        assertZigZagRoundTrip(-1);
+        assertZigZagRoundTrip(Long.MAX_VALUE);
+        assertZigZagRoundTrip(Long.MIN_VALUE);
+        for (int i = 0; i < 1000; ++i) {
+            assertZigZagRoundTrip(randomLong());
+            // zig-zag maps small magnitudes of either sign to non-negative values
+            assertTrue(ByteUtils.zigZagEncode(randomInt(1000)) >= 0);
+            assertTrue(ByteUtils.zigZagEncode(-randomInt(1000)) >= 0);
+        }
+    }
+
+    /** Little-endian float write/read must round-trip at every 4-byte offset. */
+    public void testFloat() throws IOException {
+        final float[] data = new float[scaledRandomIntBetween(1000, 10000)];
+        final byte[] encoded = new byte[data.length * 4];
+        for (int i = 0; i < data.length; ++i) {
+            data[i] = randomFloat();
+            ByteUtils.writeFloatLE(data[i], encoded, i * 4);
+        }
+        for (int i = 0; i < data.length; ++i) {
+            // Float.MIN_VALUE delta: effectively an exact comparison
+            assertEquals(data[i], ByteUtils.readFloatLE(encoded, i * 4), Float.MIN_VALUE);
+        }
+    }
+
+    /** Little-endian double write/read must round-trip at every 8-byte offset. */
+    public void testDouble() throws IOException {
+        final double[] data = new double[scaledRandomIntBetween(1000, 10000)];
+        final byte[] encoded = new byte[data.length * 8];
+        for (int i = 0; i < data.length; ++i) {
+            data[i] = randomDouble();
+            ByteUtils.writeDoubleLE(data[i], encoded, i * 8);
+        }
+        for (int i = 0; i < data.length; ++i) {
+            // Double.MIN_VALUE delta: effectively an exact comparison
+            assertEquals(data[i], ByteUtils.readDoubleLE(encoded, i * 8), Double.MIN_VALUE);
+        }
+    }
+
+    /** Vlong encoding round-trips a mix of boundary and random values. */
+    public void testVLong() throws IOException {
+        final long[] data = new long[scaledRandomIntBetween(1000, 10000)];
+        for (int i = 0; i < data.length; ++i) {
+            switch (randomInt(4)) {
+                case 0:
+                    data[i] = 0;
+                    break;
+                case 1:
+                    data[i] = Long.MAX_VALUE;
+                    break;
+                case 2:
+                    data[i] = Long.MIN_VALUE;
+                    break;
+                case 3:
+                    data[i] = randomInt(1 << randomIntBetween(2,30));
+                    break;
+                case 4:
+                    data[i] = randomLong();
+                    break;
+                default:
+                    throw new AssertionError();
+            }
+        }
+        // worst case every value needs the maximum encoding width
+        final byte[] encoded = new byte[ByteUtils.MAX_BYTES_VLONG * data.length];
+        ByteArrayDataOutput out = new ByteArrayDataOutput(encoded);
+        for (int i = 0; i < data.length; ++i) {
+            final int pos = out.getPosition();
+            ByteUtils.writeVLong(out, data[i]);
+            if (data[i] < 0) {
+                // negative values always occupy the maximum number of bytes
+                assertEquals(ByteUtils.MAX_BYTES_VLONG, out.getPosition() - pos);
+            }
+        }
+        final ByteArrayDataInput in = new ByteArrayDataInput(encoded);
+        for (int i = 0; i < data.length; ++i) {
+            assertEquals(data[i], ByteUtils.readVLong(in));
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java b/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java
new file mode 100644
index 0000000000..e7d98db5c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.hppc.ObjectLongMap;
+import com.carrotsearch.hppc.ObjectLongHashMap;
+import com.carrotsearch.hppc.cursors.ObjectLongCursor;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+/**
+ * Tests for {@code BytesRefHash}. {@code testDuell} cross-checks id assignment
+ * against hppc's {@code ObjectLongHashMap}; the remaining tests are adapted
+ * from Lucene's {@code BytesRefHash} test suite (see START/END markers below).
+ */
+public class BytesRefHashTests extends ElasticsearchSingleNodeTest {
+
+ // hash under test; recreated by newHash() so each round starts fresh
+ BytesRefHash hash;
+
+ // Closes the previous hash (if any) and creates a new one with random
+ // initial capacity and a deliberately high load factor.
+ private void newHash() {
+ if (hash != null) {
+ hash.close();
+ }
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
+ hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays());
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ newHash();
+ }
+
+ // Mirror random insertions into a plain map and verify that add() assigns
+ // dense ids in insertion order, find() locates them, and id()/get() agree.
+ public void testDuell() {
+ final int len = randomIntBetween(1, 100000);
+ final BytesRef[] values = new BytesRef[len];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = new BytesRef(randomAsciiOfLength(5));
+ }
+ final ObjectLongMap<BytesRef> valueToId = new ObjectLongHashMap<>();
+ final BytesRef[] idToValue = new BytesRef[values.length];
+ final int iters = randomInt(1000000);
+ for (int i = 0; i < iters; ++i) {
+ final BytesRef value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ // duplicate add returns -1 - existingId
+ assertEquals(- 1 - valueToId.get(value), hash.add(value, value.hashCode()));
+ } else {
+ assertEquals(valueToId.size(), hash.add(value, value.hashCode()));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), hash.size());
+ for (Iterator<ObjectLongCursor<BytesRef>> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final ObjectLongCursor<BytesRef> next = iterator.next();
+ assertEquals(next.value, hash.find(next.key, next.key.hashCode()));
+ }
+
+ // walk every slot; occupied slots (id >= 0) must map back to their value
+ for (long i = 0; i < hash.capacity(); ++i) {
+ final long id = hash.id(i);
+ BytesRef spare = new BytesRef();
+ if (id >= 0) {
+ hash.get(id, spare);
+ assertEquals(idToValue[(int) id], spare);
+ }
+ }
+ hash.close();
+ }
+
+ // START - tests borrowed from LUCENE
+
+ /**
+ * Test method for {@link org.apache.lucene.util.BytesRefHash#size()}.
+ */
+ @Test
+ public void testSize() {
+ BytesRefBuilder ref = new BytesRefBuilder();
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ final int mod = 1+randomInt(40);
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref.get());
+ if (key < 0)
+ assertEquals(hash.size(), count);
+ else
+ assertEquals(hash.size(), count + 1);
+ if(i % mod == 0) {
+ newHash();
+ }
+ }
+ }
+ hash.close();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#get(int, BytesRef)}
+ * .
+ */
+ @Test
+ public void testGet() {
+ BytesRefBuilder ref = new BytesRefBuilder();
+ BytesRef scratch = new BytesRef();
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Map<String, Long> strings = new HashMap<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref.get());
+ if (key >= 0) {
+ assertNull(strings.put(str, Long.valueOf(key)));
+ assertEquals(uniqueCount, key);
+ uniqueCount++;
+ assertEquals(hash.size(), count + 1);
+ } else {
+ assertTrue((-key)-1 < count);
+ assertEquals(hash.size(), count);
+ }
+ }
+ for (Entry<String, Long> entry : strings.entrySet()) {
+ ref.copyChars(entry.getKey());
+ assertEquals(ref.get(), hash.get(entry.getValue().longValue(), scratch));
+ }
+ newHash();
+ }
+ hash.close();
+ }
+
+ /**
+ * Test method for
+ * {@link org.apache.lucene.util.BytesRefHash#add(org.apache.lucene.util.BytesRef)}
+ * .
+ */
+ @Test
+ public void testAdd() {
+ BytesRefBuilder ref = new BytesRefBuilder();
+ BytesRef scratch = new BytesRef();
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.add(ref.get());
+
+ if (key >=0) {
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ } else {
+ assertFalse(strings.add(str));
+ assertTrue((-key)-1 < count);
+ assertEquals(str, hash.get((-key)-1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.close();
+ }
+
+ @Test
+ public void testFind() throws Exception {
+ BytesRefBuilder ref = new BytesRefBuilder();
+ BytesRef scratch = new BytesRef();
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Set<String> strings = new HashSet<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ String str;
+ do {
+ str = TestUtil.randomRealisticUnicodeString(getRandom(), 1000);
+ } while (str.length() == 0);
+ ref.copyChars(str);
+ long count = hash.size();
+ long key = hash.find(ref.get()); //hash.add(ref);
+ if (key >= 0) { // string found in hash
+ assertFalse(strings.add(str));
+ assertTrue(key < count);
+ assertEquals(str, hash.get(key, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ } else {
+ key = hash.add(ref.get());
+ assertTrue(strings.add(str));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ }
+ }
+
+ assertAllIn(strings, hash);
+ newHash();
+ }
+ hash.close();
+ }
+
+ // Re-adds every string and checks the duplicate id maps back to it without
+ // growing the hash.
+ private void assertAllIn(Set<String> strings, BytesRefHash hash) {
+ BytesRefBuilder ref = new BytesRefBuilder();
+ BytesRef scratch = new BytesRef();
+ long count = hash.size();
+ for (String string : strings) {
+ ref.copyChars(string);
+ long key = hash.add(ref.get()); // add again to check duplicates
+ assertEquals(string, hash.get((-key)-1, scratch).utf8ToString());
+ assertEquals(count, hash.size());
+ assertTrue("key: " + key + " count: " + count + " string: " + string,
+ key < count);
+ }
+ }
+
+ // END - tests borrowed from LUCENE
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTest.java b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTest.java
new file mode 100644
index 0000000000..66db5241b7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/CancellableThreadsTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.common.util.CancellableThreads.Interruptable;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Tests {@code CancellableThreads}: a set of worker threads each follows a
+ * randomized {@link TestPlan}, then {@code cancel()} is issued and the
+ * resulting exceptions, suppressed exceptions, and interrupt flags are
+ * verified per plan.
+ */
+public class CancellableThreadsTest extends ElasticsearchTestCase {
+
+ // Marker exception so tests can tell "thrown by the plan" apart from
+ // cancellation-related exceptions.
+ private static class CustomException extends RuntimeException {
+
+ public CustomException(String msg) {
+ super(msg);
+ }
+ }
+
+ // Randomized behavior for one worker thread: whether it busy-spins or
+ // sleeps, whether it throws/returns before cancellation, whether it throws
+ // after being cancelled, and whether its interrupt flag is pre-set.
+ private class TestPlan {
+ public final int id;
+ public final boolean busySpin;
+ public final boolean exceptBeforeCancel;
+ public final boolean exitBeforeCancel;
+ public final boolean exceptAfterCancel;
+ public final boolean presetInterrupt;
+
+ private TestPlan(int id) {
+ this.id = id;
+ this.busySpin = randomBoolean();
+ this.exceptBeforeCancel = randomBoolean();
+ this.exitBeforeCancel = randomBoolean();
+ this.exceptAfterCancel = randomBoolean();
+ this.presetInterrupt = randomBoolean();
+ }
+ }
+
+
+ @Test
+ public void testCancellableThreads() throws InterruptedException {
+ Thread[] threads = new Thread[randomIntBetween(3, 10)];
+ final TestPlan[] plans = new TestPlan[threads.length];
+ final Throwable[] throwables = new Throwable[threads.length];
+ final boolean[] interrupted = new boolean[threads.length];
+ final CancellableThreads cancellableThreads = new CancellableThreads();
+ final CountDownLatch readyForCancel = new CountDownLatch(threads.length);
+ for (int i = 0; i < threads.length; i++) {
+ final TestPlan plan = new TestPlan(i);
+ plans[i] = plan;
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ if (plan.presetInterrupt) {
+ Thread.currentThread().interrupt();
+ }
+ cancellableThreads.execute(new Interruptable() {
+ @Override
+ public void run() throws InterruptedException {
+ // execute() is expected to clear a pre-set interrupt flag
+ assertFalse("interrupt thread should have been clear", Thread.currentThread().isInterrupted());
+ if (plan.exceptBeforeCancel) {
+ throw new CustomException("thread [" + plan.id + "] pre-cancel exception");
+ } else if (plan.exitBeforeCancel) {
+ return;
+ }
+ readyForCancel.countDown();
+ try {
+ if (plan.busySpin) {
+ // wait for cancellation via the interrupt flag
+ while (!Thread.currentThread().isInterrupted()) {
+ }
+ } else {
+ Thread.sleep(50000);
+ }
+ } finally {
+ if (plan.exceptAfterCancel) {
+ throw new CustomException("thread [" + plan.id + "] post-cancel exception");
+ }
+ }
+ }
+ });
+ } catch (Throwable t) {
+ throwables[plan.id] = t;
+ }
+ if (plan.exceptBeforeCancel || plan.exitBeforeCancel) {
+ // we have to mark we're ready now (actually done).
+ readyForCancel.countDown();
+ }
+ // record the flag state after execute() returns, for later checks
+ interrupted[plan.id] = Thread.currentThread().isInterrupted();
+
+ }
+ });
+ threads[i].setDaemon(true);
+ threads[i].start();
+ }
+
+ readyForCancel.await();
+ cancellableThreads.cancel("test");
+ for (Thread thread : threads) {
+ thread.join(20000);
+ assertFalse(thread.isAlive());
+ }
+ for (int i = 0; i < threads.length; i++) {
+ TestPlan plan = plans[i];
+ if (plan.exceptBeforeCancel) {
+ assertThat(throwables[i], Matchers.instanceOf(CustomException.class));
+ } else if (plan.exitBeforeCancel) {
+ assertNull(throwables[i]);
+ } else {
+ // in all other cases, we expect a cancellation exception.
+ assertThat(throwables[i], Matchers.instanceOf(CancellableThreads.ExecutionCancelledException.class));
+ if (plan.exceptAfterCancel) {
+ // the plan's own exception must be preserved as suppressed
+ assertThat(throwables[i].getSuppressed(),
+ Matchers.arrayContaining(
+ Matchers.instanceOf(CustomException.class)
+ ));
+ } else {
+ assertThat(throwables[i].getSuppressed(), Matchers.emptyArray());
+ }
+ }
+ // a pre-set interrupt should be restored after execute() finishes
+ assertThat(interrupted[plan.id], Matchers.equalTo(plan.presetInterrupt));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
new file mode 100644
index 0000000000..14b77df40a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefArray;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.Counter;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests for {@code CollectionUtils}: list rotation, and sort / sort-and-dedup
+ * over Lucene {@code BytesRefArray} via an indirection array of indices.
+ */
+public class CollectionUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void rotateEmpty() {
+ // rotating an empty list by any distance stays empty
+ assertTrue(CollectionUtils.rotate(ImmutableList.of(), randomInt()).isEmpty());
+ }
+
+ @Test
+ public void rotate() {
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int k = 0; k < iters; ++k) {
+ final int size = randomIntBetween(1, 100);
+ final int distance = randomInt();
+ List<Object> list = new ArrayList<>();
+ for (int i = 0; i < size; ++i) {
+ list.add(new Object());
+ }
+ final List<Object> rotated = CollectionUtils.rotate(list, distance);
+ // check content is the same
+ assertEquals(rotated.size(), list.size());
+ assertEquals(Iterables.size(rotated), list.size());
+ assertEquals(new HashSet<>(rotated), new HashSet<>(list));
+ // check stability
+ for (int j = randomInt(4); j >= 0; --j) {
+ assertEquals(rotated, CollectionUtils.rotate(list, distance));
+ }
+ // reverse
+ // skipped for Integer.MIN_VALUE since -Integer.MIN_VALUE overflows
+ if (distance != Integer.MIN_VALUE) {
+ assertEquals(list, CollectionUtils.rotate(CollectionUtils.rotate(list, distance), -distance));
+ }
+ }
+ }
+
+ @Test
+ public void testSortAndDedupByteRefArray() {
+ SortedSet<BytesRef> set = new TreeSet<>();
+ final int numValues = scaledRandomIntBetween(0, 10000);
+ List<BytesRef> tmpList = new ArrayList<>();
+ BytesRefArray array = new BytesRefArray(Counter.newCounter());
+ for (int i = 0; i < numValues; i++) {
+ String s = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
+ set.add(new BytesRef(s));
+ tmpList.add(new BytesRef(s));
+ array.append(new BytesRef(s));
+ }
+ // optionally append every value a second time (shuffled) so dedup has
+ // guaranteed duplicates to remove
+ if (randomBoolean()) {
+ Collections.shuffle(tmpList, getRandom());
+ for (BytesRef ref : tmpList) {
+ array.append(ref);
+ }
+ }
+ int[] indices = new int[array.size()];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = i;
+ }
+ int numUnique = CollectionUtils.sortAndDedup(array, indices);
+ assertThat(numUnique, equalTo(set.size()));
+ Iterator<BytesRef> iterator = set.iterator();
+
+ // the first numUnique indices must enumerate the sorted unique values
+ BytesRefBuilder spare = new BytesRefBuilder();
+ for (int i = 0; i < numUnique; i++) {
+ assertThat(iterator.hasNext(), is(true));
+ assertThat(array.get(spare, indices[i]), equalTo(iterator.next()));
+ }
+
+ }
+
+ @Test
+ public void testSortByteRefArray() {
+ List<BytesRef> values = new ArrayList<>();
+ final int numValues = scaledRandomIntBetween(0, 10000);
+ BytesRefArray array = new BytesRefArray(Counter.newCounter());
+ for (int i = 0; i < numValues; i++) {
+ String s = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
+ values.add(new BytesRef(s));
+ array.append(new BytesRef(s));
+ }
+ if (randomBoolean()) {
+ Collections.shuffle(values, getRandom());
+ }
+ int[] indices = new int[array.size()];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = i;
+ }
+ // sort() reorders the indices, not the array; compare against a
+ // Collections.sort of the same values
+ CollectionUtils.sort(array, indices);
+ Collections.sort(values);
+ Iterator<BytesRef> iterator = values.iterator();
+
+ BytesRefBuilder spare = new BytesRefBuilder();
+ for (int i = 0; i < values.size(); i++) {
+ assertThat(iterator.hasNext(), is(true));
+ assertThat(array.get(spare, indices[i]), equalTo(iterator.next()));
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java b/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java
new file mode 100644
index 0000000000..d35a6f1416
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/LongHashTests.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.hppc.LongLongHashMap;
+import com.carrotsearch.hppc.LongLongMap;
+import com.carrotsearch.hppc.cursors.LongLongCursor;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.*;
+
+/**
+ * Tests for {@code LongHash}. {@code testDuell} cross-checks id assignment
+ * against hppc's {@code LongLongHashMap}; the remaining tests mirror the
+ * size/key/add/find coverage used for {@code BytesRefHash}.
+ */
+public class LongHashTests extends ElasticsearchSingleNodeTest {
+
+ // hash under test; recreated by newHash() so each round starts fresh
+ LongHash hash;
+
+ // Closes the previous hash (if any) and creates a new one with random
+ // initial capacity and a deliberately high load factor.
+ private void newHash() {
+ if (hash != null) {
+ hash.close();
+ }
+
+ // Test high load factors to make sure that collision resolution works fine
+ final float maxLoadFactor = 0.6f + randomFloat() * 0.39f;
+ hash = new LongHash(randomIntBetween(0, 100), maxLoadFactor, BigArraysTests.randombigArrays());
+ }
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ newHash();
+ }
+
+ // Mirror random insertions into a plain map and verify add() assigns dense
+ // ids in insertion order, find() locates them, and id()/get() agree.
+ public void testDuell() {
+ final Long[] values = new Long[randomIntBetween(1, 100000)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomLong();
+ }
+ final LongLongMap valueToId = new LongLongHashMap();
+ final long[] idToValue = new long[values.length];
+ final int iters = randomInt(1000000);
+ for (int i = 0; i < iters; ++i) {
+ final Long value = randomFrom(values);
+ if (valueToId.containsKey(value)) {
+ // duplicate add returns -1 - existingId
+ assertEquals(-1 - valueToId.get(value), hash.add(value));
+ } else {
+ assertEquals(valueToId.size(), hash.add(value));
+ idToValue[valueToId.size()] = value;
+ valueToId.put(value, valueToId.size());
+ }
+ }
+
+ assertEquals(valueToId.size(), hash.size());
+ for (Iterator<LongLongCursor> iterator = valueToId.iterator(); iterator.hasNext(); ) {
+ final LongLongCursor next = iterator.next();
+ assertEquals(next.value, hash.find(next.key));
+ }
+
+ // walk every slot; occupied slots (id >= 0) must map back to their value
+ for (long i = 0; i < hash.capacity(); ++i) {
+ final long id = hash.id(i);
+ if (id >= 0) {
+ assertEquals(idToValue[(int) id], hash.get(id));
+ }
+ }
+
+ for (long i = 0; i < hash.size(); i++) {
+ assertEquals(idToValue[(int) i], hash.get(i));
+ }
+
+ hash.close();
+ }
+
+ @Test
+ public void testSize() {
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ final int mod = 1 + randomInt(40);
+ for (int i = 0; i < 797; i++) {
+ long count = hash.size();
+ long key = hash.add(randomLong());
+ if (key < 0)
+ assertEquals(hash.size(), count);
+ else
+ assertEquals(hash.size(), count + 1);
+ if (i % mod == 0) {
+ newHash();
+ }
+ }
+ }
+ hash.close();
+ }
+
+ @Test
+ public void testKey() {
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Map<Long, Long> longs = new HashMap<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ long ref = randomLong();
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key >= 0) {
+ assertNull(longs.put(ref, key));
+ assertEquals(uniqueCount, key);
+ uniqueCount++;
+ assertEquals(hash.size(), count + 1);
+ } else {
+ assertTrue((-key) - 1L < count);
+ assertEquals(hash.size(), count);
+ }
+ }
+
+ for (Map.Entry<Long, Long> entry : longs.entrySet()) {
+ long expected = entry.getKey();
+ long keyIdx = entry.getValue();
+ assertEquals(expected, hash.get(keyIdx));
+ }
+
+ newHash();
+ }
+ hash.close();
+ }
+
+ @Test
+ public void testAdd() {
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Set<Long> longs = new HashSet<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ long ref = randomLong();
+ long count = hash.size();
+ long key = hash.add(ref);
+ if (key >= 0) {
+ assertTrue(longs.add(ref));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ } else {
+ assertFalse(longs.add(ref));
+ assertTrue((-key) - 1 < count);
+ assertEquals(ref, hash.get((-key) - 1));
+ assertEquals(count, hash.size());
+ }
+ }
+
+ assertAllIn(longs, hash);
+ newHash();
+ }
+ hash.close();
+ }
+
+ @Test
+ public void testFind() throws Exception {
+ int num = scaledRandomIntBetween(2, 20);
+ for (int j = 0; j < num; j++) {
+ Set<Long> longs = new HashSet<>();
+ int uniqueCount = 0;
+ for (int i = 0; i < 797; i++) {
+ long ref = randomLong();
+ long count = hash.size();
+ long key = hash.find(ref);
+ if (key >= 0) { // found in hash
+ assertFalse(longs.add(ref));
+ assertTrue(key < count);
+ assertEquals(ref, hash.get(key));
+ assertEquals(count, hash.size());
+ } else {
+ key = hash.add(ref);
+ assertTrue(longs.add(ref));
+ assertEquals(uniqueCount, key);
+ assertEquals(hash.size(), count + 1);
+ uniqueCount++;
+ }
+ }
+
+ assertAllIn(longs, hash);
+ newHash();
+ }
+ hash.close();
+ }
+
+ // Re-adds every value and checks the duplicate id maps back to it without
+ // growing the hash.
+ private static void assertAllIn(Set<Long> longs, LongHash hash) {
+ long count = hash.size();
+ for (Long l : longs) {
+ long key = hash.add(l); // add again to check duplicates
+ assertEquals(l.longValue(), hash.get((-key) - 1));
+ assertEquals(count, hash.size());
+ assertTrue("key: " + key + " count: " + count + " long: " + l, key < count);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java b/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
new file mode 100644
index 0000000000..fae90cd395
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.hppc.LongObjectHashMap;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+public class LongObjectHashMapTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void duel() {
+ final LongObjectHashMap<Object> map1 = new LongObjectHashMap<>();
+ final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, BigArraysTests.randombigArrays());
+ final int maxKey = randomIntBetween(1, 10000);
+ final int iters = scaledRandomIntBetween(10000, 100000);
+ for (int i = 0; i < iters; ++i) {
+ final boolean put = randomBoolean();
+ final int iters2 = randomIntBetween(1, 100);
+ for (int j = 0; j < iters2; ++j) {
+ final long key = randomInt(maxKey);
+ if (put) {
+ final Object value = new Object();
+ assertSame(map1.put(key, value), map2.put(key, value));
+ } else {
+ assertSame(map1.remove(key), map2.remove(key));
+ }
+ assertEquals(map1.size(), map2.size());
+ }
+ }
+ for (int i = 0; i <= maxKey; ++i) {
+ assertSame(map1.get(i), map2.get(i));
+ }
+ final LongObjectHashMap<Object> copy = new LongObjectHashMap<>();
+ for (LongObjectPagedHashMap.Cursor<Object> cursor : map2) {
+ copy.put(cursor.key, cursor.value);
+ }
+ map2.close();
+ assertEquals(map1, copy);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/MultiDataPathUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/MultiDataPathUpgraderTests.java
new file mode 100644
index 0000000000..343d7e3041
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/MultiDataPathUpgraderTests.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityTests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.MetaDataStateFormat;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.index.shard.ShardStateMetaData;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URISyntaxException;
+import java.nio.file.*;
+import java.util.*;
+
+/**
+ */
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class MultiDataPathUpgraderTests extends ElasticsearchTestCase {
+
+ public void testUpgradeRandomPaths() throws IOException {
+ try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
+ final String uuid = Strings.base64UUID();
+ final ShardId shardId = new ShardId("foo", 0);
+ final Path[] shardDataPaths = nodeEnvironment.availableShardPaths(shardId);
+ if (nodeEnvironment.nodeDataPaths().length == 1) {
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ assertFalse(helper.needsUpgrading(shardId));
+ return;
+ }
+ int numIdxFiles = 0;
+ int numTranslogFiles = 0;
+ int metaStateVersion = 0;
+ for (Path shardPath : shardDataPaths) {
+ final Path translog = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
+ final Path idx = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
+ Files.createDirectories(translog);
+ Files.createDirectories(idx);
+ int numFiles = randomIntBetween(1, 10);
+ for (int i = 0; i < numFiles; i++, numIdxFiles++) {
+ String filename = Integer.toString(numIdxFiles);
+ try (BufferedWriter w = Files.newBufferedWriter(idx.resolve(filename + ".tst"), Charsets.UTF_8)) {
+ w.write(filename);
+ }
+ }
+ numFiles = randomIntBetween(1, 10);
+ for (int i = 0; i < numFiles; i++, numTranslogFiles++) {
+ String filename = Integer.toString(numTranslogFiles);
+ try (BufferedWriter w = Files.newBufferedWriter(translog.resolve(filename + ".translog"), Charsets.UTF_8)) {
+ w.write(filename);
+ }
+ }
+ ++metaStateVersion;
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(metaStateVersion, true, uuid), metaStateVersion, shardDataPaths);
+ }
+ final Path path = randomFrom(shardDataPaths);
+ ShardPath targetPath = new ShardPath(path, path, uuid, new ShardId("foo", 0));
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ helper.upgrade(shardId, targetPath);
+ assertFalse(helper.needsUpgrading(shardId));
+ if (shardDataPaths.length > 1) {
+ for (Path shardPath : shardDataPaths) {
+ if (shardPath.equals(targetPath.getDataPath())) {
+ continue;
+ }
+ final Path translog = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
+ final Path idx = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
+ final Path state = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
+ assertFalse(Files.exists(translog));
+ assertFalse(Files.exists(idx));
+ assertFalse(Files.exists(state));
+ assertFalse(Files.exists(shardPath));
+ }
+ }
+
+ final ShardStateMetaData stateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, targetPath.getShardStatePath());
+ assertEquals(metaStateVersion, stateMetaData.version);
+ assertTrue(stateMetaData.primary);
+ assertEquals(uuid, stateMetaData.indexUUID);
+ final Path translog = targetPath.getDataPath().resolve(ShardPath.TRANSLOG_FOLDER_NAME);
+ final Path idx = targetPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME);
+ Files.deleteIfExists(idx.resolve("write.lock"));
+ assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length);
+ assertEquals(numIdxFiles, FileSystemUtils.files(idx).length);
+ final HashSet<Path> translogFiles = Sets.newHashSet(FileSystemUtils.files(translog));
+ for (int i = 0; i < numTranslogFiles; i++) {
+ final String name = Integer.toString(i);
+ translogFiles.contains(translog.resolve(name + ".translog"));
+ byte[] content = Files.readAllBytes(translog.resolve(name + ".translog"));
+ assertEquals(name , new String(content, Charsets.UTF_8));
+ }
+ final HashSet<Path> idxFiles = Sets.newHashSet(FileSystemUtils.files(idx));
+ for (int i = 0; i < numIdxFiles; i++) {
+ final String name = Integer.toString(i);
+ idxFiles.contains(idx.resolve(name + ".tst"));
+ byte[] content = Files.readAllBytes(idx.resolve(name + ".tst"));
+ assertEquals(name , new String(content, Charsets.UTF_8));
+ }
+ }
+ }
+
+ /**
+ * Run upgrade on a real bwc index
+ */
+ public void testUpgradeRealIndex() throws IOException, URISyntaxException {
+ List<Path> indexes = new ArrayList<>();
+ Path dir = getDataPath("/" + OldIndexBackwardsCompatibilityTests.class.getPackage().getName().replace('.', '/')); // the files are in the same pkg as the OldIndexBackwardsCompatibilityTests test
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, "index-*.zip")) {
+ for (Path path : stream) {
+ indexes.add(path);
+ }
+ }
+ CollectionUtil.introSort(indexes, new Comparator<Path>() {
+ @Override
+ public int compare(Path o1, Path o2) {
+ return o1.getFileName().compareTo(o2.getFileName());
+ }
+ });
+ final ShardId shardId = new ShardId("test", 0);
+ final Path path = randomFrom(indexes);
+ final Path indexFile = path;
+ final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
+ try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
+ if (nodeEnvironment.nodeDataPaths().length == 1) {
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ assertFalse(helper.needsUpgrading(shardId));
+ return;
+ }
+ Path unzipDir = createTempDir();
+ Path unzipDataDir = unzipDir.resolve("data");
+ // decompress the index
+ try (InputStream stream = Files.newInputStream(indexFile)) {
+ TestUtil.unzip(stream, unzipDir);
+ }
+ // check it is unique
+ assertTrue(Files.exists(unzipDataDir));
+ Path[] list = FileSystemUtils.files(unzipDataDir);
+ if (list.length != 1) {
+ throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
+ }
+ // the bwc scripts packs the indices under this path
+ Path src = list[0].resolve("nodes/0/indices/" + indexName);
+ assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
+ Path[] multiDataPath = new Path[nodeEnvironment.nodeDataPaths().length];
+ int i = 0;
+ for (NodeEnvironment.NodePath nodePath : nodeEnvironment.nodePaths()) {
+ multiDataPath[i++] = nodePath.indicesPath;
+ }
+ logger.info("--> injecting index [{}] into multiple data paths", indexName);
+ OldIndexBackwardsCompatibilityTests.copyIndex(logger, src, indexName, multiDataPath);
+ final ShardPath shardPath = new ShardPath(nodeEnvironment.availableShardPaths(new ShardId(indexName, 0))[0], nodeEnvironment.availableShardPaths(new ShardId(indexName, 0))[0], IndexMetaData.INDEX_UUID_NA_VALUE, new ShardId(indexName, 0));
+
+ logger.info("{}", FileSystemUtils.files(shardPath.resolveIndex()));
+
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ helper.upgrade(new ShardId(indexName, 0), shardPath);
+ helper.checkIndex(shardPath);
+ assertFalse(helper.needsUpgrading(new ShardId(indexName, 0)));
+ }
+ }
+
+ public void testNeedsUpgrade() throws IOException {
+ try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
+ String uuid = Strings.randomBase64UUID();
+ final ShardId shardId = new ShardId("foo", 0);
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(1, true, uuid), 1, nodeEnvironment.availableShardPaths(shardId));
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ boolean multiDataPaths = nodeEnvironment.nodeDataPaths().length > 1;
+ boolean needsUpgrading = helper.needsUpgrading(shardId);
+ if (multiDataPaths) {
+ assertTrue(needsUpgrading);
+ } else {
+ assertFalse(needsUpgrading);
+ }
+ }
+ }
+
+ public void testPickTargetShardPath() throws IOException {
+ try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
+ final ShardId shard = new ShardId("foo", 0);
+ final Path[] paths = nodeEnvironment.availableShardPaths(shard);
+ if (paths.length == 1) {
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
+ try {
+ helper.pickShardPath(new ShardId("foo", 0));
+ fail("one path needs no upgrading");
+ } catch (IllegalStateException ex) {
+ // only one path
+ }
+ } else {
+ final Map<Path, Tuple<Long, Long>> pathToSpace = new HashMap<>();
+ final Path expectedPath;
+ if (randomBoolean()) { // path with most of the file bytes
+ expectedPath = randomFrom(paths);
+ long[] used = new long[paths.length];
+ long sumSpaceUsed = 0;
+ for (int i = 0; i < used.length; i++) {
+ long spaceUsed = paths[i] == expectedPath ? randomIntBetween(101, 200) : randomIntBetween(10, 100);
+ sumSpaceUsed += spaceUsed;
+ used[i] = spaceUsed;
+ }
+ for (int i = 0; i < used.length; i++) {
+ long available = randomIntBetween((int)(2*sumSpaceUsed-used[i]), 4 * (int)sumSpaceUsed);
+ pathToSpace.put(paths[i], new Tuple<>(available, used[i]));
+ }
+ } else { // path with largest available space
+ expectedPath = randomFrom(paths);
+ long[] used = new long[paths.length];
+ long sumSpaceUsed = 0;
+ for (int i = 0; i < used.length; i++) {
+ long spaceUsed = randomIntBetween(10, 100);
+ sumSpaceUsed += spaceUsed;
+ used[i] = spaceUsed;
+ }
+
+ for (int i = 0; i < used.length; i++) {
+ long available = paths[i] == expectedPath ? randomIntBetween((int)(sumSpaceUsed), (int)(2*sumSpaceUsed)) : randomIntBetween(0, (int)(sumSpaceUsed) - 1) ;
+ pathToSpace.put(paths[i], new Tuple<>(available, used[i]));
+ }
+
+ }
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment) {
+ @Override
+ protected long getUsabelSpace(NodeEnvironment.NodePath path) throws IOException {
+ return pathToSpace.get(path.resolve(shard)).v1();
+ }
+
+ @Override
+ protected long getSpaceUsedByShard(Path path) throws IOException {
+ return pathToSpace.get(path).v2();
+ }
+ };
+ String uuid = Strings.randomBase64UUID();
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(1, true, uuid), 1, paths);
+ final ShardPath shardPath = helper.pickShardPath(new ShardId("foo", 0));
+ assertEquals(expectedPath, shardPath.getDataPath());
+ assertEquals(expectedPath, shardPath.getShardStatePath());
+ }
+
+ MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment) {
+ @Override
+ protected long getUsabelSpace(NodeEnvironment.NodePath path) throws IOException {
+ return randomIntBetween(0, 10);
+ }
+
+ @Override
+ protected long getSpaceUsedByShard(Path path) throws IOException {
+ return randomIntBetween(11, 20);
+ }
+ };
+
+ try {
+ helper.pickShardPath(new ShardId("foo", 0));
+ fail("not enough space");
+ } catch (IllegalStateException ex) {
+ // not enough space
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/SingleObjectCacheTests.java b/core/src/test/java/org/elasticsearch/common/util/SingleObjectCacheTests.java
new file mode 100644
index 0000000000..de06302d28
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/SingleObjectCacheTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class SingleObjectCacheTests extends ElasticsearchTestCase {
+
+ public void testRefresh() {
+ final AtomicInteger count = new AtomicInteger(0);
+ final AtomicBoolean needsRefresh = new AtomicBoolean(true);
+ SingleObjectCache<Integer> cache = new SingleObjectCache<Integer>(TimeValue.timeValueMillis(100000), 0) {
+
+ @Override
+ protected Integer refresh() {
+ return count.incrementAndGet();
+ }
+
+ @Override
+ protected boolean needsRefresh() {
+ return needsRefresh.get();
+ }
+ };
+ assertEquals(1, cache.getOrRefresh().intValue());
+ assertEquals(2, cache.getOrRefresh().intValue());
+ needsRefresh.set(false);
+ assertEquals(2, cache.getOrRefresh().intValue());
+ needsRefresh.set(true);
+ assertEquals(3, cache.getOrRefresh().intValue());
+ }
+
+ public void testRefreshDoesntBlock() throws InterruptedException {
+ final AtomicInteger count = new AtomicInteger(0);
+ final AtomicBoolean needsRefresh = new AtomicBoolean(true);
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch waiting = new CountDownLatch(1);
+ final SingleObjectCache<Integer> cache = new SingleObjectCache<Integer>(TimeValue.timeValueMillis(1000), 0) {
+
+ @Override
+ protected Integer refresh() {
+ if (count.get() == 1) {
+ try {
+ waiting.countDown();
+ latch.await();
+ } catch (InterruptedException e) {
+ assert false;
+ }
+ }
+ return count.incrementAndGet();
+ }
+
+ @Override
+ protected boolean needsRefresh() {
+ return needsRefresh.get();
+ }
+ };
+ assertEquals(1, cache.getOrRefresh().intValue());
+ needsRefresh.set(true);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ Integer value = cache.getOrRefresh();
+ assertEquals(2, value.intValue());
+ }
+ };
+ t.start();
+ waiting.await();
+ assertEquals(1, cache.getOrRefresh().intValue());
+ needsRefresh.set(false);
+ latch.countDown();
+ t.join();
+ assertEquals(2, cache.getOrRefresh().intValue());
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
new file mode 100644
index 0000000000..24d89824c3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/CountDownTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+
+public class CountDownTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testConcurrent() throws InterruptedException {
+ final AtomicInteger count = new AtomicInteger(0);
+ final CountDown countDown = new CountDown(scaledRandomIntBetween(10, 1000));
+ Thread[] threads = new Thread[between(3, 10)];
+ final CountDownLatch latch = new CountDownLatch(1);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException();
+ }
+ while (true) {
+ if(frequently()) {
+ if (countDown.isCountedDown()) {
+ break;
+ }
+ }
+ if (countDown.countDown()) {
+ count.incrementAndGet();
+ break;
+ }
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown();
+ Thread.yield();
+ if (rarely()) {
+ if (countDown.fastForward()) {
+ count.incrementAndGet();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+
+ }
+
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(count.get(), Matchers.equalTo(1));
+ }
+
+ @Test
+ public void testSingleThreaded() {
+ int atLeast = scaledRandomIntBetween(10, 1000);
+ final CountDown countDown = new CountDown(atLeast);
+ while(!countDown.isCountedDown()) {
+ atLeast--;
+ if (countDown.countDown()) {
+ assertThat(atLeast, equalTo(0));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ break;
+ }
+ if (rarely()) {
+ assertThat(countDown.fastForward(), equalTo(true));
+ assertThat(countDown.isCountedDown(), equalTo(true));
+ assertThat(countDown.fastForward(), equalTo(false));
+ }
+ assertThat(atLeast, greaterThan(0));
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
new file mode 100644
index 0000000000..9e05683836
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util.concurrent;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ */
+public class EsExecutorsTests extends ElasticsearchTestCase {
+
+ private TimeUnit randomTimeUnit() {
+ return TimeUnit.values()[between(0, TimeUnit.values().length - 1)];
+ }
+
+ @Test
+ public void testFixedForcedExecution() throws Exception {
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ final CountDownLatch exec3Wait = new CountDownLatch(1);
+ executor.execute(new AbstractRunnable() {
+ @Override
+ protected void doRun() {
+ executed3.set(true);
+ exec3Wait.countDown();
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return true;
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ throw new AssertionError(t);
+ }
+ });
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+ exec3Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(true));
+
+ terminate(executor);
+ }
+
+ @Test
+ public void testFixedRejected() throws Exception {
+ EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+ final CountDownLatch wait = new CountDownLatch(1);
+
+ final CountDownLatch exec1Wait = new CountDownLatch(1);
+ final AtomicBoolean executed1 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ wait.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ executed1.set(true);
+ exec1Wait.countDown();
+ }
+ });
+
+ final CountDownLatch exec2Wait = new CountDownLatch(1);
+ final AtomicBoolean executed2 = new AtomicBoolean();
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed2.set(true);
+ exec2Wait.countDown();
+ }
+ });
+
+ final AtomicBoolean executed3 = new AtomicBoolean();
+ try {
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executed3.set(true);
+ }
+ });
+ fail("should be rejected...");
+ } catch (EsRejectedExecutionException e) {
+ // all is well
+ }
+
+ wait.countDown();
+
+ exec1Wait.await();
+ exec2Wait.await();
+
+ assertThat(executed1.get(), equalTo(true));
+ assertThat(executed2.get(), equalTo(true));
+ assertThat(executed3.get(), equalTo(false));
+
+ terminate(executor);
+ }
+
+ @Test
+ public void testScaleUp() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
+ ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ @Override
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+ //wait until thread executes this task
+ //otherwise, a task might be queued
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ terminate(pool);
+ }
+
+ @Test
+ public void testScaleDown() throws Exception {
+ final int min = between(1, 3);
+ final int max = between(min + 1, 6);
+ final ThreadBarrier barrier = new ThreadBarrier(max + 1);
+
+ final ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"));
+ assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
+ assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
+
+ for (int i = 0; i < max; ++i) {
+ final CountDownLatch latch = new CountDownLatch(1);
+ pool.execute(new Runnable() {
+ @Override
+ public void run() {
+ latch.countDown();
+ try {
+ barrier.await();
+ barrier.await();
+ } catch (Throwable e) {
+ barrier.reset(e);
+ }
+ }
+ });
+
+ //wait until thread executes this task
+ //otherwise, a task might be queued
+ latch.await();
+ }
+
+ barrier.await();
+ assertThat("wrong pool size", pool.getPoolSize(), equalTo(max));
+ assertThat("wrong active size", pool.getActiveCount(), equalTo(max));
+ barrier.await();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat("wrong active count", pool.getActiveCount(), equalTo(0));
+ assertThat("idle threads didn't shrink below max. (" + pool.getPoolSize() + ")", pool.getPoolSize(), lessThan(max));
+ }
+ });
+ terminate(pool);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
new file mode 100644
index 0000000000..38477eda67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
@@ -0,0 +1,331 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class PrioritizedExecutorsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPriorityQueue() throws Exception {
+ PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();
+ List<Priority> priorities = Lists.newArrayList(Priority.values());
+ Collections.shuffle(priorities);
+
+ for (Priority priority : priorities) {
+ queue.add(priority);
+ }
+
+ Priority prevPriority = null;
+ while (!queue.isEmpty()) {
+ if (prevPriority == null) {
+ prevPriority = queue.poll();
+ } else {
+ assertThat(queue.poll().after(prevPriority), is(true));
+ }
+ }
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ List<Integer> results = new ArrayList<>(8);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(8);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new Job(7, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch));
+ executor.submit(new Job(2, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(6, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new Job(1, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(4, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new Job(3, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ executor.submit(new Job(0, Priority.IMMEDIATE, results, finishedLatch));
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(8));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ assertThat(results.get(7), equalTo(7));
+ terminate(executor);
+ }
+
+ @Test
+ public void testExecutePrioritizedExecutorWithRunnables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ List<Integer> results = new ArrayList<>(8);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(8);
+ executor.execute(new AwaitingJob(awaitingLatch));
+ executor.execute(new Job(7, Priority.LANGUID, results, finishedLatch));
+ executor.execute(new Job(5, Priority.LOW, results, finishedLatch));
+ executor.execute(new Job(2, Priority.HIGH, results, finishedLatch));
+ executor.execute(new Job(6, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.execute(new Job(1, Priority.URGENT, results, finishedLatch));
+ executor.execute(new Job(4, Priority.NORMAL, results, finishedLatch));
+ executor.execute(new Job(3, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ executor.execute(new Job(0, Priority.IMMEDIATE, results, finishedLatch));
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(8));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ assertThat(results.get(7), equalTo(7));
+ terminate(executor);
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ List<Integer> results = new ArrayList<>(8);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(8);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(7, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new CallableJob(5, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch));
+ executor.submit(new CallableJob(6, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(1, Priority.URGENT, results, finishedLatch));
+ executor.submit(new CallableJob(4, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(3, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ executor.submit(new CallableJob(0, Priority.IMMEDIATE, results, finishedLatch));
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(8));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ assertThat(results.get(7), equalTo(7));
+ terminate(executor);
+ }
+
+ @Test
+ public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
+ ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ List<Integer> results = new ArrayList<>(8);
+ CountDownLatch awaitingLatch = new CountDownLatch(1);
+ CountDownLatch finishedLatch = new CountDownLatch(8);
+ executor.submit(new AwaitingJob(awaitingLatch));
+ executor.submit(new CallableJob(7, Priority.LANGUID, results, finishedLatch));
+ executor.submit(new Job(5, Priority.LOW, results, finishedLatch));
+ executor.submit(new CallableJob(2, Priority.HIGH, results, finishedLatch));
+ executor.submit(new Job(6, Priority.LOW, results, finishedLatch)); // will execute after the first LOW (fifo)
+ executor.submit(new CallableJob(1, Priority.URGENT, results, finishedLatch));
+ executor.submit(new Job(4, Priority.NORMAL, results, finishedLatch));
+ executor.submit(new CallableJob(3, Priority.HIGH, results, finishedLatch)); // will execute after the first HIGH (fifo)
+ executor.submit(new Job(0, Priority.IMMEDIATE, results, finishedLatch));
+ awaitingLatch.countDown();
+ finishedLatch.await();
+
+ assertThat(results.size(), equalTo(8));
+ assertThat(results.get(0), equalTo(0));
+ assertThat(results.get(1), equalTo(1));
+ assertThat(results.get(2), equalTo(2));
+ assertThat(results.get(3), equalTo(3));
+ assertThat(results.get(4), equalTo(4));
+ assertThat(results.get(5), equalTo(5));
+ assertThat(results.get(6), equalTo(6));
+ assertThat(results.get(7), equalTo(7));
+ terminate(executor);
+ }
+
+ @Test
+ public void testTimeout() throws Exception {
+ ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor(EsExecutors.daemonThreadFactory(getTestName()));
+ PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ final CountDownLatch invoked = new CountDownLatch(1);
+ final CountDownLatch block = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ invoked.countDown();
+ block.await();
+ } catch (InterruptedException e) {
+ fail();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "the blocking";
+ }
+ });
+ invoked.await();
+ PrioritizedEsThreadPoolExecutor.Pending[] pending = executor.getPending();
+ assertThat(pending.length, equalTo(1));
+ assertThat(pending[0].task.toString(), equalTo("the blocking"));
+ assertThat(pending[0].executing, equalTo(true));
+
+ final AtomicBoolean executeCalled = new AtomicBoolean();
+ final CountDownLatch timedOut = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ executeCalled.set(true);
+ }
+
+ @Override
+ public String toString() {
+ return "the waiting";
+ }
+ }, timer, TimeValue.timeValueMillis(100) /* enough timeout to catch them in the pending list... */, new Runnable() {
+ @Override
+ public void run() {
+ timedOut.countDown();
+ }
+ }
+ );
+
+ pending = executor.getPending();
+ assertThat(pending.length, equalTo(2));
+ assertThat(pending[0].task.toString(), equalTo("the blocking"));
+ assertThat(pending[0].executing, equalTo(true));
+ assertThat(pending[1].task.toString(), equalTo("the waiting"));
+ assertThat(pending[1].executing, equalTo(false));
+
+ assertThat(timedOut.await(2, TimeUnit.SECONDS), equalTo(true));
+ block.countDown();
+ Thread.sleep(100); // sleep a bit to double check that execute on the timed out update task is not called...
+ assertThat(executeCalled.get(), equalTo(false));
+ assertTrue(terminate(timer, executor));
+ }
+
+ @Test
+ public void testTimeoutCleanup() throws Exception {
+ ThreadPool threadPool = new ThreadPool("test");
+ ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler();
+ final AtomicBoolean timeoutCalled = new AtomicBoolean();
+ PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+ final CountDownLatch invoked = new CountDownLatch(1);
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ invoked.countDown();
+ }
+ }, timer, TimeValue.timeValueMillis(1000), new Runnable() {
+ @Override
+ public void run() {
+ // We should never get here
+ timeoutCalled.set(true);
+ }
+ }
+ );
+ invoked.await();
+ assertThat(timer.getQueue().size(), equalTo(0));
+ assertThat(timeoutCalled.get(), equalTo(false));
+ assertTrue(terminate(executor));
+ assertTrue(terminate(threadPool));
+ }
+
+ static class AwaitingJob extends PrioritizedRunnable {
+
+ private final CountDownLatch latch;
+
+ private AwaitingJob(CountDownLatch latch) {
+ super(Priority.URGENT);
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ static class Job extends PrioritizedRunnable {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ Job(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public void run() {
+ results.add(result);
+ latch.countDown();
+ }
+ }
+
+ static class CallableJob extends PrioritizedCallable<Integer> {
+
+ private final int result;
+ private final List<Integer> results;
+ private final CountDownLatch latch;
+
+ CallableJob(int result, Priority priority, List<Integer> results, CountDownLatch latch) {
+ super(priority);
+ this.result = result;
+ this.results = results;
+ this.latch = latch;
+ }
+
+ @Override
+ public Integer call() throws Exception {
+ results.add(result);
+ latch.countDown();
+ return result;
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTest.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTest.java
new file mode 100644
index 0000000000..3ff0767fa4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/RefCountedTest.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.util.concurrent;
+
+import org.apache.lucene.store.AlreadyClosedException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ */
+public class RefCountedTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testRefCount() throws IOException {
+ MyRefCounted counted = new MyRefCounted();
+
+ int incs = randomIntBetween(1, 100);
+ for (int i = 0; i < incs; i++) {
+ if (randomBoolean()) {
+ counted.incRef();
+ } else {
+ assertTrue(counted.tryIncRef());
+ }
+ counted.ensureOpen();
+ }
+
+ for (int i = 0; i < incs; i++) {
+ counted.decRef();
+ counted.ensureOpen();
+ }
+
+ counted.incRef();
+ counted.decRef();
+ for (int i = 0; i < incs; i++) {
+ if (randomBoolean()) {
+ counted.incRef();
+ } else {
+ assertTrue(counted.tryIncRef());
+ }
+ counted.ensureOpen();
+ }
+
+ for (int i = 0; i < incs; i++) {
+ counted.decRef();
+ counted.ensureOpen();
+ }
+
+ counted.decRef();
+ assertFalse(counted.tryIncRef());
+ try {
+ counted.incRef();
+ fail(" expected exception");
+ } catch (AlreadyClosedException ex) {
+ assertThat(ex.getMessage(), equalTo("test is already closed can't increment refCount current count [0]"));
+ }
+
+ try {
+ counted.ensureOpen();
+ fail(" expected exception");
+ } catch (AlreadyClosedException ex) {
+ assertThat(ex.getMessage(), equalTo("closed"));
+ }
+ }
+
+ @Test
+ public void testMultiThreaded() throws InterruptedException {
+ final MyRefCounted counted = new MyRefCounted();
+ Thread[] threads = new Thread[randomIntBetween(2, 5)];
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CopyOnWriteArrayList<Throwable> exceptions = new CopyOnWriteArrayList<>();
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ for (int j = 0; j < 10000; j++) {
+ counted.incRef();
+ try {
+ counted.ensureOpen();
+ } finally {
+ counted.decRef();
+ }
+ }
+ } catch (Throwable e) {
+ exceptions.add(e);
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown();
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+ counted.decRef();
+ try {
+ counted.ensureOpen();
+ fail("expected to be closed");
+ } catch (AlreadyClosedException ex) {
+ assertThat(ex.getMessage(), equalTo("closed"));
+ }
+ assertThat(counted.refCount(), is(0));
+ assertThat(exceptions, Matchers.emptyIterable());
+
+ }
+
+ private final class MyRefCounted extends AbstractRefCounted {
+
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+
+ public MyRefCounted() {
+ super("test");
+ }
+
+ @Override
+ protected void closeInternal() {
+ this.closed.set(true);
+ }
+
+ public void ensureOpen() {
+ if (closed.get()) {
+ assert this.refCount() == 0;
+ throw new AlreadyClosedException("closed");
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java
new file mode 100644
index 0000000000..f3d89e86ca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentFactoryTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent;
+
+import com.fasterxml.jackson.dataformat.cbor.CBORConstants;
+import com.fasterxml.jackson.dataformat.smile.SmileConstants;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class XContentFactoryTests extends ElasticsearchTestCase {
+
+
+ @Test
+ public void testGuessJson() throws IOException {
+ testGuessType(XContentType.JSON);
+ }
+
+ @Test
+ public void testGuessSmile() throws IOException {
+ testGuessType(XContentType.SMILE);
+ }
+
+ @Test
+ public void testGuessYaml() throws IOException {
+ testGuessType(XContentType.YAML);
+ }
+
+ @Test
+ public void testGuessCbor() throws IOException {
+ testGuessType(XContentType.CBOR);
+ }
+
+ private void testGuessType(XContentType type) throws IOException {
+ XContentBuilder builder = XContentFactory.contentBuilder(type);
+ builder.startObject();
+ builder.field("field1", "value1");
+ builder.endObject();
+
+ assertThat(XContentFactory.xContentType(builder.bytes()), equalTo(type));
+ BytesArray bytesArray = builder.bytes().toBytesArray();
+ assertThat(XContentFactory.xContentType(StreamInput.wrap(bytesArray.array(), bytesArray.arrayOffset(), bytesArray.length())), equalTo(type));
+
+ // CBOR is binary, cannot use String
+ if (type != XContentType.CBOR) {
+ assertThat(XContentFactory.xContentType(builder.string()), equalTo(type));
+ }
+ }
+
+ public void testCBORBasedOnMajorObjectDetection() {
+ // for this {"f "=> 5} perl encoder for example generates:
+ byte[] bytes = new byte[] {(byte) 0xA1, (byte) 0x43, (byte) 0x66, (byte) 6f, (byte) 6f, (byte) 0x5};
+ assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR));
+ //assertThat(((Number) XContentHelper.convertToMap(bytes, true).v2().get("foo")).intValue(), equalTo(5));
+
+ // this if for {"foo" : 5} in python CBOR
+ bytes = new byte[] {(byte) 0xA1, (byte) 0x63, (byte) 0x66, (byte) 0x6f, (byte) 0x6f, (byte) 0x5};
+ assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR));
+ assertThat(((Number) XContentHelper.convertToMap(new BytesArray(bytes), true).v2().get("foo")).intValue(), equalTo(5));
+
+ // also make sure major type check doesn't collide with SMILE and JSON, just in case
+ assertThat(CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, SmileConstants.HEADER_BYTE_1), equalTo(false));
+ assertThat(CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, (byte) '{'), equalTo(false));
+ assertThat(CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, (byte) ' '), equalTo(false));
+ assertThat(CBORConstants.hasMajorType(CBORConstants.MAJOR_TYPE_OBJECT, (byte) '-'), equalTo(false));
+ }
+
+ public void testCBORBasedOnMagicHeaderDetection() {
+ byte[] bytes = new byte[] {(byte) 0xd9, (byte) 0xd9, (byte) 0xf7};
+ assertThat(XContentFactory.xContentType(bytes), equalTo(XContentType.CBOR));
+ }
+
+ public void testEmptyStream() throws Exception {
+ ByteArrayInputStream is = new ByteArrayInputStream(new byte[0]);
+ assertNull(XContentFactory.xContentType(is));
+
+ is = new ByteArrayInputStream(new byte[] {(byte) 1});
+ assertNull(XContentFactory.xContentType(is));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
new file mode 100644
index 0000000000..d1f90ced81
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/BuilderRawFieldTests.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class BuilderRawFieldTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testJsonRawField() throws IOException {
+ testRawField(XContentType.JSON);
+ }
+
+ @Test
+ public void testSmileRawField() throws IOException {
+ testRawField(XContentType.SMILE);
+ }
+
+ @Test
+ public void testYamlRawField() throws IOException {
+ testRawField(XContentType.YAML);
+ }
+
+ @Test
+ public void testCborRawField() throws IOException {
+ testRawField(XContentType.CBOR);
+ }
+
+ private void testRawField(XContentType type) throws IOException {
+ XContentBuilder builder = XContentFactory.contentBuilder(type);
+ builder.startObject();
+ builder.field("field1", "value1");
+ builder.rawField("_source", XContentFactory.contentBuilder(type).startObject().field("s_field", "s_value").endObject().bytes());
+ builder.field("field2", "value2");
+ builder.rawField("payload_i", new BytesArray(Long.toString(1)));
+ builder.field("field3", "value3");
+ builder.rawField("payload_d", new BytesArray(Double.toString(1.1)));
+ builder.field("field4", "value4");
+ builder.rawField("payload_s", new BytesArray("test"));
+ builder.field("field5", "value5");
+ builder.endObject();
+
+ XContentParser parser = XContentFactory.xContent(type).createParser(builder.bytes());
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field1"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value1"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("_source"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("s_field"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("s_value"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field2"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value2"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_i"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.INT));
+ assertThat(parser.longValue(), equalTo(1l));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field3"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value3"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_d"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_NUMBER));
+ assertThat(parser.numberType(), equalTo(XContentParser.NumberType.DOUBLE));
+ assertThat(parser.doubleValue(), equalTo(1.1d));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field4"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value4"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("payload_s"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("test"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("field5"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.VALUE_STRING));
+ assertThat(parser.text(), equalTo("value5"));
+
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
new file mode 100644
index 0000000000..fc00f931d7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.builder;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.FastCharArrayWriter;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.CAMELCASE;
+import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class XContentBuilderTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPrettyWithLfAtEnd() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.usePrettyPrint();
+ generator.usePrintLineFeedAtEnd();
+
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ generator.close();
+ // double close, and check there is no error...
+ generator.close();
+
+ assertThat(writer.unsafeCharArray()[writer.size() - 1], equalTo('\n'));
+ }
+
+ @Test
+ public void verifyReuseJsonGenerator() throws Exception {
+ FastCharArrayWriter writer = new FastCharArrayWriter();
+ XContentGenerator generator = XContentFactory.xContent(XContentType.JSON).createGenerator(writer);
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+
+ // try again...
+ writer.reset();
+ generator.writeStartObject();
+ generator.writeStringField("test", "value");
+ generator.writeEndObject();
+ generator.flush();
+ // we get a space at the start here since it thinks we are not in the root object (fine, we will ignore it in the real code we use)
+ assertThat(writer.toStringTrim(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testRaw() throws IOException {
+ {
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
+ xContentBuilder.startObject();
+ xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.endObject();
+ assertThat(xContentBuilder.bytes().toUtf8(), equalTo("{\"foo\":{\"test\":\"value\"}}"));
+ }
+ {
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
+ xContentBuilder.startObject();
+ xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.rawField("foo1", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.endObject();
+ assertThat(xContentBuilder.bytes().toUtf8(), equalTo("{\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"}}"));
+ }
+ {
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
+ xContentBuilder.startObject();
+ xContentBuilder.field("test", "value");
+ xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.endObject();
+ assertThat(xContentBuilder.bytes().toUtf8(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"}}"));
+ }
+ {
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
+ xContentBuilder.startObject();
+ xContentBuilder.field("test", "value");
+ xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.field("test1", "value1");
+ xContentBuilder.endObject();
+ assertThat(xContentBuilder.bytes().toUtf8(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+ }
+ {
+ XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
+ xContentBuilder.startObject();
+ xContentBuilder.field("test", "value");
+ xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.rawField("foo1", new BytesArray("{\"test\":\"value\"}"));
+ xContentBuilder.field("test1", "value1");
+ xContentBuilder.endObject();
+ assertThat(xContentBuilder.bytes().toUtf8(), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+ }
+ }
+
+ @Test
+ public void testSimpleGenerator() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test\":\"value\"}"));
+ }
+
+ @Test
+ public void testOverloadedList() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test", Lists.newArrayList("1", "2")).endObject();
+ assertThat(builder.string(), equalTo("{\"test\":[\"1\",\"2\"]}"));
+ }
+
+ @Test
+ public void testWritingBinaryToStream() throws Exception {
+ BytesStreamOutput bos = new BytesStreamOutput();
+
+ XContentGenerator gen = XContentFactory.xContent(XContentType.JSON).createGenerator(bos);
+ gen.writeStartObject();
+ gen.writeStringField("name", "something");
+ gen.flush();
+ bos.write(", source : { test : \"value\" }".getBytes("UTF8"));
+ gen.writeStringField("name2", "something2");
+ gen.writeEndObject();
+ gen.close();
+
+ byte[] data = bos.bytes().toBytes();
+ String sData = new String(data, "UTF8");
+ System.out.println("DATA: " + sData);
+ }
+
+ @Test
+ public void testFieldCaseConversion() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(CAMELCASE);
+ builder.startObject().field("test_name", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"testName\":\"value\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON).fieldCaseConversion(UNDERSCORE);
+ builder.startObject().field("testName", "value").endObject();
+ assertThat(builder.string(), equalTo("{\"test_name\":\"value\"}"));
+ }
+
+ @Test
+ public void testByteConversion() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("test_name", (Byte)(byte)120).endObject();
+ assertThat(builder.bytes().toUtf8(), equalTo("{\"test_name\":120}"));
+ }
+
+ @Test
+ public void testDateTypesConversion() throws Exception {
+ Date date = new Date();
+ String expectedDate = XContentBuilder.defaultDatePrinter.print(date.getTime());
+ Calendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"), Locale.ROOT);
+ String expectedCalendar = XContentBuilder.defaultDatePrinter.print(calendar.getTimeInMillis());
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("date", date).endObject();
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject().field("calendar", calendar).endObject();
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ Map<String, Object> map = new HashMap<>();
+ map.put("date", date);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"date\":\"" + expectedDate + "\"}"));
+
+ builder = XContentFactory.contentBuilder(XContentType.JSON);
+ map = new HashMap<>();
+ map.put("calendar", calendar);
+ builder.map(map);
+ assertThat(builder.string(), equalTo("{\"calendar\":\"" + expectedCalendar + "\"}"));
+ }
+
+ @Test
+ public void testCopyCurrentStructure() throws Exception {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+ builder.startObject()
+ .field("test", "test field")
+ .startObject("filter")
+ .startObject("terms");
+
+ // up to 20k random terms
+ int numTerms = randomInt(20000) + 1;
+ List<String> terms = new ArrayList<>(numTerms);
+ for (int i = 0; i < numTerms; i++) {
+ terms.add("test" + i);
+ }
+
+ builder.field("fakefield", terms).endObject().endObject().endObject();
+
+ XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(builder.bytes());
+
+ XContentBuilder filterBuilder = null;
+ XContentParser.Token token;
+ String currentFieldName = null;
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("test".equals(currentFieldName)) {
+ assertThat(parser.text(), equalTo("test field"));
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("filter".equals(currentFieldName)) {
+ filterBuilder = XContentFactory.contentBuilder(parser.contentType());
+ filterBuilder.copyCurrentStructure(parser);
+ }
+ }
+ }
+
+ assertNotNull(filterBuilder);
+ parser = XContentFactory.xContent(XContentType.JSON).createParser(filterBuilder.bytes());
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("terms"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
+ assertThat(parser.currentName(), equalTo("fakefield"));
+ assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_ARRAY));
+ int i = 0;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ assertThat(parser.text(), equalTo(terms.get(i++)));
+ }
+
+ assertThat(i, equalTo(terms.size()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentParserTests.java
new file mode 100644
index 0000000000..4bc093ff95
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/CborXContentParserTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.cbor;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class CborXContentParserTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testEmptyValue() throws IOException {
+ BytesReference ref = XContentFactory.cborBuilder().startObject().field("field", "").endObject().bytes();
+
+ for (int i = 0; i < 2; i++) {
+ // Running this part twice triggers the issue.
+ // See https://github.com/elastic/elasticsearch/issues/8629
+ XContentParser parser = XContentFactory.xContent(XContentType.CBOR).createParser(ref);
+ while (parser.nextToken() != null) {
+ parser.utf8Bytes();
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java
new file mode 100644
index 0000000000..bda1c31a3f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/cbor/JsonVsCborTests.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.cbor;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class JsonVsCborTests extends ElasticsearchTestCase {
+
+ @Test
+ public void compareParsingTokens() throws IOException {
+ BytesStreamOutput xsonOs = new BytesStreamOutput();
+ XContentGenerator xsonGen = XContentFactory.xContent(XContentType.CBOR).createGenerator(xsonOs);
+
+ BytesStreamOutput jsonOs = new BytesStreamOutput();
+ XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs);
+
+ xsonGen.writeStartObject();
+ jsonGen.writeStartObject();
+
+ xsonGen.writeStringField("test", "value");
+ jsonGen.writeStringField("test", "value");
+
+ xsonGen.writeArrayFieldStart("arr");
+ jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeNumber(1);
+ jsonGen.writeNumber(1);
+ xsonGen.writeNull();
+ jsonGen.writeNull();
+ xsonGen.writeEndArray();
+ jsonGen.writeEndArray();
+
+ xsonGen.writeEndObject();
+ jsonGen.writeEndObject();
+
+ xsonGen.close();
+ jsonGen.close();
+
+ verifySameTokens(XContentFactory.xContent(XContentType.JSON).createParser(jsonOs.bytes().toBytes()), XContentFactory.xContent(XContentType.CBOR).createParser(xsonOs.bytes().toBytes()));
+ }
+
+ private void verifySameTokens(XContentParser parser1, XContentParser parser2) throws IOException {
+ while (true) {
+ XContentParser.Token token1 = parser1.nextToken();
+ XContentParser.Token token2 = parser2.nextToken();
+ if (token1 == null) {
+ assertThat(token2, nullValue());
+ return;
+ }
+ assertThat(token1, equalTo(token2));
+ switch (token1) {
+ case FIELD_NAME:
+ assertThat(parser1.currentName(), equalTo(parser2.currentName()));
+ break;
+ case VALUE_STRING:
+ assertThat(parser1.text(), equalTo(parser2.text()));
+ break;
+ case VALUE_NUMBER:
+ assertThat(parser1.numberType(), equalTo(parser2.numberType()));
+ assertThat(parser1.numberValue(), equalTo(parser2.numberValue()));
+ break;
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
new file mode 100644
index 0000000000..0a57adf8d3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/smile/JsonVsSmileTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.smile;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentGenerator;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class JsonVsSmileTests extends ElasticsearchTestCase {
+
+// @Test public void testBinarySmileField() throws Exception {
+// JsonGenerator gen = new SmileFactory().createJsonGenerator(new ByteArrayOutputStream());
+//// JsonGenerator gen = new JsonFactory().createJsonGenerator(new ByteArrayOutputStream(), JsonEncoding.UTF8);
+// gen.writeStartObject();
+// gen.writeFieldName("field1");
+// gen.writeBinary(new byte[]{1, 2, 3});
+// gen.writeEndObject();
+// }
+
+ @Test
+ public void compareParsingTokens() throws IOException {
+ BytesStreamOutput xsonOs = new BytesStreamOutput();
+ XContentGenerator xsonGen = XContentFactory.xContent(XContentType.SMILE).createGenerator(xsonOs);
+
+ BytesStreamOutput jsonOs = new BytesStreamOutput();
+ XContentGenerator jsonGen = XContentFactory.xContent(XContentType.JSON).createGenerator(jsonOs);
+
+ xsonGen.writeStartObject();
+ jsonGen.writeStartObject();
+
+ xsonGen.writeStringField("test", "value");
+ jsonGen.writeStringField("test", "value");
+
+ xsonGen.writeArrayFieldStart("arr");
+ jsonGen.writeArrayFieldStart("arr");
+ xsonGen.writeNumber(1);
+ jsonGen.writeNumber(1);
+ xsonGen.writeNull();
+ jsonGen.writeNull();
+ xsonGen.writeEndArray();
+ jsonGen.writeEndArray();
+
+ xsonGen.writeEndObject();
+ jsonGen.writeEndObject();
+
+ xsonGen.close();
+ jsonGen.close();
+
+ verifySameTokens(XContentFactory.xContent(XContentType.JSON).createParser(jsonOs.bytes().toBytes()), XContentFactory.xContent(XContentType.SMILE).createParser(xsonOs.bytes().toBytes()));
+ }
+
+ private void verifySameTokens(XContentParser parser1, XContentParser parser2) throws IOException {
+ while (true) {
+ XContentParser.Token token1 = parser1.nextToken();
+ XContentParser.Token token2 = parser2.nextToken();
+ if (token1 == null) {
+ assertThat(token2, nullValue());
+ return;
+ }
+ assertThat(token1, equalTo(token2));
+ switch (token1) {
+ case FIELD_NAME:
+ assertThat(parser1.currentName(), equalTo(parser2.currentName()));
+ break;
+ case VALUE_STRING:
+ assertThat(parser1.text(), equalTo(parser2.text()));
+ break;
+ case VALUE_NUMBER:
+ assertThat(parser1.numberType(), equalTo(parser2.numberType()));
+ assertThat(parser1.numberValue(), equalTo(parser2.numberValue()));
+ break;
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
new file mode 100644
index 0000000000..a66cb2086f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class XContentHelperTests extends ElasticsearchTestCase {
+
+ Map<String, Object> getMap(Object... keyValues) {
+ Map<String, Object> map = new HashMap<>();
+ for (int i = 0; i < keyValues.length; i++) {
+ map.put((String) keyValues[i], keyValues[++i]);
+ }
+ return map;
+ }
+
+ Map<String, Object> getNamedMap(String name, Object... keyValues) {
+ Map<String, Object> map = getMap(keyValues);
+
+ Map<String, Object> namedMap = new HashMap<>(1);
+ namedMap.put(name, map);
+ return namedMap;
+ }
+
+ List<Object> getList(Object... values) {
+ return Arrays.asList(values);
+ }
+
+ @Test
+ public void testMergingListValuesAreMapsOfOne() {
+
+ Map<String, Object> defaults = getMap("test", getList(getNamedMap("name1", "t1", "1"), getNamedMap("name2", "t2", "2")));
+ Map<String, Object> content = getMap("test", getList(getNamedMap("name2", "t3", "3"), getNamedMap("name4", "t4", "4")));
+ Map<String, Object> expected = getMap("test",
+ getList(getNamedMap("name2", "t2", "2", "t3", "3"), getNamedMap("name4", "t4", "4"), getNamedMap("name1", "t1", "1")));
+
+ XContentHelper.mergeDefaults(content, defaults);
+
+ assertThat(content, Matchers.equalTo(expected));
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
new file mode 100644
index 0000000000..056b9964a8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+/**
+ */
+public class XContentMapValuesTests extends ElasticsearchTestCase {
+
+    // Tests for XContentMapValues.filter / extractValue / extractRawValues over
+    // parsed JSON maps: include/exclude patterns, wildcards, nested objects,
+    // arrays, and field names containing dots.
+
+    // Basic include/exclude filtering: exact names, trailing wildcards, and
+    // path patterns on a nested object containing an array of objects.
+    @Test
+    public void testFilter() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .field("test1", "value1")
+                .field("test2", "value2")
+                .field("something_else", "value3")
+                .endObject();
+
+        Map<String, Object> source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        Map<String, Object> filter = XContentMapValues.filter(source, new String[]{"test1"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.size(), equalTo(1));
+        assertThat(filter.get("test1").toString(), equalTo("value1"));
+
+        // "test*" matches test1 and test2 but not something_else
+        filter = XContentMapValues.filter(source, new String[]{"test*"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.size(), equalTo(2));
+        assertThat(filter.get("test1").toString(), equalTo("value1"));
+        assertThat(filter.get("test2").toString(), equalTo("value2"));
+
+        // exclude-only: everything except test1 survives
+        filter = XContentMapValues.filter(source, Strings.EMPTY_ARRAY, new String[]{"test1"});
+        assertThat(filter.size(), equalTo(2));
+        assertThat(filter.get("test2").toString(), equalTo("value2"));
+        assertThat(filter.get("something_else").toString(), equalTo("value3"));
+
+        // more complex object...
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1")
+                .startArray("path2")
+                .startObject().field("test", "value1").endObject()
+                .startObject().field("test", "value2").endObject()
+                .endArray()
+                .endObject()
+                .field("test1", "value1")
+                .endObject();
+
+        source = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        filter = XContentMapValues.filter(source, new String[]{"path1"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.size(), equalTo(1));
+
+        // including a whole subtree keeps it identical to the source subtree
+        filter = XContentMapValues.filter(source, new String[]{"path1*"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.get("path1"), equalTo(source.get("path1")));
+        assertThat(filter.containsKey("test1"), equalTo(false));
+
+        filter = XContentMapValues.filter(source, new String[]{"test1*"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.get("test1"), equalTo(source.get("test1")));
+        assertThat(filter.containsKey("path1"), equalTo(false));
+
+        filter = XContentMapValues.filter(source, new String[]{"path1.path2.*"}, Strings.EMPTY_ARRAY);
+        assertThat(filter.get("path1"), equalTo(source.get("path1")));
+        assertThat(filter.containsKey("test1"), equalTo(false));
+    }
+
+    // extractValue: dotted-path lookup returning scalars, maps, or lists of
+    // values gathered across arrays; also resolves field names that themselves
+    // contain dots.
+    @SuppressWarnings({"unchecked"})
+    @Test
+    public void testExtractValue() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .field("test", "value")
+                .endObject();
+
+        Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractValue("test", map).toString(), equalTo("value"));
+        assertThat(XContentMapValues.extractValue("test.me", map), nullValue());
+        assertThat(XContentMapValues.extractValue("something.else.2", map), nullValue());
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractValue("path1.path2.test", map).toString(), equalTo("value"));
+        assertThat(XContentMapValues.extractValue("path1.path2.test_me", map), nullValue());
+        assertThat(XContentMapValues.extractValue("path1.non_path2.test", map), nullValue());
+
+        // extracting an intermediate path yields the nested Map itself
+        Object extValue = XContentMapValues.extractValue("path1.path2", map);
+        assertThat(extValue, instanceOf(Map.class));
+        Map<String, Object> extMapValue = (Map<String, Object>) extValue;
+        assertThat(extMapValue, hasEntry("test", (Object) "value"));
+
+        extValue = XContentMapValues.extractValue("path1", map);
+        assertThat(extValue, instanceOf(Map.class));
+        extMapValue = (Map<String, Object>) extValue;
+        assertThat(extMapValue.containsKey("path2"), equalTo(true));
+
+        // lists
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1").field("test", "value1", "value2").endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+        extValue = XContentMapValues.extractValue("path1.test", map);
+        assertThat(extValue, instanceOf(List.class));
+
+        List extListValue = (List) extValue;
+        assertThat(extListValue.size(), equalTo(2));
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1")
+                .startArray("path2")
+                .startObject().field("test", "value1").endObject()
+                .startObject().field("test", "value2").endObject()
+                .endArray()
+                .endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+
+        // a path through an array collects one value per array element
+        extValue = XContentMapValues.extractValue("path1.path2.test", map);
+        assertThat(extValue, instanceOf(List.class));
+
+        extListValue = (List) extValue;
+        assertThat(extListValue.size(), equalTo(2));
+        assertThat(extListValue.get(0).toString(), equalTo("value1"));
+        assertThat(extListValue.get(1).toString(), equalTo("value2"));
+
+        // fields with . in them
+        builder = XContentFactory.jsonBuilder().startObject()
+                .field("xxx.yyy", "value")
+                .endObject();
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractValue("xxx.yyy", map).toString(), equalTo("value"));
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractValue("path1.xxx.path2.yyy.test", map).toString(), equalTo("value"));
+    }
+
+    // extractRawValues: like extractValue but always returns a list, and also
+    // resolves dotted field names at each nesting level.
+    @SuppressWarnings({"unchecked"})
+    @Test
+    public void testExtractRawValue() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .field("test", "value")
+                .endObject();
+
+        Map<String, Object> map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractRawValues("test", map).get(0).toString(), equalTo("value"));
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .field("test.me", "value")
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractRawValues("test.me", map).get(0).toString(), equalTo("value"));
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1").startObject("path2").field("test", "value").endObject().endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractRawValues("path1.path2.test", map).get(0).toString(), equalTo("value"));
+
+        builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("path1.xxx").startObject("path2.yyy").field("test", "value").endObject().endObject()
+                .endObject();
+
+        map = XContentFactory.xContent(XContentType.JSON).createParser(builder.string()).mapAndClose();
+        assertThat(XContentMapValues.extractRawValues("path1.xxx.path2.yyy.test", map).get(0).toString(), equalTo("value"));
+    }
+
+    // An include of "obj_name" must not be treated as a prefix match of "obj".
+    // NOTE(review): local name "filterdMap" is misspelled (filteredMap).
+    @Test
+    public void prefixedNamesFilteringTest() {
+        Map<String, Object> map = new HashMap<>();
+        map.put("obj", "value");
+        map.put("obj_name", "value_name");
+        Map<String, Object> filterdMap = XContentMapValues.filter(map, new String[]{"obj_name"}, Strings.EMPTY_ARRAY);
+        assertThat(filterdMap.size(), equalTo(1));
+        assertThat((String) filterdMap.get("obj_name"), equalTo("value_name"));
+    }
+
+
+    // Filtering inside arrays and nested objects.
+    // NOTE(review): local name "falteredMap" is misspelled (filteredMap).
+    @Test
+    @SuppressWarnings("unchecked")
+    public void nestedFilteringTest() {
+        Map<String, Object> map = new HashMap<>();
+        map.put("field", "value");
+        map.put("array",
+                Arrays.asList(
+                        1,
+                        new HashMap<String, Object>() {{
+                            put("nested", 2);
+                            put("nested_2", 3);
+                        }}));
+        Map<String, Object> falteredMap = XContentMapValues.filter(map, new String[]{"array.nested"}, Strings.EMPTY_ARRAY);
+        assertThat(falteredMap.size(), equalTo(1));
+
+        // Selecting members of objects within arrays (ex. [ 1, { nested: "value"} ]) always returns all values in the array (1 in the ex)
+        // this is expected behavior as these types of objects are not supported in ES
+        assertThat((Integer) ((List) falteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) falteredMap.get("array")).get(1)).size(), equalTo(1));
+        assertThat((Integer) ((Map<String, Object>) ((List) falteredMap.get("array")).get(1)).get("nested"), equalTo(2));
+
+        falteredMap = XContentMapValues.filter(map, new String[]{"array.*"}, Strings.EMPTY_ARRAY);
+        assertThat(falteredMap.size(), equalTo(1));
+        assertThat((Integer) ((List) falteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) falteredMap.get("array")).get(1)).size(), equalTo(2));
+
+        map.clear();
+        map.put("field", "value");
+        map.put("obj",
+                new HashMap<String, Object>() {{
+                    put("field", "value");
+                    put("field2", "value2");
+                }});
+        falteredMap = XContentMapValues.filter(map, new String[]{"obj.field"}, Strings.EMPTY_ARRAY);
+        assertThat(falteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) falteredMap.get("obj")).size(), equalTo(1));
+        assertThat((String) ((Map<String, Object>) falteredMap.get("obj")).get("field"), equalTo("value"));
+
+        falteredMap = XContentMapValues.filter(map, new String[]{"obj.*"}, Strings.EMPTY_ARRAY);
+        assertThat(falteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) falteredMap.get("obj")).size(), equalTo(2));
+        assertThat((String) ((Map<String, Object>) falteredMap.get("obj")).get("field"), equalTo("value"));
+        assertThat((String) ((Map<String, Object>) falteredMap.get("obj")).get("field2"), equalTo("value2"));
+
+    }
+
+    // Including a whole object (or array) by name keeps its full contents,
+    // minus anything removed by the exclude patterns.
+    @SuppressWarnings("unchecked")
+    @Test
+    public void completeObjectFilteringTest() {
+        Map<String, Object> map = new HashMap<>();
+        map.put("field", "value");
+        map.put("obj",
+                new HashMap<String, Object>() {{
+                    put("field", "value");
+                    put("field2", "value2");
+                }});
+        map.put("array",
+                Arrays.asList(
+                        1,
+                        new HashMap<String, Object>() {{
+                            put("field", "value");
+                            put("field2", "value2");
+                        }}));
+
+        Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field2").toString(), equalTo("value2"));
+
+
+        // wildcard exclude reaches inside the included object
+        filteredMap = XContentMapValues.filter(map, new String[]{"obj"}, new String[]{"*.field2"});
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).get("field").toString(), equalTo("value"));
+
+
+        filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{});
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+        assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(2));
+
+        // ...and inside objects nested in an included array
+        filteredMap = XContentMapValues.filter(map, new String[]{"array"}, new String[]{"*.field2"});
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(((List) filteredMap.get("array")).size(), equalTo(2));
+        assertThat((Integer) ((List) filteredMap.get("array")).get(0), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).size(), equalTo(1));
+        assertThat(((Map<String, Object>) ((List) filteredMap.get("array")).get(1)).get("field").toString(), equalTo("value"));
+    }
+
+    // Include patterns that start with a wildcard segment ("*.field2", "*.*").
+    @SuppressWarnings("unchecked")
+    @Test
+    public void filterIncludesUsingStarPrefix() {
+        Map<String, Object> map = new HashMap<>();
+        map.put("field", "value");
+        map.put("obj",
+                new HashMap<String, Object>() {{
+                    put("field", "value");
+                    put("field2", "value2");
+                }});
+        map.put("n_obj",
+                new HashMap<String, Object>() {{
+                    put("n_field", "value");
+                    put("n_field2", "value2");
+                }});
+
+        Map<String, Object> filteredMap = XContentMapValues.filter(map, new String[]{"*.field2"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(filteredMap, hasKey("obj"));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field2"));
+
+        // only objects
+        filteredMap = XContentMapValues.filter(map, new String[]{"*.*"}, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(2));
+        assertThat(filteredMap, hasKey("obj"));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")).size(), equalTo(2));
+        assertThat(filteredMap, hasKey("n_obj"));
+        assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(2));
+
+
+        // include everything, exclude any second-level key ending in "2"
+        filteredMap = XContentMapValues.filter(map, new String[]{"*"}, new String[]{"*.*2"});
+        assertThat(filteredMap.size(), equalTo(3));
+        assertThat(filteredMap, hasKey("field"));
+        assertThat(filteredMap, hasKey("obj"));
+        assertThat(((Map) filteredMap.get("obj")).size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("obj")), hasKey("field"));
+        assertThat(filteredMap, hasKey("n_obj"));
+        assertThat(((Map<String, Object>) filteredMap.get("n_obj")).size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredMap.get("n_obj")), hasKey("n_field"));
+
+    }
+
+    // Empty includes and excludes means everything passes through.
+    @Test
+    public void filterWithEmptyIncludesExcludes() {
+        Map<String, Object> map = new HashMap<>();
+        map.put("field", "value");
+        Map<String, Object> filteredMap = XContentMapValues.filter(map, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY);
+        assertThat(filteredMap.size(), equalTo(1));
+        assertThat(filteredMap.get("field").toString(), equalTo("value"));
+
+    }
+
+    // An included object that is empty is still present in the output.
+    @SuppressWarnings({"unchecked"})
+    @Test
+    public void testThatFilterIncludesEmptyObjectWhenUsingIncludes() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("obj")
+                .endObject()
+                .endObject();
+
+        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj"}, Strings.EMPTY_ARRAY);
+
+        assertThat(mapTuple.v2(), equalTo(filteredSource));
+    }
+
+    // Excluding a non-existent field leaves an empty object untouched.
+    @Test
+    public void testThatFilterIncludesEmptyObjectWhenUsingExcludes() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("obj")
+                .endObject()
+                .endObject();
+
+        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"nonExistingField"});
+
+        assertThat(mapTuple.v2(), equalTo(filteredSource));
+    }
+
+    // Excluding all of an object's properties keeps the (now empty) object itself.
+    @Test
+    public void testNotOmittingObjectsWithExcludedProperties() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("obj")
+                .field("f1", "v1")
+                .endObject()
+                .endObject();
+
+        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"obj.f1"});
+
+        assertThat(filteredSource.size(), equalTo(1));
+        assertThat(filteredSource, hasKey("obj"));
+        assertThat(((Map) filteredSource.get("obj")).size(), equalTo(0));
+    }
+
+    // Excluding a nested object leaves the enclosing object in place (empty),
+    // under implicit, explicit, and wildcard includes.
+    @SuppressWarnings({"unchecked"})
+    @Test
+    public void testNotOmittingObjectWithNestedExcludedObject() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("obj1")
+                .startObject("obj2")
+                .startObject("obj3")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject();
+
+        // implicit include
+        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), Strings.EMPTY_ARRAY, new String[]{"*.obj2"});
+
+        assertThat(filteredSource.size(), equalTo(1));
+        assertThat(filteredSource, hasKey("obj1"));
+        assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+        // explicit include
+        filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"obj1"}, new String[]{"*.obj2"});
+        assertThat(filteredSource.size(), equalTo(1));
+        assertThat(filteredSource, hasKey("obj1"));
+        assertThat(((Map) filteredSource.get("obj1")).size(), Matchers.equalTo(0));
+
+        // wild card include
+        filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, new String[]{"*.obj3"});
+        assertThat(filteredSource.size(), equalTo(1));
+        assertThat(filteredSource, hasKey("obj1"));
+        assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+        assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), Matchers.equalTo(0));
+    }
+
+    // A wildcard include of a nested object keeps its full enclosing path.
+    @SuppressWarnings({"unchecked"})
+    @Test
+    public void testIncludingObjectWithNestedIncludedObject() throws Exception {
+        XContentBuilder builder = XContentFactory.jsonBuilder().startObject()
+                .startObject("obj1")
+                .startObject("obj2")
+                .endObject()
+                .endObject()
+                .endObject();
+
+        Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(builder.bytes(), true);
+        Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), new String[]{"*.obj2"}, Strings.EMPTY_ARRAY);
+
+        assertThat(filteredSource.size(), equalTo(1));
+        assertThat(filteredSource, hasKey("obj1"));
+        assertThat(((Map) filteredSource.get("obj1")).size(), equalTo(1));
+        assertThat(((Map<String, Object>) filteredSource.get("obj1")), hasKey("obj2"));
+        assertThat(((Map) ((Map) filteredSource.get("obj1")).get("obj2")).size(), equalTo(0));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTests.java
new file mode 100644
index 0000000000..d07bf44288
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractFilteringJsonGeneratorTests.java
@@ -0,0 +1,524 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public abstract class AbstractFilteringJsonGeneratorTests extends ElasticsearchTestCase {
+
+ protected abstract XContentType getXContentType();
+
+ protected abstract void assertXContentBuilder(XContentBuilder expected, XContentBuilder builder);
+
+ protected void assertString(XContentBuilder expected, XContentBuilder builder) {
+ assertNotNull(builder);
+ assertNotNull(expected);
+
+ // Verify that the result is equal to the expected string
+ assertThat(builder.bytes().toUtf8(), is(expected.bytes().toUtf8()));
+ }
+
+ protected void assertBinary(XContentBuilder expected, XContentBuilder builder) {
+ assertNotNull(builder);
+ assertNotNull(expected);
+
+ try {
+ XContent xContent = XContentFactory.xContent(builder.contentType());
+ XContentParser jsonParser = xContent.createParser(expected.bytes());
+ XContentParser testParser = xContent.createParser(builder.bytes());
+
+ while (true) {
+ XContentParser.Token token1 = jsonParser.nextToken();
+ XContentParser.Token token2 = testParser.nextToken();
+ if (token1 == null) {
+ assertThat(token2, nullValue());
+ return;
+ }
+ assertThat(token1, equalTo(token2));
+ switch (token1) {
+ case FIELD_NAME:
+ assertThat(jsonParser.currentName(), equalTo(testParser.currentName()));
+ break;
+ case VALUE_STRING:
+ assertThat(jsonParser.text(), equalTo(testParser.text()));
+ break;
+ case VALUE_NUMBER:
+ assertThat(jsonParser.numberType(), equalTo(testParser.numberType()));
+ assertThat(jsonParser.numberValue(), equalTo(testParser.numberValue()));
+ break;
+ }
+ }
+ } catch (Exception e) {
+ fail("Fail to verify the result of the XContentBuilder: " + e.getMessage());
+ }
+ }
+
+ private XContentBuilder newXContentBuilder(String... filters) throws IOException {
+ return XContentBuilder.builder(getXContentType().xContent(), filters);
+ }
+
+ /**
+ * Build a sample using a given XContentBuilder
+ */
+ private XContentBuilder sample(XContentBuilder builder) throws IOException {
+ assertNotNull(builder);
+ builder.startObject()
+ .field("title", "My awesome book")
+ .field("pages", 456)
+ .field("price", 27.99)
+ .field("timestamp", 1428582942867L)
+ .nullField("default")
+ .startArray("tags")
+ .value("elasticsearch")
+ .value("java")
+ .endArray()
+ .startArray("authors")
+ .startObject()
+ .field("name", "John Doe")
+ .field("lastname", "John")
+ .field("firstname", "Doe")
+ .endObject()
+ .startObject()
+ .field("name", "William Smith")
+ .field("lastname", "William")
+ .field("firstname", "Smith")
+ .endObject()
+ .endArray()
+ .startObject("properties")
+ .field("weight", 0.8d)
+ .startObject("language")
+ .startObject("en")
+ .field("lang", "English")
+ .field("available", true)
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .field("street", "Hampton St")
+ .field("city", "London")
+ .endObject()
+ .startObject()
+ .field("name", "address #2")
+ .field("street", "Queen St")
+ .field("city", "Stornoway")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject("fr")
+ .field("lang", "French")
+ .field("available", false)
+ .startArray("distributors")
+ .startObject()
+ .field("name", "La Maison du Livre")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .field("street", "Rue Mouffetard")
+ .field("city", "Paris")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Thetra")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ return builder;
+ }
+
+ /**
+     * Instantiates a new XContentBuilder with the given filters and builds a sample with it.
+ */
+ private XContentBuilder sample(String... filters) throws IOException {
+ return sample(newXContentBuilder(filters));
+ }
+
+ @Test
+ public void testNoFiltering() throws Exception {
+ XContentBuilder expected = sample();
+
+ assertXContentBuilder(expected, sample());
+ assertXContentBuilder(expected, sample("*"));
+ assertXContentBuilder(expected, sample("**"));
+ }
+
+ @Test
+ public void testNoMatch() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject().endObject();
+
+ assertXContentBuilder(expected, sample("xyz"));
+ }
+
+ @Test
+ public void testSimpleField() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .field("title", "My awesome book")
+ .endObject();
+
+ assertXContentBuilder(expected, sample("title"));
+ }
+
+ @Test
+ public void testSimpleFieldWithWildcard() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .field("price", 27.99)
+ .startObject("properties")
+ .field("weight", 0.8d)
+ .startObject("language")
+ .startObject("en")
+ .field("lang", "English")
+ .field("available", true)
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .field("street", "Hampton St")
+ .field("city", "London")
+ .endObject()
+ .startObject()
+ .field("name", "address #2")
+ .field("street", "Queen St")
+ .field("city", "Stornoway")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject("fr")
+ .field("lang", "French")
+ .field("available", false)
+ .startArray("distributors")
+ .startObject()
+ .field("name", "La Maison du Livre")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .field("street", "Rue Mouffetard")
+ .field("city", "Paris")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Thetra")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("pr*"));
+ }
+
+ @Test
+ public void testMultipleFields() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .field("title", "My awesome book")
+ .field("pages", 456)
+ .endObject();
+
+ assertXContentBuilder(expected, sample("title", "pages"));
+ }
+
+ @Test
+ public void testSimpleArray() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startArray("tags")
+ .value("elasticsearch")
+ .value("java")
+ .endArray()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("tags"));
+ }
+
+ @Test
+ public void testSimpleArrayOfObjects() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startArray("authors")
+ .startObject()
+ .field("name", "John Doe")
+ .field("lastname", "John")
+ .field("firstname", "Doe")
+ .endObject()
+ .startObject()
+ .field("name", "William Smith")
+ .field("lastname", "William")
+ .field("firstname", "Smith")
+ .endObject()
+ .endArray()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("authors"));
+ assertXContentBuilder(expected, sample("authors.*"));
+ assertXContentBuilder(expected, sample("authors.*name"));
+ }
+
+ @Test
+ public void testSimpleArrayOfObjectsProperty() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startArray("authors")
+ .startObject()
+ .field("lastname", "John")
+ .endObject()
+ .startObject()
+ .field("lastname", "William")
+ .endObject()
+ .endArray()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("authors.lastname"));
+ assertXContentBuilder(expected, sample("authors.l*"));
+ }
+
+ @Test
+ public void testRecurseField1() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startArray("authors")
+ .startObject()
+ .field("name", "John Doe")
+ .endObject()
+ .startObject()
+ .field("name", "William Smith")
+ . endObject()
+ .endArray()
+ .startObject("properties")
+ .startObject("language")
+ .startObject("en")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .endObject()
+ .startObject()
+ .field("name", "address #2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject("fr")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "La Maison du Livre")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Thetra")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("**.name"));
+ }
+
+ @Test
+ public void testRecurseField2() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startObject("properties")
+ .startObject("language")
+ .startObject("en")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .endObject()
+ .startObject()
+ .field("name", "address #2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject("fr")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "La Maison du Livre")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Thetra")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("properties.**.name"));
+ }
+
+ @Test
+ public void testRecurseField3() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startObject("properties")
+ .startObject("language")
+ .startObject("en")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .startArray("addresses")
+ .startObject()
+ .field("name", "address #1")
+ .endObject()
+ .startObject()
+ .field("name", "address #2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("properties.*.en.**.name"));
+ }
+
+ @Test
+ public void testRecurseField4() throws Exception {
+ XContentBuilder expected = newXContentBuilder().startObject()
+ .startObject("properties")
+ .startObject("language")
+ .startObject("en")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "The Book Shop")
+ .endObject()
+ .startObject()
+ .field("name", "Sussex Books House")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject("fr")
+ .startArray("distributors")
+ .startObject()
+ .field("name", "La Maison du Livre")
+ .endObject()
+ .startObject()
+ .field("name", "Thetra")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertXContentBuilder(expected, sample("properties.**.distributors.name"));
+ }
+
+ @Test
+ public void testRawField() throws Exception {
+
+ XContentBuilder expectedRawField = newXContentBuilder().startObject().field("foo", 0).startObject("raw").field("content", "hello world!").endObject().endObject();
+ XContentBuilder expectedRawFieldFiltered = newXContentBuilder().startObject().field("foo", 0).endObject();
+        XContentBuilder expectedRawFieldNotFiltered = newXContentBuilder().startObject().startObject("raw").field("content", "hello world!").endObject().endObject();
+
+ BytesReference raw = newXContentBuilder().startObject().field("content", "hello world!").endObject().bytes();
+
+ // Test method: rawField(String fieldName, BytesReference content)
+ assertXContentBuilder(expectedRawField, newXContentBuilder().startObject().field("foo", 0).rawField("raw", raw).endObject());
+ assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilder("f*").startObject().field("foo", 0).rawField("raw", raw).endObject());
+ assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilder("r*").startObject().field("foo", 0).rawField("raw", raw).endObject());
+
+ // Test method: rawField(String fieldName, byte[] content)
+ assertXContentBuilder(expectedRawField, newXContentBuilder().startObject().field("foo", 0).rawField("raw", raw.toBytes()).endObject());
+ assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilder("f*").startObject().field("foo", 0).rawField("raw", raw.toBytes()).endObject());
+ assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilder("r*").startObject().field("foo", 0).rawField("raw", raw.toBytes()).endObject());
+
+ // Test method: rawField(String fieldName, InputStream content)
+ assertXContentBuilder(expectedRawField, newXContentBuilder().startObject().field("foo", 0).rawField("raw", new ByteArrayInputStream(raw.toBytes())).endObject());
+ assertXContentBuilder(expectedRawFieldFiltered, newXContentBuilder("f*").startObject().field("foo", 0).rawField("raw", new ByteArrayInputStream(raw.toBytes())).endObject());
+ assertXContentBuilder(expectedRawFieldNotFiltered, newXContentBuilder("r*").startObject().field("foo", 0).rawField("raw", new ByteArrayInputStream(raw.toBytes())).endObject());
+ }
+
+ @Test
+ public void testArrays() throws Exception {
+ // Test: Array of values (no filtering)
+ XContentBuilder expected = newXContentBuilder().startObject().startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject();
+ assertXContentBuilder(expected, newXContentBuilder("t*").startObject().startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
+ assertXContentBuilder(expected, newXContentBuilder("tags").startObject().startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
+
+ // Test: Array of values (with filtering)
+ assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilder("foo").startObject().startArray("tags").value("lorem").value("ipsum").value("dolor").endArray().endObject());
+
+ // Test: Array of objects (no filtering)
+ expected = newXContentBuilder().startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject();
+ assertXContentBuilder(expected, newXContentBuilder("t*").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
+ assertXContentBuilder(expected, newXContentBuilder("tags").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
+
+ // Test: Array of objects (with filtering)
+ assertXContentBuilder(newXContentBuilder().startObject().endObject(), newXContentBuilder("foo").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
+
+ // Test: Array of objects (with partial filtering)
+ expected = newXContentBuilder().startObject().startArray("tags").startObject().field("firstname", "ipsum").endObject().endArray().endObject();
+ assertXContentBuilder(expected, newXContentBuilder("t*.firstname").startObject().startArray("tags").startObject().field("lastname", "lorem").endObject().startObject().field("firstname", "ipsum").endObject().endArray().endObject());
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborFilteringGeneratorTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborFilteringGeneratorTests.java
new file mode 100644
index 0000000000..fab77a26be
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborFilteringGeneratorTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+
+public class CborFilteringGeneratorTests extends JsonFilteringGeneratorTests {
+
+ @Override
+ protected XContentType getXContentType() {
+ return XContentType.CBOR;
+ }
+
+ @Override
+ protected void assertXContentBuilder(XContentBuilder expected, XContentBuilder builder) {
+ assertBinary(expected, builder);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGeneratorBenchmark.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGeneratorBenchmark.java
new file mode 100644
index 0000000000..97ce4fcb83
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGeneratorBenchmark.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Locale;
+
+/**
+ * Benchmark class to compare filtered and unfiltered XContent generators.
+ */
+public class FilteringJsonGeneratorBenchmark {
+
+ public static void main(String[] args) throws IOException {
+ final XContent XCONTENT = JsonXContent.jsonXContent;
+
+ System.out.println("Executing " + FilteringJsonGeneratorBenchmark.class + "...");
+
+ System.out.println("Warming up...");
+ run(XCONTENT, 500_000, 100, 0.5);
+ System.out.println("Warmed up.");
+
+ System.out.println("nb documents | nb fields | nb fields written | % fields written | time (millis) | rate (docs/sec) | avg size");
+
+ for (int nbFields : Arrays.asList(10, 25, 50, 100, 250)) {
+ for (int nbDocs : Arrays.asList(100, 1000, 10_000, 100_000, 500_000)) {
+ for (double ratio : Arrays.asList(0.0, 1.0, 0.99, 0.95, 0.9, 0.75, 0.5, 0.25, 0.1, 0.05, 0.01)) {
+ run(XCONTENT, nbDocs, nbFields, ratio);
+ }
+ }
+ }
+ System.out.println("Done.");
+ }
+
+ private static void run(XContent xContent, long nbIterations, int nbFields, double ratio) throws IOException {
+ String[] fields = fields(nbFields);
+ String[] filters = fields((int) (nbFields * ratio));
+
+ long size = 0;
+ BytesStreamOutput os = new BytesStreamOutput();
+
+ long start = System.nanoTime();
+ for (int i = 0; i < nbIterations; i++) {
+ XContentBuilder builder = new XContentBuilder(xContent, os, filters);
+ builder.startObject();
+
+ for (String field : fields) {
+ builder.field(field, System.nanoTime());
+ }
+ builder.endObject();
+
+ size += builder.bytes().length();
+ os.reset();
+ }
+ double milliseconds = (System.nanoTime() - start) / 1_000_000d;
+
+ System.out.printf(Locale.ROOT, "%12d | %9d | %17d | %14.2f %% | %10.3f ms | %15.2f | %8.0f %n",
+ nbIterations, nbFields,
+ (int) (nbFields * ratio),
+ (ratio * 100d),
+ milliseconds,
+ ((double) nbIterations) / (milliseconds / 1000d),
+ size / ((double) nbIterations));
+ }
+
+ /**
+ * Returns a String array of field names starting from "field_0" with a length of n.
+ * If n=3, the array is ["field_0","field_1","field_2"]
+ */
+ private static String[] fields(int n) {
+ String[] fields = new String[n];
+ for (int i = 0; i < n; i++) {
+ fields[i] = "field_" + i;
+ }
+ return fields;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonFilteringGeneratorTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonFilteringGeneratorTests.java
new file mode 100644
index 0000000000..9468746fac
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonFilteringGeneratorTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+
+public class JsonFilteringGeneratorTests extends AbstractFilteringJsonGeneratorTests {
+
+ @Override
+ protected XContentType getXContentType() {
+ return XContentType.JSON;
+ }
+
+ @Override
+ protected void assertXContentBuilder(XContentBuilder expected, XContentBuilder builder) {
+ assertString(expected, builder);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java
new file mode 100644
index 0000000000..a12e12be17
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+
+public class SmileFilteringGeneratorTests extends JsonFilteringGeneratorTests {
+
+ @Override
+ protected XContentType getXContentType() {
+ return XContentType.SMILE;
+ }
+
+ @Override
+ protected void assertXContentBuilder(XContentBuilder expected, XContentBuilder builder) {
+ assertBinary(expected, builder);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java
new file mode 100644
index 0000000000..d7e3a934ec
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.xcontent.support.filtering;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+
+public class YamlFilteringGeneratorTests extends AbstractFilteringJsonGeneratorTests {
+
+ @Override
+ protected XContentType getXContentType() {
+ return XContentType.YAML;
+ }
+
+ @Override
+ protected void assertXContentBuilder(XContentBuilder expected, XContentBuilder builder) {
+ assertString(expected, builder);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
new file mode 100644
index 0000000000..5019f6eaee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/consistencylevel/WriteConsistencyLevelTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.consistencylevel;
+
+import org.elasticsearch.action.UnavailableShardsException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class WriteConsistencyLevelTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testWriteConsistencyLevelReplication2() throws Exception {
+ prepareCreate("test", 1, Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(1).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+ // indexing, by default, will work (ONE consistency level)
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test")).setConsistencyLevel(WriteConsistencyLevel.ONE).execute().actionGet();
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [QUORUM] (have 1, needed 2). Timeout: [100ms], request: index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}"));
+ // but really, all is well
+ }
+
+ allowNodes("test", 2);
+
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+        // this should work, since we now have enough active shard copies to satisfy QUORUM
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.QUORUM)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+
+ try {
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueMillis(100)).execute().actionGet();
+ fail("can't index, does not match consistency");
+ } catch (UnavailableShardsException e) {
+ assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [ALL] (have 2, needed 3). Timeout: [100ms], request: index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}"));
+ // but really, all is well
+ }
+
+ allowNodes("test", 3);
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+        // this should work, since we now have enough active shard copies to satisfy ALL
+ client().prepareIndex("test", "type1", "1").setSource(source("1", "test"))
+ .setConsistencyLevel(WriteConsistencyLevel.ALL)
+ .setTimeout(timeValueSeconds(1)).execute().actionGet();
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java b/core/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
new file mode 100644
index 0000000000..8d613fb6ac
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/count/simple/SimpleCountTests.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.count.simple;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.is;
+
+public class SimpleCountTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testCountRandomPreference() throws InterruptedException, ExecutionException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+
+ String randomPreference = randomUnicodeOfLengthBetween(0, 4);
+            // randomPreference should not start with '_' (reserved for known preference types, e.g. _shards, _primary)
+ while (randomPreference.startsWith("_")) {
+ randomPreference = randomUnicodeOfLengthBetween(0, 4);
+ }
+            // count with the random (non-reserved) preference and verify all documents are found
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomPreference).get();
+ assertHitCount(countResponse, 6l);
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ createIndex("test");
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ createIndex("test");
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+        // id is not indexed, but let's see that we automatically convert the query to match it
+ CountResponse countResponse = client().prepareCount().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+        // id is not indexed, but we can automatically support prefix queries as well
+ countResponse = client().prepareCount().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount().setQuery(QueryBuilders.queryStringQuery("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void simpleCountEarlyTerminationTests() throws Exception {
+ // set up one shard only to test early termination
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+ int max = randomIntBetween(3, 29);
+ List<IndexRequestBuilder> docbuilders = new ArrayList<>(max);
+
+ for (int i = 1; i <= max; i++) {
+ String id = String.valueOf(i);
+ docbuilders.add(client().prepareIndex("test", "type1", id).setSource("field", i));
+ }
+
+ indexRandom(true, docbuilders);
+ ensureGreen();
+ refresh();
+
+ // sanity check
+ CountResponse countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).execute().actionGet();
+ assertHitCount(countResponse, max);
+
+ // threshold <= actual count
+ for (int i = 1; i <= max; i++) {
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(i).execute().actionGet();
+ assertHitCount(countResponse, i);
+ assertTrue(countResponse.terminatedEarly());
+ }
+
+ // threshold > actual count
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)).setTerminateAfter(max + randomIntBetween(1, max)).execute().actionGet();
+ assertHitCount(countResponse, max);
+ assertFalse(countResponse.terminatedEarly());
+ }
+
+ @Test
+ public void localDependentDateTests() throws Exception {
+ assumeFalse("Locals are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false));
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ refresh();
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 10l);
+
+ countResponse = client().prepareCount("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(countResponse, 20l);
+ }
+ }
+
+ @Test
+ public void testThatNonEpochDatesCanBeSearch() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("date_field").field("type", "date").field("format", "yyyyMMddHH").endObject().endObject()
+ .endObject().endObject()));
+ ensureGreen("test");
+
+ XContentBuilder document = jsonBuilder()
+ .startObject()
+ .field("date_field", "2015060210")
+ .endObject();
+ assertThat(client().prepareIndex("test", "type1").setSource(document).get().isCreated(), is(true));
+
+ document = jsonBuilder()
+ .startObject()
+ .field("date_field", "2014060210")
+ .endObject();
+ assertThat(client().prepareIndex("test", "type1").setSource(document).get().isCreated(), is(true));
+
+ // this is a timestamp in 2015 and should not be returned in counting when filtering by year
+ document = jsonBuilder()
+ .startObject()
+ .field("date_field", "1433236702")
+ .endObject();
+ assertThat(client().prepareIndex("test", "type1").setSource(document).get().isCreated(), is(true));
+
+ refresh();
+
+ assertHitCount(client().prepareCount("test").get(), 3);
+
+ CountResponse countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("date_field").from("2015010100").to("2015123123")).get();
+ assertHitCount(countResponse, 1);
+
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("date_field").from(2015010100).to(2015123123)).get();
+ assertHitCount(countResponse, 1);
+
+ countResponse = client().prepareCount("test").setQuery(QueryBuilders.rangeQuery("date_field").from(2015010100).to(2015123123).timeZone("UTC")).get();
+ assertHitCount(countResponse, 1);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java b/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
new file mode 100644
index 0000000000..2ba71a4012
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/deps/jackson/JacksonLocationTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.jackson;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class JacksonLocationTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLocationExtraction() throws IOException {
+ // {
+ // "index" : "test",
+ // "source" : {
+ // value : "something"
+ // }
+ // }
+ BytesStreamOutput os = new BytesStreamOutput();
+ JsonGenerator gen = new JsonFactory().createGenerator(os);
+ gen.writeStartObject();
+
+ gen.writeStringField("index", "test");
+
+ gen.writeFieldName("source");
+ gen.writeStartObject();
+ gen.writeStringField("value", "something");
+ gen.writeEndObject();
+
+ gen.writeEndObject();
+
+ gen.close();
+
+ byte[] data = os.bytes().toBytes();
+ JsonParser parser = new JsonFactory().createParser(data);
+
+ assertThat(parser.nextToken(), equalTo(JsonToken.START_OBJECT));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "index"
+ assertThat(parser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+ assertThat(parser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "source"
+// JsonLocation location1 = parser.getCurrentLocation();
+// parser.skipChildren();
+// JsonLocation location2 = parser.getCurrentLocation();
+//
+// byte[] sourceData = new byte[(int) (location2.getByteOffset() - location1.getByteOffset())];
+// System.arraycopy(data, (int) location1.getByteOffset(), sourceData, 0, sourceData.length);
+//
+// JsonParser sourceParser = new JsonFactory().createJsonParser(new FastByteArrayInputStream(sourceData));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.START_OBJECT));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.FIELD_NAME)); // "value"
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.VALUE_STRING));
+// assertThat(sourceParser.nextToken(), equalTo(JsonToken.END_OBJECT));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
new file mode 100644
index 0000000000..c35953ca21
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java
@@ -0,0 +1,290 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.joda;
+
+import org.elasticsearch.common.joda.FormatDateTimeFormatter;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.MutableDateTime;
+import org.joda.time.format.*;
+import org.junit.Test;
+
+import java.util.Date;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleJodaTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testMultiParsers() {
+ DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
+ DateTimeParser[] parsers = new DateTimeParser[3];
+ parsers[0] = DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[1] = DateTimeFormat.forPattern("MM-dd-yyyy").withZone(DateTimeZone.UTC).getParser();
+ parsers[2] = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC).getParser();
+ builder.append(DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getPrinter(), parsers);
+
+ DateTimeFormatter formatter = builder.toFormatter();
+
+ formatter.parseMillis("2009-11-15 14:12:12");
+ }
+
+ @Test
+ public void testIsoDateFormatDateTimeNoMillisUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testUpperBound() {
+ MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+
+ String value = "2000-01-01";
+ int i = formatter.parseInto(dateTime, value, 0);
+ assertThat(i, equalTo(value.length()));
+ assertThat(dateTime.toString(), equalTo("2000-01-01T23:59:59.999Z"));
+ }
+
+ @Test
+ public void testIsoDateFormatDateOptionalTimeUTC() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.001Z");
+ assertThat(millis, equalTo(1l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1Z");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00.1");
+ assertThat(millis, equalTo(100l));
+ millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+ millis = formatter.parseMillis("1970-01-01");
+ assertThat(millis, equalTo(0l));
+
+ millis = formatter.parseMillis("1970");
+ assertThat(millis, equalTo(0l));
+
+ try {
+ formatter.parseMillis("1970 kuku");
+ fail("formatting should fail");
+ } catch (IllegalArgumentException e) {
+ // all is well
+ }
+
+ // test offset in format
+ millis = formatter.parseMillis("1970-01-01T00:00:00-02:00");
+ assertThat(millis, equalTo(TimeValue.timeValueHours(2).millis()));
+ }
+
+ @Test
+ public void testIsoVsCustom() {
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ long millis = formatter.parseMillis("1970-01-01T00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC);
+ millis = formatter.parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+
+ FormatDateTimeFormatter formatter2 = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ millis = formatter2.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat(millis, equalTo(0l));
+ }
+
+ @Test
+ public void testWriteAndParse() {
+ DateTimeFormatter dateTimeWriter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
+ DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
+ Date date = new Date();
+ assertThat(formatter.parseMillis(dateTimeWriter.print(date.getTime())), equalTo(date.getTime()));
+ }
+
+ @Test
+ public void testSlashInFormat() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("MM/yyyy");
+ formatter.parser().parseMillis("01/2001");
+
+ formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ formatter.printer().print(millis);
+
+ try {
+ millis = formatter.parser().parseMillis("1970/01/01");
+ fail();
+ } catch (IllegalArgumentException e) {
+ // it really can't parse this one
+ }
+ }
+
+ @Test
+ public void testMultipleFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ long millis = formatter.parser().parseMillis("1970/01/01 00:00:00");
+ assertThat("1970/01/01 00:00:00", is(formatter.printer().print(millis)));
+ }
+
+ @Test
+ public void testMultipleDifferentFormats() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ String input = "1970/01/01 00:00:00";
+ long millis = formatter.parser().parseMillis(input);
+ assertThat(input, is(formatter.printer().print(millis)));
+
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||dateOptionalTime");
+ Joda.forPattern("dateOptionalTime||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd");
+ Joda.forPattern("yyyy/MM/dd HH:mm:ss||dateOptionalTime||yyyy/MM/dd");
+ Joda.forPattern("date_time||date_time_no_millis");
+ Joda.forPattern(" date_time || date_time_no_millis");
+ }
+
+ @Test
+ public void testInvalidPatterns() {
+ expectInvalidPattern("does_not_exist_pattern", "Invalid format: [does_not_exist_pattern]: Illegal pattern component: o");
+ expectInvalidPattern("OOOOO", "Invalid format: [OOOOO]: Illegal pattern component: OOOOO");
+ expectInvalidPattern(null, "No date pattern provided");
+ expectInvalidPattern("", "No date pattern provided");
+ expectInvalidPattern(" ", "No date pattern provided");
+ expectInvalidPattern("||date_time_no_millis", "No date pattern provided");
+ expectInvalidPattern("date_time_no_millis||", "No date pattern provided");
+ }
+
+ private void expectInvalidPattern(String pattern, String errorMessage) {
+ try {
+ Joda.forPattern(pattern);
+ fail("Pattern " + pattern + " should have thrown an exception but did not");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(errorMessage));
+ }
+ }
+
+ @Test
+ public void testRounding() {
+ long TIME = utcTimeInMillis("2009-02-03T01:01:01");
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setMillis(TIME);
+ assertThat(time.monthOfYear().roundFloor().toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.hourOfDay().roundFloor().toString(), equalTo("2009-02-03T01:00:00.000Z"));
+ time.setMillis(TIME);
+ assertThat(time.dayOfMonth().roundFloor().toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ }
+
+ @Test
+ public void testRoundingSetOnTime() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().monthOfYear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-01T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-05-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-05-01T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-05-01T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-03T00:00:00.000Z")));
+
+ time.setMillis(utcTimeInMillis("2009-02-02T23:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-02T00:00:00.000Z")));
+
+ time = new MutableDateTime(DateTimeZone.UTC);
+ time.setRounding(time.getChronology().weekOfWeekyear(), MutableDateTime.ROUND_FLOOR);
+ time.setMillis(utcTimeInMillis("2011-05-05T01:01:01"));
+ assertThat(time.toString(), equalTo("2011-05-02T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTimeInMillis("2011-05-02T00:00:00.000Z")));
+ }
+
+ @Test
+ public void testRoundingWithTimeZone() {
+ MutableDateTime time = new MutableDateTime(DateTimeZone.UTC);
+ time.setZone(DateTimeZone.forOffsetHours(-2));
+ time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC);
+ utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR);
+
+ time.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01"));
+
+ assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z"));
+ // the time is on the 2nd, and utcTime is on the 3rd, but, because time already encapsulates
+ // time zone, the millis diff is not 24, but 22 hours
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+
+ time.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01"));
+ assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00"));
+ assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z"));
+ assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis()));
+ }
+
+ @Test
+ public void testThatEpochsInSecondsCanBeParsed() {
+ boolean parseMilliSeconds = randomBoolean();
+
+ // epoch: 1433144433655 => date: Mon Jun 1 09:40:33.655 CEST 2015
+ FormatDateTimeFormatter formatter = Joda.forPattern(parseMilliSeconds ? "epoch_millis" : "epoch_second");
+ DateTime dateTime = formatter.parser().parseDateTime(parseMilliSeconds ? "1433144433655" : "1433144433");
+
+ assertThat(dateTime.getYear(), is(2015));
+ assertThat(dateTime.getDayOfMonth(), is(1));
+ assertThat(dateTime.getMonthOfYear(), is(6));
+ assertThat(dateTime.getHourOfDay(), is(7)); // utc timezone, +2 offset due to CEST
+ assertThat(dateTime.getMinuteOfHour(), is(40));
+ assertThat(dateTime.getSecondOfMinute(), is(33));
+
+ if (parseMilliSeconds) {
+ assertThat(dateTime.getMillisOfSecond(), is(655));
+ } else {
+ assertThat(dateTime.getMillisOfSecond(), is(0));
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testForInvalidDatesInEpochSecond() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("epoch_second");
+ formatter.parser().parseDateTime(randomFrom("invalid date", "12345678901", "12345678901234"));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testForInvalidDatesInEpochMillis() {
+ FormatDateTimeFormatter formatter = Joda.forPattern("epoch_millis");
+ formatter.parser().parseDateTime(randomFrom("invalid date", "12345678901234"));
+ }
+
+ private long utcTimeInMillis(String time) {
+ return ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC).parseMillis(time);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
new file mode 100644
index 0000000000..7158d2e526
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleLuceneTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSortValues() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+ for (int i = 0; i < 10; i++) {
+ Document document = new Document();
+ String text = new String(new char[]{(char) (97 + i), (char) (97 + i)});
+ document.add(new TextField("str", text, Field.Store.YES));
+ document.add(new SortedDocValuesField("str", new BytesRef(text)));
+ indexWriter.addDocument(document);
+ }
+ IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(indexWriter, true));
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, 10, new Sort(new SortField("str", SortField.Type.STRING)));
+ for (int i = 0; i < 10; i++) {
+ FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i];
+ assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[]{(char) (97 + i), (char) (97 + i)}))));
+ }
+ }
+
+ @Test
+ public void testAddDocAfterPrepareCommit() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+ assertThat(reader.numDocs(), equalTo(1));
+
+ indexWriter.prepareCommit();
+ // Returns null b/c no changes.
+ assertThat(DirectoryReader.openIfChanged(reader), equalTo(null));
+
+ document = new Document();
+ document.add(new TextField("_id", "2", Field.Store.YES));
+ indexWriter.addDocument(document);
+ indexWriter.commit();
+ reader = DirectoryReader.openIfChanged(reader);
+ assertThat(reader.numDocs(), equalTo(2));
+ }
+
+ @Test
+ public void testSimpleNumericOps() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new IntField("test", 2, IntField.TYPE_STORED));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ Document doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ IndexableField f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ BytesRefBuilder bytes = new BytesRefBuilder();
+ NumericUtils.intToPrefixCoded(2, 0, bytes);
+ topDocs = searcher.search(new TermQuery(new Term("test", bytes.get())), 1);
+ doc = searcher.doc(topDocs.scoreDocs[0].doc);
+ f = doc.getField("test");
+ assertThat(f.stringValue(), equalTo("2"));
+
+ indexWriter.close();
+ }
+
+ /**
+ * Here, we verify that the order that we add fields to a document counts, and not the lexi order
+ * of the field. This means that heavily accessed fields that use field selector should be added
+ * first (with load and break).
+ */
+ @Test
+ public void testOrdering() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new TextField("#id", "1", Field.Store.YES));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+ final ArrayList<String> fieldsOrder = new ArrayList<>();
+ searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() {
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ fieldsOrder.add(fieldInfo.name);
+ return Status.YES;
+ }
+ });
+
+ assertThat(fieldsOrder.size(), equalTo(2));
+ assertThat(fieldsOrder.get(0), equalTo("_id"));
+ assertThat(fieldsOrder.get(1), equalTo("#id"));
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testBoost() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ for (int i = 0; i < 100; i++) {
+ // TODO (just setting the boost value does not seem to work...)
+ StringBuilder value = new StringBuilder().append("value");
+ for (int j = 0; j < i; j++) {
+ value.append(" ").append("value");
+ }
+ Document document = new Document();
+ TextField textField = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ textField = new TextField("value", value.toString(), Field.Store.YES);
+ textField.setBoost(i);
+ document.add(textField);
+ indexWriter.addDocument(document);
+ }
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TermQuery query = new TermQuery(new Term("value", "value"));
+ TopDocs topDocs = searcher.search(query, 100);
+ assertThat(100, equalTo(topDocs.totalHits));
+ for (int i = 0; i < topDocs.scoreDocs.length; i++) {
+ Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
+// System.out.println(doc.get("id") + ": " + searcher.explain(query, topDocs.scoreDocs[i].doc));
+ assertThat(doc.get("_id"), equalTo(Integer.toString(100 - i - 1)));
+ }
+
+ indexWriter.close();
+ }
+
+ @Test
+ public void testNRTSearchOnClosedWriter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+ DirectoryReader reader = DirectoryReader.open(indexWriter, true);
+
+ for (int i = 0; i < 100; i++) {
+ Document document = new Document();
+ TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES);
+ field.setBoost(i);
+ document.add(field);
+ indexWriter.addDocument(document);
+ }
+ reader = refreshReader(reader);
+
+ indexWriter.close();
+
+ TermsEnum termDocs = SlowCompositeReaderWrapper.wrap(reader).terms("_id").iterator();
+ termDocs.next();
+ }
+
+ /**
+ * A test just to verify that term freqs are not stored for numeric fields. <tt>int1</tt> is not storing termFreq
+ * and <tt>int2</tt> does.
+ */
+ @Test
+ public void testNumericTermDocsFreqs() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document doc = new Document();
+ FieldType type = IntField.TYPE_NOT_STORED;
+ IntField field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ type = new FieldType(IntField.TYPE_NOT_STORED);
+ type.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+ type.freeze();
+
+ field = new IntField("int1", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ field = new IntField("int2", 1, type);
+ doc.add(field);
+
+ indexWriter.addDocument(doc);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ LeafReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);
+
+ Terms terms = atomicReader.terms("int1");
+ TermsEnum termsEnum = terms.iterator();
+ termsEnum.next();
+
+ PostingsEnum termDocs = termsEnum.postings(atomicReader.getLiveDocs(), null);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(1));
+
+ terms = atomicReader.terms("int2");
+ termsEnum = terms.iterator();
+ termsEnum.next();
+ termDocs = termsEnum.postings(atomicReader.getLiveDocs(), termDocs);
+ assertThat(termDocs.nextDoc(), equalTo(0));
+ assertThat(termDocs.docID(), equalTo(0));
+ assertThat(termDocs.freq(), equalTo(2));
+
+ reader.close();
+ indexWriter.close();
+ }
+
+ private DirectoryReader refreshReader(DirectoryReader reader) throws IOException {
+ DirectoryReader oldReader = reader;
+ reader = DirectoryReader.openIfChanged(reader);
+ if (reader != oldReader) {
+ oldReader.close();
+ }
+ return reader;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
new file mode 100644
index 0000000000..f3b2944a49
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.deps.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
+import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class VectorHighlighterTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testVectorHighlighter() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ assertThat(fragment, equalTo("the big <b>bad</b> dog"));
+ }
+
+ @Test
+ public void testVectorHighlighterPrefixQuery() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+
+ PrefixQuery prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_FILTER_REWRITE.getClass().getName()));
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(prefixQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+
+ prefixQuery.setRewriteMethod(PrefixQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+ Query rewriteQuery = prefixQuery.rewrite(reader);
+ fragment = highlighter.getBestFragment(highlighter.getFieldQuery(rewriteQuery),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+
+ // now check with the custom field query
+ prefixQuery = new PrefixQuery(new Term("content", "ba"));
+ assertThat(prefixQuery.getRewriteMethod().getClass().getName(), equalTo(PrefixQuery.CONSTANT_SCORE_FILTER_REWRITE.getClass().getName()));
+ fragment = highlighter.getBestFragment(new CustomFieldQuery(prefixQuery, reader, highlighter),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, notNullValue());
+ }
+
+ @Test
+ public void testVectorHighlighterNoStore() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+
+ @Test
+ public void testVectorHighlighterNoTermVector() throws Exception {
+ Directory dir = new RAMDirectory();
+ IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ Document document = new Document();
+ document.add(new TextField("_id", "1", Field.Store.YES));
+ document.add(new Field("content", "the big bad dog", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
+ indexWriter.addDocument(document);
+
+ IndexReader reader = DirectoryReader.open(indexWriter, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+ TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
+
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ FastVectorHighlighter highlighter = new FastVectorHighlighter();
+ String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
+ reader, topDocs.scoreDocs[0].doc, "content", 30);
+ assertThat(fragment, nullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
new file mode 100644
index 0000000000..a4de39bc97
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/BlockingClusterStatePublishResponseHandlerTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.discovery;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CyclicBarrier;
+
+import static org.hamcrest.Matchers.*;
+
+public class BlockingClusterStatePublishResponseHandlerTests extends ElasticsearchTestCase { // concurrency test for BlockingClusterStatePublishResponseHandler
+
+    static private class PublishResponder extends AbstractRunnable { // acks (or fails) publication for one node once the barrier releases
+
+        final boolean fail; // when true, report a failure for this node instead of a success
+        final DiscoveryNode node;
+        final CyclicBarrier barrier; // lines all responders up so they hit the handler concurrently
+        final ESLogger logger;
+        final BlockingClusterStatePublishResponseHandler handler; // shared handler under test
+
+        public PublishResponder(boolean fail, DiscoveryNode node, CyclicBarrier barrier, ESLogger logger, BlockingClusterStatePublishResponseHandler handler) {
+            this.fail = fail;
+
+            this.node = node;
+            this.barrier = barrier;
+            this.logger = logger;
+            this.handler = handler;
+        }
+
+        @Override
+        public void onFailure(Throwable t) {
+            logger.error("unexpected error", t); // barrier breakage or handler errors are unexpected here; log and let the test's assertions fail
+        }
+
+        @Override
+        protected void doRun() throws Exception {
+            barrier.await(); // block until every responder in this round is ready, maximizing contention on the handler
+            if (fail) {
+                handler.onFailure(node, new Exception("bla")); // a failure must count toward completion just like a response
+            } else {
+                handler.onResponse(node);
+            }
+        }
+    }
+
+    public void testConcurrentAccess() throws InterruptedException { // responds for a subset of nodes, expects a timeout, then completes the rest
+        int nodeCount = scaledRandomIntBetween(10, 20);
+        DiscoveryNode[] allNodes = new DiscoveryNode[nodeCount];
+        for (int i = 0; i < nodeCount; i++) {
+            DiscoveryNode node = new DiscoveryNode("node_" + i, DummyTransportAddress.INSTANCE, Version.CURRENT);
+            allNodes[i] = node;
+        }
+
+        BlockingClusterStatePublishResponseHandler handler = new BlockingClusterStatePublishResponseHandler(new HashSet<>(Arrays.asList(allNodes)));
+
+        int firstRound = randomIntBetween(5, nodeCount - 1); // deliberately fewer than nodeCount, so the handler must still be pending afterwards
+        Thread[] threads = new Thread[firstRound];
+        CyclicBarrier barrier = new CyclicBarrier(firstRound);
+        Set<DiscoveryNode> completedNodes = new HashSet<>();
+        for (int i = 0; i < threads.length; i++) {
+            completedNodes.add(allNodes[i]);
+            threads[i] = new Thread(new PublishResponder(randomBoolean(), allNodes[i], barrier, logger, handler)); // randomly mix successes and failures
+            threads[i].start();
+        }
+        // wait on the threads to finish
+        for (Thread t : threads) {
+            t.join();
+        }
+        // verify that the publisher times out
+        assertFalse("expected handler wait to timeout as not all nodes responded", handler.awaitAllNodes(new TimeValue(10)));
+        Set<DiscoveryNode> pendingNodes = new HashSet<>(Arrays.asList(handler.pendingNodes()));
+        assertThat(completedNodes, not(contains(pendingNodes.toArray(new DiscoveryNode[0])))); // no node may be both completed and pending
+        assertThat(completedNodes.size() + pendingNodes.size(), equalTo(allNodes.length)); // completed + pending must partition the node set
+        int secondRound = allNodes.length - firstRound;
+        threads = new Thread[secondRound];
+        barrier = new CyclicBarrier(secondRound);
+
+        for (int i = 0; i < threads.length; i++) {
+            threads[i] = new Thread(new PublishResponder(randomBoolean(), allNodes[firstRound + i], barrier, logger, handler)); // respond for the remaining nodes
+            threads[i].start();
+        }
+        // wait on the threads to finish
+        for (Thread t : threads) {
+            t.join();
+        }
+        assertTrue("expected handler not to timeout as all nodes responded", handler.awaitAllNodes(new TimeValue(10)));
+        assertThat(handler.pendingNodes(), arrayWithSize(0)); // every node accounted for; nothing left pending
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java
new file mode 100644
index 0000000000..6792298bab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsTests.java
@@ -0,0 +1,1083 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.DjbHashFunction;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.zen.ZenDiscovery;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.fd.FaultDetection;
+import org.elasticsearch.discovery.zen.membership.MembershipAction;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.discovery.zen.ping.ZenPingService;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
+import org.elasticsearch.test.disruption.*;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@LuceneTestCase.Slow
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
+public class DiscoveryWithServiceDisruptionsTests extends ElasticsearchIntegrationTest {
+
+ private static final TimeValue DISRUPTION_HEALING_OVERHEAD = TimeValue.timeValueSeconds(40); // we use 30s as timeout in many places.
+
+ private ClusterDiscoveryConfiguration discoveryConfig;
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return discoveryConfig.node(nodeOrdinal);
+ }
+
+ @Before
+ public void clearConfig() {
+ discoveryConfig = null;
+ }
+
+ @Override
+ protected int numberOfShards() {
+ return 3;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 1;
+ }
+
+ private List<String> startCluster(int numberOfNodes) throws ExecutionException, InterruptedException {
+ return startCluster(numberOfNodes, -1);
+ }
+
+ private List<String> startCluster(int numberOfNodes, int minimumMasterNode) throws ExecutionException, InterruptedException {
+ configureCluster(numberOfNodes, minimumMasterNode);
+ List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
+ ensureStableCluster(numberOfNodes);
+
+ // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results
+ for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) {
+ for (ZenPing zenPing : pingService.zenPings()) {
+ if (zenPing instanceof UnicastZenPing) {
+ ((UnicastZenPing) zenPing).clearTemporalResponses();
+ }
+ }
+ }
+ return nodes;
+ }
+
+
+ private List<String> startUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException {
+ configureUnicastCluster(numberOfNodes, unicastHostsOrdinals, minimumMasterNode);
+ List<String> nodes = internalCluster().startNodesAsync(numberOfNodes).get();
+ ensureStableCluster(numberOfNodes);
+
+ // TODO: this is a temporary solution so that nodes will not base their reaction to a partition based on previous successful results
+ for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) {
+ for (ZenPing zenPing : pingService.zenPings()) {
+ if (zenPing instanceof UnicastZenPing) {
+ ((UnicastZenPing) zenPing).clearTemporalResponses();
+ }
+ }
+ }
+ return nodes;
+ }
+
+ final static Settings DEFAULT_SETTINGS = Settings.builder()
+ .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly
+ .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly
+            .put("discovery.zen.join_timeout", "10s") // still long to induce failures but not so long that the test times out
+ .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly
+ .put("http.enabled", false) // just to make test quicker
+            .put("gateway.local.list_timeout", "10s") // still long to induce failures but not so long that the test times out
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .build();
+
+ private void configureCluster(int numberOfNodes, int minimumMasterNode) throws ExecutionException, InterruptedException {
+ if (randomBoolean()) {
+ configureMulticastCluster(numberOfNodes, minimumMasterNode);
+ } else {
+ configureUnicastCluster(numberOfNodes, null, minimumMasterNode);
+ }
+
+ }
+
+ private void configureMulticastCluster(int numberOfNodes, int minimumMasterNode) throws ExecutionException, InterruptedException {
+ if (minimumMasterNode < 0) {
+ minimumMasterNode = numberOfNodes / 2 + 1;
+ }
+        // TODO: Rarely use default settings for some of these
+ Settings settings = Settings.builder()
+ .put(DEFAULT_SETTINGS)
+ .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, minimumMasterNode)
+ .build();
+
+ if (discoveryConfig == null) {
+ discoveryConfig = new ClusterDiscoveryConfiguration(numberOfNodes, settings);
+ }
+ }
+
+ private void configureUnicastCluster(int numberOfNodes, @Nullable int[] unicastHostsOrdinals, int minimumMasterNode) throws ExecutionException, InterruptedException {
+ if (minimumMasterNode < 0) {
+ minimumMasterNode = numberOfNodes / 2 + 1;
+ }
+        // TODO: Rarely use default settings for some of these
+ Settings nodeSettings = Settings.builder()
+ .put(DEFAULT_SETTINGS)
+ .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, minimumMasterNode)
+ .build();
+
+ if (discoveryConfig == null) {
+ if (unicastHostsOrdinals == null) {
+ discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(numberOfNodes, nodeSettings);
+ } else {
+ discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(numberOfNodes, nodeSettings, unicastHostsOrdinals);
+ }
+ }
+ }
+
+
+ /**
+ * Test that no split brain occurs under partial network partition. See https://github.com/elasticsearch/elasticsearch/issues/2488
+ *
+ * @throws Exception
+ */
+ @Test
+ public void failWithMinimumMasterNodesConfigured() throws Exception {
+
+ List<String> nodes = startCluster(3);
+
+ // Figure out what is the elected master node
+ final String masterNode = internalCluster().getMasterName();
+ logger.info("---> legit elected master node=" + masterNode);
+
+ // Pick a node that isn't the elected master.
+ Set<String> nonMasters = new HashSet<>(nodes);
+ nonMasters.remove(masterNode);
+ final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY));
+
+
+ // Simulate a network issue between the unlucky node and elected master node in both directions.
+
+ NetworkDisconnectPartition networkDisconnect = new NetworkDisconnectPartition(masterNode, unluckyNode, getRandom());
+ setDisruptionScheme(networkDisconnect);
+ networkDisconnect.startDisrupting();
+
+        // Wait until the elected master has removed the unlucky node...
+ ensureStableCluster(2, masterNode);
+
+ // The unlucky node must report *no* master node, since it can't connect to master and in fact it should
+ // continuously ping until network failures have been resolved. However
+        // It may take a bit before the node detects it has been cut off from the elected master
+ assertNoMaster(unluckyNode);
+
+ networkDisconnect.stopDisrupting();
+
+ // Wait until the master node sees all 3 nodes again.
+ ensureStableCluster(3);
+
+ // The elected master shouldn't have changed, since the unlucky node never could have elected himself as
+ // master since m_m_n of 2 could never be satisfied.
+ assertMaster(masterNode, nodes);
+ }
+
+
+ /** Verify that nodes fault detection works after master (re) election */
+ @Test
+ public void testNodesFDAfterMasterReelection() throws Exception {
+ startCluster(3);
+
+ logger.info("stopping current master");
+ internalCluster().stopCurrentMasterNode();
+
+ ensureStableCluster(2);
+
+ String master = internalCluster().getMasterName();
+ String nonMaster = null;
+ for (String node : internalCluster().getNodeNames()) {
+ if (!node.equals(master)) {
+ nonMaster = node;
+ }
+ }
+
+ logger.info("--> isolating [{}]", nonMaster);
+ addRandomIsolation(nonMaster).startDisrupting();
+
+ logger.info("--> waiting for master to remove it");
+ ensureStableCluster(1, master);
+ }
+
+ /**
+     * Verify that the proper block is applied when nodes lose their master
+ */
+ @Test
+ public void testVerifyApiBlocksDuringPartition() throws Exception {
+ startCluster(3);
+
+ // Makes sure that the get request can be executed on each node locally:
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
+ ));
+
+ // Everything is stable now, it is now time to simulate evil...
+ // but first make sure we have no initializing shards and all is green
+ // (waiting for green here, because indexing / search in a yellow index is fine as long as no other nodes go down)
+ ensureGreen("test");
+
+ NetworkPartition networkPartition = addRandomPartition();
+
+ final String isolatedNode = networkPartition.getMinoritySide().get(0);
+ final String nonIsolatedNode = networkPartition.getMajoritySide().get(0);
+
+ // Simulate a network issue between the unlucky node and the rest of the cluster.
+ networkPartition.startDisrupting();
+
+
+ // The unlucky node must report *no* master node, since it can't connect to master and in fact it should
+ // continuously ping until network failures have been resolved. However
+        // It may take a bit before the node detects it has been cut off from the elected master
+ logger.info("waiting for isolated node [{}] to have no master", isolatedNode);
+ assertNoMaster(isolatedNode, DiscoverySettings.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(10));
+
+
+ logger.info("wait until elected master has been removed and a new 2 node cluster was from (via [{}])", isolatedNode);
+ ensureStableCluster(2, nonIsolatedNode);
+
+ for (String node : networkPartition.getMajoritySide()) {
+ ClusterState nodeState = getNodeClusterState(node);
+ boolean success = true;
+ if (nodeState.nodes().getMasterNode() == null) {
+ success = false;
+ }
+ if (!nodeState.blocks().global().isEmpty()) {
+ success = false;
+ }
+ if (!success) {
+ fail("node [" + node + "] has no master or has blocks, despite of being on the right side of the partition. State dump:\n"
+ + nodeState.prettyPrint());
+ }
+ }
+
+
+ networkPartition.stopDisrupting();
+
+        // Wait until the master node sees all 3 nodes again.
+ ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkPartition.expectedTimeToHeal().millis()));
+
+ logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK, "all");
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK, "all"))
+ .get();
+
+ networkPartition.startDisrupting();
+
+
+ // The unlucky node must report *no* master node, since it can't connect to master and in fact it should
+ // continuously ping until network failures have been resolved. However
+        // It may take a bit before the node detects it has been cut off from the elected master
+ logger.info("waiting for isolated node [{}] to have no master", isolatedNode);
+ assertNoMaster(isolatedNode, DiscoverySettings.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(10));
+
+ // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node
+ // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause
+ // the test to fail due to unfreed resources
+ ensureStableCluster(2, nonIsolatedNode);
+
+ }
+
+ /**
+ * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition
+ * and verifies that all node agree on the new cluster state
+ */
+ @Test
+ public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception {
+ final List<String> nodes = startCluster(3);
+
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
+ ));
+
+ ensureGreen();
+ String isolatedNode = internalCluster().getMasterName();
+ NetworkPartition networkPartition = addRandomIsolation(isolatedNode);
+ networkPartition.startDisrupting();
+
+ String nonIsolatedNode = networkPartition.getMajoritySide().get(0);
+
+ // make sure cluster reforms
+ ensureStableCluster(2, nonIsolatedNode);
+
+        // make sure the isolated node picks up on things.
+ assertNoMaster(isolatedNode, TimeValue.timeValueSeconds(40));
+
+ // restore isolation
+ networkPartition.stopDisrupting();
+
+ ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkPartition.expectedTimeToHeal().millis()));
+
+ logger.info("issue a reroute");
+ // trigger a reroute now, instead of waiting for the background reroute of RerouteService
+ assertAcked(client().admin().cluster().prepareReroute());
+ // and wait for it to finish and for the cluster to stabilize
+ ensureGreen("test");
+
+ // verify all cluster states are the same
+ ClusterState state = null;
+ for (String node : nodes) {
+ ClusterState nodeState = getNodeClusterState(node);
+ if (state == null) {
+ state = nodeState;
+ continue;
+ }
+ // assert nodes are identical
+ try {
+ assertEquals("unequal versions", state.version(), nodeState.version());
+ assertEquals("unequal node count", state.nodes().size(), nodeState.nodes().size());
+ assertEquals("different masters ", state.nodes().masterNodeId(), nodeState.nodes().masterNodeId());
+ assertEquals("different meta data version", state.metaData().version(), nodeState.metaData().version());
+ if (!state.routingTable().prettyPrint().equals(nodeState.routingTable().prettyPrint())) {
+ fail("different routing");
+ }
+ } catch (AssertionError t) {
+ fail("failed comparing cluster state: " + t.getMessage() + "\n" +
+ "--- cluster state of node [" + nodes.get(0) + "]: ---\n" + state.prettyPrint() +
+ "\n--- cluster state [" + node + "]: ---\n" + nodeState.prettyPrint());
+ }
+
+ }
+ }
+
+ /**
+     * Test that we do not lose documents whose indexing request was successful, under a randomly selected disruption scheme
+ * We also collect & report the type of indexing failures that occur.
+ *
+ * This test is a superset of tests run in the Jepsen test suite, with the exception of versioned updates
+ */
+ @Test
+ // NOTE: if you remove the awaitFix, make sure to port the test to the 1.x branch
+ @LuceneTestCase.AwaitsFix(bugUrl = "needs some more work to stabilize")
+ @TestLogging("action.index:TRACE,action.get:TRACE,discovery:TRACE,cluster.service:TRACE,indices.recovery:TRACE,indices.cluster:TRACE")
+ public void testAckedIndexing() throws Exception {
+        // TODO: add node count randomization
+ final List<String> nodes = startCluster(3);
+
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2))
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(2))
+ ));
+ ensureGreen();
+
+ ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme();
+ logger.info("disruption scheme [{}] added", disruptionScheme);
+
+ final ConcurrentHashMap<String, String> ackedDocs = new ConcurrentHashMap<>(); // id -> node sent.
+
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ List<Thread> indexers = new ArrayList<>(nodes.size());
+ List<Semaphore> semaphores = new ArrayList<>(nodes.size());
+ final AtomicInteger idGenerator = new AtomicInteger(0);
+ final AtomicReference<CountDownLatch> countDownLatchRef = new AtomicReference<>();
+ final List<Exception> exceptedExceptions = Collections.synchronizedList(new ArrayList<Exception>());
+
+ logger.info("starting indexers");
+ try {
+ for (final String node : nodes) {
+ final Semaphore semaphore = new Semaphore(0);
+ semaphores.add(semaphore);
+ final Client client = client(node);
+ final String name = "indexer_" + indexers.size();
+ final int numPrimaries = getNumShards("test").numPrimaries;
+ Thread thread = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ String id = null;
+ try {
+ if (!semaphore.tryAcquire(10, TimeUnit.SECONDS)) {
+ continue;
+ }
+ logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits());
+ try {
+ id = Integer.toString(idGenerator.incrementAndGet());
+ int shard = ((InternalTestCluster) cluster()).getInstance(DjbHashFunction.class).hash(id) % numPrimaries;
+ logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
+ IndexResponse response = client.prepareIndex("test", "type", id).setSource("{}").setTimeout("1s").get();
+ assertThat(response.getVersion(), equalTo(1l));
+ ackedDocs.put(id, node);
+ logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
+ } catch (ElasticsearchException e) {
+ exceptedExceptions.add(e);
+ logger.trace("[{}] failed id [{}] through node [{}]", e, name, id, node);
+ } finally {
+ countDownLatchRef.get().countDown();
+ logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount());
+ }
+ } catch (InterruptedException e) {
+ // fine - semaphore interrupt
+ } catch (Throwable t) {
+ logger.info("unexpected exception in background thread of [{}]", t, node);
+ }
+ }
+ }
+ });
+
+ thread.setName(name);
+ thread.setDaemon(true);
+ thread.start();
+ indexers.add(thread);
+ }
+
+ int docsPerIndexer = randomInt(3);
+ logger.info("indexing " + docsPerIndexer + " docs per indexer before partition");
+ countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
+ for (Semaphore semaphore : semaphores) {
+ semaphore.release(docsPerIndexer);
+ }
+ assertTrue(countDownLatchRef.get().await(1, TimeUnit.MINUTES));
+
+ for (int iter = 1 + randomInt(2); iter > 0; iter--) {
+ logger.info("starting disruptions & indexing (iteration [{}])", iter);
+ disruptionScheme.startDisrupting();
+
+ docsPerIndexer = 1 + randomInt(5);
+ logger.info("indexing " + docsPerIndexer + " docs per indexer during partition");
+ countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
+ Collections.shuffle(semaphores);
+ for (Semaphore semaphore : semaphores) {
+ assertThat(semaphore.availablePermits(), equalTo(0));
+ semaphore.release(docsPerIndexer);
+ }
+ assertTrue(countDownLatchRef.get().await(60000 + disruptionScheme.expectedTimeToHeal().millis() * (docsPerIndexer * indexers.size()), TimeUnit.MILLISECONDS));
+
+ logger.info("stopping disruption");
+ disruptionScheme.stopDisrupting();
+ ensureStableCluster(3, TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() + DISRUPTION_HEALING_OVERHEAD.millis()));
+ ensureGreen("test");
+
+ logger.info("validating successful docs");
+ for (String node : nodes) {
+ try {
+ logger.debug("validating through node [{}]", node);
+ for (String id : ackedDocs.keySet()) {
+ assertTrue("doc [" + id + "] indexed via node [" + ackedDocs.get(id) + "] not found",
+ client(node).prepareGet("test", "type", id).setPreference("_local").get().isExists());
+ }
+ } catch (AssertionError e) {
+ throw new AssertionError(e.getMessage() + " (checked via node [" + node + "]", e);
+ }
+ }
+
+ logger.info("done validating (iteration [{}])", iter);
+ }
+ } finally {
+ if (exceptedExceptions.size() > 0) {
+ StringBuilder sb = new StringBuilder("Indexing exceptions during disruption:");
+ for (Exception e : exceptedExceptions) {
+ sb.append("\n").append(e.getMessage());
+ }
+ logger.debug(sb.toString());
+ }
+ logger.info("shutting down indexers");
+ stop.set(true);
+ for (Thread indexer : indexers) {
+ indexer.interrupt();
+ indexer.join(60000);
+ }
+ }
+ }
+
+ /**
+ * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one
+ */
+ @Test
+ public void testMasterNodeGCs() throws Exception {
+        // TODO: on mac OS multicast threads are shared between nodes and therefore we can't simulate GC and stop pinging for just one node
+ // find a way to block thread creation in the generic thread pool to avoid this.
+ List<String> nodes = startUnicastCluster(3, null, -1);
+
+ String oldMasterNode = internalCluster().getMasterName();
+ // a very long GC, but it's OK as we remove the disruption when it has had an effect
+ SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(oldMasterNode, getRandom(), 100, 200, 30000, 60000);
+ internalCluster().setDisruptionScheme(masterNodeDisruption);
+ masterNodeDisruption.startDisrupting();
+
+ Set<String> oldNonMasterNodesSet = new HashSet<>(nodes);
+ oldNonMasterNodesSet.remove(oldMasterNode);
+
+ List<String> oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet);
+
+ logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode);
+ for (String node : oldNonMasterNodesSet) {
+ assertDifferentMaster(node, oldMasterNode);
+ }
+
+ logger.info("waiting for nodes to elect a new master");
+ ensureStableCluster(2, oldNonMasterNodes.get(0));
+
+ logger.info("waiting for any pinging to stop");
+ assertDiscoveryCompleted(oldNonMasterNodes);
+
+ // restore GC
+ masterNodeDisruption.stopDisrupting();
+ ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()),
+ oldNonMasterNodes.get(0));
+
+ // make sure all nodes agree on master
+ String newMaster = internalCluster().getMasterName();
+ assertThat(newMaster, not(equalTo(oldMasterNode)));
+ assertMaster(newMaster, nodes);
+ }
+
+ /**
+ * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes
+ * that already are following another elected master node. These nodes should reject this cluster state and prevent
+ * them from following the stale master.
+ */
+ @Test
+ public void testStaleMasterNotHijackingMajority() throws Exception {
+        // TODO: on mac OS multicast threads are shared between nodes and therefore we can't simulate GC and stop pinging for just one node
+ // find a way to block thread creation in the generic thread pool to avoid this.
+ // 3 node cluster with unicast discovery and minimum_master_nodes set to 2:
+ final List<String> nodes = startUnicastCluster(3, null, 2);
+
+ // Save the current master node as old master node, because that node will get frozen
+ final String oldMasterNode = internalCluster().getMasterName();
+ for (String node : nodes) {
+ ensureStableCluster(3, node);
+ }
+ assertMaster(oldMasterNode, nodes);
+
+ // Simulating a painful gc by suspending all threads for a long time on the current elected master node.
+ SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(getRandom(), oldMasterNode);
+
+ // Save the majority side
+ final List<String> majoritySide = new ArrayList<>(nodes);
+ majoritySide.remove(oldMasterNode);
+
+ // Keeps track of the previous and current master when a master node transition took place on each node on the majority side:
+ final Map<String, List<Tuple<String, String>>> masters = Collections.synchronizedMap(new HashMap<String, List<Tuple<String, String>>>());
+ for (final String node : majoritySide) {
+ masters.put(node, new ArrayList<Tuple<String, String>>());
+ internalCluster().getInstance(ClusterService.class, node).add(new ClusterStateListener() {
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode();
+ DiscoveryNode currentMaster = event.state().nodes().getMasterNode();
+ if (!Objects.equals(previousMaster, currentMaster)) {
+ logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState());
+ String previousMasterNodeName = previousMaster != null ? previousMaster.name() : null;
+ String currentMasterNodeName = currentMaster != null ? currentMaster.name() : null;
+ masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName));
+ }
+ }
+ });
+ }
+
+ final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1);
+ internalCluster().getInstance(ClusterService.class, oldMasterNode).add(new ClusterStateListener() {
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (event.state().nodes().masterNodeId() == null) {
+ oldMasterNodeSteppedDown.countDown();
+ }
+ }
+ });
+
+ internalCluster().setDisruptionScheme(masterNodeDisruption);
+ logger.info("freezing node [{}]", oldMasterNode);
+ masterNodeDisruption.startDisrupting();
+
+ // Wait for the majority side to get stable
+ assertDifferentMaster(majoritySide.get(0), oldMasterNode);
+ assertDifferentMaster(majoritySide.get(1), oldMasterNode);
+ assertDiscoveryCompleted(majoritySide);
+
+ // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed,
+ // but will be queued and once the old master node un-freezes it gets executed.
+ // The old master node will send this update + the cluster state where he is flagged as master to the other
+ // nodes that follow the new master. These nodes should ignore this update.
+ internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ return ClusterState.builder(currentState).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failure [{}]", t, source);
+ }
+ });
+
+ // Save the new elected master node
+ final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0));
+ logger.info("new detected master node [{}]", newMasterNode);
+
+ // Stop disruption
+ logger.info("Unfreeze node [{}]", oldMasterNode);
+ masterNodeDisruption.stopDisrupting();
+
+ oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS);
+ // Make sure that the end state is consistent on all nodes:
+ assertDiscoveryCompleted(nodes);
+ // Use assertBusy(...) because the unfrozen node may take a while to actually join the cluster.
+ // The assertDiscoveryCompleted(...) can't know if all nodes have the old master node in all of the local cluster states
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertMaster(newMasterNode, nodes);
+ }
+ });
+
+
+ assertThat(masters.size(), equalTo(2));
+ for (Map.Entry<String, List<Tuple<String, String>>> entry : masters.entrySet()) {
+ String nodeName = entry.getKey();
+ List<Tuple<String, String>> recordedMasterTransition = entry.getValue();
+ assertThat("[" + nodeName + "] Each node should only record two master node transitions", recordedMasterTransition.size(), equalTo(2));
+ assertThat("[" + nodeName + "] First transition's previous master should be [null]", recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode));
+ assertThat("[" + nodeName + "] First transition's current master should be [" + newMasterNode + "]", recordedMasterTransition.get(0).v2(), nullValue());
+ assertThat("[" + nodeName + "] Second transition's previous master should be [null]", recordedMasterTransition.get(1).v1(), nullValue());
+ assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", recordedMasterTransition.get(1).v2(), equalTo(newMasterNode));
+ }
+ }
+
+ /**
+ * Test that a document which is indexed on the majority side of a partition, is available from the minory side,
+ * once the partition is healed
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testRejoinDocumentExistsInAllShardCopies() throws Exception {
+ List<String> nodes = startCluster(3);
+
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
+ )
+ .get());
+ ensureGreen("test");
+
+ nodes = new ArrayList<>(nodes);
+ Collections.shuffle(nodes, getRandom());
+ String isolatedNode = nodes.get(0);
+ String notIsolatedNode = nodes.get(1);
+
+ ServiceDisruptionScheme scheme = addRandomIsolation(isolatedNode);
+ scheme.startDisrupting();
+ ensureStableCluster(2, notIsolatedNode);
+ assertFalse(client(notIsolatedNode).admin().cluster().prepareHealth("test").setWaitForYellowStatus().get().isTimedOut());
+
+
+ IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+
+ logger.info("Verifying if document exists via node[" + notIsolatedNode + "]");
+ GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
+ .setPreference("_local")
+ .get();
+ assertThat(getResponse.isExists(), is(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
+
+ scheme.stopDisrupting();
+
+ ensureStableCluster(3);
+ ensureGreen("test");
+
+ for (String node : nodes) {
+ logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]");
+ getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId())
+ .setPreference("_local")
+ .get();
+ assertThat(getResponse.isExists(), is(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
+ }
+ }
+
    /**
     * A 4 node cluster with m_m_n set to 3 and each node has one unicast endpoint. One node partitions from the master node.
     * The temporal unicast responses are empty. When the partition is healed, the single ping response contains a master node.
     * The rejoining node should take this master node and connect.
     */
    @Test
    public void unicastSinglePingResponseContainsMaster() throws Exception {
        List<String> nodes = startUnicastCluster(4, new int[]{0}, -1);
        // Figure out what is the elected master node
        final String masterNode = internalCluster().getMasterName();
        logger.info("---> legit elected master node=" + masterNode);
        List<String> otherNodes = new ArrayList<>(nodes);
        otherNodes.remove(masterNode);
        otherNodes.remove(nodes.get(0)); // <-- Don't isolate the node that is in the unicast endpoint for all the other nodes.
        final String isolatedNode = otherNodes.get(0);

        // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
        // includes all the other nodes that have pinged it and the issue doesn't manifest
        for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) {
            for (ZenPing zenPing : pingService.zenPings()) {
                ((UnicastZenPing) zenPing).clearTemporalResponses();
            }
        }

        // Simulate a network issue between the unlucky node and the elected master node in both directions.
        NetworkDisconnectPartition networkDisconnect = new NetworkDisconnectPartition(masterNode, isolatedNode, getRandom());
        setDisruptionScheme(networkDisconnect);
        networkDisconnect.startDisrupting();
        // Wait until the elected master has removed the unlucky node...
        ensureStableCluster(3, masterNode);

        // The isolated node must report no master, so it starts pinging again
        assertNoMaster(isolatedNode);
        networkDisconnect.stopDisrupting();
        // Wait until the master node sees all 4 nodes again.
        ensureStableCluster(4);
        // The elected master shouldn't have changed, since the isolated node never could have elected itself as
        // master since m_m_n of 3 could never be satisfied.
        assertMaster(masterNode, nodes);
    }
+
    /**
     * Isolates the single unicast target node (the only node all others know how to ping) from the rest of
     * the cluster and verifies that it loses its master and that the cluster reforms once the partition heals.
     */
    @Test
    @TestLogging("discovery.zen:TRACE,cluster.service:TRACE")
    public void isolatedUnicastNodes() throws Exception {
        List<String> nodes = startUnicastCluster(4, new int[]{0}, -1);
        // Node 0 is the sole unicast endpoint that every other node pings
        final String unicastTarget = nodes.get(0);

        Set<String> unicastTargetSide = new HashSet<>();
        unicastTargetSide.add(unicastTarget);

        Set<String> restOfClusterSide = new HashSet<>();
        restOfClusterSide.addAll(nodes);
        restOfClusterSide.remove(unicastTarget);

        // Forcefully clean temporal response lists on all nodes. Otherwise the node in the unicast host list
        // includes all the other nodes that have pinged it and the issue doesn't manifest
        for (ZenPingService pingService : internalCluster().getInstances(ZenPingService.class)) {
            for (ZenPing zenPing : pingService.zenPings()) {
                ((UnicastZenPing) zenPing).clearTemporalResponses();
            }
        }

        // Simulate a network issue between the unicast target node and the rest of the cluster
        NetworkDisconnectPartition networkDisconnect = new NetworkDisconnectPartition(unicastTargetSide, restOfClusterSide, getRandom());
        setDisruptionScheme(networkDisconnect);
        networkDisconnect.startDisrupting();
        // Wait until the elected master has removed the isolated node...
        ensureStableCluster(3, nodes.get(1));

        // The isolated node must report no master, so it starts pinging again
        assertNoMaster(unicastTarget);
        networkDisconnect.stopDisrupting();
        // Wait until the master node sees all 4 nodes again.
        ensureStableCluster(4);
    }
+
+
    /**
     * Test cluster join when the master cannot publish cluster states: the joining node should keep
     * (re)sending join requests until publishing is unblocked, after which the cluster reforms.
     */
    @Test
    public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
        List<String> nodes = startCluster(2, 1);

        String masterNode = internalCluster().getMasterName();
        String nonMasterNode;
        if (masterNode.equals(nodes.get(0))) {
            nonMasterNode = nodes.get(1);
        } else {
            nonMasterNode = nodes.get(0);
        }

        DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonMasterNode).state().nodes();

        // Phase 1: make the non-master lose its master by cutting its outbound requests
        logger.info("blocking requests from non master [{}] to master [{}]", nonMasterNode, masterNode);
        MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode);
        nonMasterTransportService.addFailToSendNoConnectRule(discoveryNodes.masterNode());

        assertNoMaster(nonMasterNode);

        // Phase 2: block cluster state publishing master -> non-master so joins are accepted but never complete
        logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode);
        MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode);
        masterTransportService.addFailToSendNoConnectRule(discoveryNodes.localNode(), PublishClusterStateAction.ACTION_NAME);

        // Phase 3: re-allow non-master -> master traffic and count join requests; two of them prove a retry happened
        logger.info("allowing requests from non master [{}] to master [{}], waiting for two join request", nonMasterNode, masterNode);
        final CountDownLatch countDownLatch = new CountDownLatch(2);
        nonMasterTransportService.addDelegate(discoveryNodes.masterNode(), new MockTransportService.DelegateTransport(nonMasterTransportService.original()) {
            @Override
            public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
                if (action.equals(MembershipAction.DISCOVERY_JOIN_ACTION_NAME)) {
                    countDownLatch.countDown();
                }
                super.sendRequest(node, requestId, action, request, options);
            }
        });

        countDownLatch.await();

        logger.info("waiting for cluster to reform");
        masterTransportService.clearRule(discoveryNodes.localNode());
        nonMasterTransportService.clearRule(discoveryNodes.masterNode());

        ensureStableCluster(2);
    }
+
+
    /**
     * Starts a 3 node cluster while cluster state processing is artificially slowed down and verifies
     * the cluster still manages to form.
     */
    @Test
    public void testClusterFormingWithASlowNode() throws Exception {
        configureCluster(3, 2);

        SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(getRandom(), 0, 0, 1000, 2000);

        // don't wait for initial state, we want to add the disruption while the cluster is forming
        internalCluster().startNodesAsync(3,
                Settings.builder()
                        .put(DiscoveryService.SETTING_INITIAL_STATE_TIMEOUT, "1ms")
                        .put(DiscoverySettings.PUBLISH_TIMEOUT, "3s")
                        .build()).get();

        logger.info("applying disruption while cluster is forming ...");

        internalCluster().setDisruptionScheme(disruption);
        disruption.startDisrupting();

        ensureStableCluster(3);
    }
+
    /**
     * Adds an asymmetric break between a master and one of the nodes and makes
     * sure that the node is removed from the cluster, that the node starts pinging and that
     * the cluster reforms when healed.
     */
    @Test
    @TestLogging("discovery.zen:TRACE,action:TRACE")
    public void testNodeNotReachableFromMaster() throws Exception {
        startCluster(3);

        String masterNode = internalCluster().getMasterName();
        String nonMasterNode = null;
        // Pick any node that is not the elected master
        while (nonMasterNode == null) {
            nonMasterNode = randomFrom(internalCluster().getNodeNames());
            if (nonMasterNode.equals(masterNode)) {
                nonMasterNode = null;
            }
        }

        logger.info("blocking request from master [{}] to [{}]", masterNode, nonMasterNode);
        MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode);
        // Randomly break the link one-way: either drop requests outright or let them hang
        if (randomBoolean()) {
            masterTransportService.addUnresponsiveRule(internalCluster().getInstance(ClusterService.class, nonMasterNode).localNode());
        } else {
            masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(ClusterService.class, nonMasterNode).localNode());
        }

        logger.info("waiting for [{}] to be removed from cluster", nonMasterNode);
        ensureStableCluster(2, masterNode);

        logger.info("waiting for [{}] to have no master", nonMasterNode);
        assertNoMaster(nonMasterNode);

        logger.info("healing partition and checking cluster reforms");
        masterTransportService.clearAllRules();

        ensureStableCluster(3);
    }
+
+
+ protected NetworkPartition addRandomPartition() {
+ NetworkPartition partition;
+ if (randomBoolean()) {
+ partition = new NetworkUnresponsivePartition(getRandom());
+ } else {
+ partition = new NetworkDisconnectPartition(getRandom());
+ }
+
+ setDisruptionScheme(partition);
+
+ return partition;
+ }
+
+ protected NetworkPartition addRandomIsolation(String isolatedNode) {
+ Set<String> side1 = new HashSet<>();
+ Set<String> side2 = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
+ side1.add(isolatedNode);
+ side2.remove(isolatedNode);
+
+ NetworkPartition partition;
+ if (randomBoolean()) {
+ partition = new NetworkUnresponsivePartition(side1, side2, getRandom());
+ } else {
+ partition = new NetworkDisconnectPartition(side1, side2, getRandom());
+ }
+
+ internalCluster().setDisruptionScheme(partition);
+
+ return partition;
+ }
+
+ private ServiceDisruptionScheme addRandomDisruptionScheme() {
+ // TODO: add partial partitions
+ List<ServiceDisruptionScheme> list = Arrays.asList(
+ new NetworkUnresponsivePartition(getRandom()),
+ new NetworkDelaysPartition(getRandom()),
+ new NetworkDisconnectPartition(getRandom()),
+ new SlowClusterStateProcessing(getRandom())
+ );
+ Collections.shuffle(list);
+ setDisruptionScheme(list.get(0));
+ return list.get(0);
+ }
+
    // Waits up to 30s, checking via a random node, for the cluster to contain exactly nodeCount nodes.
    private void ensureStableCluster(int nodeCount) {
        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), null);
    }
+
    // Waits up to timeValue, checking via a random node, for the cluster to contain exactly nodeCount nodes.
    private void ensureStableCluster(int nodeCount, TimeValue timeValue) {
        ensureStableCluster(nodeCount, timeValue, null);
    }
+
    // Waits up to 30s, checking via the given node, for the cluster to contain exactly nodeCount nodes.
    private void ensureStableCluster(int nodeCount, @Nullable String viaNode) {
        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), viaNode);
    }
+
+ private void ensureStableCluster(int nodeCount, TimeValue timeValue, @Nullable String viaNode) {
+ if (viaNode == null) {
+ viaNode = randomFrom(internalCluster().getNodeNames());
+ }
+ logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
+ ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes(Integer.toString(nodeCount))
+ .setTimeout(timeValue)
+ .setWaitForRelocatingShards(0)
+ .get();
+ if (clusterHealthResponse.isTimedOut()) {
+ ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
+ fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n"
+ + stateResponse.getState().prettyPrint());
+ }
+ assertThat(clusterHealthResponse.isTimedOut(), is(false));
+ }
+
    // Fetches the cluster state as locally known to the given node (local=true avoids a master round-trip).
    private ClusterState getNodeClusterState(String node) {
        return client(node).admin().cluster().prepareState().setLocal(true).get().getState();
    }
+
    // Asserts (busily, up to 10s) that the given node's local cluster state has no master.
    private void assertNoMaster(final String node) throws Exception {
        assertNoMaster(node, null, TimeValue.timeValueSeconds(10));
    }
+
    // Asserts (busily, up to maxWaitTime) that the given node's local cluster state has no master.
    private void assertNoMaster(final String node, TimeValue maxWaitTime) throws Exception {
        assertNoMaster(node, null, maxWaitTime);
    }
+
    /**
     * Asserts (busily, up to maxWaitTime) that the given node's local cluster state has no master and,
     * when expectedBlocks is non-null, that each of its block levels is present as a global block.
     */
    private void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception {
        assertBusy(new Runnable() {
            @Override
            public void run() {
                ClusterState state = getNodeClusterState(node);
                assertNull("node [" + node + "] still has [" + state.nodes().masterNode() + "] as master", state.nodes().masterNode());
                if (expectedBlocks != null) {
                    for (ClusterBlockLevel level : expectedBlocks.levels()) {
                        assertTrue("node [" + node + "] does have level [" + level + "] in it's blocks", state.getBlocks().hasGlobalBlock(level));
                    }
                }
            }
        }, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS);
    }
+
    /**
     * Asserts (busily, up to 10s) that the given node's local cluster state does not name
     * oldMasterNode as its master (a null master also counts as "different").
     */
    private void assertDifferentMaster(final String node, final String oldMasterNode) throws Exception {
        assertBusy(new Runnable() {
            @Override
            public void run() {
                ClusterState state = getNodeClusterState(node);
                String masterNode = null;
                if (state.nodes().masterNode() != null) {
                    masterNode = state.nodes().masterNode().name();
                }
                logger.trace("[{}] master is [{}]", node, state.nodes().masterNode());
                assertThat("node [" + node + "] still has [" + masterNode + "] as master",
                        oldMasterNode, not(equalTo(masterNode)));
            }
        }, 10, TimeUnit.SECONDS);
    }
+
+ private void assertMaster(String masterNode, List<String> nodes) {
+ for (String node : nodes) {
+ ClusterState state = getNodeClusterState(node);
+ String failMsgSuffix = "cluster_state:\n" + state.prettyPrint();
+ assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().size(), equalTo(nodes.size()));
+ String otherMasterNodeName = state.nodes().masterNode() != null ? state.nodes().masterNode().name() : null;
+ assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode));
+ }
+ }
+
    // Busily waits (up to 30s per node) until every listed node's ZenDiscovery reports that it is
    // no longer in the process of joining a master.
    private void assertDiscoveryCompleted(List<String> nodes) throws InterruptedException {
        for (final String node : nodes) {
            assertTrue("node [" + node + "] is still joining master", awaitBusy(new Predicate<Object>() {
                @Override
                public boolean apply(Object input) {
                    return !((ZenDiscovery) internalCluster().getInstance(Discovery.class, node)).joiningCluster();
                }
            }, 30, TimeUnit.SECONDS));
        }
    }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java
new file mode 100644
index 0000000000..ae240b6847
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.fd.FaultDetection;
+import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
+import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.cluster.NoopClusterService;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportConnectionListener;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ZenFaultDetectionTests extends ElasticsearchTestCase {
+
+ protected ThreadPool threadPool;
+
+ protected static final Version version0 = Version.fromId(/*0*/99);
+ protected DiscoveryNode nodeA;
+ protected MockTransportService serviceA;
+
+ protected static final Version version1 = Version.fromId(199);
+ protected DiscoveryNode nodeB;
+ protected MockTransportService serviceB;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ threadPool = new ThreadPool(getClass().getName());
+ serviceA = build(Settings.builder().put("name", "TS_A").build(), version0);
+ nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version0);
+ serviceB = build(Settings.builder().put("name", "TS_B").build(), version1);
+ nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version1);
+
+ // wait till all nodes are properly connected and the event has been sent, so tests in this class
+ // will not get this callback called on the connections done in this setup
+ final CountDownLatch latch = new CountDownLatch(4);
+ TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ fail("disconnect should not be called " + node);
+ }
+ };
+ serviceA.addConnectionListener(waitForConnection);
+ serviceB.addConnectionListener(waitForConnection);
+
+ serviceA.connectToNode(nodeB);
+ serviceA.connectToNode(nodeA);
+ serviceB.connectToNode(nodeA);
+ serviceB.connectToNode(nodeB);
+
+ assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ serviceA.removeConnectionListener(waitForConnection);
+ serviceB.removeConnectionListener(waitForConnection);
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ serviceA.close();
+ serviceB.close();
+ terminate(threadPool);
+ }
+
+ protected MockTransportService build(Settings settings, Version version) {
+ MockTransportService transportService = new MockTransportService(Settings.EMPTY, new LocalTransport(settings, threadPool, version), threadPool);
+ transportService.start();
+ return transportService;
+ }
+
+ private DiscoveryNodes buildNodesForA(boolean master) {
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+ builder.put(nodeA);
+ builder.put(nodeB);
+ builder.localNodeId(nodeA.id());
+ builder.masterNodeId(master ? nodeA.id() : nodeB.id());
+ return builder.build();
+ }
+
+ private DiscoveryNodes buildNodesForB(boolean master) {
+ DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+ builder.put(nodeA);
+ builder.put(nodeB);
+ builder.localNodeId(nodeB.id());
+ builder.masterNodeId(master ? nodeB.id() : nodeA.id());
+ return builder.build();
+ }
+
+ @Test
+ public void testNodesFaultDetectionConnectOnDisconnect() throws InterruptedException {
+ Settings.Builder settings = Settings.builder();
+ boolean shouldRetry = randomBoolean();
+ // make sure we don't ping again after the initial ping
+ settings.put(FaultDetection.SETTING_CONNECT_ON_NETWORK_DISCONNECT, shouldRetry)
+ .put(FaultDetection.SETTING_PING_INTERVAL, "5m");
+ ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(buildNodesForA(true)).build();
+ NodesFaultDetection nodesFDA = new NodesFaultDetection(settings.build(), threadPool, serviceA, clusterState.getClusterName());
+ nodesFDA.setLocalNode(nodeA);
+ NodesFaultDetection nodesFDB = new NodesFaultDetection(settings.build(), threadPool, serviceB, clusterState.getClusterName());
+ nodesFDB.setLocalNode(nodeB);
+ final CountDownLatch pingSent = new CountDownLatch(1);
+ nodesFDB.addListener(new NodesFaultDetection.Listener() {
+ @Override
+ public void onPingReceived(NodesFaultDetection.PingRequest pingRequest) {
+ pingSent.countDown();
+ }
+ });
+ nodesFDA.updateNodesAndPing(clusterState);
+
+ // wait for the first ping to go out, so we will really respond to a disconnect event rather then
+ // the ping failing
+ pingSent.await(30, TimeUnit.SECONDS);
+
+ final String[] failureReason = new String[1];
+ final DiscoveryNode[] failureNode = new DiscoveryNode[1];
+ final CountDownLatch notified = new CountDownLatch(1);
+ nodesFDA.addListener(new NodesFaultDetection.Listener() {
+ @Override
+ public void onNodeFailure(DiscoveryNode node, String reason) {
+ failureNode[0] = node;
+ failureReason[0] = reason;
+ notified.countDown();
+ }
+ });
+ // will raise a disconnect on A
+ serviceB.stop();
+ notified.await(30, TimeUnit.SECONDS);
+
+ assertEquals(nodeB, failureNode[0]);
+ Matcher<String> matcher = Matchers.containsString("verified");
+ if (!shouldRetry) {
+ matcher = Matchers.not(matcher);
+ }
+
+ assertThat(failureReason[0], matcher);
+ }
+
+ @Test
+ public void testMasterFaultDetectionConnectOnDisconnect() throws InterruptedException {
+
+ Settings.Builder settings = Settings.builder();
+ boolean shouldRetry = randomBoolean();
+ // make sure we don't ping
+ settings.put(FaultDetection.SETTING_CONNECT_ON_NETWORK_DISCONNECT, shouldRetry)
+ .put(FaultDetection.SETTING_PING_INTERVAL, "5m");
+ ClusterName clusterName = new ClusterName(randomAsciiOfLengthBetween(3, 20));
+ final ClusterState state = ClusterState.builder(clusterName).nodes(buildNodesForA(false)).build();
+ MasterFaultDetection masterFD = new MasterFaultDetection(settings.build(), threadPool, serviceA, clusterName,
+ new NoopClusterService(state));
+ masterFD.start(nodeB, "test");
+
+ final String[] failureReason = new String[1];
+ final DiscoveryNode[] failureNode = new DiscoveryNode[1];
+ final CountDownLatch notified = new CountDownLatch(1);
+ masterFD.addListener(new MasterFaultDetection.Listener() {
+
+ @Override
+ public void onMasterFailure(DiscoveryNode masterNode, String reason) {
+ failureNode[0] = masterNode;
+ failureReason[0] = reason;
+ notified.countDown();
+ }
+ });
+ // will raise a disconnect on A
+ serviceB.stop();
+ notified.await(30, TimeUnit.SECONDS);
+
+ assertEquals(nodeB, failureNode[0]);
+ Matcher<String> matcher = Matchers.containsString("verified");
+ if (!shouldRetry) {
+ matcher = Matchers.not(matcher);
+ }
+
+ assertThat(failureReason[0], matcher);
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java
new file mode 100644
index 0000000000..dc92507856
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@Slow
+public class ZenUnicastDiscoveryTests extends ElasticsearchIntegrationTest {
+
+ private ClusterDiscoveryConfiguration discoveryConfig;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return discoveryConfig.node(nodeOrdinal);
+ }
+
+ @Before
+ public void clearConfig() {
+ discoveryConfig = null;
+ }
+
+ @Test
+ public void testNormalClusterForming() throws ExecutionException, InterruptedException {
+ int currentNumNodes = randomIntBetween(3, 5);
+
+ // use explicit unicast hosts so we can start those first
+ int[] unicastHostOrdinals = new int[randomIntBetween(1, currentNumNodes)];
+ for (int i = 0; i < unicastHostOrdinals.length; i++) {
+ unicastHostOrdinals[i] = i;
+ }
+ discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, unicastHostOrdinals);
+
+ // start the unicast hosts
+ internalCluster().startNodesAsync(unicastHostOrdinals.length).get();
+
+ // start the rest of the cluster
+ internalCluster().startNodesAsync(currentNumNodes - unicastHostOrdinals.length).get();
+
+ if (client().admin().cluster().prepareHealth().setWaitForNodes("" + currentNumNodes).get().isTimedOut()) {
+ logger.info("cluster forming timed out, cluster state:\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint());
+ fail("timed out waiting for cluster to form with [" + currentNumNodes + "] nodes");
+ }
+ }
+
+ @Test
+ // Without the 'include temporalResponses responses to nodesToConnect' improvement in UnicastZenPing#sendPings this
+ // test fails, because 2 nodes elect themselves as master and the health request times out b/c waiting_for_nodes=N
+ // can't be satisfied.
+ public void testMinimumMasterNodes() throws Exception {
+ int currentNumNodes = randomIntBetween(3, 5);
+ final int min_master_nodes = currentNumNodes / 2 + 1;
+ int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes);
+ final Settings settings = Settings.settingsBuilder().put("discovery.zen.minimum_master_nodes", min_master_nodes).build();
+ discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings);
+
+ List<String> nodes = internalCluster().startNodesAsync(currentNumNodes).get();
+
+ ensureGreen();
+
+ DiscoveryNode masterDiscoNode = null;
+ for (String node : nodes) {
+ ClusterState state = internalCluster().client(node).admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
+ assertThat(state.nodes().size(), equalTo(currentNumNodes));
+ if (masterDiscoNode == null) {
+ masterDiscoNode = state.nodes().masterNode();
+ } else {
+ assertThat(masterDiscoNode.equals(state.nodes().masterNode()), equalTo(true));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTest.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTest.java
new file mode 100644
index 0000000000..a4ac5ff023
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+public class ElectMasterServiceTest extends ElasticsearchTestCase {
+
+ ElectMasterService electMasterService() {
+ return new ElectMasterService(Settings.EMPTY);
+ }
+
+ List<DiscoveryNode> generateRandomNodes() {
+ int count = scaledRandomIntBetween(1, 100);
+ ArrayList<DiscoveryNode> nodes = new ArrayList<>(count);
+
+ Map<String, String> master = new HashMap<>();
+ master.put("master", "true");
+ Map<String, String> nonMaster = new HashMap<>();
+ nonMaster.put("master", "false");
+
+ for (int i = 0; i < count; i++) {
+ Map<String, String> attributes = randomBoolean() ? master : nonMaster;
+ DiscoveryNode node = new DiscoveryNode("n_" + i, "n_" + i, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ nodes.add(node);
+ }
+
+ Collections.shuffle(nodes, getRandom());
+ return nodes;
+ }
+
+ @Test
+ public void sortByMasterLikelihood() {
+ List<DiscoveryNode> nodes = generateRandomNodes();
+ List<DiscoveryNode> sortedNodes = electMasterService().sortByMasterLikelihood(nodes);
+ assertEquals(nodes.size(), sortedNodes.size());
+ DiscoveryNode prevNode = sortedNodes.get(0);
+ for (int i = 1; i < sortedNodes.size(); i++) {
+ DiscoveryNode node = sortedNodes.get(i);
+ if (!prevNode.masterNode()) {
+ assertFalse(node.masterNode());
+ } else if (node.masterNode()) {
+ assertTrue(prevNode.id().compareTo(node.id()) < 0);
+ }
+ prevNode = node;
+ }
+
+ }
+
+ @Test
+ public void electMaster() {
+ List<DiscoveryNode> nodes = generateRandomNodes();
+ ElectMasterService service = electMasterService();
+ int min_master_nodes = randomIntBetween(0, nodes.size());
+ service.minimumMasterNodes(min_master_nodes);
+
+ int master_nodes = 0;
+ for (DiscoveryNode node : nodes) {
+ if (node.masterNode()) {
+ master_nodes++;
+ }
+ }
+ DiscoveryNode master = null;
+ if (service.hasEnoughMasterNodes(nodes)) {
+ master = service.electMaster(nodes);
+ }
+
+ if (master_nodes == 0) {
+ assertNull(master);
+ } else if (min_master_nodes > 0 && master_nodes < min_master_nodes) {
+ assertNull(master);
+ } else {
+ for (DiscoveryNode node : nodes) {
+ if (node.masterNode()) {
+ assertTrue(master.id().compareTo(node.id()) <= 0);
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java
new file mode 100644
index 0000000000..565c964bf5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.zen.fd.FaultDetection;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
+@Slow
+public class ZenDiscoveryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testChangeRejoinOnMasterOptionIsDynamic() throws Exception {
+ Settings nodeSettings = Settings.settingsBuilder()
+ .put("discovery.type", "zen") // <-- To override the local setting if set externally
+ .build();
+ String nodeName = internalCluster().startNode(nodeSettings);
+ ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
+ assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true));
+
+ client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, false))
+ .get();
+
+ assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false));
+ }
+
+ @Test
+ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
+ Settings defaultSettings = Settings.builder()
+ .put(FaultDetection.SETTING_PING_TIMEOUT, "1s")
+ .put(FaultDetection.SETTING_PING_RETRIES, "1")
+ .put("discovery.type", "zen")
+ .build();
+
+ Settings masterNodeSettings = Settings.builder()
+ .put("node.data", false)
+ .put(defaultSettings)
+ .build();
+ internalCluster().startNodesAsync(2, masterNodeSettings).get();
+ Settings dateNodeSettings = Settings.builder()
+ .put("node.master", false)
+ .put(defaultSettings)
+ .build();
+ internalCluster().startNodesAsync(2, dateNodeSettings).get();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes("4")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(clusterHealthResponse.isTimedOut(), is(false));
+
+ createIndex("test");
+ ensureSearchable("test");
+ RecoveryResponse r = client().admin().indices().prepareRecoveries("test").get();
+ int numRecoveriesBeforeNewMaster = r.shardResponses().get("test").size();
+
+ final String oldMaster = internalCluster().getMasterName();
+ internalCluster().stopCurrentMasterNode();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ String current = internalCluster().getMasterName();
+ assertThat(current, notNullValue());
+ assertThat(current, not(equalTo(oldMaster)));
+ }
+ });
+ ensureSearchable("test");
+
+ r = client().admin().indices().prepareRecoveries("test").get();
+ int numRecoveriesAfterNewMaster = r.shardResponses().get("test").size();
+ assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster));
+ }
+
+ @Test
+ @TestLogging(value = "action.admin.cluster.health:TRACE")
+ public void testNodeFailuresAreProcessedOnce() throws ExecutionException, InterruptedException, IOException {
+ Settings defaultSettings = Settings.builder()
+ .put(FaultDetection.SETTING_PING_TIMEOUT, "1s")
+ .put(FaultDetection.SETTING_PING_RETRIES, "1")
+ .put("discovery.type", "zen")
+ .build();
+
+ Settings masterNodeSettings = Settings.builder()
+ .put("node.data", false)
+ .put(defaultSettings)
+ .build();
+ String master = internalCluster().startNode(masterNodeSettings);
+ Settings dateNodeSettings = Settings.builder()
+ .put("node.master", false)
+ .put(defaultSettings)
+ .build();
+ internalCluster().startNodesAsync(2, dateNodeSettings).get();
+ client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
+
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
+ final ArrayList<ClusterState> statesFound = new ArrayList<>();
+ final CountDownLatch nodesStopped = new CountDownLatch(1);
+ clusterService.add(new ClusterStateListener() {
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ statesFound.add(event.state());
+ try {
+ // block until both nodes have stopped to accumulate node failures
+ nodesStopped.await();
+ } catch (InterruptedException e) {
+ // interruption only occurs during test teardown; safe to ignore here
+ }
+ }
+ });
+
+ internalCluster().stopRandomNonMasterNode();
+ internalCluster().stopRandomNonMasterNode();
+ nodesStopped.countDown();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // wait for all to be processed
+ assertThat(statesFound, Matchers.hasSize(2));
+ }
+
+ @Test
+ public void testNodeRejectsClusterStateWithWrongMasterNode() throws Exception {
+ Settings settings = Settings.builder()
+ .put("discovery.type", "zen")
+ .build();
+ List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
+ client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
+
+ List<String> nonMasterNodes = new ArrayList<>(nodeNames);
+ nonMasterNodes.remove(internalCluster().getMasterName());
+ String noneMasterNode = nonMasterNodes.get(0);
+
+ ClusterState state = internalCluster().getInstance(ClusterService.class).state();
+ DiscoveryNode node = null;
+ for (DiscoveryNode discoveryNode : state.nodes()) {
+ if (discoveryNode.name().equals(noneMasterNode)) {
+ node = discoveryNode;
+ }
+ }
+ assert node != null;
+
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes())
+ .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc");
+ ClusterState.Builder builder = ClusterState.builder(state);
+ builder.nodes(nodes);
+ BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.version());
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicReference<Exception> reference = new AtomicReference<>();
+ internalCluster().getInstance(TransportService.class, noneMasterNode).sendRequest(node, PublishClusterStateAction.ACTION_NAME, new BytesTransportRequest(bytes, Version.CURRENT), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ super.handleResponse(response);
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ super.handleException(exp);
+ reference.set(exp);
+ latch.countDown();
+ }
+ });
+ latch.await();
+ assertThat(reference.get(), notNullValue());
+ assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master then the current one, rejecting "));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java
new file mode 100644
index 0000000000..169dbdbe4a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.Queue;
+
+import static org.elasticsearch.discovery.zen.ZenDiscovery.ProcessClusterState;
+import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ */
+public class ZenDiscoveryUnitTest extends ElasticsearchTestCase {
+
+ public void testShouldIgnoreNewClusterState() {
+ ClusterName clusterName = new ClusterName("abc");
+
+ DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder();
+ currentNodes.masterNodeId("a");
+ DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder();
+ newNodes.masterNodeId("a");
+
+ ClusterState.Builder currentState = ClusterState.builder(clusterName);
+ currentState.nodes(currentNodes);
+ ClusterState.Builder newState = ClusterState.builder(clusterName);
+ newState.nodes(newNodes);
+
+ currentState.version(2);
+ newState.version(1);
+ assertTrue("should ignore, because new state's version is lower to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
+ currentState.version(1);
+ newState.version(1);
+ assertFalse("should not ignore, because new state's version is equal to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
+ currentState.version(1);
+ newState.version(2);
+ assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
+
+ currentNodes = DiscoveryNodes.builder();
+ currentNodes.masterNodeId("b");
+ // version isn't taken into account, so randomize it to ensure this.
+ if (randomBoolean()) {
+ currentState.version(2);
+ newState.version(1);
+ } else {
+ currentState.version(1);
+ newState.version(2);
+ }
+ currentState.nodes(currentNodes);
+ try {
+ shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build());
+ fail("should ignore, because current state's master is not equal to new state's master");
+ } catch (IllegalStateException e) {
+ assertThat(e.getMessage(), containsString("cluster state from a different master then the current one, rejecting"));
+ }
+
+ currentNodes = DiscoveryNodes.builder();
+ currentNodes.masterNodeId(null);
+ currentState.nodes(currentNodes);
+ // version isn't taken into account, so randomize it to ensure this.
+ if (randomBoolean()) {
+ currentState.version(2);
+ newState.version(1);
+ } else {
+ currentState.version(1);
+ newState.version(2);
+ }
+ assertFalse("should not ignore, because current state doesn't have a master", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
+ }
+
+ public void testSelectNextStateToProcess_empty() {
+ Queue<ProcessClusterState> queue = new LinkedList<>();
+ assertThat(ZenDiscovery.selectNextStateToProcess(queue), nullValue());
+ }
+
+ public void testSelectNextStateToProcess() {
+ ClusterName clusterName = new ClusterName("abc");
+ DiscoveryNodes nodes = DiscoveryNodes.builder().masterNodeId("a").build();
+
+ int numUpdates = scaledRandomIntBetween(50, 100);
+ LinkedList<ProcessClusterState> queue = new LinkedList<>();
+ for (int i = 0; i < numUpdates; i++) {
+ queue.add(new ProcessClusterState(ClusterState.builder(clusterName).version(i).nodes(nodes).build()));
+ }
+ ProcessClusterState mostRecent = queue.get(numUpdates - 1);
+ Collections.shuffle(queue, getRandom());
+
+ assertThat(ZenDiscovery.selectNextStateToProcess(queue), sameInstance(mostRecent.clusterState));
+ assertThat(mostRecent.processed, is(true));
+ assertThat(queue.size(), equalTo(0));
+ }
+
+ public void testSelectNextStateToProcess_differentMasters() {
+ ClusterName clusterName = new ClusterName("abc");
+ DiscoveryNodes nodes1 = DiscoveryNodes.builder().masterNodeId("a").build();
+ DiscoveryNodes nodes2 = DiscoveryNodes.builder().masterNodeId("b").build();
+
+ LinkedList<ProcessClusterState> queue = new LinkedList<>();
+ ProcessClusterState thirdMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(1).nodes(nodes1).build());
+ queue.offer(thirdMostRecent);
+ ProcessClusterState secondMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(2).nodes(nodes1).build());
+ queue.offer(secondMostRecent);
+ ProcessClusterState mostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(3).nodes(nodes1).build());
+ queue.offer(mostRecent);
+ Collections.shuffle(queue, getRandom());
+ queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(4).nodes(nodes2).build()));
+ queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(5).nodes(nodes1).build()));
+
+
+ assertThat(ZenDiscovery.selectNextStateToProcess(queue), sameInstance(mostRecent.clusterState));
+ assertThat(thirdMostRecent.processed, is(true));
+ assertThat(secondMostRecent.processed, is(true));
+ assertThat(mostRecent.processed, is(true));
+ assertThat(queue.size(), equalTo(2));
+ assertThat(queue.get(0).processed, is(false));
+ assertThat(queue.get(1).processed, is(false));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
new file mode 100644
index 0000000000..6ded8a9f95
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class ZenPingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testPingCollection() {
+ DiscoveryNode[] nodes = new DiscoveryNode[randomIntBetween(1, 30)];
+ long maxIdPerNode[] = new long[nodes.length];
+ DiscoveryNode masterPerNode[] = new DiscoveryNode[nodes.length];
+ boolean hasJoinedOncePerNode[] = new boolean[nodes.length];
+ ArrayList<ZenPing.PingResponse> pings = new ArrayList<>();
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = new DiscoveryNode("" + i, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ for (int pingCount = scaledRandomIntBetween(10, nodes.length * 10); pingCount > 0; pingCount--) {
+ int node = randomInt(nodes.length - 1);
+ DiscoveryNode masterNode = null;
+ if (randomBoolean()) {
+ masterNode = nodes[randomInt(nodes.length - 1)];
+ }
+ boolean hasJoinedOnce = randomBoolean();
+ ZenPing.PingResponse ping = new ZenPing.PingResponse(nodes[node], masterNode, ClusterName.DEFAULT, hasJoinedOnce);
+ if (rarely()) {
+ // ignore some pings
+ continue;
+ }
+ // update max ping info
+ maxIdPerNode[node] = ping.id();
+ masterPerNode[node] = masterNode;
+ hasJoinedOncePerNode[node] = hasJoinedOnce;
+ pings.add(ping);
+ }
+
+ // shuffle
+ Collections.shuffle(pings);
+
+ ZenPing.PingCollection collection = new ZenPing.PingCollection();
+ collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()]));
+
+ ZenPing.PingResponse[] aggregate = collection.toArray();
+
+ for (ZenPing.PingResponse ping : aggregate) {
+ int nodeId = Integer.parseInt(ping.node().id());
+ assertThat(maxIdPerNode[nodeId], equalTo(ping.id()));
+ assertThat(masterPerNode[nodeId], equalTo(ping.master()));
+ assertThat(hasJoinedOncePerNode[nodeId], equalTo(ping.hasJoinedOnce()));
+
+ maxIdPerNode[nodeId] = -1; // mark as seen
+ }
+
+ for (int i = 0; i < maxIdPerNode.length; i++) {
+ assertTrue("node " + i + " had pings but it was not found in collection", maxIdPerNode[i] <= 0);
+ }
+
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
new file mode 100644
index 0000000000..761f900829
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPingTests.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.multicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.discovery.zen.ping.PingContextProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.junit.Test;
+
+import java.net.DatagramPacket;
+import java.net.InetAddress;
+import java.net.MulticastSocket;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class MulticastZenPingTests extends ElasticsearchTestCase {
+
+ private Settings buildRandomMulticast(Settings settings) {
+ Settings.Builder builder = Settings.builder().put(settings);
+ builder.put("discovery.zen.ping.multicast.group", "224.2.3." + randomIntBetween(0, 255));
+ builder.put("discovery.zen.ping.multicast.port", randomIntBetween(55000, 56000));
+ if (randomBoolean()) {
+ builder.put("discovery.zen.ping.multicast.shared", randomBoolean());
+ }
+ return builder.build();
+ }
+
+ @Test
+ public void testSimplePings() throws InterruptedException {
+ Settings settings = Settings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ ThreadPool threadPool = new ThreadPool("testSimplePings");
+ final ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ final TransportService transportServiceB = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setPingContextProvider(new PingContextProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+
+ @Override
+ public boolean nodeHasJoinedClusterOnce() {
+ return false;
+ }
+ });
+ zenPingA.start();
+
+ MulticastZenPing zenPingB = new MulticastZenPing(threadPool, transportServiceB, clusterName, Version.CURRENT);
+ zenPingB.setPingContextProvider(new PingContextProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+
+ @Override
+ public boolean nodeHasJoinedClusterOnce() {
+ return true;
+ }
+ });
+ zenPingB.start();
+
+ try {
+ logger.info("ping from A");
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].node().id(), equalTo("B"));
+ assertTrue(pingResponses[0].hasJoinedOnce());
+
+ logger.info("ping from B");
+ pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].node().id(), equalTo("A"));
+ assertFalse(pingResponses[0].hasJoinedOnce());
+
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testExternalPing() throws Exception {
+ Settings settings = Settings.EMPTY;
+ settings = buildRandomMulticast(settings);
+
+ final ThreadPool threadPool = new ThreadPool("testExternalPing");
+ final ClusterName clusterName = new ClusterName("test");
+ final TransportService transportServiceA = new TransportService(new LocalTransport(settings, threadPool, Version.CURRENT), threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName, Version.CURRENT);
+ zenPingA.setPingContextProvider(new PingContextProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+
+ @Override
+ public boolean nodeHasJoinedClusterOnce() {
+ return false;
+ }
+ });
+ zenPingA.start();
+
+ MulticastSocket multicastSocket = null;
+ try {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("TRACE");
+ multicastSocket = new MulticastSocket(54328);
+ multicastSocket.setReceiveBufferSize(2048);
+ multicastSocket.setSendBufferSize(2048);
+ multicastSocket.setSoTimeout(60000);
+
+ DatagramPacket datagramPacket = new DatagramPacket(new byte[2048], 2048, InetAddress.getByName("224.2.2.4"), 54328);
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("request").field("cluster_name", "test").endObject().endObject();
+ datagramPacket.setData(builder.bytes().toBytes());
+ multicastSocket.send(datagramPacket);
+ Thread.sleep(100);
+ } finally {
+ Loggers.getLogger(MulticastZenPing.class).setLevel("INFO");
+ if (multicastSocket != null) multicastSocket.close();
+ zenPingA.close();
+ terminate(threadPool);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
new file mode 100644
index 0000000000..8c77529dee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingTests.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.discovery.zen.ping.unicast;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.discovery.zen.ping.PingContextProvider;
+import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@Slow
+public class UnicastZenPingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimplePings() throws InterruptedException {
+ Settings settings = Settings.EMPTY;
+ int startPort = 11000 + randomIntBetween(0, 1000);
+ int endPort = startPort + 10;
+ settings = Settings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+
+ ThreadPool threadPool = new ThreadPool(getClass().getName());
+ ClusterName clusterName = new ClusterName("test");
+ NetworkService networkService = new NetworkService(settings);
+ ElectMasterService electMasterService = new ElectMasterService(settings);
+
+ NettyTransport transportA = new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT);
+ final TransportService transportServiceA = new TransportService(transportA, threadPool).start();
+ final DiscoveryNode nodeA = new DiscoveryNode("UZP_A", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressA = (InetSocketTransportAddress) transportA.boundAddress().publishAddress();
+
+ NettyTransport transportB = new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT);
+ final TransportService transportServiceB = new TransportService(transportB, threadPool).start();
+ final DiscoveryNode nodeB = new DiscoveryNode("UZP_B", transportServiceA.boundAddress().publishAddress(), Version.CURRENT);
+
+ InetSocketTransportAddress addressB = (InetSocketTransportAddress) transportB.boundAddress().publishAddress();
+
+ Settings hostsSettings = Settings.settingsBuilder().putArray("discovery.zen.ping.unicast.hosts",
+ addressA.address().getAddress().getHostAddress() + ":" + addressA.address().getPort(),
+ addressB.address().getAddress().getHostAddress() + ":" + addressB.address().getPort())
+ .build();
+
+ UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, transportServiceA, clusterName, Version.CURRENT, electMasterService, null);
+ zenPingA.setPingContextProvider(new PingContextProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeA).localNodeId("UZP_A").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+
+ @Override
+ public boolean nodeHasJoinedClusterOnce() {
+ return false;
+ }
+ });
+ zenPingA.start();
+
+ UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, transportServiceB, clusterName, Version.CURRENT, electMasterService, null);
+ zenPingB.setPingContextProvider(new PingContextProvider() {
+ @Override
+ public DiscoveryNodes nodes() {
+ return DiscoveryNodes.builder().put(nodeB).localNodeId("UZP_B").build();
+ }
+
+ @Override
+ public NodeService nodeService() {
+ return null;
+ }
+
+ @Override
+ public boolean nodeHasJoinedClusterOnce() {
+ return true;
+ }
+ });
+ zenPingB.start();
+
+ try {
+ logger.info("ping from UZP_A");
+ ZenPing.PingResponse[] pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(10));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].node().id(), equalTo("UZP_B"));
+ assertTrue(pingResponses[0].hasJoinedOnce());
+
+ // ping again, this time from B,
+ logger.info("ping from UZP_B");
+ pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(10));
+ assertThat(pingResponses.length, equalTo(1));
+ assertThat(pingResponses[0].node().id(), equalTo("UZP_A"));
+ assertFalse(pingResponses[0].hasJoinedOnce());
+
+ } finally {
+ zenPingA.close();
+ zenPingB.close();
+ transportServiceA.close();
+ transportServiceB.close();
+ terminate(threadPool);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
new file mode 100644
index 0000000000..8d757cd7d5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/document/AliasedIndexDocumentActionsTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+
+import static org.elasticsearch.client.Requests.createIndexRequest;
+
+/**
+ *
+ */
+public class AliasedIndexDocumentActionsTests extends DocumentActionsTests {
+
+ @Override
+ protected void createIndex() {
+ logger.info("Creating index [test1] with alias [test]");
+ try {
+ client().admin().indices().prepareDelete("test1").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ logger.info("--> creating index test");
+ client().admin().indices().create(createIndexRequest("test1").alias(new Alias("test"))).actionGet();
+ }
+
+ @Override
+ protected String getConcreteIndexName() {
+ return "test1";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/document/BulkTests.java b/core/src/test/java/org/elasticsearch/document/BulkTests.java
new file mode 100644
index 0000000000..edb38190f5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/document/BulkTests.java
@@ -0,0 +1,1002 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import com.google.common.base.Charsets;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.concurrent.CyclicBarrier;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class BulkTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBulkUpdate_simple() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("2").setSource("field", 2).setCreate(true))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("3").setSource("field", 3))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("4").setSource("field", 4))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("5").setSource("field", 5))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+ }
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("1")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("2")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).setRetryOnConflict(3))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("3")
+ .setDoc(jsonBuilder().startObject().field("field1", "test").endObject())).execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+ }
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("1"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("3"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").setFields("field").execute()
+ .actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(2l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").setFields("field1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("test"));
+
+ bulkResponse = client()
+ .prepareBulk()
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("6")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+ .setUpsert(jsonBuilder().startObject().field("field", 0).endObject()))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("7")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("2")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))).execute()
+ .actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("6"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertThat(bulkResponse.getItems()[1].getResponse(), nullValue());
+ assertThat(bulkResponse.getItems()[1].getFailure().getIndex(), equalTo("test"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("document missing"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getIndex(), equalTo("test"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(0l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(3l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(4l));
+ }
+
+ @Test
+ public void testBulkUpdate_simpleOldScriptAPI() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("2").setSource("field", 2).setCreate(true))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("3").setSource("field", 3))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("4").setSource("field", 4))
+ .add(client().prepareIndex().setIndex(indexOrAlias()).setType("type1").setId("5").setSource("field", 5)).execute()
+ .actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+ }
+
+ bulkResponse = client()
+ .prepareBulk()
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("1")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("2")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).setRetryOnConflict(3))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("3").setDoc(jsonBuilder().startObject().field("field1", "test").endObject()))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ assertThat(bulkItemResponse.getIndex(), equalTo("test"));
+ }
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("1"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("3"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ GetResponse getResponse = client().prepareGet().setIndex("test").setType("type1").setId("1").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(2l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("3").setFields("field1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(2l));
+ assertThat(getResponse.getField("field1").getValue().toString(), equalTo("test"));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("6")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
+ .setUpsert(jsonBuilder().startObject().field("field", 0).endObject()))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("7")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE))
+ .add(client().prepareUpdate().setIndex(indexOrAlias()).setType("type1").setId("2")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getId(), equalTo("6"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertThat(bulkResponse.getItems()[1].getResponse(), nullValue());
+ assertThat(bulkResponse.getItems()[1].getFailure().getIndex(), equalTo("test"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7"));
+ assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("document missing"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getIndex(), equalTo("test"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("6").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(0l));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("7").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ getResponse = client().prepareGet().setIndex("test").setType("type1").setId("2").setFields("field").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(3l));
+ assertThat(((Long) getResponse.getField("field").getValue()), equalTo(4l));
+ }
+
+ @Test
+ public void testBulkVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "2").setCreate(true).setSource("field", "1"))
+ .add(client().prepareIndex("test", "type", "1").setSource("field", "2")).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(1l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(1l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(2l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "1").setVersion(4l).setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "2").setDoc("field", "2"))
+ .add(client().prepareUpdate("test", "type", "1").setVersion(2l).setDoc("field", "3")).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex("test", "type", "e1").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e2").setCreate(true).setSource("field", "1").setVersion(10).setVersionType(VersionType.EXTERNAL))
+ .add(client().prepareIndex("test", "type", "e1").setSource("field", "2").setVersion(12).setVersionType(VersionType.EXTERNAL)).get();
+
+ assertTrue(((IndexResponse) bulkResponse.getItems()[0].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[0].getResponse()).getVersion(), equalTo(10l));
+ assertTrue(((IndexResponse) bulkResponse.getItems()[1].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(10l));
+ assertFalse(((IndexResponse) bulkResponse.getItems()[2].getResponse()).isCreated());
+ assertThat(((IndexResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(12l));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "2").setVersion(10)) // INTERNAL
+ .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.FORCE))
+ .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.INTERNAL)).get();
+
+ assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(20l));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(21l));
+ }
+
+ @Test
+ public void testBulkUpdate_malformedScripts() throws Exception {
+
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 1))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+
+ bulkResponse = client().prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1")
+ .setScript(new Script("ctx._source.field += a", ScriptService.ScriptType.INLINE, null, null)).setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3")
+ .setScript(new Script("ctx._source.field += a", ScriptService.ScriptType.INLINE, null, null)).setFields("field"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(bulkResponse.getItems()[0].getFailure().getId(), equalTo("1"));
+ assertThat(bulkResponse.getItems()[0].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[0].getResponse(), nullValue());
+
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().field("field").getValue()),
+ equalTo(2));
+ assertThat(bulkResponse.getItems()[1].getFailure(), nullValue());
+
+ assertThat(bulkResponse.getItems()[2].getFailure().getId(), equalTo("3"));
+ assertThat(bulkResponse.getItems()[2].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[2].getResponse(), nullValue());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testBulkUpdate_malformedScriptsOldScriptAPI() throws Exception {
+
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource("field", 1))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("3").setSource("field", 1)).execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+
+ bulkResponse = client()
+ .prepareBulk()
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("1")
+ .setScript("ctx._source.field += a", ScriptService.ScriptType.INLINE).setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("2")
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).setFields("field"))
+ .add(client().prepareUpdate().setIndex("test").setType("type1").setId("3")
+ .setScript("ctx._source.field += a", ScriptService.ScriptType.INLINE).setFields("field"))
+ .execute().actionGet();
+
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(3));
+ assertThat(bulkResponse.getItems()[0].getFailure().getId(), equalTo("1"));
+ assertThat(bulkResponse.getItems()[0].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[0].getResponse(), nullValue());
+
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getId(), equalTo("2"));
+ assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getGetResult().field("field").getValue()), equalTo(2));
+ assertThat(bulkResponse.getItems()[1].getFailure(), nullValue());
+
+ assertThat(bulkResponse.getItems()[2].getFailure().getId(), equalTo("3"));
+ assertThat(bulkResponse.getItems()[2].getFailure().getMessage(), containsString("failed to execute script"));
+ assertThat(bulkResponse.getItems()[2].getResponse(), nullValue());
+ }
+
+ @Test
+ public void testBulkUpdate_largerVolume() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ int numDocs = scaledRandomIntBetween(100, 2000);
+ if (numDocs % 2 == 1) {
+ numDocs++; // this test needs an even num of docs
+ }
+ logger.info("Bulk-Indexing {} docs", numDocs);
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript(new Script("ctx._source.counter += 1", ScriptService.ScriptType.INLINE, null, null)).setFields("counter")
+ .setUpsert(jsonBuilder().startObject().field("counter", 1).endObject()));
+ }
+
+ BulkResponse response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(1l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(1l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()),
+ equalTo(1));
+
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute()
+ .actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat((Long) getResponse.getField("counter").getValue(), equalTo(1l));
+ }
+ }
+
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ UpdateRequestBuilder updateBuilder = client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setFields("counter");
+ if (i % 2 == 0) {
+ updateBuilder.setScript(new Script("ctx._source.counter += 1", ScriptService.ScriptType.INLINE, null, null));
+ } else {
+ updateBuilder.setDoc(jsonBuilder().startObject().field("counter", 2).endObject());
+ }
+ if (i % 3 == 0) {
+ updateBuilder.setRetryOnConflict(3);
+ }
+
+ builder.add(updateBuilder);
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(2l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()),
+ equalTo(2));
+ }
+
+ builder = client().prepareBulk();
+ int maxDocs = numDocs / 2 + numDocs;
+ for (int i = (numDocs / 2); i < maxDocs; i++) {
+ builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript(new Script("ctx._source.counter += 1", ScriptService.ScriptType.INLINE, null, null)));
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(true));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ int id = i + (numDocs / 2);
+ if (i >= (numDocs / 2)) {
+ assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getFailure().getMessage(), containsString("document missing"));
+ } else {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(3l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+ }
+
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript(new Script("ctx.op = \"none\"", ScriptService.ScriptType.INLINE, null, null)));
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript(new Script("ctx.op = \"delete\"", ScriptService.ScriptType.INLINE, null, null)));
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute()
+ .actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Mirror of testBulkUpdate_largerVolume using the deprecated String-based
+ // setScript(String, ScriptType) API instead of the Script object; same five
+ // phases (upsert, mixed updates, missing-doc failures, no-op, delete).
+ @Test
+ public void testBulkUpdate_largerVolumeOldScriptAPI() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ int numDocs = scaledRandomIntBetween(100, 2000);
+ if (numDocs % 2 == 1) {
+ numDocs++; // this test needs an even num of docs
+ }
+ logger.info("Bulk-Indexing {} docs", numDocs);
+ // phase 1: scripted upsert creates every doc with counter = 1
+ BulkRequestBuilder builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(client().prepareUpdate().setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript("ctx._source.counter += 1", ScriptService.ScriptType.INLINE).setFields("counter")
+ .setUpsert(jsonBuilder().startObject().field("counter", 1).endObject())
+ );
+ }
+
+ BulkResponse response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(1l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(1l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(1));
+
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getVersion(), equalTo(1l));
+ assertThat((Long) getResponse.getField("counter").getValue(), equalTo(1l));
+ }
+ }
+
+ // phase 2: even ids via script, odd ids via partial doc; every third
+ // request sets retry-on-conflict
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ UpdateRequestBuilder updateBuilder = client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setFields("counter");
+ if (i % 2 == 0) {
+ updateBuilder.setScript("ctx._source.counter += 1", ScriptService.ScriptType.INLINE);
+ } else {
+ updateBuilder.setDoc(jsonBuilder().startObject().field("counter", 2).endObject());
+ }
+ if (i % 3 == 0) {
+ updateBuilder.setRetryOnConflict(3);
+ }
+
+ builder.add(updateBuilder);
+ }
+
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(2l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getId(), equalTo(Integer.toString(i)));
+ assertThat(((UpdateResponse) response.getItems()[i].getResponse()).getVersion(), equalTo(2l));
+ assertThat(((Integer) ((UpdateResponse) response.getItems()[i].getResponse()).getGetResult().field("counter").getValue()), equalTo(2));
+ }
+
+ // phase 3: no upsert -> upper half of ids do not exist and must fail
+ builder = client().prepareBulk();
+ int maxDocs = numDocs / 2 + numDocs;
+ for (int i = (numDocs / 2); i < maxDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i)).setScript("ctx._source.counter += 1", ScriptService.ScriptType.INLINE)
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(true));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ int id = i + (numDocs / 2);
+ if (i >= (numDocs / 2)) {
+ assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getFailure().getMessage(), containsString("document missing"));
+ } else {
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id)));
+ assertThat(response.getItems()[i].getVersion(), equalTo(3l));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+ }
+
+ // phase 4: scripted no-op still reports successful "update" items
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript("ctx.op = \"none\"", ScriptService.ScriptType.INLINE)
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.buildFailureMessage(), response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ }
+
+ // phase 5: scripted delete removes every doc
+ builder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ builder.add(
+ client().prepareUpdate()
+ .setIndex("test").setType("type1").setId(Integer.toString(i))
+ .setScript("ctx.op = \"delete\"", ScriptService.ScriptType.INLINE)
+ );
+ }
+ response = builder.execute().actionGet();
+ assertThat(response.hasFailures(), equalTo(false));
+ assertThat(response.getItems().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getItems()[i].getItemId(), equalTo(i));
+ assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(i)));
+ assertThat(response.getItems()[i].getIndex(), equalTo("test"));
+ assertThat(response.getItems()[i].getType(), equalTo("type1"));
+ assertThat(response.getItems()[i].getOpType(), equalTo("update"));
+ for (int j = 0; j < 5; j++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).setFields("counter").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+ }
+ }
+
+ // Index a random number of docs in random-sized bulk batches while the
+ // index (with a random replica count) may still be initializing, then
+ // verify the final hit count matches.
+ @Test
+ public void testBulkIndexingWhileInitializing() throws Exception {
+
+ int replica = randomInt(2);
+
+ internalCluster().ensureAtLeastNumDataNodes(1 + replica);
+
+ assertAcked(prepareCreate("test").setSettings(
+ Settings.builder()
+ .put(indexSettings())
+ .put("index.number_of_replicas", replica)
+ ));
+
+ int numDocs = scaledRandomIntBetween(100, 5000);
+ int bulk = scaledRandomIntBetween(1, 99);
+ // outer loop advances i inside the inner loop, one batch per iteration
+ for (int i = 0; i < numDocs; ) {
+ final BulkRequestBuilder builder = client().prepareBulk();
+ for (int j = 0; j < bulk && i < numDocs; j++, i++) {
+ builder.add(client().prepareIndex("test", "type1", Integer.toString(i)).setSource("val", i));
+ }
+ // NOTE(review): "i - bulk" misreports the range start when the final
+ // batch is smaller than 'bulk'; log-only, harmless to the assertions.
+ logger.info("bulk indexing {}-{}", i - bulk, i - 1);
+ BulkResponse response = builder.get();
+ if (response.hasFailures()) {
+ fail(response.buildFailureMessage());
+ }
+ }
+
+ refresh();
+
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ // doc_as_upsert through the raw bulk byte API must honor the "parent"
+ // routing key so the child ends up queryable via has_parent.
+ @Test
+ public void testBulkUpdateDocAsUpsertWithParent() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}")
+ .execute().actionGet();
+ ensureGreen();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ // raw NDJSON bulk payloads: one parent index op, one doc_as_upsert
+ // update op carrying the parent id
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{ \"update\" : { \"_index\" : \"test\", \"_type\" : \"child\", \"_id\" : \"child1\", \"parent\" : \"parent1\"}}\n" +
+ "{\"doc\" : { \"field1\" : \"value1\"}, \"doc_as_upsert\" : \"true\"}\n").array();
+
+ builder.add(addParent, 0, addParent.length);
+ builder.add(addChild, 0, addChild.length);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ //we check that the _parent field was set on the child document by using the has parent query
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ /*
+ Test for https://github.com/elasticsearch/elasticsearch/issues/3444
+ */
+ // Same parent-routing check as the doc_as_upsert variant above, but using
+ // a scripted update with an "upsert" document in the raw bulk payload.
+ @Test
+ public void testBulkUpdateUpsertWithParent() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent", "{\"parent\":{}}")
+ .addMapping("child", "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}"));
+ ensureGreen();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n" +
+ "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChild = new BytesArray("{\"update\" : { \"_id\" : \"child1\", \"_type\" : \"child\", \"_index\" : \"test\", \"parent\" : \"parent1\"} }\n" +
+ "{ \"script\" : \"ctx._source.field2 = 'value2'\", \"upsert\" : {\"field1\" : \"value1\"}}\n").array();
+
+ builder.add(addParent, 0, addParent.length);
+ builder.add(addChild, 0, addChild.length);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+
+ client().admin().indices().prepareRefresh("test").get();
+
+ // the upserted child is only found by has_parent if _parent was applied
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", QueryBuilders.matchAllQuery()))
+ .get();
+
+ assertSearchHits(searchResponse, "child1");
+ }
+
+ /*
+ * Test for https://github.com/elasticsearch/elasticsearch/issues/8365
+ */
+ // A child index op that omits its required parent routing must fail as a
+ // single item without aborting the other requests in the same bulk.
+ @Test
+ public void testBulkUpdateChildMissingParentRouting() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("parent", "{\"parent\":{}}").addMapping("child",
+ "{\"child\": {\"_parent\": {\"type\": \"parent\"}}}"));
+ ensureGreen();
+
+ BulkRequestBuilder builder = client().prepareBulk();
+
+ byte[] addParent = new BytesArray("{\"index\" : { \"_index\" : \"test\", \"_type\" : \"parent\", \"_id\" : \"parent1\"}}\n"
+ + "{\"field1\" : \"value1\"}\n").array();
+
+ byte[] addChildOK = new BytesArray(
+ "{\"index\" : { \"_id\" : \"child1\", \"_type\" : \"child\", \"_index\" : \"test\", \"parent\" : \"parent1\"} }\n"
+ + "{ \"field1\" : \"value1\"}\n").array();
+ // same child op but with the "parent" key missing -> expected to fail
+ byte[] addChildMissingRouting = new BytesArray(
+ "{\"index\" : { \"_id\" : \"child2\", \"_type\" : \"child\", \"_index\" : \"test\"} }\n" + "{ \"field1\" : \"value1\"}\n")
+ .array();
+
+ builder.add(addParent, 0, addParent.length);
+ builder.add(addChildOK, 0, addChildOK.length);
+ builder.add(addChildMissingRouting, 0, addChildMissingRouting.length);
+ builder.add(addChildOK, 0, addChildOK.length);
+
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.getItems().length, equalTo(4));
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+ // only the routing-less child fails; the op after it still succeeds
+ assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(true));
+ assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
+ }
+
+ // 30 threads race a version-1 conditional update of the same doc through
+ // bulk; exactly one may win, all others must get a version conflict.
+ @Test
+ public void testFailingVersionedUpdatedOnBulk() throws Exception {
+ createIndex("test");
+ index("test", "type", "1", "field", "1");
+ final BulkResponse[] responses = new BulkResponse[30];
+ // barrier releases all threads at once to maximize contention
+ final CyclicBarrier cyclicBarrier = new CyclicBarrier(responses.length);
+ Thread[] threads = new Thread[responses.length];
+
+
+ for (int i = 0; i < responses.length; i++) {
+ final int threadID = i;
+ threads[threadID] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ cyclicBarrier.await();
+ } catch (Exception e) {
+ return;
+ }
+ BulkRequestBuilder requestBuilder = client().prepareBulk();
+ requestBuilder.add(client().prepareUpdate("test", "type", "1").setVersion(1).setDoc("field", threadID));
+ responses[threadID] = requestBuilder.get();
+
+ }
+ });
+ threads[threadID].start();
+
+ }
+
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+
+ // count the bulk responses without failures; versioning guarantees
+ // exactly one update at version 1 can succeed
+ int successes = 0;
+ for (BulkResponse response : responses) {
+ if (!response.hasFailures()) {
+ successes++;
+ }
+ }
+
+ assertThat(successes, equalTo(1));
+ }
+
+ // A malformed source that must be pre-parsed (to extract _timestamp from a
+ // path) should fail only its own bulk item, not the whole request.
+ @Test // issue 4745
+ public void preParsingSourceDueToMappingShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", true)
+ .field("path", "last_modified")
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate("test").addMapping("type", builder));
+
+ // first doc has unterminated JSON ("Malformed), second doc is valid
+ String brokenBuildRequestData = "{\"index\": {\"_id\": \"1\"}}\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {\"_id\": \"2\"}}\n" +
+ "{\"name\": \"Good\", \"last_modified\" : \"2013-04-05\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "2"));
+ }
+
+ // Variant of the pre-parsing test where the trigger is required _routing
+ // extracted from a source path (pre-2.0 mapping, hence the pinned
+ // index-created version); the malformed item fails alone.
+ @Test // issue 4745
+ public void preParsingSourceDueToRoutingShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_routing")
+ .field("required", true)
+ .field("path", "my_routing")
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate("test").addMapping("type", builder)
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID));
+ ensureYellow("test");
+
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": { \"_id\" : \"24000\" } }\n" +
+ "{\"name\": \"Good\", \"my_routing\" : \"48000\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(client().prepareGet("test", "type", "24000").setRouting("48000").get());
+ }
+
+
+ // Variant of the pre-parsing test where the trigger is _id extracted from
+ // a source path (pre-2.0 mapping); the malformed item fails alone.
+ @Test // issue 4745
+ public void preParsingSourceDueToIdShouldNotBreakCompleteBulkRequest() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_id")
+ .field("path", "my_id")
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate("test").addMapping("type", builder)
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID));
+ ensureYellow("test");
+
+ String brokenBuildRequestData = "{\"index\": {} }\n" +
+ "{\"name\": \"Malformed}\n" +
+ "{\"index\": {} }\n" +
+ "{\"name\": \"Good\", \"my_id\" : \"48\"}\n";
+
+ BulkResponse bulkResponse = client().prepareBulk().add(brokenBuildRequestData.getBytes(Charsets.UTF_8), 0, brokenBuildRequestData.length(), "test", "type").setRefresh(true).get();
+ assertThat(bulkResponse.getItems().length, is(2));
+ assertThat(bulkResponse.getItems()[0].isFailed(), is(true));
+ assertThat(bulkResponse.getItems()[1].isFailed(), is(false));
+
+ assertExists(get("test", "type", "48"));
+ }
+
+ // Randomly interleave ops targeting invalid index names (uppercase/dots)
+ // with valid ones; each bad op fails individually, the rest succeed.
+ @Test // issue 4987
+ public void testThatInvalidIndexNamesShouldNotBreakCompleteBulkRequest() {
+ int bulkEntryCount = randomIntBetween(10, 50);
+ BulkRequestBuilder builder = client().prepareBulk();
+ boolean[] expectedFailures = new boolean[bulkEntryCount];
+ ArrayList<String> badIndexNames = new ArrayList<>();
+ for (int i = randomIntBetween(1, 5); i > 0; i--) {
+ badIndexNames.add("INVALID.NAME" + i);
+ }
+ boolean expectFailure = false;
+ for (int i = 0; i < bulkEntryCount; i++) {
+ // record per-entry expectation and whether any failure is expected at all
+ expectFailure |= expectedFailures[i] = randomBoolean();
+ String name;
+ if (expectedFailures[i]) {
+ name = randomFrom(badIndexNames);
+ } else {
+ name = "test";
+ }
+ builder.add(client().prepareIndex().setIndex(name).setType("type1").setId("1").setSource("field", 1));
+ }
+ BulkResponse bulkResponse = builder.get();
+ assertThat(bulkResponse.hasFailures(), is(expectFailure));
+ assertThat(bulkResponse.getItems().length, is(bulkEntryCount));
+ for (int i = 0; i < bulkEntryCount; i++) {
+ assertThat(bulkResponse.getItems()[i].isFailed(), is(expectedFailures[i]));
+ }
+ }
+
+ // Each bulk item must report the op type of its request (index/update/
+ // delete) even when one of the updates fails (doc 4 has an invalid month).
+ @Test // issue 6630
+ public void testThatFailedUpdateRequestReturnsCorrectType() throws Exception {
+ BulkResponse indexBulkItemResponse = client().prepareBulk()
+ .add(new IndexRequest("test", "type", "3").source("{ \"title\" : \"Great Title of doc 3\" }"))
+ .add(new IndexRequest("test", "type", "4").source("{ \"title\" : \"Great Title of doc 4\" }"))
+ .add(new IndexRequest("test", "type", "5").source("{ \"title\" : \"Great Title of doc 5\" }"))
+ .add(new IndexRequest("test", "type", "6").source("{ \"title\" : \"Great Title of doc 6\" }"))
+ .setRefresh(true)
+ .get();
+ assertNoFailures(indexBulkItemResponse);
+
+ BulkResponse bulkItemResponse = client().prepareBulk()
+ .add(new IndexRequest("test", "type", "1").source("{ \"title\" : \"Great Title of doc 1\" }"))
+ .add(new IndexRequest("test", "type", "2").source("{ \"title\" : \"Great Title of doc 2\" }"))
+ .add(new UpdateRequest("test", "type", "3").doc("{ \"date\" : \"2014-01-30T23:59:57\"}"))
+ .add(new UpdateRequest("test", "type", "4").doc("{ \"date\" : \"2014-13-30T23:59:57\"}"))
+ .add(new DeleteRequest("test", "type", "5"))
+ .add(new DeleteRequest("test", "type", "6"))
+ .get();
+
+ // NOTE(review): duplicates the assertion above on indexBulkItemResponse;
+ // likely copy-paste — bulkItemResponse intentionally contains a failure,
+ // so it must NOT be changed to assertNoFailures(bulkItemResponse).
+ assertNoFailures(indexBulkItemResponse);
+ assertThat(bulkItemResponse.getItems().length, is(6));
+ assertThat(bulkItemResponse.getItems()[0].getOpType(), is("index"));
+ assertThat(bulkItemResponse.getItems()[1].getOpType(), is("index"));
+ assertThat(bulkItemResponse.getItems()[2].getOpType(), is("update"));
+ assertThat(bulkItemResponse.getItems()[3].getOpType(), is("update"));
+ assertThat(bulkItemResponse.getItems()[4].getOpType(), is("delete"));
+ assertThat(bulkItemResponse.getItems()[5].getOpType(), is("delete"));
+ }
+
+
+ // Randomly target the concrete index or its alias to widen test coverage.
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+ // Re-running a bulk after closing one of its target indices must report
+ // per-item failures for that index without aborting the whole request.
+ @Test // issue 6410
+ public void testThatMissingIndexDoesNotAbortFullBulkRequest() throws Exception{
+ createIndex("bulkindex1", "bulkindex2");
+ ensureYellow();
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(new IndexRequest("bulkindex1", "index1_type", "1").source("text", "hallo1"))
+ .add(new IndexRequest("bulkindex2", "index2_type", "1").source("text", "hallo2"))
+ .add(new IndexRequest("bulkindex2", "index2_type").source("text", "hallo2"))
+ .add(new UpdateRequest("bulkindex2", "index2_type", "2").doc("foo", "bar"))
+ .add(new DeleteRequest("bulkindex2", "index2_type", "3"))
+ .refresh(true);
+
+ // first pass: both indices open, the three index ops are searchable
+ client().bulk(bulkRequest).get();
+ SearchResponse searchResponse = client().prepareSearch("bulkindex*").get();
+ assertHitCount(searchResponse, 3);
+
+ assertAcked(client().admin().indices().prepareClose("bulkindex2"));
+
+ // second pass: bulkindex2 is closed -> its items fail, request completes
+ BulkResponse bulkResponse = client().bulk(bulkRequest).get();
+ assertThat(bulkResponse.hasFailures(), is(true));
+ assertThat(bulkResponse.getItems().length, is(5));
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
new file mode 100644
index 0000000000..0b869d72e3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java
@@ -0,0 +1,253 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class DocumentActionsTests extends ElasticsearchIntegrationTest {
+
+ // Creates the test index under the name returned by getConcreteIndexName().
+ protected void createIndex() {
+ createIndex(getConcreteIndexName());
+ }
+
+
+ // Hook for subclasses to run the same tests against a different index name.
+ protected String getConcreteIndexName() {
+ return "test";
+ }
+
+ // End-to-end single-document lifecycle: index, refresh, cache clear,
+ // optimize, get (threaded and not), get by field, delete, re-index two
+ // docs, flush, and count (including a deliberately unparsable query).
+ @Test
+ public void testIndexActions() throws Exception {
+ createIndex();
+ NumShards numShards = getNumShards(getConcreteIndexName());
+ logger.info("Running Cluster Health");
+ ensureGreen();
+ logger.info("Indexing [type1/1]");
+ IndexResponse indexResponse = client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")).setRefresh(true).execute().actionGet();
+ assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(indexResponse.getId(), equalTo("1"));
+ assertThat(indexResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ RefreshResponse refreshResponse = refresh();
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ logger.info("--> index exists?");
+ assertThat(indexExists(getConcreteIndexName()), equalTo(true));
+ logger.info("--> index exists?, fake index");
+ assertThat(indexExists("test1234565"), equalTo(false));
+
+ logger.info("Clearing cache");
+ ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true)).actionGet();
+ assertNoFailures(clearIndicesCacheResponse);
+ assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ logger.info("Optimizing");
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ OptimizeResponse optimizeResponse = optimize();
+ assertThat(optimizeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+ GetResponse getResult;
+
+ // get the doc repeatedly via both the non-threaded builder and the
+ // threaded request API; source must round-trip byte-identically
+ logger.info("Get [type1/1]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ // field-only get: source bytes are absent, stored field is returned
+ logger.info("Get [type1/1] with script");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().prepareGet("test", "type1", "1").setFields("name").execute().actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(getResult.isExists(), equalTo(true));
+ assertThat(getResult.getSourceAsBytes(), nullValue());
+ assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
+ }
+
+ logger.info("Get [type1/2] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Delete [type1/1]");
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet();
+ assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(deleteResponse.getId(), equalTo("1"));
+ assertThat(deleteResponse.getType(), equalTo("type1"));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] (should be empty)");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.isExists(), equalTo(false));
+ }
+
+ logger.info("Index [type1/1]");
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ logger.info("Index [type1/2]");
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet();
+
+ logger.info("Flushing");
+ FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
+ assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+ assertThat(flushResult.getFailedShards(), equalTo(0));
+ logger.info("Refreshing");
+ client().admin().indices().refresh(refreshRequest("test")).actionGet();
+
+ logger.info("Get [type1/1] and [type1/2]");
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ String ste1 = getResult.getSourceAsString();
+ String ste2 = source("2", "test2").string();
+ assertThat("cycle #" + i, ste1, equalTo(ste2));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+
+ logger.info("Count");
+ // check count
+ for (int i = 0; i < 5; i++) {
+ // test successful
+ CountResponse countResponse = client().prepareCount("test").setQuery(termQuery("_type", "type1")).execute().actionGet();
+ assertNoFailures(countResponse);
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+
+ // test failed (simply query that can't be parsed)
+ // NOTE(review): if no exception is thrown this passes silently;
+ // there is no fail() after the call — confirm that is intended.
+ try {
+ client().count(countRequest("test").source("{ term : { _type : \"type1 } }")).actionGet();
+ } catch(SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, equalTo(numShards.numPrimaries));
+ }
+
+ // count with no query is a match all one
+ countResponse = client().prepareCount("test").execute().actionGet();
+ assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
+ assertThat(countResponse.getCount(), equalTo(2l));
+ assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+ assertThat(countResponse.getFailedShards(), equalTo(0));
+ }
+ }
+
+ // Exercises the bulk API end-to-end: one request mixing index, create
+ // (explicit and auto-generated id), delete, and an intentionally malformed
+ // source, then verifies each item's outcome and the resulting documents.
+ @Test
+ public void testBulk() throws Exception {
+ createIndex();
+ NumShards numShards = getNumShards(getConcreteIndexName());
+ logger.info("-> running Cluster Health");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk()
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setId("2").setSource(source("2", "test")).setCreate(true))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource(source("3", "test")))
+ .add(client().prepareDelete().setIndex("test").setType("type1").setId("1"))
+ .add(client().prepareIndex().setIndex("test").setType("type1").setSource("{ xxx }")) // failure
+ .execute().actionGet();
+
+ // Only item [4] (the malformed source) is expected to fail.
+ assertThat(bulkResponse.hasFailures(), equalTo(true));
+ assertThat(bulkResponse.getItems().length, equalTo(5));
+
+ assertThat(bulkResponse.getItems()[0].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[0].getOpType(), equalTo("index"));
+ assertThat(bulkResponse.getItems()[0].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[0].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[0].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[1].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[1].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[1].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[1].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[1].getId(), equalTo("2"));
+
+ // Item [2] had no explicit id; the test expects op type "create" for it
+ // and captures the server-generated id for the read-back below.
+ assertThat(bulkResponse.getItems()[2].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[2].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[2].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[2].getType(), equalTo("type1"));
+ String generatedId3 = bulkResponse.getItems()[2].getId();
+
+ assertThat(bulkResponse.getItems()[3].isFailed(), equalTo(false));
+ assertThat(bulkResponse.getItems()[3].getOpType(), equalTo("delete"));
+ assertThat(bulkResponse.getItems()[3].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[3].getType(), equalTo("type1"));
+ assertThat(bulkResponse.getItems()[3].getId(), equalTo("1"));
+
+ assertThat(bulkResponse.getItems()[4].isFailed(), equalTo(true));
+ assertThat(bulkResponse.getItems()[4].getOpType(), equalTo("create"));
+ assertThat(bulkResponse.getItems()[4].getIndex(), equalTo(getConcreteIndexName()));
+ assertThat(bulkResponse.getItems()[4].getType(), equalTo("type1"));
+
+ // Refresh on every shard so the bulk writes are visible to the gets below.
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+ assertNoFailures(refreshResponse);
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
+
+
+ // Read back several times: "1" was deleted; "2" and the auto-id doc exist.
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ assertThat("cycle #" + i, getResult.isExists(), equalTo(false));
+
+ getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("2", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+
+ getResult = client().get(getRequest("test").type("type1").id(generatedId3)).actionGet();
+ assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("3", "test").string()));
+ assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
+ }
+ }
+
+ // Builds the JSON document body used by these tests:
+ // {"id": <id>, "name": <nameValue>}
+ private XContentBuilder source(String id, String nameValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().field("id", id).field("name", nameValue).endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/document/ShardInfoTests.java b/core/src/test/java/org/elasticsearch/document/ShardInfoTests.java
new file mode 100644
index 0000000000..3c587b9be0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/document/ShardInfoTests.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.document;
+
+import org.elasticsearch.action.ActionWriteResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+// Integration tests for the ShardInfo carried by write responses (index,
+// delete, update, and bulk items): each response must report at least
+// numCopies total shard copies and at least numNodes successful ones.
+public class ShardInfoTests extends ElasticsearchIntegrationTest {
+
+ // Number of shard copies (primary + replicas) configured by prepareIndex().
+ private int numCopies;
+ // Number of data nodes in the cluster when the index was created.
+ private int numNodes;
+
+ @Test
+ public void testIndexAndDelete() throws Exception {
+ prepareIndex(1);
+ IndexResponse indexResponse = client().prepareIndex("idx", "type").setSource("{}").get();
+ assertShardInfo(indexResponse);
+ DeleteResponse deleteResponse = client().prepareDelete("idx", "type", indexResponse.getId()).get();
+ assertShardInfo(deleteResponse);
+ }
+
+ @Test
+ public void testUpdate() throws Exception {
+ prepareIndex(1);
+ UpdateResponse updateResponse = client().prepareUpdate("idx", "type", "1").setDoc("{}").setDocAsUpsert(true).get();
+ assertShardInfo(updateResponse);
+ }
+
+ @Test
+ public void testBulk_withIndexAndDeleteItems() throws Exception {
+ prepareIndex(1);
+ BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
+ for (int i = 0; i < 10; i++) {
+ bulkRequestBuilder.add(client().prepareIndex("idx", "type").setSource("{}"));
+ }
+
+ // First bulk: index 10 docs; collect their generated ids for deletion.
+ BulkResponse bulkResponse = bulkRequestBuilder.get();
+ bulkRequestBuilder = client().prepareBulk();
+ for (BulkItemResponse item : bulkResponse) {
+ assertThat(item.isFailed(), equalTo(false));
+ assertShardInfo(item.getResponse());
+ bulkRequestBuilder.add(client().prepareDelete("idx", "type", item.getId()));
+ }
+
+ // Second bulk: delete them; every item must carry valid shard info too.
+ bulkResponse = bulkRequestBuilder.get();
+ for (BulkItemResponse item : bulkResponse) {
+ assertThat(item.isFailed(), equalTo(false));
+ assertShardInfo(item.getResponse());
+ }
+ }
+
+ @Test
+ public void testBulk_withUpdateItems() throws Exception {
+ prepareIndex(1);
+ BulkRequestBuilder bulkRequestBuilder = client().prepareBulk();
+ for (int i = 0; i < 10; i++) {
+ bulkRequestBuilder.add(client().prepareUpdate("idx", "type", Integer.toString(i)).setDoc("{}").setDocAsUpsert(true));
+ }
+
+ BulkResponse bulkResponse = bulkRequestBuilder.get();
+ for (BulkItemResponse item : bulkResponse) {
+ assertThat(item.isFailed(), equalTo(false));
+ assertShardInfo(item.getResponse());
+ }
+ }
+
+ private void prepareIndex(int numberOfPrimaryShards) throws Exception {
+ prepareIndex(numberOfPrimaryShards, false);
+ }
+
+ // Creates index "idx" with a randomized copy count and waits until the
+ // expected number of shard copies is active.
+ private void prepareIndex(int numberOfPrimaryShards, boolean routingRequired) throws Exception {
+ numNodes = cluster().numDataNodes();
+ logger.info("Number of nodes: {}", numNodes);
+ // numCopies may exceed numNodes (up to 2*numNodes-1); NOTE(review):
+ // presumably excess copies stay unassigned, which is why assertShardInfo
+ // uses greaterThanOrEqualTo rather than exact equality — confirm.
+ int maxNumberOfCopies = (numNodes * 2) - 1;
+ numCopies = randomIntBetween(numNodes, maxNumberOfCopies);
+ logger.info("Number of copies: {}", numCopies);
+
+ assertAcked(prepareCreate("idx").setSettings(
+ Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfPrimaryShards)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numCopies - 1))
+ .addMapping("type", "_routing", "required=" + routingRequired)
+ .get());
+ for (int i = 0; i < numberOfPrimaryShards; i++) {
+ ensureActiveShardCopies(i, numNodes);
+ }
+ }
+
+ private void assertShardInfo(ActionWriteResponse response) {
+ assertShardInfo(response, numCopies, numNodes);
+ }
+
+ private void assertShardInfo(ActionWriteResponse response, int expectedTotal, int expectedSuccessful) {
+ assertThat(response.getShardInfo().getTotal(), greaterThanOrEqualTo(expectedTotal));
+ assertThat(response.getShardInfo().getSuccessful(), greaterThanOrEqualTo(expectedSuccessful));
+ }
+
+ // Busy-waits until the given shard has exactly copyCount active copies,
+ // no shards are relocating, and no recoveries are still running for "idx".
+ private void ensureActiveShardCopies(final int shardId, final int copyCount) throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ assertThat(state.routingTable().index("idx"), not(nullValue()));
+ assertThat(state.routingTable().index("idx").shard(shardId), not(nullValue()));
+ assertThat(state.routingTable().index("idx").shard(shardId).activeShards().size(), equalTo(copyCount));
+
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("idx")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("idx")
+ .setActiveOnly(true)
+ .get();
+ assertThat(recoveryResponse.shardResponses().get("idx").size(), equalTo(0));
+ }
+ });
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java
new file mode 100644
index 0000000000..3eba6c46c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.env;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.net.URL;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.CoreMatchers.nullValue;
+
+/**
+ * Simple unit-tests for Environment.java
+ */
+public class EnvironmentTests extends ElasticsearchTestCase {
+
+ // Environment with default (empty) settings.
+ public Environment newEnvironment() throws IOException {
+ return newEnvironment(Settings.EMPTY);
+ }
+
+ // Environment backed by fresh temp dirs for path.home and path.data so
+ // tests never touch a real installation.
+ public Environment newEnvironment(Settings settings) throws IOException {
+ Settings build = Settings.builder()
+ .put(settings)
+ .put("path.home", createTempDir().toAbsolutePath())
+ .putArray("path.data", tmpPaths()).build();
+ return new Environment(build);
+ }
+
+ // resolveConfig must locate a resource that lives inside a jar on the classpath.
+ @Test
+ public void testResolveJaredResource() throws IOException {
+ Environment environment = newEnvironment();
+ URL url = environment.resolveConfig("META-INF/MANIFEST.MF"); // this works because there is one jar having this file in the classpath
+ assertNotNull(url);
+ try (BufferedReader reader = FileSystemUtils.newBufferedReader(url, Charsets.UTF_8)) {
+ String string = Streams.copyToString(reader);
+ assertTrue(string, string.contains("Manifest-Version"));
+ }
+ }
+
+ // resolveConfig must also locate a plain (non-jar) classpath file resource.
+ @Test
+ public void testResolveFileResource() throws IOException {
+ Environment environment = newEnvironment();
+ URL url = environment.resolveConfig("org/elasticsearch/common/cli/tool.help");
+ assertNotNull(url);
+ try (BufferedReader reader = FileSystemUtils.newBufferedReader(url, Charsets.UTF_8)) {
+ String string = Streams.copyToString(reader);
+ assertEquals(string, "tool help");
+ }
+ }
+
+ // resolveRepoFile: with no path.repo configured everything resolves to null;
+ // once roots are configured, only paths inside a root (after normalizing
+ // ".." segments) resolve to a non-null path.
+ @Test
+ public void testRepositoryResolution() throws IOException {
+ Environment environment = newEnvironment();
+ assertThat(environment.resolveRepoFile("/test/repos/repo1"), nullValue());
+ assertThat(environment.resolveRepoFile("test/repos/repo1"), nullValue());
+ environment = newEnvironment(settingsBuilder().putArray("path.repo", "/test/repos", "/another/repos", "/test/repos/../other").build());
+ assertThat(environment.resolveRepoFile("/test/repos/repo1"), notNullValue());
+ assertThat(environment.resolveRepoFile("test/repos/repo1"), notNullValue());
+ assertThat(environment.resolveRepoFile("/another/repos/repo1"), notNullValue());
+ // escaping a configured root via ".." must not resolve
+ assertThat(environment.resolveRepoFile("/test/repos/../repo1"), nullValue());
+ assertThat(environment.resolveRepoFile("/test/repos/../repos/repo1"), notNullValue());
+ assertThat(environment.resolveRepoFile("/somethingeles/repos/repo1"), nullValue());
+ assertThat(environment.resolveRepoFile("/test/other/repo"), notNullValue());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
new file mode 100644
index 0000000000..1651966ea2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.env;
+
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to allow extras
+// Unit tests for NodeEnvironment: node-level and shard-level locking, safe
+// shard/index directory deletion, and custom data path resolution.
+public class NodeEnvironmentTests extends ElasticsearchTestCase {
+
+ // Single-shard index settings reused by the delete-safe tests.
+ private final Settings idxSettings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).build();
+
+ // With node.max_local_storage_nodes=1, opening a second NodeEnvironment on
+ // the same paths must fail until the first one is closed.
+ @Test
+ public void testNodeLockSingleEnvironment() throws IOException {
+ NodeEnvironment env = newNodeEnvironment(Settings.builder()
+ .put("node.max_local_storage_nodes", 1).build());
+ Settings settings = env.getSettings();
+ String[] dataPaths = env.getSettings().getAsArray("path.data");
+
+ try {
+ new NodeEnvironment(settings, new Environment(settings));
+ fail("env is already locked");
+ } catch (IllegalStateException ex) {
+
+ }
+ env.close();
+
+ // now can recreate and lock it
+ env = new NodeEnvironment(settings, new Environment(settings));
+ assertEquals(env.nodeDataPaths().length, dataPaths.length);
+
+ for (int i = 0; i < dataPaths.length; i++) {
+ assertTrue(env.nodeDataPaths()[i].startsWith(PathUtils.get(dataPaths[i])));
+ }
+ env.close();
+ assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+
+ }
+
+ // By default two NodeEnvironments may coexist on the same data paths; each
+ // ends up with its own node directory under the shared parent.
+ @Test
+ public void testNodeLockMultipleEnvironment() throws IOException {
+ final NodeEnvironment first = newNodeEnvironment();
+ String[] dataPaths = first.getSettings().getAsArray("path.data");
+ NodeEnvironment second = new NodeEnvironment(first.getSettings(), new Environment(first.getSettings()));
+ assertEquals(first.nodeDataPaths().length, dataPaths.length);
+ assertEquals(second.nodeDataPaths().length, dataPaths.length);
+ for (int i = 0; i < dataPaths.length; i++) {
+ assertEquals(first.nodeDataPaths()[i].getParent(), second.nodeDataPaths()[i].getParent());
+ }
+ IOUtils.close(first, second);
+ }
+
+ // Shard locks are exclusive: a held lock blocks both shardLock() and
+ // lockAllForIndex() until it is closed.
+ @Test
+ public void testShardLock() throws IOException {
+ final NodeEnvironment env = newNodeEnvironment();
+
+ ShardLock fooLock = env.shardLock(new ShardId("foo", 0));
+ assertEquals(new ShardId("foo", 0), fooLock.getShardId());
+
+ try {
+ env.shardLock(new ShardId("foo", 0));
+ fail("shard is locked");
+ } catch (LockObtainFailedException ex) {
+ // expected
+ }
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ Files.createDirectories(path.resolve("0"));
+ Files.createDirectories(path.resolve("1"));
+ }
+ Settings settings = settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)).build();
+ try {
+ env.lockAllForIndex(new Index("foo"), settings, randomIntBetween(0, 10));
+ fail("shard 0 is locked");
+ } catch (LockObtainFailedException ex) {
+ // expected
+ }
+
+ fooLock.close();
+ // can lock again?
+ env.shardLock(new ShardId("foo", 0)).close();
+
+ // holding all index locks must in turn block individual shard locks
+ List<ShardLock> locks = env.lockAllForIndex(new Index("foo"), settings, randomIntBetween(0, 10));
+ try {
+ env.shardLock(new ShardId("foo", 0));
+ fail("shard is locked");
+ } catch (LockObtainFailedException ex) {
+ // expected
+ }
+ IOUtils.close(locks);
+ assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+ env.close();
+ }
+
+ // findAllIndices must report every index directory created on disk.
+ @Test
+ public void testGetAllIndices() throws Exception {
+ final NodeEnvironment env = newNodeEnvironment();
+ final int numIndices = randomIntBetween(1, 10);
+ for (int i = 0; i < numIndices; i++) {
+ for (Path path : env.indexPaths(new Index("foo" + i))) {
+ Files.createDirectories(path);
+ }
+ }
+ Set<String> indices = env.findAllIndices();
+ assertEquals(indices.size(), numIndices);
+ for (int i = 0; i < numIndices; i++) {
+ assertTrue(indices.contains("foo" + i));
+ }
+ assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+ env.close();
+ }
+
+ // deleteShardDirectorySafe / deleteIndexDirectorySafe must refuse to delete
+ // while a shard lock is held, and deleteIndexDirectorySafe must succeed
+ // once a concurrently held lock is released (raced via the thread below).
+ @Test
+ public void testDeleteSafe() throws IOException, InterruptedException {
+ final NodeEnvironment env = newNodeEnvironment();
+ ShardLock fooLock = env.shardLock(new ShardId("foo", 0));
+ assertEquals(new ShardId("foo", 0), fooLock.getShardId());
+
+
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ Files.createDirectories(path.resolve("0"));
+ Files.createDirectories(path.resolve("1"));
+ }
+
+ try {
+ env.deleteShardDirectorySafe(new ShardId("foo", 0), idxSettings);
+ fail("shard is locked");
+ } catch (LockObtainFailedException ex) {
+ // expected
+ }
+
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ assertTrue(Files.exists(path.resolve("0")));
+ assertTrue(Files.exists(path.resolve("1")));
+
+ }
+
+ // shard 1 is not locked, so deleting it must succeed
+ env.deleteShardDirectorySafe(new ShardId("foo", 1), idxSettings);
+
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ assertTrue(Files.exists(path.resolve("0")));
+ assertFalse(Files.exists(path.resolve("1")));
+ }
+
+ try {
+ env.deleteIndexDirectorySafe(new Index("foo"), randomIntBetween(0, 10), idxSettings);
+ fail("shard is locked");
+ } catch (LockObtainFailedException ex) {
+ // expected
+ }
+ fooLock.close();
+
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ assertTrue(Files.exists(path));
+ }
+
+ final AtomicReference<Throwable> threadException = new AtomicReference<>();
+ final CountDownLatch latch = new CountDownLatch(1);
+ final CountDownLatch blockLatch = new CountDownLatch(1);
+ final CountDownLatch start = new CountDownLatch(1);
+ // Randomly race the delete against a thread that briefly holds the
+ // shard 0 lock; the delete is given 5000 (presumably ms — confirm) to
+ // obtain the lock, so it must win once the thread releases it.
+ if (randomBoolean()) {
+ Thread t = new Thread(new AbstractRunnable() {
+ @Override
+ public void onFailure(Throwable t) {
+ logger.error("unexpected error", t);
+ threadException.set(t);
+ latch.countDown();
+ blockLatch.countDown();
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ start.await();
+ try (ShardLock _ = env.shardLock(new ShardId("foo", 0))) {
+ blockLatch.countDown();
+ Thread.sleep(randomIntBetween(1, 10));
+ }
+ latch.countDown();
+ }
+ });
+ t.start();
+ } else {
+ latch.countDown();
+ blockLatch.countDown();
+ }
+ start.countDown();
+ blockLatch.await();
+
+ env.deleteIndexDirectorySafe(new Index("foo"), 5000, idxSettings);
+
+ assertNull(threadException.get());
+
+ for (Path path : env.indexPaths(new Index("foo"))) {
+ assertFalse(Files.exists(path));
+ }
+ latch.await();
+ assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+ env.close();
+ }
+
+ // Hammers shardLock from several threads. The unsynchronized Int counters
+ // must end up equal to the atomic ones, and flipFlop must never observe
+ // two concurrent holders — i.e. the lock provides mutual exclusion.
+ @Test
+ public void testStressShardLock() throws IOException, InterruptedException {
+ class Int {
+ int value = 0;
+ }
+ final NodeEnvironment env = newNodeEnvironment();
+ final int shards = randomIntBetween(2, 10);
+ final Int[] counts = new Int[shards];
+ final AtomicInteger[] countsAtomic = new AtomicInteger[shards];
+ final AtomicInteger[] flipFlop = new AtomicInteger[shards];
+
+ for (int i = 0; i < counts.length; i++) {
+ counts[i] = new Int();
+ countsAtomic[i] = new AtomicInteger();
+ flipFlop[i] = new AtomicInteger();
+ }
+
+ Thread[] threads = new Thread[randomIntBetween(2,5)];
+ final CountDownLatch latch = new CountDownLatch(1);
+ final int iters = scaledRandomIntBetween(10000, 100000);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ fail(e.getMessage());
+ }
+ for (int i = 0; i < iters; i++) {
+ int shard = randomIntBetween(0, counts.length-1);
+ try {
+ try (ShardLock _ = env.shardLock(new ShardId("foo", shard), scaledRandomIntBetween(0, 10))) {
+ counts[shard].value++;
+ countsAtomic[shard].incrementAndGet();
+ assertEquals(flipFlop[shard].incrementAndGet(), 1);
+ assertEquals(flipFlop[shard].decrementAndGet(), 0);
+ }
+ } catch (LockObtainFailedException ex) {
+ // ok
+ } catch (IOException ex) {
+ fail(ex.toString());
+ }
+ }
+ }
+ };
+ threads[i].start();
+ }
+ latch.countDown(); // fire the threads up
+ for (int i = 0; i < threads.length; i++) {
+ threads[i].join();
+ }
+
+ assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
+ for (int i = 0; i < counts.length; i++) {
+ assertTrue(counts[i].value > 0);
+ assertEquals(flipFlop[i].get(), 0);
+ assertEquals(counts[i].value, countsAtomic[i].get());
+ }
+ env.close();
+ }
+
+ // Verifies shard/index path resolution with and without a custom
+ // index data path, and the effect of ADD_NODE_ID_TO_CUSTOM_PATH.
+ @Test
+ public void testCustomDataPaths() throws Exception {
+ String[] dataPaths = tmpPaths();
+ NodeEnvironment env = newNodeEnvironment(dataPaths, Settings.EMPTY);
+
+ Settings s1 = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).build();
+ Settings s2 = Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build();
+ ShardId sid = new ShardId("myindex", 0);
+ Index i = new Index("myindex");
+
+ assertFalse("no settings should mean no custom data path", NodeEnvironment.hasCustomDataPath(s1));
+ assertTrue("settings with path_data should have a custom data path", NodeEnvironment.hasCustomDataPath(s2));
+
+ assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid)));
+ assertFalse(NodeEnvironment.hasCustomDataPath(s1));
+ // by default the node id ("0") is inserted into the custom location
+ assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/myindex/0")));
+ assertTrue(NodeEnvironment.hasCustomDataPath(s2));
+
+ assertThat("shard paths with a custom data_path should contain only regular paths",
+ env.availableShardPaths(sid),
+ equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0")));
+
+ assertThat("index paths uses the regular template",
+ env.indexPaths(i), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex")));
+
+ env.close();
+ // with ADD_NODE_ID_TO_CUSTOM_PATH=false the node id is dropped:
+ // /tmp/foo/myindex/0 instead of /tmp/foo/0/myindex/0
+ NodeEnvironment env2 = newNodeEnvironment(dataPaths,
+ Settings.builder().put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH, false).build());
+
+ assertThat(env2.availableShardPaths(sid), equalTo(env2.availableShardPaths(sid)));
+ assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/myindex/0")));
+
+ assertThat("shard paths with a custom data_path should contain only regular paths",
+ env2.availableShardPaths(sid),
+ equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0")));
+
+ assertThat("index paths uses the regular template",
+ env2.indexPaths(i), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex")));
+
+ env2.close();
+ }
+
+ /** Converts an array of Strings to an array of Paths, adding an additional child if specified */
+ private Path[] stringsToPaths(String[] strings, String additional) {
+ Path[] locations = new Path[strings.length];
+ for (int i = 0; i < strings.length; i++) {
+ locations[i] = PathUtils.get(strings[i], additional);
+ }
+ return locations;
+ }
+
+ // Overridden to return 1-3 fresh absolute temp directories per call.
+ @Override
+ public String[] tmpPaths() {
+ final int numPaths = randomIntBetween(1, 3);
+ final String[] absPaths = new String[numPaths];
+ for (int i = 0; i < numPaths; i++) {
+ absPaths[i] = createTempDir().toAbsolutePath().toString();
+ }
+ return absPaths;
+ }
+
+ @Override
+ public NodeEnvironment newNodeEnvironment() throws IOException {
+ return newNodeEnvironment(Settings.EMPTY);
+ }
+
+ // Builds a NodeEnvironment over fresh temp dirs with custom data paths enabled.
+ @Override
+ public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
+ Settings build = Settings.builder()
+ .put(settings)
+ .put("path.home", createTempDir().toAbsolutePath().toString())
+ .put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true)
+ .putArray("path.data", tmpPaths()).build();
+ return new NodeEnvironment(build, new Environment(build));
+ }
+
+ // Same, but over the caller-supplied data paths.
+ public NodeEnvironment newNodeEnvironment(String[] dataPaths, Settings settings) throws IOException {
+ Settings build = Settings.builder()
+ .put(settings)
+ .put("path.home", createTempDir().toAbsolutePath().toString())
+ .put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true)
+ .putArray("path.data", dataPaths).build();
+ return new NodeEnvironment(build, new Environment(build));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/exists/SimpleExistsTests.java b/core/src/test/java/org/elasticsearch/exists/SimpleExistsTests.java
new file mode 100644
index 0000000000..78e50de0f5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/exists/SimpleExistsTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.exists;
+
+import org.elasticsearch.action.exists.ExistsResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
+
+public class SimpleExistsTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testExistsRandomPreference() throws Exception {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+
+ String randomPreference = randomUnicodeOfLengthBetween(0, 4);
+ // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary)
+ while (randomPreference.startsWith("_")) {
+ randomPreference = randomUnicodeOfLengthBetween(0, 4);
+ }
+ // id is not indexed, but lets see that we automatically convert to
+ ExistsResponse existsResponse = client().prepareExists().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomPreference).get();
+ assertExists(existsResponse, true);
+ }
+ }
+
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ createIndex("test");
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
+ ExistsResponse existsResponse = client().prepareExists()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7"))).get();
+
+ assertExists(existsResponse, true);
+
+ existsResponse = client().prepareExists().setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.4")).must(rangeQuery("to").gt("192.168.0.11"))).get();
+
+ assertExists(existsResponse, false);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ createIndex("test");
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+ // id is not indexed, but lets see that we automatically convert to
+ ExistsResponse existsResponse = client().prepareExists().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertExists(existsResponse, true);
+
+ existsResponse = client().prepareExists().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).execute().actionGet();
+ assertExists(existsResponse, true);
+
+ existsResponse = client().prepareExists().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertExists(existsResponse, true);
+
+ existsResponse = client().prepareExists().setQuery(QueryBuilders.queryStringQuery("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertExists(existsResponse, true);
+ }
+
+ @Test
+ public void simpleNonExistenceTests() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", 5).execute().actionGet();
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").execute().actionGet();
+ ensureGreen();
+ refresh();
+ ExistsResponse existsResponse = client().prepareExists("test").setQuery(QueryBuilders.rangeQuery("field").gte(6).lte(8)).execute().actionGet();
+ assertExists(existsResponse, false);
+
+ existsResponse = client().prepareExists("test").setQuery(QueryBuilders.queryStringQuery("_id:XXY*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertExists(existsResponse, false);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/explain/ExplainActionTests.java b/core/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
new file mode 100644
index 0000000000..79e9cef86d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/explain/ExplainActionTests.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.explain;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+public class ExplainActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen("test");
+
+ client().prepareIndex("test", "test", "1").setSource("field", "value1").get();
+
+ ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ assertNotNull(response);
+ assertFalse(response.isExists()); // not a match b/c not realtime
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("1"));
+ assertFalse(response.isMatch()); // not a match b/c not realtime
+
+ refresh();
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.termQuery("field", "value2")).get();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("1"));
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.boolQuery()
+ .must(QueryBuilders.termQuery("field", "value1"))
+ .must(QueryBuilders.termQuery("field", "value2"))).get();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("1"));
+ assertNotNull(response.getExplanation());
+ assertFalse(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getDetails().length, equalTo(2));
+
+ response = client().prepareExplain(indexOrAlias(), "test", "2")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ assertNotNull(response);
+ assertFalse(response.isExists());
+ assertFalse(response.isMatch());
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("2"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWithFields() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureGreen("test");
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()).get();
+
+ refresh();
+ ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1").get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(true));
+
+ refresh();
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1").setFetchSource(true).get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getFields().size(), equalTo(1));
+ assertThat(response.getGetResult().getFields().get("obj1.field1").getValue().toString(), equalTo("value1"));
+ assertThat(response.getGetResult().isSourceEmpty(), equalTo(false));
+
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFields("obj1.field1", "obj1.field2").get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ String v1 = (String) response.getGetResult().field("obj1.field1").getValue();
+ String v2 = (String) response.getGetResult().field("obj1.field2").getValue();
+ assertThat(v1, equalTo("value1"));
+ assertThat(v2, equalTo("value2"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testExplainWitSource() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureGreen("test");
+
+ client().prepareIndex("test", "test", "1")
+ .setSource(
+ jsonBuilder().startObject()
+ .startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .endObject()).get();
+
+ refresh();
+ ExplainResponse response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource("obj1.field1", null).get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertNotNull(response.getExplanation());
+ assertTrue(response.getExplanation().isMatch());
+ assertThat(response.getExplanation().getValue(), equalTo(1.0f));
+ assertThat(response.getGetResult().isExists(), equalTo(true));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getSource().size(), equalTo(1));
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+
+ response = client().prepareExplain(indexOrAlias(), "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setFetchSource(null, "obj1.field2").get();
+ assertNotNull(response);
+ assertTrue(response.isMatch());
+ assertThat(((Map<String, Object>) response.getGetResult().getSource().get("obj1")).get("field1").toString(), equalTo("value1"));
+ }
+
+ @Test
+ public void testExplainWithFilteredAlias() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "field2", "type=string")
+ .addAlias(new Alias("alias1").filter(QueryBuilders.termQuery("field2", "value2"))));
+ ensureGreen("test");
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").get();
+ refresh();
+
+ ExplainResponse response = client().prepareExplain("alias1", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ }
+
+ @Test
+ public void testExplainWithFilteredAliasFetchSource() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("test", "field2", "type=string")
+ .addAlias(new Alias("alias1").filter(QueryBuilders.termQuery("field2", "value2"))));
+ ensureGreen("test");
+
+ client().prepareIndex("test", "test", "1").setSource("field1", "value1", "field2", "value1").get();
+ refresh();
+
+ ExplainResponse response = client().prepareExplain("alias1", "test", "1")
+ .setQuery(QueryBuilders.matchAllQuery()).setFetchSource(true).get();
+
+ assertNotNull(response);
+ assertTrue(response.isExists());
+ assertFalse(response.isMatch());
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getType(), equalTo("test"));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getGetResult(), notNullValue());
+ assertThat(response.getGetResult().getIndex(), equalTo("test"));
+ assertThat(response.getGetResult().getType(), equalTo("test"));
+ assertThat(response.getGetResult().getId(), equalTo("1"));
+ assertThat(response.getGetResult().getSource(), notNullValue());
+ assertThat((String)response.getGetResult().getSource().get("field1"), equalTo("value1"));
+ }
+
+ @Test
+ public void explainDateRangeInQueryString() {
+ createIndex("test");
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "type", "1").setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+
+ @Test
+ public void streamExplainTest() throws Exception {
+
+ Explanation exp = Explanation.match(2f, "some explanation");
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ Lucene.writeExplanation(out, exp);
+
+ // read
+ ByteArrayInputStream esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput esBuffer = new InputStreamStreamInput(esInBuffer);
+
+ Explanation result = Lucene.readExplanation(esBuffer);
+ assertThat(exp.toString(),equalTo(result.toString()));
+
+ exp = Explanation.match(2.0f, "some explanation", Explanation.match(2.0f,"another explanation"));
+
+ // write complex
+ outBuffer = new ByteArrayOutputStream();
+ out = new OutputStreamStreamOutput(outBuffer);
+ Lucene.writeExplanation(out, exp);
+
+ // read complex
+ esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ esBuffer = new InputStreamStreamInput(esInBuffer);
+
+ result = Lucene.readExplanation(esBuffer);
+ assertThat(exp.toString(),equalTo(result.toString()));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java
new file mode 100644
index 0000000000..97231bec44
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.fieldstats;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.fieldstats.FieldStats;
+import org.elasticsearch.action.fieldstats.FieldStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ public void testRandom() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "test", "string", "type=string", "date", "type=date", "double", "type=double", "double", "type=double",
+ "float", "type=float", "long", "type=long", "integer", "type=integer", "short", "type=short", "byte", "type=byte"
+ ));
+ ensureGreen("test");
+
+ byte minByte = Byte.MAX_VALUE;
+ byte maxByte = Byte.MIN_VALUE;
+ short minShort = Short.MAX_VALUE;
+ short maxShort = Short.MIN_VALUE;
+ int minInt = Integer.MAX_VALUE;
+ int maxInt = Integer.MIN_VALUE;
+ long minLong = Long.MAX_VALUE;
+ long maxLong = Long.MIN_VALUE;
+ float minFloat = Float.MAX_VALUE;
+ float maxFloat = Float.MIN_VALUE;
+ double minDouble = Double.MAX_VALUE;
+ double maxDouble = Double.MIN_VALUE;
+ String minString = new String(Character.toChars(1114111));
+ String maxString = "0";
+
+ int numDocs = scaledRandomIntBetween(128, 1024);
+ List<IndexRequestBuilder> request = new ArrayList<>(numDocs);
+ for (int doc = 0; doc < numDocs; doc++) {
+ byte b = randomByte();
+ minByte = (byte) Math.min(minByte, b);
+ maxByte = (byte) Math.max(maxByte, b);
+ short s = randomShort();
+ minShort = (short) Math.min(minShort, s);
+ maxShort = (short) Math.max(maxShort, s);
+ int i = randomInt();
+ minInt = Math.min(minInt, i);
+ maxInt = Math.max(maxInt, i);
+ long l = randomLong();
+ minLong = Math.min(minLong, l);
+ maxLong = Math.max(maxLong, l);
+ float f = randomFloat();
+ minFloat = Math.min(minFloat, f);
+ maxFloat = Math.max(maxFloat, f);
+ double d = randomDouble();
+ minDouble = Math.min(minDouble, d);
+ maxDouble = Math.max(maxDouble, d);
+ String str = randomRealisticUnicodeOfLength(3);
+ if (str.compareTo(minString) < 0) {
+ minString = str;
+ }
+ if (str.compareTo(maxString) > 0) {
+ maxString = str;
+ }
+
+ request.add(client().prepareIndex("test", "test", Integer.toString(doc))
+ .setSource("byte", b, "short", s, "integer", i, "long", l, "float", f, "double", d, "string", str)
+ );
+ }
+ indexRandom(true, false, request);
+
+ FieldStatsResponse response = client().prepareFieldStats().setFields("byte", "short", "integer", "long", "float", "double", "string").get();
+ assertAllSuccessful(response);
+
+ for (FieldStats stats : response.getAllFieldStats().values()) {
+ assertThat(stats.getMaxDoc(), equalTo((long) numDocs));
+ assertThat(stats.getDocCount(), equalTo((long) numDocs));
+ assertThat(stats.getDensity(), equalTo(100));
+ }
+
+ assertThat(response.getAllFieldStats().get("byte").getMinValue(), equalTo(Byte.toString(minByte)));
+ assertThat(response.getAllFieldStats().get("byte").getMaxValue(), equalTo(Byte.toString(maxByte)));
+ assertThat(response.getAllFieldStats().get("short").getMinValue(), equalTo(Short.toString(minShort)));
+ assertThat(response.getAllFieldStats().get("short").getMaxValue(), equalTo(Short.toString(maxShort)));
+ assertThat(response.getAllFieldStats().get("integer").getMinValue(), equalTo(Integer.toString(minInt)));
+ assertThat(response.getAllFieldStats().get("integer").getMaxValue(), equalTo(Integer.toString(maxInt)));
+ assertThat(response.getAllFieldStats().get("long").getMinValue(), equalTo(Long.toString(minLong)));
+ assertThat(response.getAllFieldStats().get("long").getMaxValue(), equalTo(Long.toString(maxLong)));
+ assertThat(response.getAllFieldStats().get("float").getMinValue(), equalTo(Float.toString(minFloat)));
+ assertThat(response.getAllFieldStats().get("float").getMaxValue(), equalTo(Float.toString(maxFloat)));
+ assertThat(response.getAllFieldStats().get("double").getMinValue(), equalTo(Double.toString(minDouble)));
+ assertThat(response.getAllFieldStats().get("double").getMaxValue(), equalTo(Double.toString(maxDouble)));
+ }
+
+ public void testFieldStatsIndexLevel() throws Exception {
+ assertAcked(prepareCreate("test1").addMapping(
+ "test", "value", "type=long"
+ ));
+ assertAcked(prepareCreate("test2").addMapping(
+ "test", "value", "type=long"
+ ));
+ assertAcked(prepareCreate("test3").addMapping(
+ "test", "value", "type=long"
+ ));
+ ensureGreen("test1", "test2", "test3");
+
+ indexRange("test1", -10, 100);
+ indexRange("test2", 101, 200);
+ indexRange("test3", 201, 300);
+
+ // default:
+ FieldStatsResponse response = client().prepareFieldStats().setFields("value").get();
+ assertAllSuccessful(response);
+ assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10)));
+ assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300)));
+ assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
+ assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10)));
+ assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300)));
+
+ // Level: cluster
+ response = client().prepareFieldStats().setFields("value").setLevel("cluster").get();
+ assertAllSuccessful(response);
+ assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10)));
+ assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300)));
+ assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
+ assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10)));
+ assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300)));
+
+ // Level: indices
+ response = client().prepareFieldStats().setFields("value").setLevel("indices").get();
+ assertAllSuccessful(response);
+ assertThat(response.getAllFieldStats(), nullValue());
+ assertThat(response.getIndicesMergedFieldStats().size(), equalTo(3));
+ assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(-10)));
+ assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(100)));
+ assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(Long.toString(101)));
+ assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(Long.toString(200)));
+ assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(Long.toString(201)));
+ assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(Long.toString(300)));
+
+ // Illegal level option:
+ try {
+ client().prepareFieldStats().setFields("value").setLevel("illegal").get();
+ fail();
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.getMessage(), equalTo("Validation Failed: 1: invalid level option [illegal];"));
+ }
+ }
+
+ public void testIncompatibleFieldTypes() {
+ assertAcked(prepareCreate("test1").addMapping(
+ "test", "value", "type=long"
+ ));
+ assertAcked(prepareCreate("test2").addMapping(
+ "test", "value", "type=string"
+ ));
+ ensureGreen("test1", "test2");
+
+ client().prepareIndex("test1", "test").setSource("value", 1l).get();
+ client().prepareIndex("test1", "test").setSource("value", 2l).get();
+ client().prepareIndex("test2", "test").setSource("value", "a").get();
+ client().prepareIndex("test2", "test").setSource("value", "b").get();
+ refresh();
+
+ try {
+ client().prepareFieldStats().setFields("value").get();
+ fail();
+ } catch (IllegalStateException e){
+ assertThat(e.getMessage(), containsString("trying to merge the field stats of field [value]"));
+ }
+
+ FieldStatsResponse response = client().prepareFieldStats().setFields("value").setLevel("indices").get();
+ assertAllSuccessful(response);
+ assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
+ assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(1)));
+ assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(2)));
+ assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo("a"));
+ assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo("b"));
+ }
+
+ private void indexRange(String index, long from, long to) throws ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ for (long value = from; value <= to; value++) {
+ requests.add(client().prepareIndex(index, "test").setSource("value", value));
+ }
+ indexRandom(true, false, requests);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
new file mode 100644
index 0000000000..7b6a51982c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.fieldstats;
+
+import org.elasticsearch.action.fieldstats.FieldStats;
+import org.elasticsearch.action.fieldstats.FieldStatsResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Single-node integration tests for the field stats API.
+ *
+ * Covers min/max/docCount/density reporting for the numeric field types
+ * (byte, short, integer, long), strings, floats and doubles, the
+ * append/merge behaviour of {@link FieldStats.Long}, and shard-failure
+ * reporting when a requested field is missing from an index.
+ */
+public class FieldStatsTests extends ElasticsearchSingleNodeTest {
+
+ public void testByte() {
+ testNumberRange("field1", "byte", 12, 18);
+ testNumberRange("field1", "byte", -5, 5);
+ testNumberRange("field1", "byte", -18, -12);
+ }
+
+ public void testShort() {
+ testNumberRange("field1", "short", 256, 266);
+ testNumberRange("field1", "short", -5, 5);
+ testNumberRange("field1", "short", -266, -256);
+ }
+
+ public void testInteger() {
+ testNumberRange("field1", "integer", 56880, 56890);
+ testNumberRange("field1", "integer", -5, 5);
+ testNumberRange("field1", "integer", -56890, -56880);
+ }
+
+ public void testLong() {
+ testNumberRange("field1", "long", 312321312312412l, 312321312312422l);
+ testNumberRange("field1", "long", -5, 5);
+ testNumberRange("field1", "long", -312321312312422l, -312321312312412l);
+ }
+
+ public void testString() {
+ createIndex("test", Settings.EMPTY, "field", "value", "type=string");
+ // zero-pad the values so lexicographic term order matches numeric order,
+ // making "000" the min term and "010" the max term
+ for (int value = 0; value <= 10; value++) {
+ client().prepareIndex("test", "test").setSource("field", String.format(Locale.ENGLISH, "%03d", value)).get();
+ }
+ client().admin().indices().prepareRefresh().get();
+
+ FieldStatsResponse result = client().prepareFieldStats().setFields("field").get();
+ assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100));
+ assertThat(result.getAllFieldStats().get("field").getMinValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 0)));
+ assertThat(result.getAllFieldStats().get("field").getMaxValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 10)));
+ }
+
+ public void testDouble() {
+ String fieldName = "field";
+ createIndex("test", Settings.EMPTY, fieldName, "value", "type=double");
+ // 11 values: -1.0, 0.0, ..., 9.0
+ for (double value = -1; value <= 9; value++) {
+ client().prepareIndex("test", "test").setSource(fieldName, value).get();
+ }
+ client().admin().indices().prepareRefresh().get();
+
+ FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
+ assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Double.toString(-1)));
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Double.toString(9)));
+ }
+
+ public void testFloat() {
+ String fieldName = "field";
+ createIndex("test", Settings.EMPTY, fieldName, "value", "type=float");
+ // 11 values: -1.0f, 0.0f, ..., 9.0f
+ for (float value = -1; value <= 9; value++) {
+ client().prepareIndex("test", "test").setSource(fieldName, value).get();
+ }
+ client().admin().indices().prepareRefresh().get();
+
+ FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
+ assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
+ assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Float.toString(-1)));
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Float.toString(9)));
+ }
+
+ // Indexes one doc per long value in [min, max] into a fresh "test" index of
+ // the given numeric field type, asserts the reported stats, then deletes the
+ // index so the next invocation starts from a clean state.
+ private void testNumberRange(String fieldName, String fieldType, long min, long max) {
+ createIndex("test", Settings.EMPTY, fieldName, "value", "type=" + fieldType);
+ for (long value = min; value <= max; value++) {
+ client().prepareIndex("test", "test").setSource(fieldName, value).get();
+ }
+ client().admin().indices().prepareRefresh().get();
+
+ FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
+ long numDocs = max - min + 1;
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(numDocs));
+ assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(numDocs));
+ assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
+ assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(java.lang.Long.toString(min)));
+ assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(java.lang.Long.toString(max)));
+ client().admin().indices().prepareDelete("test").get();
+ }
+
+ // Appending three single-doc stats onto a fourth sums each counter to 4.
+ public void testMerge() {
+ List<FieldStats> stats = new ArrayList<>();
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+
+ FieldStats stat = new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l);
+ for (FieldStats otherStat : stats) {
+ stat.append(otherStat);
+ }
+ assertThat(stat.getMaxDoc(), equalTo(4l));
+ assertThat(stat.getDocCount(), equalTo(4l));
+ assertThat(stat.getSumDocFreq(), equalTo(4l));
+ assertThat(stat.getSumTotalTermFreq(), equalTo(4l));
+ }
+
+ // A value of -1 marks a statistic as "not available". Appending must keep
+ // the merged statistic at -1 regardless of the other operands, whether the
+ // -1 originates in the base stat or in one of the appended stats.
+ public void testMerge_notAvailable() {
+ List<FieldStats> stats = new ArrayList<>();
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+ stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+
+ FieldStats stat = new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l);
+ for (FieldStats otherStat : stats) {
+ stat.append(otherStat);
+ }
+ assertThat(stat.getMaxDoc(), equalTo(4l));
+ assertThat(stat.getDocCount(), equalTo(-1l));
+ assertThat(stat.getSumDocFreq(), equalTo(-1l));
+ assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
+
+ stats.add(new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l));
+ stat = stats.remove(0);
+ for (FieldStats otherStat : stats) {
+ stat.append(otherStat);
+ }
+ assertThat(stat.getMaxDoc(), equalTo(4l));
+ assertThat(stat.getDocCount(), equalTo(-1l));
+ assertThat(stat.getSumDocFreq(), equalTo(-1l));
+ assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
+ }
+
+ // Each index defines only one of the two requested fields, so every shard
+ // fails for the field it does not carry.
+ public void testInvalidField() {
+ createIndex("test1", Settings.EMPTY, "field1", "value", "type=string");
+ client().prepareIndex("test1", "test").setSource("field1", "a").get();
+ client().prepareIndex("test1", "test").setSource("field1", "b").get();
+
+ createIndex("test2", Settings.EMPTY, "field2", "value", "type=string");
+ client().prepareIndex("test2", "test").setSource("field2", "a").get();
+ client().prepareIndex("test2", "test").setSource("field2", "b").get();
+ client().admin().indices().prepareRefresh().get();
+
+ FieldStatsResponse result = client().prepareFieldStats().setFields("field1", "field2").get();
+ assertThat(result.getFailedShards(), equalTo(2));
+ assertThat(result.getTotalShards(), equalTo(2));
+ assertThat(result.getSuccessfulShards(), equalTo(0));
+ assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist")));
+ assertThat(result.getIndicesMergedFieldStats().size(), equalTo(0));
+
+ // will only succeed on the 'test1' shard, because that is where [field1] exists
+ result = client().prepareFieldStats().setFields("field1").get();
+ assertThat(result.getFailedShards(), equalTo(1));
+ assertThat(result.getTotalShards(), equalTo(2));
+ assertThat(result.getSuccessfulShards(), equalTo(1));
+ assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist")));
+ assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMinValue(), equalTo("a"));
+ assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMaxValue(), equalTo("b"));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java
new file mode 100644
index 0000000000..ece5757355
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.support.nodes.BaseNodeResponse;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ * Unit tests for AsyncShardFetch, driven by the {@link TestFetch} subclass
+ * below: each node's response or failure is registered as a simulation and
+ * released through latches, so the tests can step deterministically through
+ * the asynchronous fetch lifecycle (in-flight, responded, failed, closed).
+ */
+public class AsyncShardFetchTests extends ElasticsearchTestCase {
+
+ private final DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT);
+ private final Response response1 = new Response(node1);
+ private final Throwable failure1 = new Throwable("simulated failure 1");
+ private final DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT);
+ private final Response response2 = new Response(node2);
+ private final Throwable failure2 = new Throwable("simulate failure 2");
+
+ private ThreadPool threadPool;
+ private TestFetch test;
+
+ // NOTE(review): overrides the base-class setUp — consider adding @Override.
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ this.threadPool = new ThreadPool(getTestName());
+ this.test = new TestFetch(threadPool);
+ }
+
+ @After
+ public void terminate() throws Exception {
+ terminate(threadPool);
+ }
+
+ // After close(), any further fetchData call must be rejected.
+ @Test
+ public void testClose() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build();
+ test.addSimulation(node1.getId(), response1);
+
+ // first fetch, no data, still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire a response, wait on reroute incrementing
+ test.fireSimulationAndWait(node1.getId());
+ // verify we get back the data node
+ assertThat(test.reroute.get(), equalTo(1));
+ test.close();
+ try {
+ test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ fail("fetch data should fail when closed");
+ } catch (IllegalStateException e) {
+ // all is well
+ }
+ }
+
+
+ @Test
+ public void testFullCircleSingleNodeSuccess() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build();
+ test.addSimulation(node1.getId(), response1);
+
+ // first fetch, no data, still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire a response, wait on reroute incrementing
+ test.fireSimulationAndWait(node1.getId());
+ // verify we get back the data node
+ assertThat(test.reroute.get(), equalTo(1));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(1));
+ assertThat(fetchData.getData().get(node1), sameInstance(response1));
+ }
+
+ @Test
+ public void testFullCircleSingleNodeFailure() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build();
+ // add a failed response for node1
+ test.addSimulation(node1.getId(), failure1);
+
+ // first fetch, no data, still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire a response, wait on reroute incrementing
+ test.fireSimulationAndWait(node1.getId());
+ // failure, fetched data exists, but has no data
+ assertThat(test.reroute.get(), equalTo(1));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(0));
+
+ // on failure, we reset the failure on a successive call to fetchData, and try again afterwards
+ test.addSimulation(node1.getId(), response1);
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+
+ test.fireSimulationAndWait(node1.getId());
+ // two more reroutes (3 in total), because clearing the failure triggers
+ // one in addition to the one fired for the successful response
+ assertThat(test.reroute.get(), equalTo(3));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(1));
+ assertThat(fetchData.getData().get(node1), sameInstance(response1));
+ }
+
+ @Test
+ public void testTwoNodesOnSetup() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build();
+ test.addSimulation(node1.getId(), response1);
+ test.addSimulation(node2.getId(), response2);
+
+ // no fetched data, 2 requests still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire the first response, it should trigger a reroute
+ test.fireSimulationAndWait(node1.getId());
+ // there is still another on going request, so no data
+ assertThat(test.getNumberOfInFlightFetches(), equalTo(1));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+
+ // fire the second simulation, this should allow us to get the data
+ test.fireSimulationAndWait(node2.getId());
+ // no more ongoing requests, we should fetch the data
+ assertThat(test.reroute.get(), equalTo(2));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(2));
+ assertThat(fetchData.getData().get(node1), sameInstance(response1));
+ assertThat(fetchData.getData().get(node2), sameInstance(response2));
+ }
+
+ @Test
+ public void testTwoNodesOnSetupAndFailure() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build();
+ test.addSimulation(node1.getId(), response1);
+ test.addSimulation(node2.getId(), failure2);
+
+ // no fetched data, 2 requests still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire the first response, it should trigger a reroute
+ test.fireSimulationAndWait(node1.getId());
+ assertThat(test.reroute.get(), equalTo(1));
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+
+ // fire the second simulation, this should allow us to get the data
+ test.fireSimulationAndWait(node2.getId());
+ assertThat(test.reroute.get(), equalTo(2));
+ // since one of those failed, we should only have one entry
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(1));
+ assertThat(fetchData.getData().get(node1), sameInstance(response1));
+ }
+
+ @Test
+ public void testTwoNodesAddedInBetween() throws Exception {
+ DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build();
+ test.addSimulation(node1.getId(), response1);
+
+ // no fetched data, 1 request still on going
+ AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+ assertThat(test.reroute.get(), equalTo(0));
+
+ // fire the first response, it should trigger a reroute
+ test.fireSimulationAndWait(node1.getId());
+
+ // now, add a second node to the nodes, it should add it to the ongoing requests
+ nodes = DiscoveryNodes.builder(nodes).put(node2).build();
+ test.addSimulation(node2.getId(), response2);
+ // no fetched data yet: a new node was introduced, so its request is still in flight
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(false));
+
+ // fire the second simulation, this should allow us to get the data
+ test.fireSimulationAndWait(node2.getId());
+
+ // both nodes have responded, so we should get entries for both of them
+ fetchData = test.fetchData(nodes, MetaData.EMPTY_META_DATA, ImmutableSet.<String>of());
+ assertThat(fetchData.hasData(), equalTo(true));
+ assertThat(fetchData.getData().size(), equalTo(2));
+ assertThat(fetchData.getData().get(node1), sameInstance(response1));
+ assertThat(fetchData.getData().get(node2), sameInstance(response2));
+ }
+
+ // AsyncShardFetch implementation driven by the test: each node id maps to an
+ // Entry whose executeLatch gates when the simulated response/failure is
+ // delivered, and whose waitLatch signals that processing has completed.
+ static class TestFetch extends AsyncShardFetch<Response> {
+
+ static class Entry {
+ public final Response response;
+ // exactly one of response/failure is non-null per simulation
+ public final Throwable failure;
+ private final CountDownLatch executeLatch = new CountDownLatch(1);
+ private final CountDownLatch waitLatch = new CountDownLatch(1);
+
+ public Entry(Response response, Throwable failure) {
+ this.response = response;
+ this.failure = failure;
+ }
+ }
+
+ private final ThreadPool threadPool;
+ private final Map<String, Entry> simulations = new ConcurrentHashMap<>();
+ // counts reroute callbacks so tests can assert how often a fetch result
+ // (success or failure) triggered a reroute
+ private AtomicInteger reroute = new AtomicInteger();
+
+ public TestFetch(ThreadPool threadPool) {
+ super(Loggers.getLogger(TestFetch.class), "test", new ShardId("test", 1), null);
+ this.threadPool = threadPool;
+ }
+
+ public void addSimulation(String nodeId, Response response) {
+ simulations.put(nodeId, new Entry(response, null));
+ }
+
+ public void addSimulation(String nodeId, Throwable t) {
+ simulations.put(nodeId, new Entry(null, t));
+ }
+
+ // Releases the simulated fetch for nodeId and blocks until it has been
+ // fully processed (including the reroute callback).
+ public void fireSimulationAndWait(String nodeId) throws InterruptedException {
+ simulations.get(nodeId).executeLatch.countDown();
+ simulations.get(nodeId).waitLatch.await();
+ simulations.remove(nodeId);
+ }
+
+ @Override
+ protected void reroute(ShardId shardId, String reason) {
+ reroute.incrementAndGet();
+ }
+
+ @Override
+ protected void asyncFetch(final ShardId shardId, String[] nodesIds, MetaData metaData) {
+ for (final String nodeId : nodesIds) {
+ threadPool.generic().execute(new Runnable() {
+ @Override
+ public void run() {
+ Entry entry = null;
+ try {
+ entry = simulations.get(nodeId);
+ if (entry == null) {
+ // we are simulating a master node switch, wait for it to not be null
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return simulations.containsKey(nodeId);
+ }
+ });
+ }
+ // NOTE(review): entry is not re-read from the map after
+ // awaitBusy, so it stays null on that path and the assert
+ // below would trip — verify this branch is intended.
+ assert entry != null;
+ entry.executeLatch.await();
+ if (entry.failure != null) {
+ processAsyncFetch(shardId, null, new FailedNodeException[]{new FailedNodeException(nodeId, "unexpected", entry.failure)});
+ } else {
+ processAsyncFetch(shardId, new Response[]{entry.response}, null);
+ }
+ } catch (Throwable e) {
+ logger.error("unexpected failure", e);
+ } finally {
+ if (entry != null) {
+ entry.waitLatch.countDown();
+ }
+ }
+ }
+ });
+ }
+ }
+ }
+
+
+ // Minimal node-level response carrying no payload beyond the origin node.
+ static class Response extends BaseNodeResponse {
+
+ public Response(DiscoveryNode node) {
+ super(node);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
new file mode 100644
index 0000000000..1983094dcc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/DanglingIndicesStateTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * Tests for {@link DanglingIndicesState}: detection of on-disk index state
+ * that is missing from the cluster metadata ("dangling" indices), cleanup of
+ * dangled indices once they are allocated, and detection under a renamed
+ * index directory.
+ */
+public class DanglingIndicesStateTests extends ElasticsearchTestCase {
+
+ // minimal 1-shard / 0-replica settings used for every dangled test index
+ private static Settings indexSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .build();
+
+ // With no on-disk state, detection and cleanup must both be no-ops.
+ @Test
+ public void testCleanupWhenEmpty() throws Exception {
+ try (NodeEnvironment env = newNodeEnvironment()) {
+ MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+ DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);
+
+ assertTrue(danglingState.getDanglingIndices().isEmpty());
+ MetaData metaData = MetaData.builder().build();
+ danglingState.cleanupAllocatedDangledIndices(metaData);
+ assertTrue(danglingState.getDanglingIndices().isEmpty());
+ }
+ }
+
+ @Test
+ public void testDanglingProcessing() throws Exception {
+ try (NodeEnvironment env = newNodeEnvironment()) {
+ MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+ DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);
+
+ MetaData metaData = MetaData.builder().build();
+
+ // write index state to disk without registering it in the metadata,
+ // making "test1" a dangling index
+ IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build();
+ metaStateService.writeIndex("test_write", dangledIndex, null);
+
+ // check that several runs when not in the metadata still keep the dangled index around
+ int numberOfChecks = randomIntBetween(1, 10);
+ for (int i = 0; i < numberOfChecks; i++) {
+ Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
+ assertThat(newDanglingIndices.size(), equalTo(1));
+ assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1"));
+ // findNewDanglingIndices only reports; it must not mutate the tracked set
+ assertTrue(danglingState.getDanglingIndices().isEmpty());
+ }
+
+ for (int i = 0; i < numberOfChecks; i++) {
+ danglingState.findNewAndAddDanglingIndices(metaData);
+
+ assertThat(danglingState.getDanglingIndices().size(), equalTo(1));
+ assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1"));
+ }
+
+ // simulate allocation to the metadata
+ metaData = MetaData.builder(metaData).put(dangledIndex, true).build();
+
+ // check that several runs when in the metadata, but not cleaned yet, still keeps dangled
+ for (int i = 0; i < numberOfChecks; i++) {
+ Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
+ assertTrue(newDanglingIndices.isEmpty());
+
+ assertThat(danglingState.getDanglingIndices().size(), equalTo(1));
+ assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1"));
+ }
+
+ danglingState.cleanupAllocatedDangledIndices(metaData);
+ assertTrue(danglingState.getDanglingIndices().isEmpty());
+ }
+ }
+
+ // Detection keys off the on-disk directory name, so a renamed index
+ // directory must be reported under its new name.
+ @Test
+ public void testRenameOfIndexState() throws Exception {
+ try (NodeEnvironment env = newNodeEnvironment()) {
+ MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
+ DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);
+
+ MetaData metaData = MetaData.builder().build();
+
+ IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build();
+ metaStateService.writeIndex("test_write", dangledIndex, null);
+
+ for (Path path : env.indexPaths(new Index("test1"))) {
+ Files.move(path, path.getParent().resolve("test1_renamed"));
+ }
+
+ Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
+ assertThat(newDanglingIndices.size(), equalTo(1));
+ assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1_renamed"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java
new file mode 100644
index 0000000000..3123edb4c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateTests.java
@@ -0,0 +1,385 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.indices.IndexClosedException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster.RestartCallback;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@Slow
+public class GatewayIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(GatewayIndexStateTests.class);
+
+ @Test
+ public void testMappingMetaDataParsed() throws Exception {
+
+ logger.info("--> starting 1 nodes");
+ internalCluster().startNode();
+
+ logger.info("--> creating test index, with meta routing");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> waiting for yellow status");
+ ensureYellow();
+
+ logger.info("--> verify meta _routing required exists");
+ MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+
+ logger.info("--> restarting nodes...");
+ internalCluster().fullRestart();
+
+ logger.info("--> waiting for yellow status");
+ ensureYellow();
+
+ logger.info("--> verify meta _routing required exists");
+ mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
+ assertThat(mappingMd.routing().required(), equalTo(true));
+ }
+
+ @Test
+ public void testSimpleOpenClose() throws Exception {
+
+ logger.info("--> starting 2 nodes");
+ internalCluster().startNodesAsync(2).get();
+
+ logger.info("--> creating test index");
+ createIndex("test");
+
+ NumShards test = getNumShards("test");
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").execute().actionGet();
+
+ // we need this until we have https://github.com/elasticsearch/elasticsearch/issues/8688
+ // the test rarely fails else because the master does not apply the new mapping quick enough and it is lost
+ waitForConcreteMappingsOnAll("test", "type1", "field1");
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> verifying that the state is green");
+ ensureGreen();
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (IndexClosedException e) {
+ // all is well
+ }
+
+ logger.info("--> creating another index (test2) by indexing into it");
+ client().prepareIndex("test2", "type1", "1").setSource("field1", "value1").execute().actionGet();
+ logger.info("--> verifying that the state is green");
+ ensureGreen();
+
+ logger.info("--> opening the first index again...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> verifying that the state is green");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
+
+ logger.info("--> trying to get the indexed document on the first index");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> restarting nodes...");
+ internalCluster().fullRestart();
+ logger.info("--> waiting for two nodes and green status");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimeout("1s").execute().actionGet();
+ fail();
+ } catch (IndexClosedException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
+
+ logger.info("--> trying to get the indexed document on the first round (before close and shutdown)");
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1").execute().actionGet();
+ }
+
+ @Test
+ public void testJustMasterNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 1 master node non data");
+ internalCluster().startNode(settingsBuilder().put("node.data", false).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> closing master node");
+ internalCluster().closeNonSharedNodes(false);
+
+ logger.info("--> starting 1 master node non data again");
+ internalCluster().startNode(settingsBuilder().put("node.data", false).build());
+
+ logger.info("--> waiting for test index to be created");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setIndices("test").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify we have an index");
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().setIndices("test").execute().actionGet();
+ assertThat(clusterStateResponse.getState().metaData().hasIndex("test"), equalTo(true));
+ }
+
+ @Test
+ public void testJustMasterNodeAndJustDataNode() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 1 master node non data");
+ internalCluster().startNode(settingsBuilder().put("node.data", false).build());
+ internalCluster().startNode(settingsBuilder().put("node.master", false).build());
+
+ logger.info("--> create an index");
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ logger.info("--> waiting for test index to be created");
+ ensureYellow();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").setTimeout("100ms").execute().actionGet();
+ }
+
+ @Test
+ public void testTwoNodesSingleDoc() throws Exception {
+ logger.info("--> cleaning nodes");
+
+ logger.info("--> starting 2 nodes");
+ internalCluster().startNodesAsync(2).get();
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+
+ logger.info("--> closing test index...");
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> opening the index...");
+ client().admin().indices().prepareOpen("test").execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ logger.info("--> verify 1 doc in the index");
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+ }
+
+ @Test
+ public void testDanglingIndicesConflictWithAlias() throws Exception {
+ logger.info("--> starting two nodes");
+ internalCluster().startNodesAsync(2).get();
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ internalCluster().stopRandomNonMasterNode();
+
+        // wait for master to process the node-left event (so delete won't timeout waiting for it)
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut());
+
+ logger.info("--> deleting index");
+ assertAcked(client().admin().indices().prepareDelete("test"));
+
+ index("test2", "type1", "2", "{}");
+
+ logger.info("--> creating index with an alias");
+ assertAcked(client().admin().indices().prepareAliases().addAlias("test2", "test"));
+
+ logger.info("--> starting node back up");
+ internalCluster().startNode();
+
+ ensureGreen();
+
+ // make sure that any other events were processed
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).setWaitForEvents(Priority.LANGUID).get().isTimedOut());
+
+ logger.info("--> verify we read the right thing through alias");
+ assertThat(client().prepareGet("test", "type1", "2").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> deleting alias");
+ assertAcked(client().admin().indices().prepareAliases().removeAlias("test2", "test"));
+
+ logger.info("--> waiting for dangling index to be imported");
+
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertTrue(client().admin().indices().prepareExists("test").execute().actionGet().isExists());
+ }
+ });
+
+ ensureGreen();
+
+ logger.info("--> verifying dangling index contains doc");
+
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testDanglingIndices() throws Exception {
+ logger.info("--> starting two nodes");
+
+ final String node_1 = internalCluster().startNodesAsync(2).get().get(0);
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ logger.info("--> verify 1 doc in the index");
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> restarting the nodes");
+ final Gateway gateway1 = internalCluster().getInstance(Gateway.class, node_1);
+ internalCluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ if (node_1.equals(nodeName)) {
+ logger.info("--> deleting the data for the first node");
+ gateway1.reset();
+ }
+ return null;
+ }
+ });
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+        // spin a bit waiting for the index to exist
+ long time = System.currentTimeMillis();
+ while ((System.currentTimeMillis() - time) < TimeValue.timeValueSeconds(10).millis()) {
+ if (client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ break;
+ }
+ }
+
+ logger.info("--> verify that the dangling index exists");
+ assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ logger.info("--> verify the doc is there");
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
new file mode 100644
index 0000000000..ffe4e716b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.test.ElasticsearchAllocationTestCase;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test that IndexMetaState for master and data only nodes returns the correct list of indices to write.
+ * There are many parameters:
+ * - meta state is not in memory
+ * - meta state is in memory with old version/ new version
+ * - meta state is in memory with new version
+ * - version changed in cluster state event/ no change
+ * - node is data only node
+ * - node is master eligible
+ * for data only nodes: shard initializing on the node
+ */
+public class GatewayMetaStateTests extends ElasticsearchAllocationTestCase {
+
+ ClusterChangedEvent generateEvent(boolean initializing, boolean versionChanged, boolean masterEligible) {
+        // ridiculous settings to make sure we don't run into uninitialized values because of defaults
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 100)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100)
+ .build());
+ ClusterState newClusterState, previousClusterState;
+ MetaData metaDataOldClusterState = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTableOldClusterState = RoutingTable.builder()
+ .addAsNew(metaDataOldClusterState.index("test"))
+ .build();
+
+ // assign all shards
+ ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
+ .metaData(metaDataOldClusterState)
+ .routingTable(routingTableOldClusterState)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ // new cluster state will have initializing shards on node 1
+ RoutingTable routingTableNewClusterState = strategy.reroute(init).routingTable();
+ if (initializing == false) {
+ // pretend all initialized, nothing happened
+ ClusterState temp = ClusterState.builder(init).routingTable(routingTableNewClusterState).metaData(metaDataOldClusterState).build();
+ routingTableNewClusterState = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
+ routingTableOldClusterState = routingTableNewClusterState;
+
+ } else {
+ // nothing to do, we have one routing table with unassigned and one with initializing
+ }
+
+ // create new meta data either with version changed or not
+ MetaData metaDataNewClusterState = MetaData.builder()
+ .put(init.metaData().index("test"), versionChanged)
+ .build();
+
+
+ // create the cluster states with meta data and routing tables as computed before
+ previousClusterState = ClusterState.builder(init)
+ .metaData(metaDataOldClusterState)
+ .routingTable(routingTableOldClusterState)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ newClusterState = ClusterState.builder(previousClusterState).routingTable(routingTableNewClusterState).metaData(metaDataNewClusterState).version(previousClusterState.getVersion() + 1).build();
+
+ ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState);
+ assertThat(event.state().version(), equalTo(event.previousState().version() + 1));
+ return event;
+ }
+
+ ClusterChangedEvent generateCloseEvent(boolean masterEligible) {
+        // ridiculous settings to make sure we don't run into uninitialized values because of defaults
+ AllocationService strategy = createAllocationService(settingsBuilder()
+ .put("cluster.routing.allocation.concurrent_recoveries", 100)
+ .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+ .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100)
+ .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100)
+ .build());
+ ClusterState newClusterState, previousClusterState;
+ MetaData metaDataIndexCreated = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2))
+ .build();
+
+ RoutingTable routingTableIndexCreated = RoutingTable.builder()
+ .addAsNew(metaDataIndexCreated.index("test"))
+ .build();
+
+ // assign all shards
+ ClusterState init = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
+ .metaData(metaDataIndexCreated)
+ .routingTable(routingTableIndexCreated)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ RoutingTable routingTableInitializing = strategy.reroute(init).routingTable();
+ ClusterState temp = ClusterState.builder(init).routingTable(routingTableInitializing).build();
+ RoutingTable routingTableStarted = strategy.applyStartedShards(temp, temp.getRoutingNodes().shardsWithState(INITIALIZING)).routingTable();
+
+ // create new meta data either with version changed or not
+ MetaData metaDataStarted = MetaData.builder()
+ .put(init.metaData().index("test"), true)
+ .build();
+
+ // create the cluster states with meta data and routing tables as computed before
+ MetaData metaDataClosed = MetaData.builder()
+ .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).state(IndexMetaData.State.CLOSE).numberOfShards(5).numberOfReplicas(2)).version(metaDataStarted.version() + 1)
+ .build();
+ previousClusterState = ClusterState.builder(init)
+ .metaData(metaDataStarted)
+ .routingTable(routingTableStarted)
+ .nodes(generateDiscoveryNodes(masterEligible))
+ .build();
+ newClusterState = ClusterState.builder(previousClusterState)
+ .routingTable(routingTableIndexCreated)
+ .metaData(metaDataClosed)
+ .version(previousClusterState.getVersion() + 1).build();
+
+ ClusterChangedEvent event = new ClusterChangedEvent("test", newClusterState, previousClusterState);
+ assertThat(event.state().version(), equalTo(event.previousState().version() + 1));
+ return event;
+ }
+
+ private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) {
+ Map<String, String> masterNodeAttributes = new HashMap<>();
+ masterNodeAttributes.put("master", "true");
+ masterNodeAttributes.put("data", "true");
+ Map<String, String> dataNodeAttributes = new HashMap<>();
+ dataNodeAttributes.put("master", "false");
+ dataNodeAttributes.put("data", "true");
+ return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? masterNodeAttributes : dataNodeAttributes)).put(newNode("master_node", masterNodeAttributes)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node");
+ }
+
+ public void assertState(ClusterChangedEvent event,
+ boolean stateInMemory,
+ boolean expectMetaData) throws Exception {
+ MetaData inMemoryMetaData = null;
+ ImmutableSet<String> oldIndicesList = ImmutableSet.of();
+ if (stateInMemory) {
+ inMemoryMetaData = event.previousState().metaData();
+ ImmutableSet.Builder<String> relevantIndices = ImmutableSet.builder();
+ oldIndicesList = relevantIndices.addAll(GatewayMetaState.getRelevantIndices(event.previousState(), oldIndicesList)).build();
+ }
+ Set<String> newIndicesList = GatewayMetaState.getRelevantIndices(event.state(), oldIndicesList);
+ // third, get the actual write info
+ Iterator<GatewayMetaState.IndexMetaWriteInfo> indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator();
+
+ if (expectMetaData) {
+ assertThat(indices.hasNext(), equalTo(true));
+ assertThat(indices.next().getNewMetaData().index(), equalTo("test"));
+ assertThat(indices.hasNext(), equalTo(false));
+ } else {
+ assertThat(indices.hasNext(), equalTo(false));
+ }
+ }
+
+ @Test
+ public void testVersionChangeIsAlwaysWritten() throws Exception {
+ // test that version changes are always written
+ boolean initializing = randomBoolean();
+ boolean versionChanged = true;
+ boolean stateInMemory = randomBoolean();
+ boolean masterEligible = randomBoolean();
+ boolean expectMetaData = true;
+ ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible);
+ assertState(event, stateInMemory, expectMetaData);
+ }
+
+ @Test
+ public void testNewShardsAlwaysWritten() throws Exception {
+ // make sure new shards on data only node always written
+ boolean initializing = true;
+ boolean versionChanged = randomBoolean();
+ boolean stateInMemory = randomBoolean();
+ boolean masterEligible = false;
+ boolean expectMetaData = true;
+ ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible);
+ assertState(event, stateInMemory, expectMetaData);
+ }
+
+ @Test
+ public void testAllUpToDateNothingWritten() throws Exception {
+ // make sure state is not written again if we wrote already
+ boolean initializing = false;
+ boolean versionChanged = false;
+ boolean stateInMemory = true;
+ boolean masterEligible = randomBoolean();
+ boolean expectMetaData = false;
+ ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible);
+ assertState(event, stateInMemory, expectMetaData);
+ }
+
+ @Test
+ public void testNoWriteIfNothingChanged() throws Exception {
+ boolean initializing = false;
+ boolean versionChanged = false;
+ boolean stateInMemory = true;
+ boolean masterEligible = randomBoolean();
+ boolean expectMetaData = false;
+ ClusterChangedEvent event = generateEvent(initializing, versionChanged, masterEligible);
+ ClusterChangedEvent newEventWithNothingChanged = new ClusterChangedEvent("test cluster state", event.state(), event.state());
+ assertState(newEventWithNothingChanged, stateInMemory, expectMetaData);
+ }
+
+ @Test
+ public void testWriteClosedIndex() throws Exception {
+ // test that the closing of an index is written also on data only node
+ boolean masterEligible = randomBoolean();
+ boolean expectMetaData = true;
+ boolean stateInMemory = true;
+ ClusterChangedEvent event = generateCloseEvent(masterEligible);
+ assertState(event, stateInMemory, expectMetaData);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java
new file mode 100644
index 0000000000..8034870680
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/GatewayServiceTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.cluster.NoopClusterService;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+
+public class GatewayServiceTests extends ElasticsearchTestCase {
+
+
+ private GatewayService createService(Settings.Builder settings) {
+ return new GatewayService(Settings.builder()
+ .put("http.enabled", "false")
+ .put("discovery.type", "local")
+ .put(settings.build()).build(), null, null, new NoopClusterService(), null, null);
+
+ }
+
+ @Test
+ public void testDefaultRecoverAfterTime() throws IOException {
+
+ // check that the default is not set
+ GatewayService service = createService(Settings.builder());
+ assertNull(service.recoverAfterTime());
+
+ // ensure default is set when setting expected_nodes
+ service = createService(Settings.builder().put("gateway.expected_nodes", 1));
+ assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));
+
+ // ensure default is set when setting expected_data_nodes
+ service = createService(Settings.builder().put("gateway.expected_data_nodes", 1));
+ assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));
+
+ // ensure default is set when setting expected_master_nodes
+ service = createService(Settings.builder().put("gateway.expected_master_nodes", 1));
+ assertThat(service.recoverAfterTime(), Matchers.equalTo(GatewayService.DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET));
+
+ // ensure settings override default
+ TimeValue timeValue = TimeValue.timeValueHours(3);
+ // ensure default is set when setting expected_nodes
+ service = createService(Settings.builder().put("gateway.expected_nodes", 1).put("gateway.recover_after_time", timeValue.toString()));
+ assertThat(service.recoverAfterTime().millis(), Matchers.equalTo(timeValue.millis()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java
new file mode 100644
index 0000000000..ed160d92b3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java
@@ -0,0 +1,562 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import com.google.common.collect.Iterators;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.InputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URISyntaxException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.startsWith;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS
+public class MetaDataStateFormatTest extends ElasticsearchTestCase {
+
+
+ /**
+ * Ensure we can read a pre-generated cluster state.
+ */
+ public void testReadClusterState() throws URISyntaxException, IOException {
+ final MetaDataStateFormat<MetaData> format = new MetaDataStateFormat<MetaData>(randomFrom(XContentType.values()), "global-") {
+
+ @Override
+ public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
+ fail("this test doesn't write");
+ }
+
+ @Override
+ public MetaData fromXContent(XContentParser parser) throws IOException {
+ return MetaData.Builder.fromXContent(parser);
+ }
+ };
+ Path tmp = createTempDir();
+ final InputStream resource = this.getClass().getResourceAsStream("global-3.st");
+ assertThat(resource, notNullValue());
+ Path dst = tmp.resolve("global-3.st");
+ Files.copy(resource, dst);
+ MetaData read = format.read(dst);
+ assertThat(read, notNullValue());
+ assertThat(read.uuid(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg"));
+ // indices are empty since they are serialized separately
+ }
+
+ public void testReadWriteState() throws IOException {
+ Path[] dirs = new Path[randomIntBetween(1, 5)];
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = createTempDir();
+ }
+ final long id = addDummyFiles("foo-", dirs);
+ Format format = new Format(randomFrom(XContentType.values()), "foo-");
+ DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean());
+ int version = between(0, Integer.MAX_VALUE/2);
+ format.write(state, version, dirs);
+ for (Path file : dirs) {
+ Path[] list = content("*", file);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo(MetaDataStateFormat.STATE_DIR_NAME));
+ Path stateDir = list[0];
+ assertThat(Files.isDirectory(stateDir), is(true));
+ list = content("foo-*", stateDir);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
+ DummyState read = format.read(list[0]);
+ assertThat(read, equalTo(state));
+ }
+ final int version2 = between(version, Integer.MAX_VALUE);
+ DummyState state2 = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean());
+ format.write(state2, version2, dirs);
+
+ for (Path file : dirs) {
+ Path[] list = content("*", file);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo(MetaDataStateFormat.STATE_DIR_NAME));
+ Path stateDir = list[0];
+ assertThat(Files.isDirectory(stateDir), is(true));
+ list = content("foo-*", stateDir);
+ assertEquals(list.length,1);
+ assertThat(list[0].getFileName().toString(), equalTo("foo-"+ (id+1) + ".st"));
+ DummyState read = format.read(list[0]);
+ assertThat(read, equalTo(state2));
+
+ }
+ }
+
+ @Test
+ public void testVersionMismatch() throws IOException {
+ Path[] dirs = new Path[randomIntBetween(1, 5)];
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = createTempDir();
+ }
+ final long id = addDummyFiles("foo-", dirs);
+
+ Format format = new Format(randomFrom(XContentType.values()), "foo-");
+ DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean());
+ int version = between(0, Integer.MAX_VALUE/2);
+ format.write(state, version, dirs);
+ for (Path file : dirs) {
+ Path[] list = content("*", file);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo(MetaDataStateFormat.STATE_DIR_NAME));
+ Path stateDir = list[0];
+ assertThat(Files.isDirectory(stateDir), is(true));
+ list = content("foo-*", stateDir);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
+ DummyState read = format.read(list[0]);
+ assertThat(read, equalTo(state));
+ }
+ }
+
+ public void testCorruption() throws IOException {
+ Path[] dirs = new Path[randomIntBetween(1, 5)];
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = createTempDir();
+ }
+ final long id = addDummyFiles("foo-", dirs);
+ Format format = new Format(randomFrom(XContentType.values()), "foo-");
+ DummyState state = new DummyState(randomRealisticUnicodeOfCodepointLengthBetween(1, 1000), randomInt(), randomLong(), randomDouble(), randomBoolean());
+ int version = between(0, Integer.MAX_VALUE/2);
+ format.write(state, version, dirs);
+ for (Path file : dirs) {
+ Path[] list = content("*", file);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo(MetaDataStateFormat.STATE_DIR_NAME));
+ Path stateDir = list[0];
+ assertThat(Files.isDirectory(stateDir), is(true));
+ list = content("foo-*", stateDir);
+ assertEquals(list.length, 1);
+ assertThat(list[0].getFileName().toString(), equalTo("foo-" + id + ".st"));
+ DummyState read = format.read(list[0]);
+ assertThat(read, equalTo(state));
+ // now corrupt it
+ corruptFile(list[0], logger);
+ try {
+ format.read(list[0]);
+ fail("corrupted file");
+ } catch (CorruptStateException ex) {
+ // expected
+ }
+ }
+ }
+
+ public static void corruptFile(Path file, ESLogger logger) throws IOException {
+ Path fileToCorrupt = file;
+ try (final SimpleFSDirectory dir = new SimpleFSDirectory(fileToCorrupt.getParent())) {
+ long checksumBeforeCorruption;
+ try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
+ }
+ try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+ raf.position(randomIntBetween(0, (int)Math.min(Integer.MAX_VALUE, raf.size()-1)));
+ long filePointer = raf.position();
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ raf.read(bb);
+
+ bb.flip();
+ byte oldValue = bb.get(0);
+ byte newValue = (byte) ~oldValue;
+ bb.put(0, newValue);
+ raf.write(bb, filePointer);
+ logger.debug("Corrupting file {} -- flipping at position {} from {} to {} ", fileToCorrupt.getFileName().toString(), filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue));
+ }
+ long checksumAfterCorruption;
+ long actualChecksumAfterCorruption;
+ try (ChecksumIndexInput input = dir.openChecksumInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ assertThat(input.getFilePointer(), is(0l));
+ input.seek(input.length() - 8); // one long is the checksum... 8 bytes
+ checksumAfterCorruption = input.getChecksum();
+ actualChecksumAfterCorruption = input.readLong();
+ }
+ StringBuilder msg = new StringBuilder();
+ msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
+ msg.append(" after: [").append(checksumAfterCorruption).append("]");
+ msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
+ msg.append(" file: ").append(fileToCorrupt.getFileName().toString()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
+ logger.debug(msg.toString());
+ assumeTrue("Checksum collision - " + msg.toString(),
+ checksumAfterCorruption != checksumBeforeCorruption // collision
+ || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
+ }
+ }
+
+ // If the latest version doesn't use the legacy format while previous versions do, then fail hard
+ public void testLatestVersionDoesNotUseLegacy() throws IOException {
+ final ToXContent.Params params = ToXContent.EMPTY_PARAMS;
+ MetaDataStateFormat<MetaData> format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params);
+ final Path[] dirs = new Path[2];
+ dirs[0] = createTempDir();
+ dirs[1] = createTempDir();
+ for (Path dir : dirs) {
+ Files.createDirectories(dir.resolve(MetaDataStateFormat.STATE_DIR_NAME));
+ }
+ final Path dir1 = randomFrom(dirs);
+ final int v1 = randomInt(10);
+ // write a first state file in the new format
+ format.write(randomMeta(), v1, dir1);
+
+ // write older state files in the old format but with a newer version
+ final int numLegacyFiles = randomIntBetween(1, 5);
+ for (int i = 0; i < numLegacyFiles; ++i) {
+ final Path dir2 = randomFrom(dirs);
+ final int v2 = v1 + 1 + randomInt(10);
+ try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v2)))) {
+ xcontentBuilder.startObject();
+ MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params);
+ xcontentBuilder.endObject();
+ }
+ }
+
+ try {
+ format.loadLatestState(logger, dirs);
+ fail("latest version can not be read");
+ } catch (IllegalStateException ex) {
+ assertThat(ex.getMessage(), startsWith("Could not find a state file to recover from among "));
+ }
+ // write the next state file in the new format and ensure it gets a higher ID
+ final MetaData meta = randomMeta();
+ format.write(meta, v1, dirs);
+ final MetaData metaData = format.loadLatestState(logger, dirs);
+ assertEquals(meta.uuid(), metaData.uuid());
+ final Path path = randomFrom(dirs);
+ final Path[] files = FileSystemUtils.files(path.resolve("_state"));
+ assertEquals(1, files.length);
+ assertEquals("global-" + format.findMaxStateId("global-", dirs) + ".st", files[0].getFileName().toString());
+
+ }
+
+ // If both the legacy and the new format are available for the latest version, prefer the new format
+ public void testPrefersNewerFormat() throws IOException {
+ final ToXContent.Params params = ToXContent.EMPTY_PARAMS;
+ MetaDataStateFormat<MetaData> format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params);
+ final Path[] dirs = new Path[2];
+ dirs[0] = createTempDir();
+ dirs[1] = createTempDir();
+ for (Path dir : dirs) {
+ Files.createDirectories(dir.resolve(MetaDataStateFormat.STATE_DIR_NAME));
+ }
+ final long v = randomInt(10);
+
+ MetaData meta = randomMeta();
+ String uuid = meta.uuid();
+
+ // write a first state file in the old format
+ final Path dir2 = randomFrom(dirs);
+ MetaData meta2 = randomMeta();
+ assertFalse(meta2.uuid().equals(uuid));
+ try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v)))) {
+ xcontentBuilder.startObject();
+ MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params);
+ xcontentBuilder.endObject();
+ }
+
+ // write a second state file in the new format but with the same version
+ format.write(meta, v, dirs);
+
+ MetaData state = format.loadLatestState(logger, dirs);
+ final Path path = randomFrom(dirs);
+ assertTrue(Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (v+1) + ".st")));
+ assertEquals(state.uuid(), uuid);
+ }
+
+ @Test
+ public void testLoadState() throws IOException {
+ final ToXContent.Params params = ToXContent.EMPTY_PARAMS;
+ final Path[] dirs = new Path[randomIntBetween(1, 5)];
+ int numStates = randomIntBetween(1, 5);
+ int numLegacy = randomIntBetween(0, numStates);
+ List<MetaData> meta = new ArrayList<>();
+ for (int i = 0; i < numStates; i++) {
+ meta.add(randomMeta());
+ }
+ Set<Path> corruptedFiles = new HashSet<>();
+ MetaDataStateFormat<MetaData> format = MetaStateService.globalStateFormat(randomFrom(XContentType.values()), params);
+ for (int i = 0; i < dirs.length; i++) {
+ dirs[i] = createTempDir();
+ Files.createDirectories(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME));
+ for (int j = 0; j < numLegacy; j++) {
+ XContentType type = format.format();
+ if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) {
+ Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-"+j);
+ Files.createFile(file); // randomly create 0-byte files -- there is extra logic to skip them
+ } else {
+ try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(type, Files.newOutputStream(dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j)))) {
+ xcontentBuilder.startObject();
+ MetaData.Builder.toXContent(meta.get(j), xcontentBuilder, params);
+ xcontentBuilder.endObject();
+ }
+ }
+ }
+ for (int j = numLegacy; j < numStates; j++) {
+ format.write(meta.get(j), j, dirs[i]);
+ if (randomBoolean() && (j < numStates - 1 || dirs.length > 0 && i != 0)) { // corrupt a file that we do not necessarily need here....
+ Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + j + ".st");
+ corruptedFiles.add(file);
+ MetaDataStateFormatTest.corruptFile(file, logger);
+ }
+ }
+
+ }
+ List<Path> dirList = Arrays.asList(dirs);
+ Collections.shuffle(dirList, getRandom());
+ MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0]));
+ MetaData latestMetaData = meta.get(numStates-1);
+ assertThat(loadedMetaData.uuid(), not(equalTo("_na_")));
+ assertThat(loadedMetaData.uuid(), equalTo(latestMetaData.uuid()));
+ ImmutableOpenMap<String,IndexMetaData> indices = loadedMetaData.indices();
+ assertThat(indices.size(), equalTo(latestMetaData.indices().size()));
+ for (IndexMetaData original : latestMetaData) {
+ IndexMetaData deserialized = indices.get(original.getIndex());
+ assertThat(deserialized, notNullValue());
+ assertThat(deserialized.version(), equalTo(original.version()));
+ assertThat(deserialized.numberOfReplicas(), equalTo(original.numberOfReplicas()));
+ assertThat(deserialized.numberOfShards(), equalTo(original.numberOfShards()));
+ }
+
+ // now corrupt all the latest ones and make sure we fail to load the state
+ if (numStates > numLegacy) {
+ for (int i = 0; i < dirs.length; i++) {
+ Path file = dirs[i].resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (numStates-1) + ".st");
+ if (corruptedFiles.contains(file)) {
+ continue;
+ }
+ MetaDataStateFormatTest.corruptFile(file, logger);
+ }
+ try {
+ format.loadLatestState(logger, dirList.toArray(new Path[0]));
+ fail("latest version can not be read");
+ } catch (ElasticsearchException ex) {
+ assertThat(ex.getCause(), instanceOf(CorruptStateException.class));
+ }
+ }
+
+ }
+
+ private MetaData randomMeta() throws IOException {
+ int numIndices = randomIntBetween(1, 10);
+ MetaData.Builder mdBuilder = MetaData.builder();
+ mdBuilder.generateUuidIfNeeded();
+ for (int i = 0; i < numIndices; i++) {
+ mdBuilder.put(indexBuilder(randomAsciiOfLength(10) + "idx-"+i));
+ }
+ return mdBuilder.build();
+ }
+
+ private IndexMetaData.Builder indexBuilder(String index) throws IOException {
+ return IndexMetaData.builder(index)
+ .settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 10)).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 5)));
+ }
+
+
+ private class Format extends MetaDataStateFormat<DummyState> {
+
+ Format(XContentType format, String prefix) {
+ super(format, prefix);
+ }
+
+ @Override
+ public void toXContent(XContentBuilder builder, DummyState state) throws IOException {
+ state.toXContent(builder, null);
+ }
+
+ @Override
+ public DummyState fromXContent(XContentParser parser) throws IOException {
+ return new DummyState().parse(parser);
+ }
+
+ @Override
+ protected Directory newDirectory(Path dir) throws IOException {
+ MockDirectoryWrapper mock = new MockDirectoryWrapper(getRandom(), super.newDirectory(dir));
+ closeAfterSuite(mock);
+ return mock;
+ }
+ }
+
+ private static class DummyState implements ToXContent {
+ String string;
+ int aInt;
+ long aLong;
+ double aDouble;
+ boolean aBoolean;
+
+ @Override
+ public String toString() {
+ return "DummyState{" +
+ "string='" + string + '\'' +
+ ", aInt=" + aInt +
+ ", aLong=" + aLong +
+ ", aDouble=" + aDouble +
+ ", aBoolean=" + aBoolean +
+ '}';
+ }
+
+ public DummyState(String string, int aInt, long aLong, double aDouble, boolean aBoolean) {
+ this.string = string;
+ this.aInt = aInt;
+ this.aLong = aLong;
+ this.aDouble = aDouble;
+ this.aBoolean = aBoolean;
+ }
+
+ public DummyState() {
+
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("string", string);
+ builder.field("int", aInt);
+ builder.field("long", aLong);
+ builder.field("double", aDouble);
+ builder.field("boolean", aBoolean);
+ return builder;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ DummyState that = (DummyState) o;
+
+ if (aBoolean != that.aBoolean) return false;
+ if (Double.compare(that.aDouble, aDouble) != 0) return false;
+ if (aInt != that.aInt) return false;
+ if (aLong != that.aLong) return false;
+ return string.equals(that.string);
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result;
+ long temp;
+ result = string.hashCode();
+ result = 31 * result + aInt;
+ result = 31 * result + (int) (aLong ^ (aLong >>> 32));
+ temp = Double.doubleToLongBits(aDouble);
+ result = 31 * result + (int) (temp ^ (temp >>> 32));
+ result = 31 * result + (aBoolean ? 1 : 0);
+ return result;
+ }
+
+ public DummyState parse(XContentParser parser) throws IOException {
+ String fieldName = null;
+ parser.nextToken(); // start object
+ while(parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ XContentParser.Token token = parser.currentToken();
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token == XContentParser.Token.VALUE_STRING) {
+ assertTrue("string".equals(fieldName));
+ string = parser.text();
+ } else if (token == XContentParser.Token.VALUE_NUMBER) {
+ switch (fieldName) {
+ case "double":
+ aDouble = parser.doubleValue();
+ break;
+ case "int":
+ aInt = parser.intValue();
+ break;
+ case "long":
+ aLong = parser.longValue();
+ break;
+ default:
+ fail("unexpected numeric value " + token);
+ break;
+ }
+ }else if (token == XContentParser.Token.VALUE_BOOLEAN) {
+ assertTrue("boolean".equals(fieldName));
+ aBoolean = parser.booleanValue();
+ } else {
+ fail("unexpected value " + token);
+ }
+ }
+ return this;
+ }
+ }
+
+ public Path[] content(String glob, Path dir) throws IOException {
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, glob)) {
+ return Iterators.toArray(stream.iterator(), Path.class);
+ }
+ }
+
+ public long addDummyFiles(String prefix, Path... paths) throws IOException {
+ int realId = -1;
+ for (Path path : paths) {
+ if (randomBoolean()) {
+ Path stateDir = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
+ Files.createDirectories(stateDir);
+ String actualPrefix = prefix;
+ int id = randomIntBetween(0, 10);
+ if (randomBoolean()) {
+ actualPrefix = "dummy-";
+ } else {
+ realId = Math.max(realId, id);
+ }
+ try (OutputStream stream = Files.newOutputStream(stateDir.resolve(actualPrefix + id + MetaDataStateFormat.STATE_FILE_EXTENSION))) {
+ stream.write(0);
+ }
+ }
+ }
+ return realId + 1;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java
new file mode 100644
index 0000000000..71143159cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataWriteDataNodesTests.java
@@ -0,0 +1,358 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class MetaDataWriteDataNodesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testMetaWrittenAlsoOnDataNode() throws Exception {
+ // this test checks that index state is written on data only nodes
+ String masterNodeName = startMasterNode();
+ String redNode = startDataNode("red");
+ assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
+ index("test", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+ waitForConcreteMappingsOnAll("test", "doc", "text");
+ ensureGreen("test");
+ assertIndexInMetaState(redNode, "test");
+ assertIndexInMetaState(masterNodeName, "test");
+ //stop master node and start again with an empty data folder
+ ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+ String newMasterNode = startMasterNode();
+ ensureGreen("test");
+ // wait for mapping also on master because then we can be sure the state was written
+ waitForConcreteMappingsOnAll("test", "doc", "text");
+ // check for meta data
+ assertIndexInMetaState(redNode, "test");
+ assertIndexInMetaState(newMasterNode, "test");
+ // check if index and doc is still there
+ ensureGreen("test");
+ assertTrue(client().prepareGet("test", "doc", "1").get().isExists());
+ }
+
+ @Test
+ public void testMetaWrittenOnlyForIndicesOnNodesThatHaveAShard() throws Exception {
+ // this test checks that the index state is only written to a data only node if they have a shard of that index allocated on the node
+ String masterNode = startMasterNode();
+ String blueNode = startDataNode("blue");
+ String redNode = startDataNode("red");
+
+ assertAcked(prepareCreate("blue_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")));
+ index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+ assertAcked(prepareCreate("red_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
+ index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+ ensureGreen();
+ waitForConcreteMappingsOnAll("blue_index", "doc", "text");
+ waitForConcreteMappingsOnAll("red_index", "doc", "text");
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexNotInMetaState(redNode, "blue_index");
+ assertIndexInMetaState(blueNode, "blue_index");
+ assertIndexInMetaState(redNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ assertIndexInMetaState(masterNode, "blue_index");
+
+ // now the index state for blue_index should only be written on blue_node and that for red_index only on red_node
+ // we restart red node and master but with empty data folders
+ stopNode(redNode);
+ ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+ masterNode = startMasterNode();
+ redNode = startDataNode("red");
+
+ ensureGreen();
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(blueNode, "blue_index");
+ assertIndexNotInMetaState(redNode, "red_index");
+ assertIndexNotInMetaState(redNode, "blue_index");
+ assertIndexNotInMetaState(masterNode, "red_index");
+ assertIndexInMetaState(masterNode, "blue_index");
+ // check that blue index is still there
+ assertFalse(client().admin().indices().prepareExists("red_index").get().isExists());
+ assertTrue(client().prepareGet("blue_index", "doc", "1").get().isExists());
+ // red index should be gone
+ // if the blue node had stored the index state then cluster health would be red and red_index would exist
+ assertFalse(client().admin().indices().prepareExists("red_index").get().isExists());
+
+ }
+
+ @Test
+ public void testMetaIsRemovedIfAllShardsFromIndexRemoved() throws Exception {
+ // this test checks that the index state is removed from a data only node once all shards have been allocated away from it
+ String masterNode = startMasterNode();
+ String blueNode = startDataNode("blue");
+ String redNode = startDataNode("red");
+
+ // create blue_index on blue_node and same for red
+ client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get();
+ assertAcked(prepareCreate("blue_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")));
+ index("blue_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+ assertAcked(prepareCreate("red_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
+ index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+
+ ensureGreen();
+ assertIndexNotInMetaState(redNode, "blue_index");
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(redNode, "red_index");
+ assertIndexInMetaState(blueNode, "blue_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ assertIndexInMetaState(masterNode, "blue_index");
+
+ // now relocate blue_index to red_node and red_index to blue_node
+ logger.debug("relocating indices...");
+ client().admin().indices().prepareUpdateSettings("blue_index").setSettings(Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")).get();
+ client().admin().indices().prepareUpdateSettings("red_index").setSettings(Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")).get();
+ client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get();
+ ensureGreen();
+ assertIndexNotInMetaState(redNode, "red_index");
+ assertIndexNotInMetaState(blueNode, "blue_index");
+ assertIndexInMetaState(redNode, "blue_index");
+ assertIndexInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ assertIndexInMetaState(masterNode, "blue_index");
+ waitForConcreteMappingsOnAll("blue_index", "doc", "text");
+ waitForConcreteMappingsOnAll("red_index", "doc", "text");
+
+ //at this point the blue_index is on red node and the red_index on blue node
+ // now, when we start red and master node again but without data folder, the red index should be gone but the blue index should initialize fine
+ stopNode(redNode);
+ ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+ masterNode = startMasterNode();
+ redNode = startDataNode("red");
+ ensureGreen();
+ assertIndexNotInMetaState(redNode, "blue_index");
+ assertIndexNotInMetaState(blueNode, "blue_index");
+ assertIndexNotInMetaState(redNode, "red_index");
+ assertIndexInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ assertIndexNotInMetaState(masterNode, "blue_index");
+ assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists());
+ // if the red_node had stored the index state then cluster health would be red and blue_index would exist
+ assertFalse(client().admin().indices().prepareExists("blue_index").get().isExists());
+ }
+
+ @Test
+ public void testMetaWrittenWhenIndexIsClosed() throws Exception {
+ String masterNode = startMasterNode();
+ String redNodeDataPath = createTempDir().toString();
+ String redNode = startDataNode("red", redNodeDataPath);
+ String blueNode = startDataNode("blue");
+ // create red_index on red_node
+ client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("3")).get();
+ assertAcked(prepareCreate("red_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
+ index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+
+ ensureGreen();
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(redNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+
+ waitForConcreteMappingsOnAll("red_index", "doc", "text");
+ client().admin().indices().prepareClose("red_index").get();
+ // close the index
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name()));
+
+ // restart master with empty data folder and maybe red node
+ boolean restartRedNode = randomBoolean();
+ //at this point the red_index on red node
+ if (restartRedNode) {
+ stopNode(redNode);
+ }
+ ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+ masterNode = startMasterNode();
+ if (restartRedNode) {
+ redNode = startDataNode("red", redNodeDataPath);
+ }
+
+ ensureGreen("red_index");
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(redNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ clusterStateResponse = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name()));
+
+ // open the index again
+ client().admin().indices().prepareOpen("red_index").get();
+ clusterStateResponse = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name()));
+ // restart again
+ ensureGreen();
+ if (restartRedNode) {
+ stopNode(redNode);
+ }
+ ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+ masterNode = startMasterNode();
+ if (restartRedNode) {
+ redNode = startDataNode("red", redNodeDataPath);
+ }
+ ensureGreen("red_index");
+ assertIndexNotInMetaState(blueNode, "red_index");
+ assertIndexInMetaState(redNode, "red_index");
+ assertIndexInMetaState(masterNode, "red_index");
+ clusterStateResponse = client().admin().cluster().prepareState().get();
+ assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.OPEN.name()));
+ assertTrue(client().prepareGet("red_index", "doc", "1").get().isExists());
+ }
+
+    @Test
+    public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception {
+        // Verifies that meta data changes (here: a mapping update) applied while an index is
+        // closed are written to disk and survive a master restart with an empty data folder.
+        String masterNode = startMasterNode();
+        String redNodeDataPath = createTempDir().toString();
+        String redNode = startDataNode("red", redNodeDataPath);
+        // create red_index, allocated on the red node only (via the allocation include filter)
+        client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2")).get();
+        assertAcked(prepareCreate("red_index").setSettings(Settings.builder().put("index.number_of_replicas", 0).put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")));
+        index("red_index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+
+        logger.info("--> wait for green red_index");
+        ensureGreen();
+        logger.info("--> wait for meta state written for red_index");
+        assertIndexInMetaState(redNode, "red_index");
+        assertIndexInMetaState(masterNode, "red_index");
+
+        waitForConcreteMappingsOnAll("red_index", "doc", "text");
+
+        logger.info("--> close red_index");
+        client().admin().indices().prepareClose("red_index").get();
+        // check that the index is marked closed in the cluster state
+        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+        assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name()));
+
+        logger.info("--> restart red node");
+        stopNode(redNode);
+        redNode = startDataNode("red", redNodeDataPath);
+        // update the mapping while the index is closed
+        client().admin().indices().preparePutMapping("red_index").setType("doc").setSource(jsonBuilder().startObject()
+                .startObject("properties")
+                .startObject("integer_field")
+                .field("type", "integer")
+                .endObject()
+                .endObject()
+                .endObject()).get();
+
+        GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get();
+        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field"));
+        // restart master with empty data folder
+        ((InternalTestCluster) cluster()).stopCurrentMasterNode();
+        masterNode = startMasterNode();
+
+        ensureGreen("red_index");
+        assertIndexInMetaState(redNode, "red_index");
+        assertIndexInMetaState(masterNode, "red_index");
+        // the index is still closed and the mapping update must have been preserved
+        clusterStateResponse = client().admin().cluster().prepareState().get();
+        assertThat(clusterStateResponse.getState().getMetaData().index("red_index").getState().name(), equalTo(IndexMetaData.State.CLOSE.name()));
+        getMappingsResponse = client().admin().indices().prepareGetMappings("red_index").addTypes("doc").get();
+        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get("red_index").get("doc").getSourceAsMap().get("properties"))).get("integer_field"));
+
+    }
+
+    /** Starts a data-only node tagged with the given color, using a fresh temporary data path. */
+    private String startDataNode(String color) {
+        return startDataNode(color, createTempDir().toString());
+    }
+
+    /** Starts a data-only node tagged with the given color, storing its state under {@code newDataPath}. */
+    private String startDataNode(String color, String newDataPath) {
+        return internalCluster().startNode(Settings.builder()
+                .put("node.data", true)
+                .put("node.master", false)
+                .put("node.color", color)
+                .put("path.data", newDataPath)
+                .build());
+    }
+
+    /** Starts a master-only node with its own fresh temporary data path. */
+    private String startMasterNode() {
+        return internalCluster().startNode(Settings.builder()
+                .put("node.data", false)
+                .put("node.master", true)
+                .put("path.data", createTempDir().toString())
+                .build());
+    }
+
+    /** Stops the cluster node with the given name. */
+    private void stopNode(String name) throws IOException {
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(name));
+    }
+
+    /** Asserts that the given node does NOT have {@code indexName} in its on-disk meta state. */
+    protected void assertIndexNotInMetaState(String nodeName, String indexName) throws Exception {
+        assertMetaState(nodeName, indexName, false);
+    }
+
+    /** Asserts that the given node has {@code indexName} in its on-disk meta state. */
+    protected void assertIndexInMetaState(String nodeName, String indexName) throws Exception {
+        assertMetaState(nodeName, indexName, true);
+    }
+
+
+    /**
+     * Waits until the on-disk meta state of {@code nodeName} reaches the expected presence of
+     * {@code indexName} (shouldBe == true means "present"), then asserts that final state.
+     */
+    private void assertMetaState(final String nodeName, final String indexName, final boolean shouldBe) throws Exception {
+        awaitBusy(new Predicate<Object>() {
+            @Override
+            public boolean apply(Object o) {
+                logger.info("checking if meta state exists...");
+                try {
+                    return shouldBe == metaStateExists(nodeName, indexName);
+                } catch (Throwable t) {
+                    logger.info("failed to load meta state", t);
+                    // TODO: loading of meta state fails rarely if the state is deleted while we try to load it
+                    // this here is a hack, would be much better to use for example a WatchService
+                    return false;
+                }
+            }
+        });
+        // re-check once more so the assertion message reflects the final state
+        boolean inMetaState = metaStateExists(nodeName, indexName); // fixed typo: was "inMetaSate"
+        if (shouldBe) {
+            assertTrue("expected " + indexName + " in meta state of node " + nodeName, inMetaState);
+        } else {
+            assertFalse("expected " + indexName + " to not be in meta state of node " + nodeName, inMetaState);
+        }
+    }
+
+    /**
+     * Loads the on-disk meta state of the given node and returns whether it contains
+     * {@code indexName}. Removed the redundant null-then-assign initialization and
+     * returns early on the first match instead of scanning all indices.
+     */
+    private boolean metaStateExists(String nodeName, String indexName) throws Exception {
+        GatewayMetaState nodeMetaState = ((InternalTestCluster) cluster()).getInstance(GatewayMetaState.class, nodeName);
+        MetaData nodeMetaData = nodeMetaState.loadMetaState();
+        for (ObjectObjectCursor<String, IndexMetaData> index : nodeMetaData.getIndices()) {
+            if (index.key.equals(indexName)) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
new file mode 100644
index 0000000000..7386d6e50b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/MetaStateServiceTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Unit tests for {@link MetaStateService}: round-tripping index and global meta state
+ * through a node's data directory, with a randomly chosen on-disk state format.
+ */
+public class MetaStateServiceTests extends ElasticsearchTestCase {
+
+    private static Settings indexSettings = Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+            .build();
+
+    @Test
+    public void testWriteLoadIndex() throws Exception {
+        // index meta state that was written must load back equal
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            metaStateService.writeIndex("test_write", index, null);
+            assertThat(metaStateService.loadIndexState("test1"), equalTo(index));
+        }
+    }
+
+    @Test
+    public void testLoadMissingIndex() throws Exception {
+        // loading state for an index that was never written yields null
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+            assertThat(metaStateService.loadIndexState("test1"), nullValue());
+        }
+    }
+
+    @Test
+    public void testWriteLoadGlobal() throws Exception {
+        // global state (persistent settings) round-trips through write/load
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(Settings.builder().put("test1", "value1").build())
+                    .build();
+            metaStateService.writeGlobalState("test_write", metaData);
+            assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metaData.persistentSettings()));
+        }
+    }
+
+    @Test
+    public void testWriteGlobalStateWithIndexAndNoIndexIsLoaded() throws Exception {
+        // indices contained in the meta data are not written/loaded as part of the global state
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(Settings.builder().put("test1", "value1").build())
+                    .build();
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            MetaData metaDataWithIndex = MetaData.builder(metaData).put(index, true).build();
+
+            metaStateService.writeGlobalState("test_write", metaDataWithIndex);
+            assertThat(metaStateService.loadGlobalState().persistentSettings(), equalTo(metaData.persistentSettings()));
+            assertThat(metaStateService.loadGlobalState().hasIndex("test1"), equalTo(false));
+        }
+    }
+
+    @Test
+    public void testLoadGlobal() throws Exception { // fixed typo: was "tesLoadGlobal"
+        // loadFullState combines the global state with the separately written index state
+        try (NodeEnvironment env = newNodeEnvironment()) {
+            MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
+
+            IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
+            MetaData metaData = MetaData.builder()
+                    .persistentSettings(Settings.builder().put("test1", "value1").build())
+                    .put(index, true)
+                    .build();
+
+            metaStateService.writeGlobalState("test_write", metaData);
+            metaStateService.writeIndex("test_write", index, null);
+
+            MetaData loadedState = metaStateService.loadFullState();
+            assertThat(loadedState.persistentSettings(), equalTo(metaData.persistentSettings()));
+            assertThat(loadedState.hasIndex("test1"), equalTo(true));
+            assertThat(loadedState.index("test1"), equalTo(index));
+        }
+    }
+
+    /** Builds settings that randomly pick an explicit state format to exercise both serialization paths. */
+    private Settings randomSettings() {
+        Settings.Builder builder = Settings.builder();
+        if (randomBoolean()) {
+            builder.put(MetaStateService.FORMAT_SETTING, randomFrom(XContentType.values()).shortName());
+        }
+        return builder.build();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayTests.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayTests.java
new file mode 100644
index 0000000000..0f5a5816c7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayTests.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster.RestartCallback;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for quorum-based primary recovery after full cluster restarts:
+ * with 2 replicas each shard has 3 copies, and a quorum of copies must be present
+ * before primaries are allocated (unless recovery.initial_shards is lowered).
+ */
+@ClusterScope(numDataNodes =0, scope= Scope.TEST)
+public class QuorumGatewayTests extends ElasticsearchIntegrationTest {
+
+    @Override
+    protected int numberOfReplicas() {
+        // 2 replicas -> 3 shard copies per shard
+        return 2;
+    }
+
+    @Test
+    @Slow
+    public void testChangeInitialShardsRecovery() throws Exception {
+        logger.info("--> starting 3 nodes");
+        final String[] nodes = internalCluster().startNodesAsync(3).get().toArray(new String[0]);
+
+        createIndex("test");
+        ensureGreen();
+        NumShards test = getNumShards("test");
+
+        logger.info("--> indexing...");
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        //We don't check for failures in the flush response: if we do we might get the following:
+        // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+        flush();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+        refresh();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
+        }
+
+        // restart only one of the three nodes; the other two stay down, so no quorum exists
+        final String nodeToRemove = nodes[between(0,2)];
+        logger.info("--> restarting 1 nodes -- kill 2");
+        internalCluster().fullRestart(new RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                return Settings.EMPTY;
+            }
+
+            @Override
+            public boolean doRestart(String nodeName) {
+                // only restart the single chosen node
+                return nodeToRemove.equals(nodeName);
+            }
+        });
+        if (randomBoolean()) {
+            Thread.sleep(between(1, 400)); // wait a bit and give is a chance to try to allocate
+        }
+        ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet
+        assertThat(awaitBusy(new Predicate<Object>() {
+            @Override
+            public boolean apply(Object input) {
+                ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+                return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null;
+            }}), equalTo(true)); // wait until we get a cluster state - could be null if we quick enough.
+        final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
+        assertThat(clusterStateResponse.getState(), notNullValue());
+        assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue());
+        // without a quorum of copies no primary may be active yet
+        assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false));
+        logger.info("--> change the recovery.initial_shards setting, and make sure its recovered");
+        client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get();
+
+        logger.info("--> running cluster_health (wait for the shards to startup), primaries only since we only have 1 node");
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(test.numPrimaries)).actionGet();
+        logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
+        }
+    }
+
+    @Test
+    @Slow
+    public void testQuorumRecovery() throws Exception {
+
+        logger.info("--> starting 3 nodes");
+        internalCluster().startNodesAsync(3).get();
+        // we are shutting down nodes - make sure we don't have 2 clusters if we test network
+        setMinimumMasterNodes(2);
+
+        createIndex("test");
+        ensureGreen();
+        final NumShards test = getNumShards("test");
+
+        logger.info("--> indexing...");
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        //We don't check for failures in the flush response: if we do we might get the following:
+        // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
+        flush();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
+        refresh();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2l);
+        }
+        logger.info("--> restart all nodes");
+        internalCluster().fullRestart(new RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                return null;
+            }
+
+            @Override
+            public void doAfterNodes(int numNodes, final Client activeClient) throws Exception {
+                if (numNodes == 1) {
+                    // waits for 2 nodes and a quorum of active shard copies, then indexes
+                    // one more document while the last node is still down
+                    assertThat(awaitBusy(new Predicate<Object>() {
+                        @Override
+                        public boolean apply(Object input) {
+                            logger.info("--> running cluster_health (wait for the shards to startup)");
+                            ClusterHealthResponse clusterHealth = activeClient.admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForNodes("2").waitForActiveShards(test.numPrimaries * 2)).actionGet();
+                            logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+                            return (!clusterHealth.isTimedOut()) && clusterHealth.getStatus() == ClusterHealthStatus.YELLOW;
+                        }
+                    }, 30, TimeUnit.SECONDS), equalTo(true));
+                    logger.info("--> one node is closed -- index 1 document into the remaining nodes");
+                    activeClient.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).get();
+                    assertNoFailures(activeClient.admin().indices().prepareRefresh().get());
+                    for (int i = 0; i < 10; i++) {
+                        assertHitCount(activeClient.prepareCount().setQuery(matchAllQuery()).get(), 3l);
+                    }
+                }
+            }
+
+        });
+        logger.info("--> all nodes are started back, verifying we got the latest version");
+        logger.info("--> running cluster_health (wait for the shards to startup)");
+        ensureGreen();
+
+        // the document indexed while one node was down must be visible after the full restart
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 3l);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java b/core/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java
new file mode 100644
index 0000000000..35bc9fb3e0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import com.google.common.collect.ImmutableSet;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+
+/**
+ * Tests the gateway.recover_after_nodes / recover_after_master_nodes / recover_after_data_nodes
+ * settings: state recovery stays blocked (STATE_NOT_RECOVERED_BLOCK on METADATA_WRITE) until
+ * the configured number of nodes of the required type has joined the cluster.
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@Slow
+public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest {
+
+    private final static TimeValue BLOCK_WAIT_TIMEOUT = TimeValue.timeValueSeconds(10);
+
+    /**
+     * Polls the node-local cluster state until the global METADATA_WRITE blocks disappear
+     * or the timeout elapses; returns the blocks seen on the last poll.
+     */
+    public ImmutableSet<ClusterBlock> waitForNoBlocksOnNode(TimeValue timeout, Client nodeClient) throws InterruptedException {
+        long start = System.currentTimeMillis();
+        ImmutableSet<ClusterBlock> blocks;
+        do {
+            blocks = nodeClient.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                    .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE);
+        }
+        while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis());
+        return blocks;
+    }
+
+    /** Starts a node with the given settings and returns a client bound to that node. */
+    public Client startNode(Settings.Builder settings) {
+        String name = internalCluster().startNode(settings);
+        return internalCluster().client(name);
+    }
+
+    @Test
+    public void testRecoverAfterNodes() throws Exception {
+        logger.info("--> start node (1)");
+        Client clientNode1 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+        assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start node (2)");
+        Client clientNode2 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+        // wait out the block timeout to make sure recovery does not kick in with only 2 of 3 nodes
+        Thread.sleep(BLOCK_WAIT_TIMEOUT.millis());
+        assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(clientNode2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start node (3)");
+        Client clientNode3 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3));
+
+        // the third node fulfills recover_after_nodes -> the block must be lifted on every node
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode1).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode2).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode3).isEmpty(), equalTo(true));
+    }
+
+    @Test
+    public void testRecoverAfterMasterNodes() throws Exception {
+        logger.info("--> start master_node (1)");
+        Client master1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start data_node (1)");
+        Client data1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start data_node (2)");
+        Client data2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false));
+        // still only one master-eligible node -> every node stays blocked
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(data2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start master_node (2)");
+        Client master2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true));
+        // the second master fulfills recover_after_master_nodes -> block lifted on every node
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+    }
+
+    @Test
+    public void testRecoverAfterDataNodes() throws Exception {
+        logger.info("--> start master_node (1)");
+        Client master1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start data_node (1)");
+        Client data1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start master_node (2)");
+        Client master2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true));
+        // still only one data node -> every node stays blocked
+        // (fixed: the original asserted master2 twice and never re-checked master1)
+        assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+        assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet()
+                .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE),
+                hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK));
+
+        logger.info("--> start data_node (2)");
+        Client data2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false));
+        // the second data node fulfills recover_after_data_nodes -> block lifted on every node
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true));
+        assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityTests.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityTests.java
new file mode 100644
index 0000000000..7eeff09a94
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gateway;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+@ElasticsearchIntegrationTest.ClusterScope(numDataNodes = 0, scope = ElasticsearchIntegrationTest.Scope.TEST, numClientNodes = 0, transportClientRatio = 0.0)
+public class RecoveryBackwardsCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+
+    // Every node waits for at least two nodes before starting recovery; the
+    // short shutdown delay speeds up the restarts performed during the test.
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+                .put(super.nodeSettings(nodeOrdinal))
+                .put("action.admin.cluster.node.shutdown.delay", "10ms")
+                .put("gateway.recover_after_nodes", 2).build();
+    }
+
+    // Run with at least two old-version ("backwards") external nodes ...
+    @Override
+    protected int minExternalNodes() {
+        return 2;
+    }
+
+    // ... and at most three.
+    @Override
+    protected int maxExternalNodes() {
+        return 3;
+    }
+
+
+    /**
+     * After upgrading a mixed-version cluster, replicas must recover purely by
+     * reusing the files already on disk: zero bytes and zero files copied.
+     */
+    @Test
+    @LuceneTestCase.Slow
+    public void testReusePeerRecovery() throws Exception {
+        // Disable rebalancing on the index so shards stay where they are; a
+        // relocation would copy files and defeat the reuse assertions below.
+        assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings())
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
+        logger.info("--> indexing docs");
+        int numDocs = scaledRandomIntBetween(100, 1000);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+        }
+        indexRandom(true, builders);
+        ensureGreen();
+
+        logger.info("--> bump number of replicas from 0 to 1");
+        client().admin().indices().prepareFlush().execute().actionGet();
+        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").build()).get();
+        ensureGreen();
+
+        // Precondition: every shard still lives on an old-version node.
+        assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
+
+        logger.info("--> upgrade cluster");
+        logClusterState();
+        CountResponse countResponse = client().prepareCount().get();
+        assertHitCount(countResponse, numDocs);
+
+        // Freeze allocation while nodes are swapped out during the upgrade so
+        // replicas are not re-allocated mid-restart, then re-enable it.
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")).execute().actionGet();
+        backwardsCluster().upgradeAllNodes();
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all")).execute().actionGet();
+        ensureGreen();
+
+        countResponse = client().prepareCount().get();
+        assertHitCount(countResponse, numDocs);
+
+        RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").setDetailed(true).get();
+        HashMap<String, String> map = new HashMap<>();
+        map.put("details", "true");
+        final ToXContent.Params params = new ToXContent.MapParams(map);
+        for (ShardRecoveryResponse response : recoveryResponse.shardResponses().get("test")) {
+            RecoveryState recoveryState = response.recoveryState();
+            // Serialized recovery state doubles as the assertion message so a
+            // failure shows the full per-shard detail.
+            final String recoverStateAsJSON = XContentHelper.toString(recoveryState, params);
+            if (!recoveryState.getPrimary()) {
+                RecoveryState.Index index = recoveryState.getIndex();
+                assertThat(recoverStateAsJSON, index.recoveredBytes(), equalTo(0l));
+                assertThat(recoverStateAsJSON, index.reusedBytes(), greaterThan(0l));
+                assertThat(recoverStateAsJSON, index.reusedBytes(), equalTo(index.totalBytes()));
+                assertThat(recoverStateAsJSON, index.recoveredFileCount(), equalTo(0));
+                assertThat(recoverStateAsJSON, index.reusedFileCount(), equalTo(index.totalFileCount()));
+                assertThat(recoverStateAsJSON, index.reusedFileCount(), greaterThan(0));
+                assertThat(recoverStateAsJSON, index.recoveredBytesPercent(), equalTo(100.f));
+                assertThat(recoverStateAsJSON, index.recoveredFilesPercent(), equalTo(100.f));
+                assertThat(recoverStateAsJSON, index.reusedBytes(), greaterThan(index.recoveredBytes()));
+                // TODO upgrade via optimize?
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
new file mode 100644
index 0000000000..a35397833a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.flush.SyncedFlushUtil;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster.RestartCallback;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockFSDirectoryService;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for recovering index data from the on-disk gateway after
+ * full and partial cluster restarts, including file reuse during peer
+ * recovery and sync-id (synced flush) based recovery skipping.
+ */
+@ClusterScope(numDataNodes = 0, scope = Scope.TEST)
+@Slow
+public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {
+
+    /**
+     * A single node must recover its indexed docs from the local gateway
+     * across two consecutive full cluster restarts.
+     */
+    @Test
+    @Slow
+    public void testOneNodeRecoverFromGateway() throws Exception {
+
+        internalCluster().startNode();
+
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+                .startObject("properties").startObject("appAccountIds").field("type", "string").endObject().endObject()
+                .endObject().endObject().string();
+        assertAcked(prepareCreate("test").addMapping("type1", mapping));
+
+
+        client().prepareIndex("test", "type1", "10990239").setSource(jsonBuilder().startObject()
+                .field("_id", "10990239")
+                .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "10990473").setSource(jsonBuilder().startObject()
+                .field("_id", "10990473")
+                .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "10990513").setSource(jsonBuilder().startObject()
+                .field("_id", "10990513")
+                .startArray("appAccountIds").value(14).value(179).endArray().endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "10990695").setSource(jsonBuilder().startObject()
+                .field("_id", "10990695")
+                .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+        client().prepareIndex("test", "type1", "11026351").setSource(jsonBuilder().startObject()
+                .field("_id", "11026351")
+                .startArray("appAccountIds").value(14).endArray().endObject()).execute().actionGet();
+
+        refresh();
+        // Exactly two of the five docs contain appAccountIds == 179.
+        assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+        ensureYellow("test"); // wait for primary allocations here otherwise if we have a lot of shards we might have a
+        // shard that is still in post recovery when we restart and the ensureYellow() below will timeout
+        internalCluster().fullRestart();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        client().admin().indices().prepareRefresh().execute().actionGet();
+        assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+
+        // A second restart verifies recovery is repeatable, not a one-off.
+        internalCluster().fullRestart();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        client().admin().indices().prepareRefresh().execute().actionGet();
+        assertHitCount(client().prepareCount().setQuery(termQuery("appAccountIds", 179)).execute().actionGet(), 2);
+    }
+
+    /**
+     * Docs that were never flushed (translog only) must survive two full
+     * restarts of a single-node cluster, whether or not every shard got docs.
+     */
+    @Test
+    @Slow
+    public void testSingleNodeNoFlush() throws Exception {
+
+        internalCluster().startNode();
+
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+                .startObject("properties").startObject("field").field("type", "string").endObject().startObject("num").field("type", "integer").endObject().endObject()
+                .endObject().endObject().string();
+        // note: default replica settings are tied to #data nodes-1 which is 0 here. We can do with 1 in this test.
+        int numberOfShards = numberOfShards();
+        assertAcked(prepareCreate("test").setSettings(
+                SETTING_NUMBER_OF_SHARDS, numberOfShards,
+                SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 1)
+        ).addMapping("type1", mapping));
+
+        int value1Docs;
+        int value2Docs;
+        boolean indexToAllShards = randomBoolean();
+
+        if (indexToAllShards) {
+            // insert enough docs so all shards will have a doc
+            value1Docs = randomIntBetween(numberOfShards * 10, numberOfShards * 20);
+            value2Docs = randomIntBetween(numberOfShards * 10, numberOfShards * 20);
+
+        } else {
+            // insert a two docs, some shards will not have anything
+            value1Docs = 1;
+            value2Docs = 1;
+        }
+
+
+        // Index with fixed ids so repeated iterations overwrite rather than
+        // duplicate: the loop re-indexes the same documents 1..101 times.
+        for (int i = 0; i < 1 + randomInt(100); i++) {
+            for (int id = 0; id < Math.max(value1Docs, value2Docs); id++) {
+                if (id < value1Docs) {
+                    index("test", "type1", "1_" + id,
+                            jsonBuilder().startObject().field("field", "value1").startArray("num").value(14).value(179).endArray().endObject()
+                    );
+                }
+                if (id < value2Docs) {
+                    index("test", "type1", "2_" + id,
+                            jsonBuilder().startObject().field("field", "value2").startArray("num").value(14).endArray().endObject()
+                    );
+                }
+            }
+
+        }
+
+        refresh();
+
+        for (int i = 0; i <= randomInt(10); i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), value1Docs + value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).get(), value1Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).get(), value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).get(), value1Docs);
+        }
+        if (!indexToAllShards) {
+            // we have to verify primaries are started for them to be restored
+            logger.info("Ensure all primaries have been started");
+            ensureYellow();
+        }
+        internalCluster().fullRestart();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        for (int i = 0; i <= randomInt(10); i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), value1Docs + value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).get(), value1Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).get(), value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).get(), value1Docs);
+        }
+
+        internalCluster().fullRestart();
+
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        for (int i = 0; i <= randomInt(10); i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), value1Docs + value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value1")).get(), value1Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("field", "value2")).get(), value2Docs);
+            assertHitCount(client().prepareCount().setQuery(termQuery("num", 179)).get(), value1Docs);
+        }
+    }
+
+
+    /**
+     * Mixed state — one doc flushed to a Lucene commit, one only in the
+     * translog — must fully recover across two restarts of a single node.
+     */
+    @Test
+    @Slow
+    public void testSingleNodeWithFlush() throws Exception {
+
+        internalCluster().startNode();
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        flush();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        refresh();
+
+        assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+
+        ensureYellow("test"); // wait for primary allocations here otherwise if we have a lot of shards we might have a
+        // shard that is still in post recovery when we restart and the ensureYellow() below will timeout
+
+        internalCluster().fullRestart();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+        }
+
+        internalCluster().fullRestart();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureYellow();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+        }
+    }
+
+    /**
+     * When the first node's data directory is wiped during a restart, the
+     * cluster must rebuild it from the surviving second node's copies.
+     */
+    @Test
+    @Slow
+    public void testTwoNodeFirstNodeCleared() throws Exception {
+
+        final String firstNode = internalCluster().startNode();
+        internalCluster().startNode();
+
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        flush();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        refresh();
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureGreen();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+        }
+
+        internalCluster().fullRestart(new RestartCallback() {
+            @Override
+            public Settings onNodeStopped(String nodeName) throws Exception {
+                // Require both nodes back before recovery so the cleared node
+                // cannot be elected with empty state.
+                return settingsBuilder().put("gateway.recover_after_nodes", 2).build();
+            }
+
+            @Override
+            public boolean clearData(String nodeName) {
+                // Wipe only the first node's data directory.
+                return firstNode.equals(nodeName);
+            }
+
+        });
+
+        logger.info("Running Cluster Health (wait for the shards to startup)");
+        ensureGreen();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+        }
+    }
+
+    /**
+     * While one node is down, the surviving node keeps indexing and gains
+     * metadata (mapping, template, alias). After both nodes return, the
+     * cluster must expose the newest state, identified by an unchanged
+     * metadata UUID.
+     */
+    @Test
+    @Slow
+    public void testLatestVersionLoaded() throws Exception {
+        // clean two nodes
+        internalCluster().startNodesAsync(2, settingsBuilder().put("gateway.recover_after_nodes", 2).build()).get();
+
+        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute().actionGet();
+        client().admin().indices().prepareFlush().execute().actionGet();
+        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).execute().actionGet();
+        client().admin().indices().prepareRefresh().execute().actionGet();
+
+        logger.info("--> running cluster_health (wait for the shards to startup)");
+        ensureGreen();
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
+        }
+
+        String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid();
+        assertThat(metaDataUuid, not(equalTo("_na_")));
+
+        logger.info("--> closing first node, and indexing more data to the second node");
+        internalCluster().fullRestart(new RestartCallback() {
+
+            @Override
+            public void doAfterNodes(int numNodes, Client client) throws Exception {
+                if (numNodes == 1) {
+                    logger.info("--> one node is closed - start indexing data into the second one");
+                    client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
+                    // TODO: remove once refresh doesn't fail immediately if there a master block:
+                    // https://github.com/elasticsearch/elasticsearch/issues/9997
+                    client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
+                    client.admin().indices().prepareRefresh().execute().actionGet();
+
+                    for (int i = 0; i < 10; i++) {
+                        assertHitCount(client.prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+                    }
+
+                    logger.info("--> add some metadata, additional type and template");
+                    client.admin().indices().preparePutMapping("test").setType("type2")
+                            .setSource(jsonBuilder().startObject().startObject("type2").endObject().endObject())
+                            .execute().actionGet();
+                    client.admin().indices().preparePutTemplate("template_1")
+                            .setTemplate("te*")
+                            .setOrder(0)
+                            .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+                                    .startObject("field1").field("type", "string").field("store", "yes").endObject()
+                                    .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+                                    .endObject().endObject().endObject())
+                            .execute().actionGet();
+                    client.admin().indices().prepareAliases().addAlias("test", "test_alias", QueryBuilders.termQuery("field", "value")).execute().actionGet();
+                    logger.info("--> starting two nodes back, verifying we got the latest version");
+                }
+
+            }
+
+        });
+
+        logger.info("--> running cluster_health (wait for the shards to startup)");
+        ensureGreen();
+
+        // The metadata UUID must survive the restart unchanged.
+        assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(), equalTo(metaDataUuid));
+
+        for (int i = 0; i < 10; i++) {
+            assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
+        }
+
+        // Mapping, template and filtered alias added while a node was down
+        // must all be present in the recovered cluster state.
+        ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+        assertThat(state.metaData().index("test").mapping("type2"), notNullValue());
+        assertThat(state.metaData().templates().get("template_1").template(), equalTo("te*"));
+        assertThat(state.metaData().index("test").aliases().get("test_alias"), notNullValue());
+        assertThat(state.metaData().index("test").aliases().get("test_alias").filter(), notNullValue());
+    }
+
+    /**
+     * After a full restart, replicas must reuse the files already on disk.
+     * Two variants: without sync ids only the segments_N file may be copied;
+     * with sync ids (synced flush) recovery skips file copying entirely.
+     */
+    @Test
+    @Slow
+    @TestLogging("gateway:TRACE,indices.recovery:TRACE,index.engine:TRACE")
+    public void testReusePeerRecovery() throws Exception {
+        final Settings settings = settingsBuilder()
+                .put("action.admin.cluster.node.shutdown.delay", "10ms")
+                .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)
+                .put("gateway.recover_after_nodes", 4)
+
+                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 4)
+                .put(MockFSDirectoryService.CRASH_INDEX, false).build();
+
+        internalCluster().startNodesAsync(4, settings).get();
+        // prevent any rebalance actions during the peer recovery
+        // if we run into a relocation the reuse count will be 0 and this fails the test. We are testing here if
+        // we reuse the files on disk after full restarts for replicas.
+        assertAcked(prepareCreate("test").setSettings(Settings.builder()
+                .put(indexSettings())
+                .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)));
+        ensureGreen();
+        logger.info("--> indexing docs");
+        for (int i = 0; i < 1000; i++) {
+            client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+            if ((i % 200) == 0) {
+                client().admin().indices().prepareFlush().execute().actionGet();
+            }
+        }
+        if (randomBoolean()) {
+            client().admin().indices().prepareFlush().execute().actionGet();
+        }
+        logger.info("Running Cluster Health");
+        ensureGreen();
+        client().admin().indices().prepareOptimize("test").setMaxNumSegments(100).get(); // just wait for merges
+        client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get();
+
+        boolean useSyncIds = randomBoolean();
+        if (useSyncIds == false) {
+            logger.info("--> disabling allocation while the cluster is shut down");
+
+            // Disable allocations while we are closing nodes
+            client().admin().cluster().prepareUpdateSettings()
+                    .setTransientSettings(settingsBuilder()
+                            .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE))
+                    .get();
+            logger.info("--> full cluster restart");
+            internalCluster().fullRestart();
+
+            logger.info("--> waiting for cluster to return to green after first shutdown");
+            ensureGreen();
+        } else {
+            logger.info("--> trying to sync flush");
+            assertEquals(SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").failedShards(), 0);
+            assertSyncIdsNotNull();
+        }
+
+        // Fixed: the message was missing its "{}" placeholder, so the
+        // "a second time" suffix argument was silently dropped.
+        logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time");
+        // Disable allocations while we are closing nodes
+        client().admin().cluster().prepareUpdateSettings()
+                .setTransientSettings(settingsBuilder()
+                        .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE))
+                .get();
+        logger.info("--> full cluster restart");
+        internalCluster().fullRestart();
+
+        logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? "" : "second ");
+        ensureGreen();
+
+        if (useSyncIds) {
+            assertSyncIdsNotNull();
+        }
+        RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
+        for (ShardRecoveryResponse response : recoveryResponse.shardResponses().get("test")) {
+            RecoveryState recoveryState = response.recoveryState();
+            // Sum up the bytes belonging to segments_N files; those are the
+            // only files a non-sync-id replica recovery is allowed to copy.
+            long recovered = 0;
+            for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) {
+                if (file.name().startsWith("segments")) {
+                    recovered += file.length();
+                }
+            }
+            if (!recoveryState.getPrimary() && (useSyncIds == false)) {
+                logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}",
+                        response.getShardId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                        recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
+                assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered));
+                assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0L));
+                // we have to recover the segments file since we commit the translog ID on engine startup
+                assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes() - recovered));
+                assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(), equalTo(1));
+                assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount() - 1));
+                assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0));
+            } else {
+                if (useSyncIds && !recoveryState.getPrimary()) {
+                    logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}",
+                            response.getShardId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(),
+                            recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes());
+                }
+                // Primaries (and sync-id replicas) recover everything in place.
+                assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0L));
+                assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes()));
+                assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0));
+                assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount()));
+            }
+        }
+    }
+
+    // Asserts that every shard commit of index "test" carries a sync id,
+    // i.e. the preceding synced flush reached all copies.
+    public void assertSyncIdsNotNull() {
+        IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+        for (ShardStats shardStats : indexStats.getShards()) {
+            assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+        }
+    }
+
+    /**
+     * Restart only the second node (fresh data path) while keeping the first
+     * node's data intact; the index must still be found and searchable once
+     * the primary is allocated.
+     */
+    @Test
+    @Slow
+    public void testRecoveryDifferentNodeOrderStartup() throws Exception {
+        // we need different data paths so we make sure we start the second node fresh
+
+        final String node_1 = internalCluster().startNode(settingsBuilder().put("path.data", createTempDir()).build());
+
+        client().prepareIndex("test", "type1", "1").setSource("field", "value").execute().actionGet();
+
+        internalCluster().startNode(settingsBuilder().put("path.data", createTempDir()).build());
+
+        ensureGreen();
+
+        internalCluster().fullRestart(new RestartCallback() {
+
+            @Override
+            public boolean doRestart(String nodeName) {
+                // Keep node_1 down; only the (empty) second node comes back.
+                return !node_1.equals(nodeName);
+            }
+        });
+
+        ensureYellow();
+
+        assertThat(client().admin().indices().prepareExists("test").execute().actionGet().isExists(), equalTo(true));
+        assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 1);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/get/GetActionTests.java b/core/src/test/java/org/elasticsearch/get/GetActionTests.java
new file mode 100644
index 0000000000..d73e91d849
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -0,0 +1,1348 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.*;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class GetActionTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleGetTests() {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))
+ .addAlias(new Alias("alias")));
+ ensureGreen();
+
+ GetResponse response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime get 1 (no source, implicit)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFields(Strings.EMPTY_ARRAY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no source, explicit)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFetchSource(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getFields().size(), equalTo(0));
+ assertThat(response.getSourceAsBytes(), nullValue());
+
+ logger.info("--> realtime get 1 (no type)");
+ response = client().prepareGet(indexOrAlias(), null, "1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> realtime fetch of field (requires fetching parsing source)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (requires fetching parsing source)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").setFetchSource("field1", null).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap(), hasKey("field1"));
+ assertThat(response.getSourceAsMap(), not(hasKey("field2")));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> flush the index, so we load it from it");
+ flush();
+
+ logger.info("--> realtime get 1 (loaded from index)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> non realtime get 1 (loaded from index)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2"));
+
+ logger.info("--> realtime fetch of field (loaded from index)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsBytes(), nullValue());
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> realtime fetch of field & source (loaded from index)");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setFields("field1").setFetchSource(true).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsBytes(), not(nullValue()));
+ assertThat(response.getField("field1").getValues().get(0).toString(), equalTo("value1"));
+ assertThat(response.getField("field2"), nullValue());
+
+ logger.info("--> update doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").get();
+
+ logger.info("--> realtime get 1");
+ response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_1"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_1"));
+
+ logger.info("--> update doc 1 again");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_2", "field2", "value2_2").get();
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getSourceAsMap().get("field1").toString(), equalTo("value1_2"));
+ assertThat(response.getSourceAsMap().get("field2").toString(), equalTo("value2_2"));
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").get();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+
+ @Test
+ public void simpleMultiGetTests() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "type1", "1").get();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).get();
+ }
+
+ response = client().prepareMultiGet()
+ .add(indexOrAlias(), "type1", "1")
+ .add(indexOrAlias(), "type1", "15")
+ .add(indexOrAlias(), "type1", "3")
+ .add(indexOrAlias(), "type1", "9")
+ .add(indexOrAlias(), "type1", "11").get();
+ assertThat(response.getResponses().length, equalTo(5));
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("15"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getResponse().getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(false));
+ assertThat(response.getResponses()[2].getId(), equalTo("3"));
+ assertThat(response.getResponses()[2].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[3].getId(), equalTo("9"));
+ assertThat(response.getResponses()[3].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[3].getResponse().getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[3].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[4].getId(), equalTo("11"));
+ assertThat(response.getResponses()[4].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[4].getResponse().getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[4].getResponse().isExists(), equalTo(false));
+
+ // multi get with specific field
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").fields("field"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "3").fields("field"))
+ .get();
+
+ assertThat(response.getResponses().length, equalTo(2));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsBytes(), nullValue());
+ assertThat(response.getResponses()[0].getResponse().getField("field").getValues().get(0).toString(), equalTo("value1"));
+ }
+
+ @Test
+ public void realtimeGetWithCompressBackcompat() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1).put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id))
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("_source").field("compress", true).endObject().endObject().endObject()));
+ ensureGreen();
+
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < 10000; i++) {
+ sb.append((char) i);
+ }
+ String fieldValue = sb.toString();
+ client().prepareIndex("test", "type", "1").setSource("field", fieldValue).get();
+
+ // realtime get
+ GetResponse getResponse = client().prepareGet("test", "type", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo(fieldValue));
+ }
+
+ @Test
+ public void getFieldsWithDifferentTypes() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("properties")
+ .startObject("str").field("type", "string").field("store", "yes").endObject()
+ .startObject("strs").field("type", "string").field("store", "yes").endObject()
+ .startObject("int").field("type", "integer").field("store", "yes").endObject()
+ .startObject("ints").field("type", "integer").field("store", "yes").endObject()
+ .startObject("date").field("type", "date").field("store", "yes").endObject()
+ .startObject("binary").field("type", "binary").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).get();
+
+ client().prepareIndex("test", "type2", "1").setSource(
+ jsonBuilder().startObject()
+ .field("str", "test")
+ .field("strs", new String[]{"A", "B", "C"})
+ .field("int", 42)
+ .field("ints", new int[]{1, 2, 3, 4})
+ .field("date", "2012-11-13T15:26:14.000Z")
+ .field("binary", Base64.encodeBytes(new byte[]{1, 2, 3}))
+ .endObject()).get();
+
+ // realtime get with stored source
+ logger.info("--> realtime get (from source)");
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42l));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+
+ logger.info("--> flush the index, so we load it from it");
+ flush();
+
+ logger.info("--> non realtime get (from source)");
+ getResponse = client().prepareGet("test", "type1", "1").setFields("str", "strs", "int", "ints", "date", "binary").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Long) getResponse.getField("int").getValue(), equalTo(42l));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1L, 2L, 3L, 4L));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat(getResponse.getField("binary").getValue(), instanceOf(String.class)); // its a String..., not binary mapped
+
+ logger.info("--> non realtime get (from stored fields)");
+ getResponse = client().prepareGet("test", "type2", "1").setFields("str", "strs", "int", "ints", "date", "binary").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat((String) getResponse.getField("str").getValue(), equalTo("test"));
+ assertThat(getResponse.getField("strs").getValues(), contains((Object) "A", "B", "C"));
+ assertThat((Integer) getResponse.getField("int").getValue(), equalTo(42));
+ assertThat(getResponse.getField("ints").getValues(), contains((Object) 1, 2, 3, 4));
+ assertThat((String) getResponse.getField("date").getValue(), equalTo("2012-11-13T15:26:14.000Z"));
+ assertThat((BytesReference) getResponse.getField("binary").getValue(), equalTo((BytesReference) new BytesArray(new byte[]{1, 2, 3})));
+ }
+
+ @Test
+ public void testGetDocWithMultivaluedFields() throws Exception {
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2")
+ .startObject("properties")
+ .startObject("field").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", mapping1)
+ .addMapping("type2", mapping2)
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+ response = client().prepareGet("test", "type2", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject()).get();
+
+ client().prepareIndex("test", "type2", "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").endObject()).get();
+
+ response = client().prepareGet("test", "type1", "1").setFields("field").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getType(), equalTo("type1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+
+ response = client().prepareGet("test", "type2", "1").setFields("field").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getType(), equalTo("type2"));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+ // Now test values being fetched from stored fields.
+ refresh();
+ response = client().prepareGet("test", "type1", "1").setFields("field").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+
+ response = client().prepareGet("test", "type2", "1").setFields("field").get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getFields().size(), equalTo(1));
+ assertThat(response.getFields().get("field").getValues().size(), equalTo(2));
+ assertThat(response.getFields().get("field").getValues().get(0).toString(), equalTo("1"));
+ assertThat(response.getFields().get("field").getValues().get(1).toString(), equalTo("2"));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithExcludeBackcompat() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("excludes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject())
+ .get();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").get();
+ client().admin().indices().prepareFlush(index).get();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").get();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("field"));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithIncludeBackcompat() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject())
+ .get();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").get();
+ flush();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").get();
+
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatGetFromTranslogShouldWorkWithIncludeExcludeAndFieldsBackcompat() throws Exception {
+ String index = "test";
+ String type = "type1";
+
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("_source")
+ .array("includes", "included")
+ .array("excludes", "excluded")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(prepareCreate(index)
+ .addMapping(type, mapping)
+ .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));
+
+ client().prepareIndex(index, type, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("field2", "extra field to remove").endObject()
+ .startObject("excluded").field("field", "should not be seen").field("field2", "should not be seen").endObject()
+ .endObject())
+ .get();
+
+ GetResponse responseBeforeFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").get();
+ assertThat(responseBeforeFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlush.getSourceAsMap(), hasKey("included"));
+
+ // now tests that extra source filtering works as expected
+ GetResponse responseBeforeFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource(new String[]{"field", "*.field"}, new String[]{"*.field2"}).get();
+ assertThat(responseBeforeFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("excluded")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), not(hasKey("field")));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsMap(), hasKey("included"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), hasKey("field"));
+ assertThat((Map<String, Object>) responseBeforeFlushWithExtraFilters.getSourceAsMap().get("included"), not(hasKey("field2")));
+
+ flush();
+ GetResponse responseAfterFlush = client().prepareGet(index, type, "1").setFields("_source", "included.field", "excluded.field").get();
+ GetResponse responseAfterFlushWithExtraFilters = client().prepareGet(index, type, "1").setFields("included.field", "excluded.field")
+ .setFetchSource("*.field", "*.field2").get();
+
+ assertThat(responseAfterFlush.isExists(), is(true));
+ assertThat(responseBeforeFlush.getSourceAsString(), is(responseAfterFlush.getSourceAsString()));
+
+ assertThat(responseAfterFlushWithExtraFilters.isExists(), is(true));
+ assertThat(responseBeforeFlushWithExtraFilters.getSourceAsString(), is(responseAfterFlushWithExtraFilters.getSourceAsString()));
+ }
+
+ @Test
+ public void testGetWithVersion() {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ GetResponse response = client().prepareGet("test", "type1", "1").get();
+ assertThat(response.isExists(), equalTo(false));
+
+ logger.info("--> index doc 1");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ // From translog:
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ // From Lucene index:
+ refresh();
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(1l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ logger.info("--> index doc 1 again, so increasing the version");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+
+ // From translog:
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ // From Lucene index:
+ refresh();
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
+ fail();
+ } catch (VersionConflictEngineException e) {
+ //all good
+ }
+
+ response = client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getId(), equalTo("1"));
+ assertThat(response.getIndex(), equalTo("test"));
+ assertThat(response.getVersion(), equalTo(2l));
+ }
+
+ @Test
+ public void testMultiGetWithVersion() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ MultiGetResponse response = client().prepareMultiGet().add(indexOrAlias(), "type1", "1").get();
+ assertThat(response.getResponses().length, equalTo(1));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(false));
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).get();
+ }
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(Versions.MATCH_ANY))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(1))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(2))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+ //Version from Lucene index
+ refresh();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(Versions.MATCH_ANY))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(1))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").version(2))
+ .setRealtime(false)
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("1"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[1].getId(), equalTo("1"));
+ assertThat(response.getResponses()[1].getFailure(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1"));
+ assertThat(response.getResponses()[2].getFailure(), notNullValue());
+ assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1"));
+ assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+
+
+ for (int i = 0; i < 3; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value" + i).get();
+ }
+
+ // Version from translog
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(Versions.MATCH_ANY))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(1))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(2))
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+
+
+ //Version from Lucene index
+ refresh();
+ response = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(Versions.MATCH_ANY))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(1))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").version(2))
+ .setRealtime(false)
+ .get();
+ assertThat(response.getResponses().length, equalTo(3));
+ // [0] version doesn't matter, which is the default
+ assertThat(response.getResponses()[0].getFailure(), nullValue());
+ assertThat(response.getResponses()[0].getId(), equalTo("2"));
+ assertThat(response.getResponses()[0].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[0].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ assertThat(response.getResponses()[1].getFailure(), notNullValue());
+ assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(response.getResponses()[1].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("VersionConflictEngineException"));
+ assertThat(response.getResponses()[2].getId(), equalTo("2"));
+ assertThat(response.getResponses()[2].getIndex(), equalTo("test"));
+ assertThat(response.getResponses()[2].getFailure(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().isExists(), equalTo(true));
+ assertThat(response.getResponses()[2].getResponse().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+    /**
+     * Verifies that {@code isMetadataField()} distinguishes metadata fields
+     * ({@code _routing}) from regular document fields, both for a realtime get
+     * (translog, before any refresh/flush) and for a get after flush()
+     * (served from the Lucene index).
+     */
+    @Test
+    public void testGetFields_metaData() throws Exception {
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+                .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+
+        client().prepareIndex("test", "my-type1", "1")
+                .setRouting("1")
+                .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+                .get();
+
+        GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1")
+                .setRouting("1")
+                .setFields("field1", "_routing")
+                .get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+        assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+        assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+
+        flush();
+
+        // Fix: the post-flush response used to be discarded, so the assertions
+        // below silently re-checked the stale pre-flush response. Capture it so
+        // the post-flush code path is actually exercised.
+        getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1")
+                .setFields("field1", "_routing")
+                .setRouting("1")
+                .get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField("field1").isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField("field1").getValue().toString(), equalTo("value"));
+        assertThat(getResponse.getField("_routing").isMetadataField(), equalTo(true));
+        assertThat(getResponse.getField("_routing").getValue().toString(), equalTo("1"));
+    }
+
+    /**
+     * Requesting a non-leaf (object) field via get-fields is invalid and must
+     * raise IllegalArgumentException — both before flush (realtime/translog
+     * path) and after flush (Lucene path).
+     */
+    @Test
+    public void testGetFields_nonLeafField() throws Exception {
+        // "field1" is an object with a nested leaf "field1.field2".
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+                .addMapping("my-type1", jsonBuilder().startObject().startObject("my-type1").startObject("properties")
+                        .startObject("field1").startObject("properties")
+                        .startObject("field2").field("type", "string").endObject()
+                        .endObject().endObject()
+                        .endObject().endObject().endObject())
+                .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1)));
+
+        client().prepareIndex("test", "my-type1", "1")
+                .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+                .get();
+
+        // Translog-backed get: asking for the object field must fail.
+        try {
+            client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
+            fail();
+        } catch (IllegalArgumentException e) {
+            //all well
+        }
+
+        flush();
+
+        // Same request after flush (index-backed get) must fail identically.
+        try {
+            client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
+            fail();
+        } catch (IllegalArgumentException e) {
+            //all well
+        }
+    }
+
+    /**
+     * Gets a deeply nested, multi-valued field ("field1.field2.field3.field4")
+     * both in realtime (before refresh/flush) and after an explicit forced
+     * flush. Two types are indexed with the same source: "my-type2" has an
+     * explicit mapping with field4 stored; "my-type1" has no explicit mapping
+     * here, so its mapping is created dynamically from the document.
+     */
+    @Test
+    @TestLogging("index.shard.service:TRACE,cluster.service:TRACE,action.admin.indices.flush:TRACE")
+    public void testGetFields_complexField() throws Exception {
+        assertAcked(prepareCreate("my-index")
+                .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))
+                .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+                        .startObject("field1").field("type", "object").startObject("properties")
+                        .startObject("field2").field("type", "object").startObject("properties")
+                        .startObject("field3").field("type", "object").startObject("properties")
+                        .startObject("field4").field("type", "string").field("store", "yes")
+                        .endObject().endObject()
+                        .endObject().endObject()
+                        .endObject().endObject()
+                        .endObject().endObject().endObject()));
+
+        // Source with two array entries so the leaf field yields two values
+        // ("value1", "value2") in document order.
+        BytesReference source = jsonBuilder().startObject()
+                .startArray("field1")
+                .startObject()
+                .startObject("field2")
+                .startArray("field3")
+                .startObject()
+                .field("field4", "value1")
+                .endObject()
+                .endArray()
+                .endObject()
+                .endObject()
+                .startObject()
+                .startObject("field2")
+                .startArray("field3")
+                .startObject()
+                .field("field4", "value2")
+                .endObject()
+                .endArray()
+                .endObject()
+                .endObject()
+                .endArray()
+                .endObject().bytes();
+
+        logger.info("indexing documents");
+
+        client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+        client().prepareIndex("my-index", "my-type2", "1").setSource(source).get();
+
+        logger.info("checking real time retrieval");
+
+        String field = "field1.field2.field3.field4";
+        GetResponse getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+        assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+        assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+        getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+        assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+        assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+        logger.info("waiting for recoveries to complete");
+
+        // Flush fails if shard has ongoing recoveries, make sure the cluster is settled down
+        ensureGreen();
+
+        logger.info("flushing");
+        FlushResponse flushResponse = client().admin().indices().prepareFlush("my-index").setForce(true).get();
+        if (flushResponse.getSuccessfulShards() == 0) {
+            // Build a diagnostic message listing every shard failure before failing the test.
+            StringBuilder sb = new StringBuilder("failed to flush at least one shard. total shards [")
+                    .append(flushResponse.getTotalShards()).append("], failed shards: [").append(flushResponse.getFailedShards()).append("]");
+            for (ShardOperationFailedException failure: flushResponse.getShardFailures()) {
+                sb.append("\nShard failure: ").append(failure);
+            }
+            fail(sb.toString());
+        }
+
+        logger.info("checking post-flush retrieval");
+
+        getResponse = client().prepareGet("my-index", "my-type1", "1").setFields(field).get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+        assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+        assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+
+        getResponse = client().prepareGet("my-index", "my-type2", "1").setFields(field).get();
+        assertThat(getResponse.isExists(), equalTo(true));
+        assertThat(getResponse.getField(field).isMetadataField(), equalTo(false));
+        assertThat(getResponse.getField(field).getValues().size(), equalTo(2));
+        assertThat(getResponse.getField(field).getValues().get(0).toString(), equalTo("value1"));
+        assertThat(getResponse.getField(field).getValues().get(1).toString(), equalTo("value2"));
+    }
+
+    /**
+     * Gets the stored {@code _all} field of a document. The assertion expects
+     * the field text plus a trailing space ("some text "), matching how the
+     * _all field concatenates values.
+     */
+    @Test
+    public void testGet_allField() throws Exception {
+        // _all must be explicitly stored in the mapping to be retrievable.
+        assertAcked(prepareCreate("test")
+                .addAlias(new Alias("alias"))
+                .addMapping("my-type1", jsonBuilder()
+                        .startObject()
+                        .startObject("my-type1")
+                        .startObject("_all")
+                        .field("store", true)
+                        .endObject()
+                        .startObject("properties")
+                        .startObject("some_field")
+                        .field("type", "string")
+                        .endObject()
+                        .endObject()
+                        .endObject()
+                        .endObject()));
+        index("test", "my-type1", "1", "some_field", "some text");
+        refresh();
+
+        GetResponse getResponse = client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("_all").get();
+        assertNotNull(getResponse.getField("_all").getValue());
+        assertThat(getResponse.getField("_all").getValue().toString(), equalTo("some text" + " "));
+    }
+
+    /**
+     * A completion ("suggest") field is never stored, so get-fields must return
+     * null for it in every document state: translog-only, translog+index
+     * (after refresh), and index-only (after flush).
+     */
+    @Test
+    public void testUngeneratedFieldsThatAreNeverStored() throws IOException {
+        // disable_flush + refresh_interval=-1 keep the doc in the translog
+        // until refresh()/flush() are called explicitly below.
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\"\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"doc\": {\n" +
+                "      \"properties\": {\n" +
+                "        \"suggest\": {\n" +
+                "          \"type\": \"completion\"\n" +
+                "        }\n" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+        String doc = "{\n" +
+                "  \"suggest\": {\n" +
+                "    \"input\": [\n" +
+                "      \"Nevermind\",\n" +
+                "      \"Nirvana\"\n" +
+                "    ],\n" +
+                "    \"output\": \"Nirvana - Nevermind\"\n" +
+                "  }\n" +
+                "}";
+
+        index("test", "doc", "1", doc);
+        String[] fieldsList = {"suggest"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * {@code _ttl} and {@code _parent} are always stored, so get-fields must
+     * return them in every document state: translog-only, after refresh, and
+     * after flush. Routing "1" is required because the doc has a parent.
+     */
+    @Test
+    public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException {
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\"\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"parentdoc\": {},\n" +
+                "    \"doc\": {\n" +
+                "      \"_parent\": {\n" +
+                "        \"type\": \"parentdoc\"\n" +
+                "      },\n" +
+                "      \"_ttl\": {\n" +
+                "        \"enabled\": true\n" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+
+        client().prepareIndex("test", "doc").setId("1").setSource("{}").setParent("1").setTTL(TimeValue.timeValueHours(1).getMillis()).get();
+
+        String[] fieldsList = {"_ttl", "_parent"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+    }
+
+    /**
+     * With _source disabled and nothing stored, source-derived fields cannot
+     * be fetched in any document state.
+     * NOTE(review): fieldsList is empty, so the assert helpers iterate zero
+     * fields and only the doc's existence is effectively checked — confirm
+     * whether field names (e.g. "_ttl") were meant to be listed here.
+     */
+    @Test
+    public void testUngeneratedFieldsPartOfSourceUnstoredSourceDisabledBackcompat() throws IOException {
+        indexSingleDocumentWithUngeneratedFieldsThatArePartOf_source(false, false);
+        String[] fieldsList = {};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * Counterpart of the "unstored, source disabled" test: as long as the
+     * fields are stored OR _source is enabled, fetching must work in every
+     * document state. The (stored, sourceEnabled) combination is randomized,
+     * excluding the (false, false) case covered by the other test.
+     * NOTE(review): fieldsList is empty here as well — see note above; confirm
+     * intended field names.
+     */
+    @Test
+    public void testUngeneratedFieldsPartOfSourceEitherStoredOrSourceEnabledBackcompat() throws IOException {
+        boolean stored = randomBoolean();
+        boolean sourceEnabled = true;
+        if (stored) {
+            sourceEnabled = randomBoolean();
+        }
+        indexSingleDocumentWithUngeneratedFieldsThatArePartOf_source(stored, sourceEnabled);
+        String[] fieldsList = {};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * Creates index "test" (alias "alias") on a pre-2.0 created-version
+     * (V_1_4_2, backcompat) with _source toggled by {@code sourceEnabled},
+     * then indexes one "doc" document carrying source-level "my_boost" and
+     * "_ttl" values, routed with "1".
+     * NOTE(review): the {@code stored} parameter only populates
+     * {@code storedString}, which is never used in this method — confirm
+     * whether a mapping entry was meant to reference it.
+     */
+    void indexSingleDocumentWithUngeneratedFieldsThatArePartOf_source(boolean stored, boolean sourceEnabled) {
+        String storedString = stored ? "yes" : "no";
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"doc\": {\n" +
+                "      \"_source\": {\n" +
+                "        \"enabled\": " + sourceEnabled + "\n" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+        String doc = "{\n" +
+                "  \"my_boost\": 5.0,\n" +
+                "  \"_ttl\": \"1h\"\n" +
+                "}\n";
+
+        client().prepareIndex("test", "doc").setId("1").setSource(doc).setRouting("1").get();
+    }
+
+
+    /**
+     * With storage disabled, {@code _timestamp} (not part of _source) must be
+     * null in every document state, while {@code _routing} and {@code _size}
+     * — which are always stored — must always be retrievable. Routing "1" is
+     * passed because the helper indexes the doc with that routing.
+     */
+    @Test
+    public void testUngeneratedFieldsNotPartOfSourceUnstored() throws IOException {
+        indexSingleDocumentWithUngeneratedFieldsThatAreNeverPartOf_source(false, randomBoolean());
+        String[] fieldsList = {"_timestamp"};
+        String[] alwaysStoredFieldsList = {"_routing", "_size"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", alwaysStoredFieldsList, "1");
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", alwaysStoredFieldsList, "1");
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", alwaysStoredFieldsList, "1");
+    }
+
+    /**
+     * With {@code _timestamp} stored, all three metadata fields
+     * (_timestamp, _size, _routing) must be retrievable in every document
+     * state: translog-only, after refresh, and after flush.
+     */
+    @Test
+    public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException {
+        indexSingleDocumentWithUngeneratedFieldsThatAreNeverPartOf_source(true, randomBoolean());
+        String[] fieldsList = {"_timestamp", "_size", "_routing"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+    }
+
+    /**
+     * Creates index "test" (alias "alias") with _timestamp enabled and its
+     * store flag driven by {@code stored}, plus _size enabled, then indexes
+     * one "doc" document with routing "1".
+     * NOTE(review): {@code sourceEnabled} is accepted but never used here
+     * (the mapping does not touch _source) — confirm whether it was meant to
+     * be wired into the mapping like in the sibling helper.
+     */
+    void indexSingleDocumentWithUngeneratedFieldsThatAreNeverPartOf_source(boolean stored, boolean sourceEnabled) {
+        String storedString = stored ? "yes" : "no";
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\"\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"parentdoc\": {},\n" +
+                "    \"doc\": {\n" +
+                "      \"_timestamp\": {\n" +
+                "        \"store\": \"" + storedString + "\",\n" +
+                "        \"enabled\": true\n" +
+                "      },\n" +
+                "      \"_size\": {\n" +
+                "        \"enabled\": true\n" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+        String doc = "{\n" +
+                "  \"text\": \"some text.\"\n" +
+                "}\n";
+        client().prepareIndex("test", "doc").setId("1").setSource(doc).setRouting("1").get();
+    }
+
+
+    /**
+     * Generated string fields (_all, _field_names) that are not stored can
+     * never be fetched via get-fields, regardless of document state.
+     */
+    @Test
+    public void testGeneratedStringFieldsUnstored() throws IOException {
+        indexSingleDocumentWithStringFieldsGeneratedFromText(false, randomBoolean());
+        String[] fieldsList = {"_all", "_field_names"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * A stored generated field (_all) behaves differently per document state:
+     * before refresh it is not yet generated — null when errors are ignored,
+     * an exception otherwise; after refresh/flush it is retrievable.
+     * _field_names is never stored, so it stays null throughout.
+     */
+    @Test
+    public void testGeneratedStringFieldsStored() throws IOException {
+        indexSingleDocumentWithStringFieldsGeneratedFromText(true, randomBoolean());
+        String[] fieldsList = {"_all"};
+        String[] alwaysNotStoredFieldsList = {"_field_names"};
+        // before refresh - document is only in translog
+        assertGetFieldsNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsException(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
+    }
+
+    /**
+     * Creates index "test" (alias "alias") on a V_1_4_2 created-version
+     * (backcompat) with _all enabled and its store flag driven by
+     * {@code stored}, _source toggled by {@code sourceEnabled}; then indexes
+     * one two-field text document so _all/_field_names get generated content.
+     */
+    void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) {
+
+        String storedString = stored ? "yes" : "no";
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"doc\": {\n" +
+                "      \"_source\" : {\"enabled\" : " + sourceEnabled + "}," +
+                "      \"_all\" : {\"enabled\" : true, \"store\":\"" + storedString + "\" }" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+        String doc = "{\n" +
+                "  \"text1\": \"some text.\"\n," +
+                "  \"text2\": \"more text.\"\n" +
+                "}\n";
+        index("test", "doc", "1", doc);
+    }
+
+
+    /**
+     * Generated numeric fields (token_count / murmur3, top-level and as
+     * multi-fields of "text") that are not stored can never be fetched via
+     * get-fields, regardless of document state.
+     */
+    @Test
+    public void testGeneratedNumberFieldsUnstored() throws IOException {
+        indexSingleDocumentWithNumericFieldsGeneratedFromText(false, randomBoolean());
+        String[] fieldsList = {"token_count", "text.token_count", "murmur", "text.murmur"};
+        // before refresh - document is only in translog
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * Stored generated numeric fields are only available once the document is
+     * indexed: before refresh they are null (errors ignored) or raise an
+     * exception (errors not ignored); after refresh/flush they are fetchable.
+     */
+    @Test
+    public void testGeneratedNumberFieldsStored() throws IOException {
+        indexSingleDocumentWithNumericFieldsGeneratedFromText(true, randomBoolean());
+        String[] fieldsList = {"token_count", "text.token_count", "murmur", "text.murmur"};
+        // before refresh - document is only in translog
+        assertGetFieldsNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsException(indexOrAlias(), "doc", "1", fieldsList);
+        refresh();
+        //after refresh - document is in translog and also indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+        flush();
+        //after flush - document is in not anymore translog - only indexed
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
+    }
+
+    /**
+     * Creates index "test" (alias "alias") on a V_1_4_2 created-version
+     * (backcompat) mapping token_count and murmur3 fields — both top-level
+     * and as multi-fields under "text" — with their store flag driven by
+     * {@code stored} and _source toggled by {@code sourceEnabled}; then
+     * indexes one matching document.
+     */
+    void indexSingleDocumentWithNumericFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) {
+        String storedString = stored ? "yes" : "no";
+        String createIndexSource = "{\n" +
+                "  \"settings\": {\n" +
+                "    \"index.translog.disable_flush\": true,\n" +
+                "    \"refresh_interval\": \"-1\",\n" +
+                "    \"" + IndexMetaData.SETTING_VERSION_CREATED + "\": " + Version.V_1_4_2.id + "\n" +
+                "  },\n" +
+                "  \"mappings\": {\n" +
+                "    \"doc\": {\n" +
+                "      \"_source\" : {\"enabled\" : " + sourceEnabled + "}," +
+                "      \"properties\": {\n" +
+                "        \"token_count\": {\n" +
+                "          \"type\": \"token_count\",\n" +
+                "          \"analyzer\": \"standard\",\n" +
+                "          \"store\": \"" + storedString + "\"" +
+                "        },\n" +
+                "        \"murmur\": {\n" +
+                "          \"type\": \"murmur3\",\n" +
+                "          \"store\": \"" + storedString + "\"" +
+                "        },\n" +
+                "        \"text\": {\n" +
+                "          \"type\": \"string\",\n" +
+                "          \"fields\": {\n" +
+                "            \"token_count\": {\n" +
+                "              \"type\": \"token_count\",\n" +
+                "              \"analyzer\": \"standard\",\n" +
+                "              \"store\": \"" + storedString + "\"" +
+                "            },\n" +
+                "            \"murmur\": {\n" +
+                "              \"type\": \"murmur3\",\n" +
+                "              \"store\": \"" + storedString + "\"" +
+                "            }\n" +
+                "          }\n" +
+                "        }" +
+                "      }\n" +
+                "    }\n" +
+                "  }\n" +
+                "}";
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setSource(createIndexSource));
+        ensureGreen();
+        String doc = "{\n" +
+                "  \"murmur\": \"Some value that can be hashed\",\n" +
+                "  \"token_count\": \"A text with five words.\",\n" +
+                "  \"text\": \"A text with five words.\"\n" +
+                "}\n";
+        index("test", "doc", "1", doc);
+    }
+
+    // Convenience overload: no routing.
+    private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields) {
+        assertGetFieldsAlwaysWorks(index, type, docId, fields, null);
+    }
+
+    // Asserts every field is retrievable whether or not errors on generated
+    // fields are ignored (both ignoreErrors=false and true must succeed).
+    private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields, @Nullable String routing) {
+        for (String field : fields) {
+            assertGetFieldWorks(index, type, docId, field, false, routing);
+            assertGetFieldWorks(index, type, docId, field, true, routing);
+        }
+    }
+
+    // Asserts a single field is retrievable (doc exists, field non-null) via
+    // both the get API and the multi-get API.
+    private void assertGetFieldWorks(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+        GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+        assertThat(response.getId(), equalTo(docId));
+        assertTrue(response.isExists());
+        assertNotNull(response.getField(field));
+        // same checks through the multi-get code path
+        response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+        assertThat(response.getId(), equalTo(docId));
+        assertTrue(response.isExists());
+        assertNotNull(response.getField(field));
+    }
+
+    // Asserts that fetching each field with ignoreErrorsOnGeneratedFields=false
+    // fails (see assertGetFieldException for the expected message).
+    protected void assertGetFieldsException(String index, String type, String docId, String[] fields) {
+        for (String field : fields) {
+            assertGetFieldException(index, type, docId, field);
+        }
+    }
+
+    // Asserts that a get with ignoreErrorsOnGeneratedFields=false throws for a
+    // not-yet-generated field, and that multi-get surfaces the same error as a
+    // per-item failure rather than an exception.
+    private void assertGetFieldException(String index, String type, String docId, String field) {
+        try {
+            client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(false).get();
+            fail();
+        } catch (ElasticsearchException e) {
+            assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called."));
+        }
+        MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).setIgnoreErrorsOnGeneratedFields(false).get();
+        assertNull(multiGetResponse.getResponses()[0].getResponse());
+        assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called."));
+    }
+
+    // Convenience overload: no routing.
+    protected void assertGetFieldsNull(String index, String type, String docId, String[] fields) {
+        assertGetFieldsNull(index, type, docId, fields, null);
+    }
+
+    // Asserts each field is null when errors on generated fields are IGNORED
+    // (ignoreErrors=true only; contrast with assertGetFieldsAlwaysNull, which
+    // also checks the non-ignoring path).
+    protected void assertGetFieldsNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
+        for (String field : fields) {
+            assertGetFieldNull(index, type, docId, field, true, routing);
+        }
+    }
+
+    // Convenience overload: no routing.
+    protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields) {
+        assertGetFieldsAlwaysNull(index, type, docId, fields, null);
+    }
+
+    // Asserts each field is null regardless of the ignoreErrors flag
+    // (both true and false must yield a null field without throwing).
+    protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
+        for (String field : fields) {
+            assertGetFieldNull(index, type, docId, field, true, routing);
+            assertGetFieldNull(index, type, docId, field, false, routing);
+        }
+    }
+
+    // Asserts a single field comes back null while the document itself exists,
+    // via both the get API and the multi-get API.
+    protected void assertGetFieldNull(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+        //for get
+        GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+        assertTrue(response.isExists());
+        assertNull(response.getField(field));
+        assertThat(response.getId(), equalTo(docId));
+        //same for multi get
+        response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+        assertNull(response.getField(field));
+        assertThat(response.getId(), equalTo(docId));
+        assertTrue(response.isExists());
+    }
+
+    // Fetches one document through the multi-get API (single item) and unwraps
+    // the lone item's GetResponse; routing is attached only when non-null.
+    private GetResponse multiGetDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+        MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).fields(field);
+        if (routing != null) {
+            getItem.routing(routing);
+        }
+        MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+        MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
+        assertThat(multiGetResponse.getResponses().length, equalTo(1));
+        return multiGetResponse.getResponses()[0].getResponse();
+    }
+
+    // Fetches one document through the get API, requesting a single field;
+    // routing is attached only when non-null.
+    private GetResponse getDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+        GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+        if (routing != null) {
+            getRequestBuilder.setRouting(routing);
+        }
+        return getRequestBuilder.get();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortTests.java b/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortTests.java
new file mode 100644
index 0000000000..d2466ac649
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/HttpPublishPortTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Verifies that the "http.publish_port" setting is honored: nodes configured
+ * with publish_port 9080 must report that port in their published HTTP
+ * address via the nodes-info API, regardless of the actual bound port.
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class HttpPublishPortTests extends ElasticsearchIntegrationTest {
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        // Enable HTTP (disabled by default in tests) and pin the publish port.
+        return Settings.settingsBuilder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(Node.HTTP_ENABLED, true)
+            .put("http.publish_port", 9080)
+            .build();
+    }
+
+    @Test
+    public void testHttpPublishPort() throws Exception {
+        NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setHttp(true).get();
+        assertThat(response.getNodes(), arrayWithSize(greaterThanOrEqualTo(1)));
+        NodeInfo nodeInfo = response.getNodes()[0];
+
+        BoundTransportAddress address = nodeInfo.getHttp().address();
+        assertThat(address.publishAddress(), instanceOf(InetSocketTransportAddress.class));
+
+        // The published (advertised) port must be the configured one, 9080.
+        InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) address.publishAddress();
+        assertThat(publishAddress.address().getPort(), is(9080));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java
new file mode 100644
index 0000000000..03df6cc068
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpClient.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Function;
+import com.google.common.collect.Collections2;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.*;
+
+import java.io.Closeable;
+import java.net.SocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.HOST;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+
+/**
+ * Tiny helper that sends pipelined http requests over a netty channel and collects the responses.
+ */
+public class NettyHttpClient implements Closeable {
+
+    // extracts the body of a response as a UTF-8 string
+    private static final Function<? super HttpResponse, String> FUNCTION_RESPONSE_TO_CONTENT = new Function<HttpResponse, String>() {
+        @Override
+        public String apply(HttpResponse response) {
+            return response.getContent().toString(Charsets.UTF_8);
+        }
+    };
+
+    // extracts the opaque id echoed back by the server; netty header lookup is
+    // case-insensitive, so this matches the "X-Opaque-ID" header set in sendRequests()
+    private static final Function<? super HttpResponse, String> FUNCTION_RESPONSE_OPAQUE_ID = new Function<HttpResponse, String>() {
+        @Override
+        public String apply(HttpResponse response) {
+            return response.headers().get("X-Opaque-Id");
+        }
+    };
+
+    public static Collection<String> returnHttpResponseBodies(Collection<HttpResponse> responses) {
+        return Collections2.transform(responses, FUNCTION_RESPONSE_TO_CONTENT);
+    }
+
+    public static Collection<String> returnOpaqueIds(Collection<HttpResponse> responses) {
+        return Collections2.transform(responses, FUNCTION_RESPONSE_OPAQUE_ID);
+    }
+
+    private final ClientBootstrap clientBootstrap;
+
+    public NettyHttpClient() {
+        // fixed: removed a stray second semicolon after this statement
+        clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory());
+    }
+
+    /**
+     * Sends the given uris as GET requests over a single channel and returns the
+     * responses once all of them have arrived. Each request carries its index in
+     * the X-Opaque-ID header so callers can correlate responses with requests.
+     */
+    public synchronized Collection<HttpResponse> sendRequests(SocketAddress remoteAddress, String... uris) throws InterruptedException {
+        final CountDownLatch latch = new CountDownLatch(uris.length);
+        final Collection<HttpResponse> content = Collections.synchronizedList(new ArrayList<HttpResponse>(uris.length));
+
+        clientBootstrap.setPipelineFactory(new CountDownLatchPipelineFactory(latch, content));
+
+        ChannelFuture channelFuture = null;
+        try {
+            channelFuture = clientBootstrap.connect(remoteAddress);
+            // NOTE(review): the boolean result of await() is ignored; if the connect
+            // does not complete within one second the writes below operate on an
+            // unconnected channel — consider asserting the result
+            channelFuture.await(1000);
+
+            for (int i = 0; i < uris.length; i++) {
+                final HttpRequest httpRequest = new DefaultHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]);
+                httpRequest.headers().add(HOST, "localhost");
+                httpRequest.headers().add("X-Opaque-ID", String.valueOf(i));
+                channelFuture.getChannel().write(httpRequest);
+            }
+            // wait until the pipeline handler has counted down one response per request
+            latch.await();
+
+        } finally {
+            if (channelFuture != null) {
+                channelFuture.getChannel().close();
+            }
+        }
+
+        return content;
+    }
+
+    @Override
+    public void close() {
+        clientBootstrap.shutdown();
+        clientBootstrap.releaseExternalResources();
+    }
+
+    /**
+     * helper factory which adds returned data to a list and uses a count down latch to decide when done
+     */
+    public static class CountDownLatchPipelineFactory implements ChannelPipelineFactory {
+        private final CountDownLatch latch;
+        private final Collection<HttpResponse> content;
+
+        public CountDownLatchPipelineFactory(CountDownLatch latch, Collection<HttpResponse> content) {
+            this.latch = latch;
+            this.content = content;
+        }
+
+        @Override
+        public ChannelPipeline getPipeline() throws Exception {
+            final int maxBytes = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt();
+            return Channels.pipeline(
+                    new HttpClientCodec(),
+                    new HttpChunkAggregator(maxBytes),
+                    new SimpleChannelUpstreamHandler() {
+                        @Override
+                        public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) {
+                            final Object message = e.getMessage();
+
+                            if (message instanceof HttpResponse) {
+                                HttpResponse response = (HttpResponse) message;
+                                content.add(response);
+                            }
+
+                            // count down for every message so a non-HttpResponse frame
+                            // cannot leave sendRequests() blocked on the latch
+                            latch.countDown();
+                        }
+
+                        @Override
+                        public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) throws Exception {
+                            super.exceptionCaught(ctx, e);
+                            // also count down on errors so callers do not hang forever
+                            latch.countDown();
+                        }
+                    });
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java
new file mode 100644
index 0000000000..53666c0d2c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.http.netty.pipelining.OrderedDownstreamChannelEvent;
+import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.cache.recycler.MockBigArrays;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.jboss.netty.buffer.ChannelBuffer;
+import org.jboss.netty.buffer.ChannelBuffers;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.handler.codec.http.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.http.netty.NettyHttpClient.returnHttpResponseBodies;
+import static org.elasticsearch.http.netty.NettyHttpServerTransport.HttpChannelPipelineFactory;
+import static org.hamcrest.Matchers.*;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+
+/**
+ * This test just checks whether the pipelining works in general, without any connection to the elasticsearch handler.
+ */
+public class NettyHttpServerPipeliningTest extends ElasticsearchTestCase {
+
+    private NetworkService networkService;
+    private ThreadPool threadPool;
+    private MockPageCacheRecycler mockPageCacheRecycler;
+    private MockBigArrays bigArrays;
+    // transport under test; created per test method, closed in shutdown()
+    private CustomNettyHttpServerTransport httpServerTransport;
+
+    @Before
+    public void setup() throws Exception {
+        networkService = new NetworkService(Settings.EMPTY);
+        threadPool = new ThreadPool("test");
+        mockPageCacheRecycler = new MockPageCacheRecycler(Settings.EMPTY, threadPool);
+        bigArrays = new MockBigArrays(mockPageCacheRecycler, new NoneCircuitBreakerService());
+    }
+
+    @After
+    public void shutdown() throws Exception {
+        if (threadPool != null) {
+            threadPool.shutdownNow();
+        }
+        if (httpServerTransport != null) {
+            httpServerTransport.close();
+        }
+    }
+
+    /**
+     * With http.pipelining enabled, responses must come back in exactly the
+     * request order even though the slow requests finish processing later.
+     */
+    @Test
+    public void testThatHttpPipeliningWorksWhenEnabled() throws Exception {
+        Settings settings = settingsBuilder().put("http.pipelining", true).build();
+        httpServerTransport = new CustomNettyHttpServerTransport(settings);
+        httpServerTransport.start();
+        InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
+
+        List<String> requests = Arrays.asList("/firstfast", "/slow?sleep=500", "/secondfast", "/slow?sleep=1000", "/thirdfast");
+        try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
+            Collection<HttpResponse> responses = nettyHttpClient.sendRequests(transportAddress.address(), requests.toArray(new String[]{}));
+            Collection<String> responseBodies = returnHttpResponseBodies(responses);
+            // bodies echo the request uri, so matching them in request order proves ordering
+            assertThat(responseBodies, contains("/firstfast", "/slow?sleep=500", "/secondfast", "/slow?sleep=1000", "/thirdfast"));
+        }
+    }
+
+    /**
+     * With http.pipelining disabled, responses arrive in completion order: the
+     * sleeping handlers must finish last regardless of request order.
+     */
+    @Test
+    public void testThatHttpPipeliningCanBeDisabled() throws Exception {
+        Settings settings = settingsBuilder().put("http.pipelining", false).build();
+        httpServerTransport = new CustomNettyHttpServerTransport(settings);
+        httpServerTransport.start();
+        InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
+
+        List<String> requests = Arrays.asList("/slow?sleep=1000", "/firstfast", "/secondfast", "/thirdfast", "/slow?sleep=500");
+        try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
+            Collection<HttpResponse> responses = nettyHttpClient.sendRequests(transportAddress.address(), requests.toArray(new String[]{}));
+            List<String> responseBodies = Lists.newArrayList(returnHttpResponseBodies(responses));
+            // we cannot be sure about the order of the fast requests, but the slow ones should have to be last
+            assertThat(responseBodies, hasSize(5));
+            assertThat(responseBodies.get(3), is("/slow?sleep=500"));
+            assertThat(responseBodies.get(4), is("/slow?sleep=1000"));
+        }
+    }
+
+    /**
+     * Transport whose pipeline handler is swapped for {@link PossiblySlowUpstreamHandler},
+     * which executes requests on a small thread pool instead of the real ES handler.
+     */
+    class CustomNettyHttpServerTransport extends NettyHttpServerTransport {
+
+        private final ExecutorService executorService;
+
+        public CustomNettyHttpServerTransport(Settings settings) {
+            super(settings, NettyHttpServerPipeliningTest.this.networkService, NettyHttpServerPipeliningTest.this.bigArrays);
+            // five workers so multiple requests can be processed concurrently
+            this.executorService = Executors.newFixedThreadPool(5);
+        }
+
+        @Override
+        public ChannelPipelineFactory configureServerChannelPipelineFactory() {
+            return new CustomHttpChannelPipelineFactory(this, executorService);
+        }
+
+        @Override
+        public HttpServerTransport stop() {
+            executorService.shutdownNow();
+            return super.stop();
+        }
+    }
+
+    private class CustomHttpChannelPipelineFactory extends HttpChannelPipelineFactory {
+
+        private final ExecutorService executorService;
+
+        public CustomHttpChannelPipelineFactory(NettyHttpServerTransport transport, ExecutorService executorService) {
+            super(transport, randomBoolean());
+            this.executorService = executorService;
+        }
+
+        @Override
+        public ChannelPipeline getPipeline() throws Exception {
+            ChannelPipeline pipeline = super.getPipeline();
+            // replace the elasticsearch request handler ("handler") with the test handler
+            pipeline.replace("handler", "handler", new PossiblySlowUpstreamHandler(executorService));
+            return pipeline;
+        }
+    }
+
+    /** Hands each incoming request off to the executor, so handlers can sleep without blocking the IO thread. */
+    class PossiblySlowUpstreamHandler extends SimpleChannelUpstreamHandler {
+
+        private final ExecutorService executorService;
+
+        public PossiblySlowUpstreamHandler(ExecutorService executorService) {
+            this.executorService = executorService;
+        }
+
+        @Override
+        public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws Exception {
+            executorService.submit(new PossiblySlowRunnable(ctx, e));
+        }
+
+        @Override
+        public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e) {
+            e.getCause().printStackTrace();
+            e.getChannel().close();
+        }
+    }
+
+    /**
+     * Echoes the request uri back as the response body; uris of the form
+     * /slow?sleep=N sleep N milliseconds first. Responds through the pipelining
+     * machinery when the event is an OrderedUpstreamMessageEvent, directly otherwise.
+     */
+    class PossiblySlowRunnable implements Runnable {
+
+        private ChannelHandlerContext ctx;
+        private MessageEvent e;
+
+        public PossiblySlowRunnable(ChannelHandlerContext ctx, MessageEvent e) {
+            this.ctx = ctx;
+            this.e = e;
+        }
+
+        @Override
+        public void run() {
+            HttpRequest request;
+            OrderedUpstreamMessageEvent oue = null;
+            if (e instanceof OrderedUpstreamMessageEvent) {
+                oue = (OrderedUpstreamMessageEvent) e;
+                request = (HttpRequest) oue.getMessage();
+            } else {
+                request = (HttpRequest) e.getMessage();
+            }
+
+            // response body is simply the request uri, which the tests assert on
+            ChannelBuffer buffer = ChannelBuffers.copiedBuffer(request.getUri(), Charsets.UTF_8);
+
+            DefaultHttpResponse httpResponse = new DefaultHttpResponse(HTTP_1_1, OK);
+            httpResponse.headers().add(CONTENT_LENGTH, buffer.readableBytes());
+            httpResponse.setContent(buffer);
+
+            QueryStringDecoder decoder = new QueryStringDecoder(request.getUri());
+
+            final int timeout = request.getUri().startsWith("/slow") && decoder.getParameters().containsKey("sleep") ? Integer.valueOf(decoder.getParameters().get("sleep").get(0)) : 0;
+            if (timeout > 0) {
+                try {
+                    Thread.sleep(timeout);
+                } catch (InterruptedException e1) {
+                    Thread.currentThread().interrupt();
+                    throw new RuntimeException();
+                }
+            }
+
+            if (oue != null) {
+                ctx.sendDownstream(new OrderedDownstreamChannelEvent(oue, 0, true, httpResponse));
+            } else {
+                ctx.getChannel().write(httpResponse);
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningDisabledIntegrationTest.java b/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningDisabledIntegrationTest.java
new file mode 100644
index 0000000000..0d8ba52666
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningDisabledIntegrationTest.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.http.netty.NettyHttpClient.returnOpaqueIds;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1)
+public class NettyPipeliningDisabledIntegrationTest extends ElasticsearchIntegrationTest {
+
+    /** Enables HTTP on the test node and explicitly disables http pipelining. */
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put(Node.HTTP_ENABLED, true).put("http.pipelining", false).build();
+    }
+
+    @Test
+    public void testThatNettyHttpServerDoesNotSupportPipelining() throws Exception {
+        ensureGreen();
+        // seven requests mixing cheap ("/") and more expensive (_nodes/stats, _cluster/state) endpoints
+        List<String> requests = Arrays.asList("/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/");
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
+
+        try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
+            Collection<HttpResponse> responses = nettyHttpClient.sendRequests(inetSocketTransportAddress.address(), requests.toArray(new String[]{}));
+            assertThat(responses, hasSize(requests.size()));
+
+            // opaque ids are the request indices "0".."6" echoed back by the server
+            List<String> opaqueIds = Lists.newArrayList(returnOpaqueIds(responses));
+
+            assertResponsesOutOfOrder(opaqueIds);
+        }
+    }
+
+    /**
+     * checks if all responses are there, but also tests that they are out of order because pipelining is disabled
+     * NOTE(review): containsInAnyOrder only verifies completeness — it also passes
+     * when responses happen to arrive in order; confirm whether a stricter check is intended.
+     */
+    private void assertResponsesOutOfOrder(List<String> opaqueIds) {
+        String message = String.format(Locale.ROOT, "Expected returned http message ids to be in any order of: %s", opaqueIds);
+        assertThat(message, opaqueIds, containsInAnyOrder("0", "1", "2", "3", "4", "5", "6"));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningEnabledIntegrationTest.java b/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningEnabledIntegrationTest.java
new file mode 100644
index 0000000000..b3f8479936
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/NettyPipeliningEnabledIntegrationTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.jboss.netty.handler.codec.http.HttpResponse;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.http.netty.NettyHttpClient.returnOpaqueIds;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1)
+public class NettyPipeliningEnabledIntegrationTest extends ElasticsearchIntegrationTest {
+
+    /** Enables HTTP on the test node and explicitly enables http pipelining. */
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put(Node.HTTP_ENABLED, true).put("http.pipelining", true).build();
+    }
+
+    /**
+     * Sends five pipelined requests and verifies the responses come back in
+     * request order, identified by the opaque ids echoed back by the server.
+     */
+    @Test
+    public void testThatNettyHttpServerSupportsPipelining() throws Exception {
+        List<String> requests = Arrays.asList("/", "/_nodes/stats", "/", "/_cluster/state", "/");
+
+        HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
+        InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().boundAddress();
+
+        try (NettyHttpClient nettyHttpClient = new NettyHttpClient()) {
+            Collection<HttpResponse> responses = nettyHttpClient.sendRequests(inetSocketTransportAddress.address(), requests.toArray(new String[]{}));
+            assertThat(responses, hasSize(5));
+
+            Collection<String> opaqueIds = returnOpaqueIds(responses);
+            assertOpaqueIdsInOrder(opaqueIds);
+        }
+    }
+
+    private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) {
+        // check if opaque ids are monotonically increasing
+        int i = 0;
+        // fixed: pass the id list through a %s specifier instead of concatenating it
+        // into the format string, which would throw on any '%' in the data
+        String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds);
+        for (String opaqueId : opaqueIds) {
+            assertThat(msg, opaqueId, is(String.valueOf(i++)));
+        }
+    }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTest.java b/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTest.java
new file mode 100644
index 0000000000..110a2d7316
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandlerTest.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty.pipelining;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.jboss.netty.bootstrap.ClientBootstrap;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.*;
+import org.jboss.netty.util.HashedWheelTimer;
+import org.jboss.netty.util.Timeout;
+import org.jboss.netty.util.TimerTask;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.jboss.netty.buffer.ChannelBuffers.EMPTY_BUFFER;
+import static org.jboss.netty.buffer.ChannelBuffers.copiedBuffer;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.*;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.CHUNKED;
+import static org.jboss.netty.handler.codec.http.HttpHeaders.Values.KEEP_ALIVE;
+import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static org.jboss.netty.util.CharsetUtil.UTF_8;
+
+/**
+ *
+ */
+public class HttpPipeliningHandlerTest extends ElasticsearchTestCase {
+
+    private static final long RESPONSE_TIMEOUT = 10000L;
+    private static final long CONNECTION_TIMEOUT = 10000L;
+    private static final String CONTENT_TYPE_TEXT = "text/plain; charset=UTF-8";
+    // TODO make me random
+    private static final InetSocketAddress HOST_ADDR = new InetSocketAddress("127.0.0.1", 9080);
+    private static final String PATH1 = "/1";
+    private static final String PATH2 = "/2";
+    private static final String SOME_RESPONSE_TEXT = "some response for ";
+
+    private ClientBootstrap clientBootstrap;
+    private ServerBootstrap serverBootstrap;
+
+    // latch released by ClientHandler once the response for PATH2 has been seen
+    private CountDownLatch responsesIn;
+    private final List<String> responses = new ArrayList<>(2);
+
+    // drives the chunked response writing in ServerHandler
+    private HashedWheelTimer timer;
+
+    @Before
+    public void startBootstraps() {
+        clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory());
+
+        clientBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+            @Override
+            public ChannelPipeline getPipeline() throws Exception {
+                return Channels.pipeline(
+                        new HttpClientCodec(),
+                        new ClientHandler()
+                );
+            }
+        });
+
+        serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory());
+
+        serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+            @Override
+            public ChannelPipeline getPipeline() throws Exception {
+                // HttpPipeliningHandler sits between the codec and the test handler,
+                // so the handler receives OrderedUpstreamMessageEvents
+                return Channels.pipeline(
+                        new HttpRequestDecoder(),
+                        new HttpResponseEncoder(),
+                        new HttpPipeliningHandler(10000),
+                        new ServerHandler()
+                );
+            }
+        });
+
+        serverBootstrap.bind(HOST_ADDR);
+
+        timer = new HashedWheelTimer();
+    }
+
+    @After
+    public void releaseResources() {
+        timer.stop();
+
+        serverBootstrap.shutdown();
+        serverBootstrap.releaseExternalResources();
+        clientBootstrap.shutdown();
+        clientBootstrap.releaseExternalResources();
+    }
+
+    /**
+     * Sends two pipelined requests and asserts both responses arrive; the
+     * pipelining handler is responsible for keeping them ordered.
+     */
+    @Test
+    public void shouldReturnMessagesInOrder() throws InterruptedException {
+        responsesIn = new CountDownLatch(1);
+        responses.clear();
+
+        final ChannelFuture connectionFuture = clientBootstrap.connect(HOST_ADDR);
+
+        assertTrue(connectionFuture.await(CONNECTION_TIMEOUT));
+        final Channel clientChannel = connectionFuture.getChannel();
+
+        final HttpRequest request1 = new DefaultHttpRequest(
+                HTTP_1_1, HttpMethod.GET, PATH1);
+        request1.headers().add(HOST, HOST_ADDR.toString());
+
+        final HttpRequest request2 = new DefaultHttpRequest(
+                HTTP_1_1, HttpMethod.GET, PATH2);
+        request2.headers().add(HOST, HOST_ADDR.toString());
+
+        clientChannel.write(request1);
+        clientChannel.write(request2);
+
+        responsesIn.await(RESPONSE_TIMEOUT, MILLISECONDS);
+
+        assertTrue(responses.contains(SOME_RESPONSE_TEXT + PATH1));
+        assertTrue(responses.contains(SOME_RESPONSE_TEXT + PATH2));
+    }
+
+    /** Collects non-final chunk contents and releases the latch after PATH2's response. */
+    public class ClientHandler extends SimpleChannelUpstreamHandler {
+        @Override
+        public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) {
+            final Object message = e.getMessage();
+            if (message instanceof HttpChunk) {
+                final HttpChunk response = (HttpChunk) e.getMessage();
+                if (!response.isLast()) {
+                    final String content = response.getContent().toString(UTF_8);
+                    responses.add(content);
+                    if (content.equals(SOME_RESPONSE_TEXT + PATH2)) {
+                        responsesIn.countDown();
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Replies to each request with a chunked response whose body echoes the uri,
+     * writing the chunks asynchronously via the wheel timer.
+     */
+    public class ServerHandler extends SimpleChannelUpstreamHandler {
+        private final AtomicBoolean sendFinalChunk = new AtomicBoolean(false);
+
+        @Override
+        public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws InterruptedException {
+            final HttpRequest request = (HttpRequest) e.getMessage();
+
+            // events have passed through HttpPipeliningHandler, so this cast is expected
+            // to always succeed — presumably guaranteed by the pipeline order above; verify
+            final OrderedUpstreamMessageEvent oue = (OrderedUpstreamMessageEvent) e;
+            final String uri = request.getUri();
+
+            final HttpResponse initialChunk = new DefaultHttpResponse(HTTP_1_1, OK);
+            initialChunk.headers().add(CONTENT_TYPE, CONTENT_TYPE_TEXT);
+            initialChunk.headers().add(CONNECTION, KEEP_ALIVE);
+            initialChunk.headers().add(TRANSFER_ENCODING, CHUNKED);
+
+            ctx.sendDownstream(new OrderedDownstreamChannelEvent(oue, 0, false, initialChunk));
+
+            timer.newTimeout(new ChunkWriter(ctx, e, uri, oue, 1), 0, MILLISECONDS);
+        }
+
+        /** Timer task that emits body chunks, and eventually the final chunk once PATH2 was seen. */
+        private class ChunkWriter implements TimerTask {
+            private final ChannelHandlerContext ctx;
+            private final MessageEvent e;
+            private final String uri;
+            private final OrderedUpstreamMessageEvent oue;
+            private final int subSequence;
+
+            public ChunkWriter(final ChannelHandlerContext ctx, final MessageEvent e, final String uri,
+                               final OrderedUpstreamMessageEvent oue, final int subSequence) {
+                this.ctx = ctx;
+                this.e = e;
+                this.uri = uri;
+                this.oue = oue;
+                this.subSequence = subSequence;
+            }
+
+            @Override
+            public void run(final Timeout timeout) {
+                if (sendFinalChunk.get() && subSequence > 1) {
+                    // terminate the chunked response
+                    final HttpChunk finalChunk = new DefaultHttpChunk(EMPTY_BUFFER);
+                    ctx.sendDownstream(new OrderedDownstreamChannelEvent(oue, subSequence, true, finalChunk));
+                } else {
+                    final HttpChunk chunk = new DefaultHttpChunk(copiedBuffer(SOME_RESPONSE_TEXT + uri, UTF_8));
+                    ctx.sendDownstream(new OrderedDownstreamChannelEvent(oue, subSequence, false, chunk));
+
+                    // reschedule until the final chunk can be sent
+                    timer.newTimeout(new ChunkWriter(ctx, e, uri, oue, subSequence + 1), 0, MILLISECONDS);
+
+                    if (uri.equals(PATH2)) {
+                        sendFinalChunk.set(true);
+                    }
+                }
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
new file mode 100644
index 0000000000..e77242cfea
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/IndexRequestBuilderTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+public class IndexRequestBuilderTests extends ElasticsearchIntegrationTest {
+
+
+    /**
+     * Indexes the same {"test_field": "foobar"} document through several
+     * setSource overloads (Object varargs, String, BytesArray, byte[], Map)
+     * and verifies every one becomes a searchable hit.
+     */
+    @Test
+    public void testSetSource() throws InterruptedException, ExecutionException {
+        createIndex("test");
+        ensureYellow();
+        Map<String, Object> map = new HashMap<>();
+        map.put("test_field", "foobar");
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[] {
+                client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar"),
+                client().prepareIndex("test", "test").setSource("{\"test_field\" : \"foobar\"}"),
+                client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}")),
+                // NOTE(review): this line is identical to the one above — presumably meant
+                // to exercise a different setSource overload; verify against the builder API
+                client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}")),
+                client().prepareIndex("test", "test").setSource(new BytesArray("{\"test_field\" : \"foobar\"}").toBytes()),
+                client().prepareIndex("test", "test").setSource(map)
+        };
+        indexRandom(true, builders);
+        SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.termQuery("test_field", "foobar")).get();
+        ElasticsearchAssertions.assertHitCount(searchResponse, builders.length);
+    }
+
+    // an odd number of Object varargs (field names without values) must be rejected
+    // NOTE(review): "Objetc" in the method name is a typo for "Object"
+    @Test(expected = IllegalArgumentException.class)
+    public void testOddNumberOfSourceObjetc() {
+        client().prepareIndex("test", "test").setSource((Object)"test_field", (Object)"foobar", new Object());
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java
new file mode 100644
index 0000000000..73ea27ae84
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasTests.java
@@ -0,0 +1,726 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShadowIndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for indices that use shadow replicas and a shared filesystem
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class IndexWithShadowReplicasTests extends ElasticsearchIntegrationTest {
+
+    // Common node settings for all tests in this class: enable custom data paths
+    // (the shared filesystem the shadow replicas read from) and randomize the FS
+    // lock implementation between "native" and "simple".
+    private Settings nodeSettings() {
+        return Settings.builder()
+                .put("node.add_id_to_custom_path", false)
+                .put("node.enable_custom_paths", true)
+                .put("index.store.fs.fs_lock", randomFrom("native", "simple"))
+                .build();
+    }
+
+    /**
+     * Tests the case where we create an index without shadow replicas, snapshot it and then restore into
+     * an index with shadow replicas enabled.
+     */
+    // NOTE(review): unlike the other tests in this class this method has no @Test
+    // annotation — verify the test runner still picks it up by its "test" name prefix.
+    public void testRestoreToShadow() throws ExecutionException, InterruptedException {
+        Settings nodeSettings = nodeSettings();
+
+        internalCluster().startNodesAsync(3, nodeSettings).get();
+        final Path dataPath = createTempDir();
+        // create and fill a regular (non-shadow) index first
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
+        assertAcked(prepareCreate("foo").setSettings(idxSettings));
+        ensureGreen();
+        final int numDocs = randomIntBetween(10, 100);
+        for (int i = 0; i < numDocs; i++) {
+            client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get();
+        }
+        assertNoFailures(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+
+        // snapshot the plain index into a fresh "fs" repository
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(Settings.settingsBuilder()
+                        .put("location", randomRepoPath())));
+        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+        assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+        // restore into a renamed copy with shadow replicas on the shared data path
+        Settings shadowSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2).build();
+
+        logger.info("--> restore the index into shadow replica index");
+        RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+                .setIndexSettings(shadowSettings).setWaitForCompletion(true)
+                .setRenamePattern("(.+)").setRenameReplacement("$1-copy")
+                .execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+        ensureGreen();
+        refresh();
+
+        // after restore the primary must be a regular IndexShard while every
+        // replica must be a ShadowIndexShard
+        for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
+            if (service.hasIndex("foo-copy")) {
+                IndexShard shard = service.indexServiceSafe("foo-copy").shard(0);
+                if (shard.routingEntry().primary()) {
+                    assertFalse(shard instanceof ShadowIndexShard);
+                } else {
+                    assertTrue(shard instanceof ShadowIndexShard);
+                }
+            }
+        }
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch("foo-copy").setQuery(matchAllQuery()).get();
+        assertHitCount(resp, numDocs);
+
+    }
+
+    /** Indexes a few docs into a shadow-replica index, verifies realtime and
+     *  non-realtime gets, then checks the data survives a full or rolling restart. */
+    @Test
+    public void testIndexWithFewDocuments() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        internalCluster().startNodesAsync(3, nodeSettings).get();
+        final String IDX = "test";
+        final Path dataPath = createTempDir();
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureGreen(IDX);
+
+        // So basically, the primary should fail and the replica will need to
+        // replay the translog, this is what this tests
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+
+        // Check that we can get doc 1 and 2, because we are doing realtime
+        // gets and getting from the primary
+        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(true).setFields("foo").get();
+        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(true).setFields("foo").get();
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        flushAndRefresh(IDX);
+        client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
+        refresh();
+
+        // Check that we can get doc 1 and 2 without realtime
+        gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).setFields("foo").get();
+        gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).setFields("foo").get();
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        logger.info("--> restarting all nodes");
+        if (randomBoolean()) {
+            logger.info("--> rolling restart");
+            internalCluster().rollingRestart();
+        } else {
+            logger.info("--> full restart");
+            internalCluster().fullRestart();
+        }
+
+        client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
+        ensureGreen(IDX);
+        flushAndRefresh(IDX);
+
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, 4);
+
+        logger.info("--> deleting index");
+        assertAcked(client().admin().indices().prepareDelete(IDX));
+    }
+
+    /** Stops the node holding the primary and verifies the shadow replica is
+     *  promoted to primary: docs stay readable and the index stays writable. */
+    @Test
+    public void testReplicaToPrimaryPromotion() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        String node1 = internalCluster().startNode(nodeSettings);
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureYellow(IDX);
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+
+        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        // Node1 has the primary, now node2 has the replica
+        String node2 = internalCluster().startNode(nodeSettings);
+        ensureGreen(IDX);
+        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
+        flushAndRefresh(IDX);
+
+        logger.info("--> stopping node1 [{}]", node1);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
+        // yellow: the replica on node2 must have been promoted to primary
+        ensureYellow(IDX);
+
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, 2);
+
+        gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.toString(), gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        // the promoted primary must accept writes
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get();
+        gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.toString(), gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("foobar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("foobar"));
+    }
+
+    /** Excludes the primary's node via allocation filtering and verifies the
+     *  primary relocates while data stays readable and writable. */
+    @Test
+    public void testPrimaryRelocation() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        String node1 = internalCluster().startNode(nodeSettings);
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureYellow(IDX);
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+
+        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        // Node1 has the primary, now node2 has the replica
+        String node2 = internalCluster().startNode(nodeSettings);
+        ensureGreen(IDX);
+        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
+        flushAndRefresh(IDX);
+
+        // now prevent primary from being allocated on node 1 move to node_3
+        String node3 = internalCluster().startNode(nodeSettings);
+        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
+        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
+
+        ensureGreen(IDX);
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, 2);
+
+        gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.toString(), gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        // index through the relocated primary and read back from it explicitly
+        client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
+        gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").setFields("foo").get();
+        gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").setFields("foo").get();
+        assertTrue(gResp1.isExists());
+        assertTrue(gResp2.isExists());
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+    }
+
+    /** Relocates the primary while a background thread keeps indexing; latches
+     *  split the workload into before-relocation and after-relocation phases. */
+    @Test
+    public void testPrimaryRelocationWithConcurrentIndexing() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        String node1 = internalCluster().startNode(nodeSettings);
+        Path dataPath = createTempDir();
+        final String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureYellow(IDX);
+        // Node1 has the primary, now node2 has the replica
+        String node2 = internalCluster().startNode(nodeSettings);
+        ensureGreen(IDX);
+        flushAndRefresh(IDX);
+        String node3 = internalCluster().startNode(nodeSettings);
+        final AtomicInteger counter = new AtomicInteger(0);
+        final CountDownLatch started = new CountDownLatch(1);
+
+        final int numPhase1Docs = scaledRandomIntBetween(25, 200);
+        final int numPhase2Docs = scaledRandomIntBetween(25, 200);
+        final CountDownLatch phase1finished = new CountDownLatch(1);
+        final CountDownLatch phase2finished = new CountDownLatch(1);
+
+        // background indexer: ids are generated from the shared counter so the
+        // final hit count can be checked against counter.get()
+        Thread thread = new Thread() {
+            @Override
+            public void run() {
+                started.countDown();
+                while (counter.get() < (numPhase1Docs + numPhase2Docs)) {
+                    final IndexResponse indexResponse = client().prepareIndex(IDX, "doc",
+                            Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
+                    assertTrue(indexResponse.isCreated());
+                    final int docCount = counter.get();
+                    if (docCount == numPhase1Docs) {
+                        phase1finished.countDown();
+                    }
+                }
+                logger.info("--> stopping indexing thread");
+                phase2finished.countDown();
+            }
+        };
+        thread.start();
+        started.await();
+        phase1finished.await(); // wait for a certain number of documents to be indexed
+        logger.info("--> excluding {} from allocation", node1);
+        // now prevent primary from being allocated on node 1 move to node_3
+        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
+        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
+        // wait for more documents to be indexed post-recovery, also waits for
+        // indexing thread to stop
+        phase2finished.await();
+        ensureGreen(IDX);
+        thread.join();
+        logger.info("--> performing query");
+        flushAndRefresh();
+
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, counter.get());
+        assertHitCount(resp, numPhase1Docs + numPhase2Docs);
+    }
+
+    /** Same as above but injects transport failures on translog replay during
+     *  recovery (via MockTransportService), then stops failing and verifies
+     *  the relocation eventually succeeds with no lost documents. */
+    @Test
+    public void testPrimaryRelocationWhereRecoveryFails() throws Exception {
+        Settings nodeSettings = Settings.builder()
+                .put("node.add_id_to_custom_path", false)
+                .put("node.enable_custom_paths", true)
+                .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+                .build();
+
+        String node1 = internalCluster().startNode(nodeSettings);
+        Path dataPath = createTempDir();
+        final String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureYellow(IDX);
+        // Node1 has the primary, now node2 has the replica
+        String node2 = internalCluster().startNode(nodeSettings);
+        ensureGreen(IDX);
+        flushAndRefresh(IDX);
+        String node3 = internalCluster().startNode(nodeSettings);
+        final AtomicInteger counter = new AtomicInteger(0);
+        final CountDownLatch started = new CountDownLatch(1);
+
+        final int numPhase1Docs = scaledRandomIntBetween(25, 200);
+        final int numPhase2Docs = scaledRandomIntBetween(25, 200);
+        final int numPhase3Docs = scaledRandomIntBetween(25, 200);
+        final CountDownLatch phase1finished = new CountDownLatch(1);
+        final CountDownLatch phase2finished = new CountDownLatch(1);
+        final CountDownLatch phase3finished = new CountDownLatch(1);
+
+        final AtomicBoolean keepFailing = new AtomicBoolean(true);
+
+        // fail every translog-ops recovery request from node1 to node3 while
+        // keepFailing is set; all other traffic passes through unchanged
+        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, node1));
+        mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, node3).localNode(),
+                new MockTransportService.DelegateTransport(mockTransportService.original()) {
+
+                    @Override
+                    public void sendRequest(DiscoveryNode node, long requestId, String action,
+                                            TransportRequest request, TransportRequestOptions options)
+                            throws IOException, TransportException {
+                        if (keepFailing.get() && action.equals(RecoveryTarget.Actions.TRANSLOG_OPS)) {
+                            logger.info("--> failing translog ops");
+                            throw new ElasticsearchException("failing on purpose");
+                        }
+                        super.sendRequest(node, requestId, action, request, options);
+                    }
+                });
+
+        Thread thread = new Thread() {
+            @Override
+            public void run() {
+                started.countDown();
+                while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) {
+                    final IndexResponse indexResponse = client().prepareIndex(IDX, "doc",
+                            Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
+                    assertTrue(indexResponse.isCreated());
+                    final int docCount = counter.get();
+                    if (docCount == numPhase1Docs) {
+                        phase1finished.countDown();
+                    } else if (docCount == (numPhase1Docs + numPhase2Docs)) {
+                        phase2finished.countDown();
+                    }
+                }
+                logger.info("--> stopping indexing thread");
+                phase3finished.countDown();
+            }
+        };
+        thread.start();
+        started.await();
+        phase1finished.await(); // wait for a certain number of documents to be indexed
+        logger.info("--> excluding {} from allocation", node1);
+        // now prevent primary from being allocated on node 1 move to node_3
+        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
+        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
+        // wait for more documents to be indexed post-recovery, also waits for
+        // indexing thread to stop
+        phase2finished.await();
+        // stop failing
+        keepFailing.set(false);
+        // wait for more docs to be indexed
+        phase3finished.await();
+        ensureGreen(IDX);
+        thread.join();
+        logger.info("--> performing query");
+        flushAndRefresh();
+
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, counter.get());
+    }
+
+    /** Verifies the shared data path is fully cleaned up after the index is deleted. */
+    @Test
+    public void testIndexWithShadowReplicasCleansUp() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        int nodeCount = randomIntBetween(2, 5);
+        internalCluster().startNodesAsync(nodeCount, nodeSettings).get();
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(1, nodeCount - 1))
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureGreen(IDX);
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+        flushAndRefresh(IDX);
+
+        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").setFields("foo").get();
+        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").setFields("foo").get();
+        assertThat(gResp1.getField("foo").getValue().toString(), equalTo("bar"));
+        assertThat(gResp2.getField("foo").getValue().toString(), equalTo("bar"));
+
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, 2);
+
+        assertAcked(client().admin().indices().prepareDelete(IDX));
+
+        assertPathHasBeenCleared(dataPath);
+    }
+
+    /**
+     * Tests that shadow replicas can be "naturally" rebalanced and relocated
+     * around the cluster. By "naturally" I mean without using the reroute API
+     * @throws Exception
+     */
+    @Test
+    public void testShadowReplicaNaturalRelocation() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        internalCluster().startNodesAsync(2, nodeSettings).get();
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string").get();
+        ensureGreen(IDX);
+
+        int docCount = randomIntBetween(10, 100);
+        List<IndexRequestBuilder> builders = newArrayList();
+        for (int i = 0; i < docCount; i++) {
+            builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar"));
+        }
+        indexRandom(true, true, true, builders);
+        flushAndRefresh(IDX);
+
+        // start a third node, with 5 shards each on the other nodes, they
+        // should relocate some to the third node
+        final String node3 = internalCluster().startNode(nodeSettings);
+
+        assertBusy(new Runnable() {
+            @Override
+            public void run() {
+                client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
+                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+                RoutingNodes nodes = resp.getState().getRoutingNodes();
+                for (RoutingNode node : nodes) {
+                    logger.info("--> node has {} shards (needs at least 2)", node.numberOfOwningShards());
+                    assertThat("at least 2 shards on node", node.numberOfOwningShards(), greaterThanOrEqualTo(2));
+                }
+            }
+        });
+        ensureYellow(IDX);
+
+        logger.info("--> performing query");
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
+        assertHitCount(resp, docCount);
+
+        assertAcked(client().admin().indices().prepareDelete(IDX));
+
+        assertPathHasBeenCleared(dataPath);
+    }
+
+    /** Sorts/loads field data against shadow replicas and checks ordering and values. */
+    @Test
+    public void testShadowReplicasUsingFieldData() throws Exception {
+        Settings nodeSettings = nodeSettings();
+
+        internalCluster().startNodesAsync(3, nodeSettings).get();
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .build();
+
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get();
+        ensureGreen(IDX);
+
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get();
+        client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get();
+        flushAndRefresh(IDX);
+
+        // ascending sort on "foo": bar < baz < eggplant < foo
+        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addFieldDataField("foo").addSort("foo", SortOrder.ASC).get();
+        assertHitCount(resp, 4);
+        assertOrderedSearchHits(resp, "2", "3", "4", "1");
+        SearchHit[] hits = resp.getHits().hits();
+        assertThat(hits[0].field("foo").getValue().toString(), equalTo("bar"));
+        assertThat(hits[1].field("foo").getValue().toString(), equalTo("baz"));
+        assertThat(hits[2].field("foo").getValue().toString(), equalTo("eggplant"));
+        assertThat(hits[3].field("foo").getValue().toString(), equalTo("foo"));
+    }
+
+    /** wait until none of the nodes have shards allocated on them */
+    private void assertNoShardsOn(final List<String> nodeList) throws Exception {
+        assertBusy(new Runnable() {
+            @Override
+            public void run() {
+                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+                RoutingNodes nodes = resp.getState().getRoutingNodes();
+                for (RoutingNode node : nodes) {
+                    logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards());
+                    if (nodeList.contains(node.node().getName())) {
+                        assertThat("no shards on node", node.numberOfOwningShards(), equalTo(0));
+                    }
+                }
+            }
+        });
+    }
+
+    /** wait until the node has the specified number of shards allocated on it */
+    private void assertShardCountOn(final String nodeName, final int shardCount) throws Exception {
+        assertBusy(new Runnable() {
+            @Override
+            public void run() {
+                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+                RoutingNodes nodes = resp.getState().getRoutingNodes();
+                for (RoutingNode node : nodes) {
+                    logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards());
+                    if (nodeName.equals(node.node().getName())) {
+                        assertThat(node.numberOfOwningShards(), equalTo(shardCount));
+                    }
+                }
+            }
+        });
+    }
+
+    /** Uses two node groups ("foo"/"bar" affinity) and allocation filtering to
+     *  bounce shards between groups, stop nodes, and verify a shared-FS index
+     *  can recover its primaries on any node that can see the shared path. */
+    @Test
+    public void testIndexOnSharedFSRecoversToAnyNode() throws Exception {
+        Settings nodeSettings = nodeSettings();
+        Settings fooSettings = Settings.builder().put(nodeSettings).put("node.affinity", "foo").build();
+        Settings barSettings = Settings.builder().put(nodeSettings).put("node.affinity", "bar").build();
+
+        final Future<List<String>> fooNodes = internalCluster().startNodesAsync(2, fooSettings);
+        final Future<List<String>> barNodes = internalCluster().startNodesAsync(2, barSettings);
+        fooNodes.get();
+        barNodes.get();
+        Path dataPath = createTempDir();
+        String IDX = "test";
+
+        Settings includeFoo = Settings.builder()
+                .put("index.routing.allocation.include.affinity", "foo")
+                .build();
+        Settings includeBar = Settings.builder()
+                .put("index.routing.allocation.include.affinity", "bar")
+                .build();
+
+        Settings idxSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
+                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
+                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
+                .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)
+                .put(includeFoo) // start with requiring the shards on "foo"
+                .build();
+
+        // only one node, so all primaries will end up on node1
+        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=string,index=not_analyzed").get();
+        ensureGreen(IDX);
+
+        // Index some documents
+        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get();
+        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
+        client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get();
+        client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get();
+        flushAndRefresh(IDX);
+
+        // put shards on "bar"
+        client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get();
+
+        // wait for the shards to move from "foo" nodes to "bar" nodes
+        assertNoShardsOn(fooNodes.get());
+
+        // put shards back on "foo"
+        client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get();
+
+        // wait for the shards to move from "bar" nodes to "foo" nodes
+        assertNoShardsOn(barNodes.get());
+
+        // Stop a foo node
+        logger.info("--> stopping first 'foo' node");
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(0)));
+
+        // Ensure that the other foo node has all the shards now
+        assertShardCountOn(fooNodes.get().get(1), 5);
+
+        // Assert no shards on the "bar" nodes
+        assertNoShardsOn(barNodes.get());
+
+        // Stop the second "foo" node
+        logger.info("--> stopping second 'foo' node");
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get().get(1)));
+
+        // The index should still be able to be allocated (on the "bar" nodes),
+        // all the "foo" nodes are gone
+        ensureGreen(IDX);
+
+        // Start another "foo" node and make sure the index moves back
+        logger.info("--> starting additional 'foo' node");
+        String newFooNode = internalCluster().startNode(fooSettings);
+
+        assertShardCountOn(newFooNode, 5);
+        assertNoShardsOn(barNodes.get());
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresTest.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresTest.java
new file mode 100644
index 0000000000..e70ddfded4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresTest.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.index.IndexAction;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.fd.FaultDetection;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test failure when index replication actions fail mid-flight
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
+public class TransportIndexFailuresTest extends ElasticsearchIntegrationTest {
+
+ private static final Settings nodeSettings = Settings.settingsBuilder()
+ .put("discovery.type", "zen") // <-- To override the local setting if set externally
+ .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly
+ .put(FaultDetection.SETTING_PING_RETRIES, "1") // <-- for hitting simulated network failures quickly
+ .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly
+ .put("discovery.zen.minimum_master_nodes", 1)
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .build();
+
+ @Override
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testNetworkPartitionDuringReplicaIndexOp() throws Exception {
+ final String INDEX = "testidx";
+
+ List<String> nodes = internalCluster().startNodesAsync(2, nodeSettings).get();
+
+ // Create index test with 1 shard, 1 replica and ensure it is green
+ createIndex(INDEX);
+ ensureGreen(INDEX);
+
+ // Disable allocation so the replica cannot be reallocated when it fails
+ Settings s = Settings.builder().put("cluster.routing.allocation.enable", "none").build();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(s).get();
+
+ // Determine which node holds the primary shard
+ ClusterState state = getNodeClusterState(nodes.get(0));
+ IndexShardRoutingTable shard = state.getRoutingTable().index(INDEX).shard(0);
+ String primaryNode;
+ String replicaNode;
+ if (shard.getShards().get(0).primary()) {
+ primaryNode = nodes.get(0);
+ replicaNode = nodes.get(1);
+ } else {
+ primaryNode = nodes.get(1);
+ replicaNode = nodes.get(0);
+ }
+ logger.info("--> primary shard is on {}", primaryNode);
+
+ // Index a document to make sure everything works well
+ IndexResponse resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "bar").get();
+ assertThat("document exists on primary node",
+ internalCluster().client(primaryNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
+ equalTo(true));
+ assertThat("document exists on replica node",
+ internalCluster().client(replicaNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
+ equalTo(true));
+
+ // Disrupt the network so indexing requests fail to replicate
+ logger.info("--> preventing index/replica operations");
+ TransportService mockTransportService = internalCluster().getInstance(TransportService.class, primaryNode);
+ ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
+ internalCluster().getInstance(Discovery.class, replicaNode).localNode(),
+ ImmutableSet.of(IndexAction.NAME + "[r]")
+ );
+ mockTransportService = internalCluster().getInstance(TransportService.class, replicaNode);
+ ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
+ internalCluster().getInstance(Discovery.class, primaryNode).localNode(),
+ ImmutableSet.of(IndexAction.NAME + "[r]")
+ );
+
+ logger.info("--> indexing into primary");
+ // the replica shard should now be marked as failed because the replication operation will fail
+ resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "baz").get();
+ // wait until the cluster reaches an exact yellow state, meaning replica has failed
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ }
+ });
+ assertThat("document should still be indexed and available",
+ client().prepareGet(INDEX, "doc", resp.getId()).get().isExists(), equalTo(true));
+
+ state = getNodeClusterState(randomFrom(nodes.toArray(Strings.EMPTY_ARRAY)));
+ RoutingNodes rn = state.routingNodes();
+ logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shards(new Predicate<MutableShardRouting>() {
+ @Override
+ public boolean apply(org.elasticsearch.cluster.routing.MutableShardRouting input) {
+ return true;
+ }
+ }).size(),
+ rn.shardsWithState(UNASSIGNED).size(),
+ rn.shardsWithState(INITIALIZING).size(),
+ rn.shardsWithState(RELOCATING).size(),
+ rn.shardsWithState(STARTED).size());
+ logger.info("--> unassigned: {}, initializing: {}, relocating: {}, started: {}",
+ rn.shardsWithState(UNASSIGNED),
+ rn.shardsWithState(INITIALIZING),
+ rn.shardsWithState(RELOCATING),
+ rn.shardsWithState(STARTED));
+
+ assertThat("only a single shard is now active (replica should be failed and not reallocated)",
+ rn.shardsWithState(STARTED).size(), equalTo(1));
+ }
+
+ private ClusterState getNodeClusterState(String node) {
+ return internalCluster().client(node).admin().cluster().prepareState().setLocal(true).get().getState();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java
new file mode 100644
index 0000000000..4d38cdf320
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/VersionTypeTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class VersionTypeTests extends ElasticsearchTestCase {
+ @Test
+ public void testInternalVersionConflict() throws Exception {
+
+ assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForReads(10, Versions.MATCH_ANY));
+ // if we don't have a version in the index we accept everything
+ assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_SET, Versions.MATCH_ANY));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_SET, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we don't like it unless MATCH_ANY
+ assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.MATCH_ANY));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the stupid usual case
+ assertFalse(VersionType.INTERNAL.isVersionConflictForWrites(10, 10));
+ assertFalse(VersionType.INTERNAL.isVersionConflictForReads(10, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(9, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflictForReads(9, 10));
+ assertTrue(VersionType.INTERNAL.isVersionConflictForWrites(10, 9));
+ assertTrue(VersionType.INTERNAL.isVersionConflictForReads(10, 9));
+
+// Old indexing code, dictating behavior
+// if (expectedVersion != Versions.MATCH_ANY && currentVersion != Versions.NOT_SET) {
+// // an explicit version is provided, see if there is a conflict
+// // if we did not find anything, and a version is provided, so we do expect to find a doc under that version
+// // this is important, since we don't allow to preset a version in order to handle deletes
+// if (currentVersion == Versions.NOT_FOUND) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), Versions.NOT_FOUND, expectedVersion);
+// } else if (expectedVersion != currentVersion) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
+// }
+// }
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+ }
+
+ @Test
+ public void testVersionValidation() {
+ assertTrue(VersionType.EXTERNAL.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.EXTERNAL.validateVersionForWrites(Versions.MATCH_ANY));
+ assertFalse(VersionType.EXTERNAL.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
+ assertTrue(VersionType.EXTERNAL.validateVersionForReads(Versions.MATCH_ANY));
+ assertTrue(VersionType.EXTERNAL.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.EXTERNAL.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
+
+ assertTrue(VersionType.EXTERNAL_GTE.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.EXTERNAL_GTE.validateVersionForWrites(Versions.MATCH_ANY));
+ assertFalse(VersionType.EXTERNAL_GTE.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
+ assertTrue(VersionType.EXTERNAL_GTE.validateVersionForReads(Versions.MATCH_ANY));
+ assertTrue(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.EXTERNAL_GTE.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
+
+ assertTrue(VersionType.FORCE.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.FORCE.validateVersionForWrites(Versions.MATCH_ANY));
+ assertFalse(VersionType.FORCE.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
+ assertTrue(VersionType.FORCE.validateVersionForReads(Versions.MATCH_ANY));
+ assertTrue(VersionType.FORCE.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.FORCE.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
+
+ assertTrue(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertTrue(VersionType.INTERNAL.validateVersionForWrites(Versions.MATCH_ANY));
+ assertFalse(VersionType.INTERNAL.validateVersionForWrites(randomIntBetween(Integer.MIN_VALUE, 0)));
+ assertTrue(VersionType.INTERNAL.validateVersionForReads(Versions.MATCH_ANY));
+ assertTrue(VersionType.INTERNAL.validateVersionForReads(randomIntBetween(1, Integer.MAX_VALUE)));
+ assertFalse(VersionType.INTERNAL.validateVersionForReads(randomIntBetween(Integer.MIN_VALUE, -1)));
+ }
+
+ @Test
+ public void testExternalVersionConflict() throws Exception {
+
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_SET, 10));
+ // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we always accept
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+ // and the standard behavior
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 10));
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForWrites(9, 10));
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForWrites(10, 9));
+
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForReads(10, 10));
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(9, 10));
+ assertTrue(VersionType.EXTERNAL.isVersionConflictForReads(10, 9));
+ assertFalse(VersionType.EXTERNAL.isVersionConflictForReads(10, Versions.MATCH_ANY));
+
+
+// Old indexing code, dictating behavior
+// // an external version is provided, just check, if a local version exists, that its higher than it
+// // the actual version checking is one in an external system, and we just want to not index older versions
+// if (currentVersion >= 0) { // we can check!, its there
+// if (currentVersion >= index.version()) {
+// throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, index.version());
+// }
+// }
+// updatedVersion = index.version();
+ }
+
+ @Test
+ public void testExternalGTEVersionConflict() throws Exception {
+
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_SET, 10));
+ // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we always accept
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+
+ // and the standard behavior
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 10));
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(9, 10));
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForWrites(10, 9));
+
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForReads(10, 10));
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(9, 10));
+ assertTrue(VersionType.EXTERNAL_GTE.isVersionConflictForReads(10, 9));
+ assertFalse(VersionType.EXTERNAL_GTE.isVersionConflictForReads(10, Versions.MATCH_ANY));
+
+ }
+
+ @Test
+ public void testForceVersionConflict() throws Exception {
+
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_SET, 10));
+ // MATCH_ANY must throw an exception in the case of external version, as the version must be set! it used as the new value
+ assertTrue(VersionType.FORCE.isVersionConflictForWrites(10, Versions.MATCH_ANY));
+
+ // if we didn't find a version (but the index does support it), we always accept
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(Versions.NOT_FOUND, 10));
+
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.NOT_FOUND));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(Versions.NOT_FOUND, Versions.MATCH_ANY));
+
+
+ // and the standard behavior
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(9, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForWrites(10, 9));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(9, 10));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(10, 9));
+ assertFalse(VersionType.FORCE.isVersionConflictForReads(10, Versions.MATCH_ANY));
+ }
+
+ @Test
+ public void testUpdateVersion() {
+
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(1l));
+ assertThat(VersionType.INTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(1l));
+ assertThat(VersionType.INTERNAL.updateVersion(1, 1), equalTo(2l));
+ assertThat(VersionType.INTERNAL.updateVersion(2, Versions.MATCH_ANY), equalTo(3l));
+
+
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_SET, 10), equalTo(10l));
+ assertThat(VersionType.EXTERNAL.updateVersion(Versions.NOT_FOUND, 10), equalTo(10l));
+ assertThat(VersionType.EXTERNAL.updateVersion(1, 10), equalTo(10l));
+
+ assertThat(VersionType.EXTERNAL_GTE.updateVersion(Versions.NOT_SET, 10), equalTo(10l));
+ assertThat(VersionType.EXTERNAL_GTE.updateVersion(Versions.NOT_FOUND, 10), equalTo(10l));
+ assertThat(VersionType.EXTERNAL_GTE.updateVersion(1, 10), equalTo(10l));
+ assertThat(VersionType.EXTERNAL_GTE.updateVersion(10, 10), equalTo(10l));
+
+ assertThat(VersionType.FORCE.updateVersion(Versions.NOT_SET, 10), equalTo(10l));
+ assertThat(VersionType.FORCE.updateVersion(Versions.NOT_FOUND, 10), equalTo(10l));
+ assertThat(VersionType.FORCE.updateVersion(11, 10), equalTo(10l));
+
+// Old indexing code
+// if (index.versionType() == VersionType.INTERNAL) { // internal version type
+// updatedVersion = (currentVersion == Versions.NOT_SET || currentVersion == Versions.NOT_FOUND) ? 1 : currentVersion + 1;
+// } else { // external version type
+// updatedVersion = expectedVersion;
+// }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/core/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
new file mode 100644
index 0000000000..484e5c9227
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.aliases;
+
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest {
+
+ public IndexAliasesService newIndexAliasesService() {
+ Settings settings = Settings.builder().put("name", "IndexAliasesServiceTests").build();
+ IndexService indexService = createIndex("test", settings);
+ return indexService.aliasesService();
+ }
+
+ public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.close();
+ return new CompressedXContent(builder.string());
+ }
+
+ @Test
+ public void testFilteringAliases() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termQuery("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termQuery("animal", "dog")));
+ indexAliasesService.add("all", null);
+
+ assertThat(indexAliasesService.hasAlias("cats"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true));
+ assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false));
+
+ assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("animal:cat"));
+ assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("animal:cat animal:dog"));
+
+ // Non-filtering alias should turn off all filters because filters are ORed
+ assertThat(indexAliasesService.aliasFilter("all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("cats", "all"), nullValue());
+ assertThat(indexAliasesService.aliasFilter("all", "cats"), nullValue());
+
+ indexAliasesService.add("cats", filter(termQuery("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termQuery("animal", "canine")));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
+ }
+
+ @Test
+ public void testAliasFilters() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termQuery("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termQuery("animal", "dog")));
+
+ assertThat(indexAliasesService.aliasFilter(), nullValue());
+ assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("animal:dog"));
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("animal:dog animal:cat"));
+
+ indexAliasesService.add("cats", filter(termQuery("animal", "feline")));
+ indexAliasesService.add("dogs", filter(termQuery("animal", "canine")));
+
+ assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("animal:canine animal:feline"));
+ }
+
+ @Test(expected = InvalidAliasNameException.class)
+ public void testRemovedAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termQuery("animal", "cat")));
+ indexAliasesService.remove("cats");
+ indexAliasesService.aliasFilter("cats");
+ }
+
+
+ @Test
+ public void testUnknownAliasFilter() throws Exception {
+ IndexAliasesService indexAliasesService = newIndexAliasesService();
+ indexAliasesService.add("cats", filter(termQuery("animal", "cat")));
+ indexAliasesService.add("dogs", filter(termQuery("animal", "dog")));
+
+ try {
+ indexAliasesService.aliasFilter("unknown");
+ fail();
+ } catch (InvalidAliasNameException e) {
+ // all is well
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..1e16122af5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/ASCIIFoldingTokenFilterFactoryTests.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class ASCIIFoldingTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
+ String source = "Ansprüche";
+ String[] expected = new String[]{"Anspruche"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testPreserveOriginal() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_ascii_folding.type", "asciifolding")
+ .put("index.analysis.filter.my_ascii_folding.preserve_original", true)
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_ascii_folding");
+ String source = "Ansprüche";
+ String[] expected = new String[]{"Anspruche", "Ansprüche"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java
new file mode 100644
index 0000000000..eac199db7e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
+import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Alerts us if new analyzers are added to lucene, so we don't miss them.
+ * <p>
+ * If we don't want to expose one for a specific reason, just map it to Void
+ */
+public class AnalysisFactoryTests extends ElasticsearchTestCase {
+
+ static final Map<String,Class<?>> KNOWN_TOKENIZERS = new HashMap<String,Class<?>>() {{
+ // deprecated ones, we dont care about these
+ put("arabicletter", Deprecated.class);
+ put("chinese", Deprecated.class);
+ put("cjk", Deprecated.class);
+ put("russianletter", Deprecated.class);
+
+ // exposed in ES
+ put("classic", ClassicTokenizerFactory.class);
+ put("edgengram", EdgeNGramTokenizerFactory.class);
+ put("keyword", KeywordTokenizerFactory.class);
+ put("letter", LetterTokenizerFactory.class);
+ put("lowercase", LowerCaseTokenizerFactory.class);
+ put("ngram", NGramTokenizerFactory.class);
+ put("pathhierarchy", PathHierarchyTokenizerFactory.class);
+ put("pattern", PatternTokenizerFactory.class);
+ put("standard", StandardTokenizerFactory.class);
+ put("thai", ThaiTokenizerFactory.class);
+ put("uax29urlemail", UAX29URLEmailTokenizerFactory.class);
+ put("whitespace", WhitespaceTokenizerFactory.class);
+
+ // this one "seems to mess up offsets". probably shouldn't be a tokenizer...
+ put("wikipedia", Void.class);
+ }};
+
+ public void testTokenizers() {
+ Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.TokenizerFactory.availableTokenizers());
+ missing.removeAll(KNOWN_TOKENIZERS.keySet());
+ assertTrue("new tokenizers found, please update KNOWN_TOKENIZERS: " + missing.toString(), missing.isEmpty());
+ }
+
+ static final Map<String,Class<?>> KNOWN_TOKENFILTERS = new HashMap<String,Class<?>>() {{
+ // deprecated ones, we dont care about these
+ put("chinese", Deprecated.class);
+ put("collationkey", Deprecated.class);
+ put("position", Deprecated.class);
+ put("thaiword", Deprecated.class);
+
+
+ // exposed in ES
+ put("apostrophe", ApostropheFilterFactory.class);
+ put("arabicnormalization", ArabicNormalizationFilterFactory.class);
+ put("arabicstem", ArabicStemTokenFilterFactory.class);
+ put("asciifolding", ASCIIFoldingTokenFilterFactory.class);
+ put("brazilianstem", BrazilianStemTokenFilterFactory.class);
+ put("bulgarianstem", StemmerTokenFilterFactory.class);
+ put("cjkbigram", CJKBigramFilterFactory.class);
+ put("cjkwidth", CJKWidthFilterFactory.class);
+ put("classic", ClassicFilterFactory.class);
+ put("commongrams", CommonGramsTokenFilterFactory.class);
+ put("commongramsquery", CommonGramsTokenFilterFactory.class);
+ put("czechstem", CzechStemTokenFilterFactory.class);
+ put("delimitedpayload", DelimitedPayloadTokenFilterFactory.class);
+ put("dictionarycompoundword", DictionaryCompoundWordTokenFilterFactory.class);
+ put("edgengram", EdgeNGramTokenFilterFactory.class);
+ put("elision", ElisionTokenFilterFactory.class);
+ put("englishminimalstem", StemmerTokenFilterFactory.class);
+ put("englishpossessive", StemmerTokenFilterFactory.class);
+ put("finnishlightstem", StemmerTokenFilterFactory.class);
+ put("frenchlightstem", StemmerTokenFilterFactory.class);
+ put("frenchminimalstem", StemmerTokenFilterFactory.class);
+ put("galicianminimalstem", StemmerTokenFilterFactory.class);
+ put("galicianstem", StemmerTokenFilterFactory.class);
+ put("germanstem", GermanStemTokenFilterFactory.class);
+ put("germanlightstem", StemmerTokenFilterFactory.class);
+ put("germanminimalstem", StemmerTokenFilterFactory.class);
+ put("germannormalization", GermanNormalizationFilterFactory.class);
+ put("greeklowercase", LowerCaseTokenFilterFactory.class);
+ put("greekstem", StemmerTokenFilterFactory.class);
+ put("hindinormalization", HindiNormalizationFilterFactory.class);
+ put("hindistem", StemmerTokenFilterFactory.class);
+ put("hungarianlightstem", StemmerTokenFilterFactory.class);
+ put("hunspellstem", HunspellTokenFilterFactory.class);
+ put("hyphenationcompoundword", HyphenationCompoundWordTokenFilterFactory.class);
+ put("indicnormalization", IndicNormalizationFilterFactory.class);
+ put("irishlowercase", LowerCaseTokenFilterFactory.class);
+ put("indonesianstem", StemmerTokenFilterFactory.class);
+ put("italianlightstem", StemmerTokenFilterFactory.class);
+ put("keepword", KeepWordFilterFactory.class);
+ put("keywordmarker", KeywordMarkerTokenFilterFactory.class);
+ put("kstem", KStemTokenFilterFactory.class);
+ put("latvianstem", StemmerTokenFilterFactory.class);
+ put("length", LengthTokenFilterFactory.class);
+ put("limittokencount", LimitTokenCountFilterFactory.class);
+ put("lowercase", LowerCaseTokenFilterFactory.class);
+ put("ngram", NGramTokenFilterFactory.class);
+ put("norwegianlightstem", StemmerTokenFilterFactory.class);
+ put("norwegianminimalstem", StemmerTokenFilterFactory.class);
+ put("patterncapturegroup", PatternCaptureGroupTokenFilterFactory.class);
+ put("patternreplace", PatternReplaceTokenFilterFactory.class);
+ put("persiannormalization", PersianNormalizationFilterFactory.class);
+ put("porterstem", PorterStemTokenFilterFactory.class);
+ put("portuguesestem", StemmerTokenFilterFactory.class);
+ put("portugueselightstem", StemmerTokenFilterFactory.class);
+ put("portugueseminimalstem", StemmerTokenFilterFactory.class);
+ put("reversestring", ReverseTokenFilterFactory.class);
+ put("russianlightstem", StemmerTokenFilterFactory.class);
+ put("scandinavianfolding", ScandinavianFoldingFilterFactory.class);
+ put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class);
+ put("serbiannormalization", SerbianNormalizationFilterFactory.class);
+ put("shingle", ShingleTokenFilterFactory.class);
+ put("snowballporter", SnowballTokenFilterFactory.class);
+ put("soraninormalization", SoraniNormalizationFilterFactory.class);
+ put("soranistem", StemmerTokenFilterFactory.class);
+ put("spanishlightstem", StemmerTokenFilterFactory.class);
+ put("standard", StandardTokenFilterFactory.class);
+ put("stemmeroverride", StemmerOverrideTokenFilterFactory.class);
+ put("stop", StopTokenFilterFactory.class);
+ put("swedishlightstem", StemmerTokenFilterFactory.class);
+ put("synonym", SynonymTokenFilterFactory.class);
+ put("trim", TrimTokenFilterFactory.class);
+ put("truncate", TruncateTokenFilterFactory.class);
+ put("turkishlowercase", LowerCaseTokenFilterFactory.class);
+ put("type", KeepTypesFilterFactory.class);
+ put("uppercase", UpperCaseTokenFilterFactory.class);
+ put("worddelimiter", WordDelimiterTokenFilterFactory.class);
+
+ // TODO: these tokenfilters are not yet exposed: useful?
+
+ // suggest stop
+ put("suggeststop", Void.class);
+ // capitalizes tokens
+ put("capitalization", Void.class);
+ // like length filter (but codepoints)
+ put("codepointcount", Void.class);
+ // puts hyphenated words back together
+ put("hyphenatedwords", Void.class);
+ // repeats anything marked as keyword
+ put("keywordrepeat", Void.class);
+ // like limittokencount, but by offset
+ put("limittokenoffset", Void.class);
+ // like limittokencount, but by position
+ put("limittokenposition", Void.class);
+ // ???
+ put("numericpayload", Void.class);
+ // removes duplicates at the same position (this should be used by the existing factory)
+ put("removeduplicates", Void.class);
+ // ???
+ put("tokenoffsetpayload", Void.class);
+ // puts the type into the payload
+ put("typeaspayload", Void.class);
+ }};
+
+ public void testTokenFilters() {
+ Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.TokenFilterFactory.availableTokenFilters());
+ missing.removeAll(KNOWN_TOKENFILTERS.keySet());
+ assertTrue("new tokenfilters found, please update KNOWN_TOKENFILTERS: " + missing.toString(), missing.isEmpty());
+ }
+
+ static final Map<String,Class<?>> KNOWN_CHARFILTERS = new HashMap<String,Class<?>>() {{
+ // exposed in ES
+ put("htmlstrip", HtmlStripCharFilterFactory.class);
+ put("mapping", MappingCharFilterFactory.class);
+ put("patternreplace", PatternReplaceCharFilterFactory.class);
+
+ // TODO: these charfilters are not yet exposed: useful?
+ // handling of zwnj for persian
+ put("persian", Void.class);
+ }};
+
+ public void testCharFilters() {
+ Set<String> missing = new TreeSet<String>(org.apache.lucene.analysis.util.CharFilterFactory.availableCharFilters());
+ missing.removeAll(KNOWN_CHARFILTERS.keySet());
+ assertTrue("new charfilters found, please update KNOWN_CHARFILTERS: " + missing.toString(), missing.isEmpty());
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
new file mode 100644
index 0000000000..c1d260392f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisModuleTests.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
+import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.StringReader;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class AnalysisModuleTests extends ElasticsearchTestCase {
+
+ private Injector injector;
+
+ public AnalysisService getAnalysisService(Settings settings) {
+ Index index = new Index("test");
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+
+ private Settings loadFromClasspath(String path) {
+ return settingsBuilder().loadFromClasspath(path)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put("path.home", createTempDir().toString())
+ .build();
+
+ }
+
+ @Test
+ public void testSimpleConfigurationJson() {
+ Settings settings = loadFromClasspath("org/elasticsearch/index/analysis/test1.json");
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testSimpleConfigurationYaml() {
+ Settings settings = loadFromClasspath("org/elasticsearch/index/analysis/test1.yml");
+ testSimpleConfiguration(settings);
+ }
+
+ @Test
+ public void testDefaultFactoryTokenFilters() throws IOException {
+ assertTokenFilter("keyword_repeat", KeywordRepeatFilter.class);
+ assertTokenFilter("persian_normalization", PersianNormalizationFilter.class);
+ assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class);
+ }
+
+ @Test
+ public void testVersionedAnalyzers() throws Exception {
+ Settings settings2 = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/index/analysis/test1.yml")
+ .put("path.home", createTempDir().toString())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0)
+ .build();
+ AnalysisService analysisService2 = getAnalysisService(settings2);
+
+ // indicesanalysisservice always has the current version
+ IndicesAnalysisService indicesAnalysisService2 = injector.getInstance(IndicesAnalysisService.class);
+ assertThat(indicesAnalysisService2.analyzer("default"), is(instanceOf(NamedAnalyzer.class)));
+ NamedAnalyzer defaultNamedAnalyzer = (NamedAnalyzer) indicesAnalysisService2.analyzer("default");
+ assertThat(defaultNamedAnalyzer.analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertEquals(Version.CURRENT.luceneVersion, defaultNamedAnalyzer.analyzer().getVersion());
+
+ // analysis service has the expected version
+ assertThat(analysisService2.analyzer("standard").analyzer(), is(instanceOf(StandardAnalyzer.class)));
+ assertEquals(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("standard").analyzer().getVersion());
+ assertEquals(Version.V_0_90_0.luceneVersion, analysisService2.analyzer("thai").analyzer().getVersion());
+ }
+
+ private void assertTokenFilter(String name, Class clazz) throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put("path.home", createTempDir().toString()).build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter(name);
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream stream = tokenFilter.create(tokenizer);
+ assertThat(stream, instanceOf(clazz));
+ }
+
+ private void testSimpleConfiguration(Settings settings) {
+ AnalysisService analysisService = getAnalysisService(settings);
+ Analyzer analyzer = analysisService.analyzer("custom1").analyzer();
+
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom1 = (CustomAnalyzer) analyzer;
+ assertThat(custom1.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+ assertThat(custom1.tokenFilters().length, equalTo(2));
+
+ StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
+ assertThat(stop1.stopWords().size(), equalTo(1));
+ //assertThat((Iterable<char[]>) stop1.stopWords(), hasItem("test-stop".toCharArray()));
+
+ analyzer = analysisService.analyzer("custom2").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom2 = (CustomAnalyzer) analyzer;
+
+// HtmlStripCharFilterFactory html = (HtmlStripCharFilterFactory) custom2.charFilters()[0];
+// assertThat(html.readAheadLimit(), equalTo(HTMLStripCharFilter.DEFAULT_READ_AHEAD));
+//
+// html = (HtmlStripCharFilterFactory) custom2.charFilters()[1];
+// assertThat(html.readAheadLimit(), equalTo(1024));
+
+ // verify position offset gap
+ analyzer = analysisService.analyzer("custom6").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom6 = (CustomAnalyzer) analyzer;
+ assertThat(custom6.getPositionIncrementGap("any_string"), equalTo(256));
+
+ // verify characters mapping
+ analyzer = analysisService.analyzer("custom5").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
+ assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
+
+ // verify aliases
+ analyzer = analysisService.analyzer("alias1").analyzer();
+ assertThat(analyzer, instanceOf(StandardAnalyzer.class));
+
+ // check custom pattern replace filter
+ analyzer = analysisService.analyzer("custom3").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
+ PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
+ assertThat(patternReplaceCharFilterFactory.getPattern().pattern(), equalTo("sample(.*)"));
+ assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
+
+ // check custom class name (my)
+ analyzer = analysisService.analyzer("custom4").analyzer();
+ assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+ CustomAnalyzer custom4 = (CustomAnalyzer) analyzer;
+ assertThat(custom4.tokenFilters()[0], instanceOf(MyFilterTokenFilterFactory.class));
+
+// // verify Czech stemmer
+// analyzer = analysisService.analyzer("czechAnalyzerWithStemmer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer czechstemmeranalyzer = (CustomAnalyzer) analyzer;
+// assertThat(czechstemmeranalyzer.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(czechstemmeranalyzer.tokenFilters().length, equalTo(4));
+// assertThat(czechstemmeranalyzer.tokenFilters()[3], instanceOf(CzechStemTokenFilterFactory.class));
+//
+// // check dictionary decompounder
+// analyzer = analysisService.analyzer("decompoundingAnalyzer").analyzer();
+// assertThat(analyzer, instanceOf(CustomAnalyzer.class));
+// CustomAnalyzer dictionaryDecompounderAnalyze = (CustomAnalyzer) analyzer;
+// assertThat(dictionaryDecompounderAnalyze.tokenizerFactory(), instanceOf(StandardTokenizerFactory.class));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters().length, equalTo(1));
+// assertThat(dictionaryDecompounderAnalyze.tokenFilters()[0], instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+
+ Set<?> wordList = Analysis.getWordSet(null, settings, "index.analysis.filter.dict_dec.word_list");
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems("donau", "dampf", "schiff", "spargel", "creme", "suppe"));
+ }
+
+ @Test
+ public void testWordListPath() throws Exception {
+ Settings settings = Settings.builder()
+ .put("path.home", createTempDir().toString())
+ .build();
+ Environment env = new Environment(settings);
+ String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"};
+
+ Path wordListFile = generateWordList(words);
+ settings = settingsBuilder().loadFromSource("index: \n word_list_path: " + wordListFile.toAbsolutePath()).build();
+
+ Set<?> wordList = Analysis.getWordSet(env, settings, "index.word_list");
+ MatcherAssert.assertThat(wordList.size(), equalTo(6));
+// MatcherAssert.assertThat(wordList, hasItems(words));
+ Files.delete(wordListFile);
+ }
+
+ private Path generateWordList(String[] words) throws Exception {
+ Path wordListFile = createTempDir().resolve("wordlist.txt");
+ try (BufferedWriter writer = Files.newBufferedWriter(wordListFile, StandardCharsets.UTF_8)) {
+ for (String word : words) {
+ writer.write(word);
+ writer.write('\n');
+ }
+ }
+ return wordListFile;
+ }
+
+ @Test
+ public void testUnderscoreInAnalyzerName() {
+ Settings settings = Settings.builder()
+ .put("index.analysis.analyzer._invalid_name.tokenizer", "keyword")
+ .put("path.home", createTempDir().toString())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, "1")
+ .build();
+ try {
+ getAnalysisService(settings);
+ fail("This should fail with IllegalArgumentException because the analyzers name starts with _");
+ } catch (ProvisionException e) {
+ assertTrue(e.getCause() instanceof IllegalArgumentException);
+ assertThat(e.getCause().getMessage(), equalTo("analyzer name must not start with '_'. got \"_invalid_name\""));
+ }
+ }
+
+ @Test
+ public void testUnderscoreInAnalyzerNameAlias() {
+ Settings settings = Settings.builder()
+ .put("index.analysis.analyzer.valid_name.tokenizer", "keyword")
+ .put("index.analysis.analyzer.valid_name.alias", "_invalid_name")
+ .put("path.home", createTempDir().toString())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, "1")
+ .build();
+ try {
+ getAnalysisService(settings);
+ fail("This should fail with IllegalArgumentException because the analyzers alias starts with _");
+ } catch (ProvisionException e) {
+ assertTrue(e.getCause() instanceof IllegalArgumentException);
+ assertThat(e.getCause().getMessage(), equalTo("analyzer name must not start with '_'. got \"_invalid_name\""));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java
new file mode 100644
index 0000000000..30050ba6c0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.util.CharArraySet;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.is;
+
+public class AnalysisTests extends ElasticsearchTestCase {
+ @Test
+ public void testParseStemExclusion() {
+
+ /* Comma separated list */
+ Settings settings = settingsBuilder().put("stem_exclusion", "foo,bar").build();
+ CharArraySet set = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
+ assertThat(set.contains("foo"), is(true));
+ assertThat(set.contains("bar"), is(true));
+ assertThat(set.contains("baz"), is(false));
+
+ /* Array */
+ settings = settingsBuilder().putArray("stem_exclusion", "foo","bar").build();
+ set = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
+ assertThat(set.contains("foo"), is(true));
+ assertThat(set.contains("bar"), is(true));
+ assertThat(set.contains("baz"), is(false));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
new file mode 100644
index 0000000000..6f7581768f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTestsHelper.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+
+import java.nio.file.Path;
+
+public class AnalysisTestsHelper {
+
+ public static AnalysisService createAnalysisServiceFromClassPath(Path baseDir, String resource) {
+ Settings settings = Settings.settingsBuilder()
+ .loadFromClasspath(resource)
+ .put("path.home", baseDir.toString())
+ .build();
+
+ return createAnalysisServiceFromSettings(settings);
+ }
+
+ public static AnalysisService createAnalysisServiceFromSettings(
+ Settings settings) {
+ Index index = new Index("test");
+ if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) {
+ settings = Settings.builder().put(settings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ }
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+
+ AnalysisModule analysisModule = new AnalysisModule(settings,
+ parentInjector.getInstance(IndicesAnalysisService.class));
+
+ Injector injector = new ModulesBuilder().add(new IndexSettingsModule(index, settings),
+ new IndexNameModule(index), analysisModule).createChildInjector(parentInjector);
+
+ return injector.getInstance(AnalysisService.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
new file mode 100644
index 0000000000..e633895e9e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalyzerBackwardsCompatTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Ignore;
+
+import java.io.IOException;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+
+/**
+ */
+public class AnalyzerBackwardsCompatTests extends ElasticsearchTokenStreamTestCase {
+
+ @Ignore
+ private void testNoStopwordsAfter(org.elasticsearch.Version noStopwordVersion, String type) throws IOException {
+ final int iters = scaledRandomIntBetween(10, 100);
+ org.elasticsearch.Version version = org.elasticsearch.Version.CURRENT;
+ for (int i = 0; i < iters; i++) {
+ Settings.Builder builder = Settings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ if (version.onOrAfter(noStopwordVersion)) {
+ if (random().nextBoolean()) {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ } else {
+ builder.put(SETTING_VERSION_CREATED, version);
+ }
+ builder.put("index.analysis.analyzer.foo.type", type);
+ builder.put("path.home", createTempDir().toString());
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ NamedAnalyzer analyzer = analysisService.analyzer("foo");
+ if (version.onOrAfter(noStopwordVersion)) {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"this", "is", "bogus"});
+ } else {
+ assertAnalyzesTo(analyzer, "this is bogus", new String[]{"bogus"});
+ }
+ version = randomVersion();
+ }
+ }
+
+ public void testPatternAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "pattern");
+ }
+
+ public void testStandardHTMLStripAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_RC1, "standard_html_strip");
+ }
+
+ public void testStandardAnalyzer() throws IOException {
+ testNoStopwordsAfter(org.elasticsearch.Version.V_1_0_0_Beta1, "standard");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
new file mode 100644
index 0000000000..bfa4c5ed59
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CJKFilterFactoryTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+// Tests the CJK token filter factories configured in cjk_analysis.json:
+// default bigrams, explicit no-flags, Han-only bigrams, and Han bigrams+unigrams.
+// NOTE(review): the CJK string literals below appear mojibake-encoded (UTF-8
+// bytes decoded as Latin-1) — confirm the source file encoding before editing.
+public class CJKFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+    // Classpath location of the analysis settings shared by every test here.
+    private static final String RESOURCE = "org/elasticsearch/index/analysis/cjk_analysis.json";
+
+    // "cjk_bigram" with default flags: overlapping bigrams across the CJK run.
+    @Test
+    public void testDefault() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_bigram");
+        String source = "多ãã®å­¦ç”ŸãŒè©¦é¨“ã«è½ã¡ãŸã€‚";
+        String[] expected = new String[]{"多ã", "ãã®", "ã®å­¦", "学生", "生ãŒ", "ãŒè©¦", "試験", "験ã«", "ã«è½", "è½ã¡", "ã¡ãŸ" };
+        Tokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+    }
+
+    // "cjk_no_flags" must behave identically to the default bigram configuration.
+    @Test
+    public void testNoFlags() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_no_flags");
+        String source = "多ãã®å­¦ç”ŸãŒè©¦é¨“ã«è½ã¡ãŸã€‚";
+        String[] expected = new String[]{"多ã", "ãã®", "ã®å­¦", "学生", "生ãŒ", "ãŒè©¦", "試験", "験ã«", "ã«è½", "è½ã¡", "ã¡ãŸ" };
+        Tokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+    }
+
+    // "cjk_han_only": only Han characters are bigrammed; others pass as unigrams.
+    @Test
+    public void testHanOnly() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_only");
+        String source = "多ãã®å­¦ç”ŸãŒè©¦é¨“ã«è½ã¡ãŸã€‚";
+        String[] expected = new String[]{"多", "ã", "ã®", "学生", "ãŒ", "試験", "ã«", "è½", "ã¡", "ãŸ" };
+        Tokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+    }
+
+    // "cjk_han_unigram_only": Han runs emit both unigrams and bigrams.
+    @Test
+    public void testHanUnigramOnly() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("cjk_han_unigram_only");
+        String source = "多ãã®å­¦ç”ŸãŒè©¦é¨“ã«è½ã¡ãŸã€‚";
+        String[] expected = new String[]{"多", "ã", "ã®", "å­¦", "学生", "生", "ãŒ", "試", "試験", "験", "ã«", "è½", "ã¡", "ãŸ" };
+        Tokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+    }
+
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
new file mode 100644
index 0000000000..e2ce99d833
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CharFilterTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ */
+// Tests that char filters (mapping, html_strip) wired into a custom analyzer
+// via the Guice analysis module transform input before tokenization, and that
+// the filter chain is correctly re-initialized between uses of the analyzer.
+public class CharFilterTests extends ElasticsearchTokenStreamTestCase {
+
+    // "mapping" char filter: ph=>f and qu=>q rewrites applied pre-tokenization.
+    @Test
+    public void testMappingCharFilter() throws Exception {
+        Index index = new Index("test");
+        Settings settings = settingsBuilder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("index.analysis.char_filter.my_mapping.type", "mapping")
+                .putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q")
+                .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+                .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
+                .put("path.home", createTempDir().toString())
+                .build();
+        // Bootstrap a parent injector for node-level services, then a child
+        // injector carrying the index-scoped analysis bindings.
+        Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+        Injector injector = new ModulesBuilder().add(
+                new IndexSettingsModule(index, settings),
+                new IndexNameModule(index),
+                new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+                .createChildInjector(parentInjector);
+
+        AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+        NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+        // "quit" -> "qit", "phish" -> "fish" via the mapping char filter.
+        assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+
+        // Repeat one more time to make sure that char filter is reinitialized correctly
+        assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
+    }
+
+    // "html_strip" char filter: HTML tags removed before tokenization.
+    @Test
+    public void testHtmlStripCharFilter() throws Exception {
+        Index index = new Index("test");
+        Settings settings = settingsBuilder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+                .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip")
+                .put("path.home", createTempDir().toString())
+                .build();
+        // Same two-stage injector bootstrap as above.
+        Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+        Injector injector = new ModulesBuilder().add(
+                new IndexSettingsModule(index, settings),
+                new IndexNameModule(index),
+                new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+                .createChildInjector(parentInjector);
+
+        AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+        NamedAnalyzer analyzer1 = analysisService.analyzer("custom_with_char_filter");
+
+        assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+
+        // Repeat one more time to make sure that char filter is reinitialized correctly
+        assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
new file mode 100644
index 0000000000..3f1edbbdd4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/CompoundAnalysisTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+// Tests compound-word analysis (dictionary decompounder) loaded from both the
+// JSON and YAML forms of the test1 analysis settings on the classpath.
+public class CompoundAnalysisTests extends ElasticsearchTestCase {
+
+    // The "dict_dec" filter from the settings must resolve to the
+    // dictionary compound-word filter factory.
+    @Test
+    public void testDefaultsCompoundAnalysis() throws Exception {
+        Index index = new Index("test");
+        Settings settings = getJsonSettings();
+        // Parent injector for node-level services, child injector for the
+        // index-scoped analysis bindings.
+        Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+        Injector injector = new ModulesBuilder().add(
+                new IndexSettingsModule(index, settings),
+                new IndexNameModule(index),
+                new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+                .createChildInjector(parentInjector);
+
+        AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+        TokenFilterFactory filterFactory = analysisService.tokenFilter("dict_dec");
+        MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class));
+    }
+
+    // The decompounder must emit both the original compounds and their parts,
+    // and behave identically whether configured via JSON or YAML settings.
+    @Test
+    public void testDictionaryDecompounder() throws Exception {
+        Settings[] settingsArr = new Settings[]{getJsonSettings(), getYamlSettings()};
+        for (Settings settings : settingsArr) {
+            List<String> terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe");
+            MatcherAssert.assertThat(terms.size(), equalTo(8));
+            MatcherAssert.assertThat(terms, hasItems("donau", "dampf", "schiff", "donaudampfschiff", "spargel", "creme", "suppe", "spargelcremesuppe"));
+        }
+    }
+
+    // Runs the named analyzer (built from the given settings) over the text
+    // via the _all token stream and collects the emitted terms in order.
+    private List<String> analyze(Settings settings, String analyzerName, String text) throws IOException {
+        Index index = new Index("test");
+        Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+        Injector injector = new ModulesBuilder().add(
+                new IndexSettingsModule(index, settings),
+                new IndexNameModule(index),
+                new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+                .createChildInjector(parentInjector);
+
+        AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+        Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field1", text, 1.0f);
+        allEntries.reset();
+
+        TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+        stream.reset();
+        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+        List<String> terms = new ArrayList<>();
+        while (stream.incrementToken()) {
+            String tokText = termAtt.toString();
+            terms.add(tokText);
+        }
+        return terms;
+    }
+
+    // Settings loaded from the JSON variant of the test fixture.
+    private Settings getJsonSettings() {
+        return settingsBuilder()
+                .loadFromClasspath("org/elasticsearch/index/analysis/test1.json")
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("path.home", createTempDir().toString())
+                .build();
+    }
+
+    // Settings loaded from the YAML variant of the same fixture.
+    private Settings getYamlSettings() {
+        return settingsBuilder()
+                .loadFromClasspath("org/elasticsearch/index/analysis/test1.yml")
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("path.home", createTempDir().toString())
+                .build();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..cb8a8975e7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactoryTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+// Tests the hunspell token filter factory's "dedup" option: it defaults to
+// true and can be disabled via index.analysis.filter.<name>.dedup.
+public class HunspellTokenFilterFactoryTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testDedup() throws IOException {
+        // No explicit dedup setting -> expect the default (true).
+        Settings settings = settingsBuilder()
+                .put("path.home", createTempDir().toString())
+                .put("path.conf", getDataPath("/indices/analyze/conf_dir"))
+                .put("index.analysis.filter.en_US.type", "hunspell")
+                .put("index.analysis.filter.en_US.locale", "en_US")
+                .build();
+
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("en_US");
+        assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+        HunspellTokenFilterFactory hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+        assertThat(hunspellTokenFilter.dedup(), is(true));
+
+        // Explicit dedup=false must be honored by the factory.
+        settings = settingsBuilder()
+                .put("path.home", createTempDir().toString())
+                .put("path.conf", getDataPath("/indices/analyze/conf_dir"))
+                .put("index.analysis.filter.en_US.type", "hunspell")
+                .put("index.analysis.filter.en_US.dedup", false)
+                .put("index.analysis.filter.en_US.locale", "en_US")
+                .build();
+
+        analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        tokenFilter = analysisService.tokenFilter("en_US");
+        assertThat(tokenFilter, instanceOf(HunspellTokenFilterFactory.class));
+        hunspellTokenFilter = (HunspellTokenFilterFactory) tokenFilter;
+        assertThat(hunspellTokenFilter.dedup(), is(false));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
new file mode 100644
index 0000000000..a9a07af9d9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.FailedToResolveConfigException;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+// Tests the "keep" token filter factory: configuration validation (keep_words
+// vs keep_words_path are mutually exclusive; the path must resolve) and the
+// filtering behavior with and without case sensitivity.
+public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+    // Classpath location of the keep-filter analysis settings fixture.
+    private static final String RESOURCE = "org/elasticsearch/index/analysis/keep_analysis.json";
+
+
+    // The bare "keep" name is not configured in the fixture, so lookup returns null.
+    @Test
+    public void testLoadWithoutSettings() {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep");
+        Assert.assertNull(tokenFilter);
+    }
+
+    // Configuring both keep_words_path and keep_words must be rejected.
+    @Test
+    public void testLoadOverConfiguredSettings() {
+        Settings settings = Settings.settingsBuilder()
+                .put("path.home", createTempDir().toString())
+                .put("index.analysis.filter.broken_keep_filter.type", "keep")
+                .put("index.analysis.filter.broken_keep_filter.keep_words_path", "does/not/exists.txt")
+                .put("index.analysis.filter.broken_keep_filter.keep_words", "[\"Hello\", \"worlD\"]")
+                .build();
+        try {
+            AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            Assert.fail("path and array are configured");
+        } catch (Exception e) {
+            assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+        }
+    }
+
+    // A keep_words_path that cannot be resolved must fail fast, and adding
+    // keep_words on top of a path must fail with IllegalArgumentException.
+    @Test
+    public void testKeepWordsPathSettings() {
+        Settings settings = Settings.settingsBuilder()
+                .put("path.home", createTempDir().toString())
+                .put("index.analysis.filter.non_broken_keep_filter.type", "keep")
+                .put("index.analysis.filter.non_broken_keep_filter.keep_words_path", "does/not/exists.txt")
+                .build();
+        try {
+            // test our none existing setup is picked up
+            AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            fail("expected an exception due to non existent keep_words_path");
+        } catch (Throwable e) {
+            assertThat(e.getCause(), instanceOf(FailedToResolveConfigException.class));
+        }
+
+        settings = Settings.settingsBuilder().put(settings)
+                .put("index.analysis.filter.non_broken_keep_filter.keep_words", new String[]{"test"})
+                .build();
+        try {
+            // test our none existing setup is picked up
+            AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] ");
+        } catch (Throwable e) {
+            assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+        }
+
+    }
+
+    // Case-insensitive keep filter: "hello" and "world" survive, "small" is dropped.
+    @Test
+    public void testCaseInsensitiveMapping() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_keep_filter");
+        assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+        String source = "hello small world";
+        String[] expected = new String[]{"hello", "world"};
+        Tokenizer tokenizer = new WhitespaceTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        // Position increments {1, 2}: "world" follows the dropped "small".
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1, 2});
+    }
+
+    // Case-sensitive keep filter: only the exact-case "Hello" survives.
+    @Test
+    public void testCaseSensitiveMapping() throws IOException {
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_case_sensitive_keep_filter");
+        assertThat(tokenFilter, instanceOf(KeepWordFilterFactory.class));
+        String source = "Hello small world";
+        String[] expected = new String[]{"Hello"};
+        Tokenizer tokenizer = new WhitespaceTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{1});
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java
new file mode 100644
index 0000000000..966d550a55
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/KeepTypesFilterFactoryTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+// Tests the "keep_types" token filter factory: only tokens whose type attribute
+// matches one of the configured types (here <NUM>) are kept.
+public class KeepTypesFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+    @Test
+    public void testKeepTypes() throws IOException {
+        Settings settings = Settings.settingsBuilder()
+                .put("path.home", createTempDir().toString())
+                .put("index.analysis.filter.keep_numbers.type", "keep_types")
+                .putArray("index.analysis.filter.keep_numbers.types", new String[] {"<NUM>", "<SOMETHINGELSE>"})
+                .build();
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        TokenFilterFactory tokenFilter = analysisService.tokenFilter("keep_numbers");
+        assertThat(tokenFilter, instanceOf(KeepTypesFilterFactory.class));
+        String source = "Hello 123 world";
+        String[] expected = new String[]{"123"};
+        Tokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setReader(new StringReader(source));
+        // Position increment 2: "123" follows the dropped "Hello".
+        assertTokenStreamContents(tokenFilter.create(tokenizer), expected, new int[]{2});
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
new file mode 100644
index 0000000000..e75120d9ee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/LimitTokenCountFilterFactoryTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+// Tests the "limit" token filter factory: with defaults only the first token
+// is kept (see the {"the"} expectations below), and max_token_count /
+// consume_all_tokens control how many tokens pass through.
+public class LimitTokenCountFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+    @Test
+    public void testDefault() throws IOException {
+        Settings settings = Settings.settingsBuilder()
+                .put("index.analysis.filter.limit_default.type", "limit")
+                .put("path.home", createTempDir().toString())
+                .build();
+        AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+        {
+            // Explicitly configured filter with default options.
+            TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_default");
+            String source = "the quick brown fox";
+            String[] expected = new String[] { "the" };
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader(source));
+            assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+        }
+        {
+            // The built-in "limit" name must behave the same as the default config.
+            TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit");
+            String source = "the quick brown fox";
+            String[] expected = new String[] { "the" };
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader(source));
+            assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+        }
+    }
+
+    @Test
+    public void testSettings() throws IOException {
+        {
+            // max_token_count=3, consume_all_tokens=true -> first three tokens kept.
+            Settings settings = Settings.settingsBuilder()
+                    .put("index.analysis.filter.limit_1.type", "limit")
+                    .put("index.analysis.filter.limit_1.max_token_count", 3)
+                    .put("index.analysis.filter.limit_1.consume_all_tokens", true)
+                    .put("path.home", createTempDir().toString())
+                    .build();
+            AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+            String source = "the quick brown fox";
+            String[] expected = new String[] { "the", "quick", "brown" };
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader(source));
+            assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+        }
+        {
+            // consume_all_tokens=false must not change which tokens are emitted.
+            Settings settings = Settings.settingsBuilder()
+                    .put("index.analysis.filter.limit_1.type", "limit")
+                    .put("index.analysis.filter.limit_1.max_token_count", 3)
+                    .put("index.analysis.filter.limit_1.consume_all_tokens", false)
+                    .put("path.home", createTempDir().toString())
+                    .build();
+            AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+            String source = "the quick brown fox";
+            String[] expected = new String[] { "the", "quick", "brown" };
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader(source));
+            assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+        }
+
+        {
+            // A limit larger than the token count passes everything through.
+            Settings settings = Settings.settingsBuilder()
+                    .put("index.analysis.filter.limit_1.type", "limit")
+                    .put("index.analysis.filter.limit_1.max_token_count", 17)
+                    .put("index.analysis.filter.limit_1.consume_all_tokens", true)
+                    .put("path.home", createTempDir().toString())
+                    .build();
+            AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+            TokenFilterFactory tokenFilter = analysisService.tokenFilter("limit_1");
+            String source = "the quick brown fox";
+            String[] expected = new String[] { "the", "quick", "brown", "fox" };
+            Tokenizer tokenizer = new WhitespaceTokenizer();
+            tokenizer.setReader(new StringReader(source));
+            assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
new file mode 100644
index 0000000000..39a2372e3b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.ngram.*;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween;
+import static org.hamcrest.Matchers.instanceOf;
+
+public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+
+ /**
+ * Invalid "token_chars" values ("letters", "number", "DIRECTIONALITY_UNDEFINED")
+ * must be rejected with an IllegalArgumentException at tokenizer creation time,
+ * while supported character classes are accepted case-insensitively and with
+ * surrounding whitespace (" digit ", "CoNtRoL").
+ */
+ @Test
+ public void testParseTokenChars() {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = newAnalysisSettingsBuilder().build();
+ for (String tokenChars : Arrays.asList("letters", "number", "DIRECTIONALITY_UNDEFINED")) {
+ final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ try {
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ fail();
+ } catch (IllegalArgumentException expected) {
+ // OK
+ }
+ }
+ for (String tokenChars : Arrays.asList("letter", " digit ", "punctuation", "DIGIT", "CoNtRoL", "dash_punctuation")) {
+ final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", tokenChars).build();
+ new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ // no exception
+ }
+ }
+
+ /**
+ * With an empty "token_chars" array there is no pre-tokenization, so n-grams may
+ * span any character — including the '.' in "1.34".
+ */
+ @Test
+ public void testNoTokenChars() throws IOException {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = newAnalysisSettingsBuilder().build();
+ final Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 4).putArray("token_chars", new String[0]).build();
+ Tokenizer tokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ tokenizer.setReader(new StringReader("1.34"));
+ assertTokenStreamContents(tokenizer, new String[] {"1.", "1.3", "1.34", ".3", ".34", "34"});
+ }
+
+ @Test
+ public void testPreTokenization() throws IOException {
+ // Make sure that pretokenization works well and that it can be used even with token chars which are supplementary characters
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = newAnalysisSettingsBuilder().build();
+ Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ Tokenizer tokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ // "\uD801\uDC00" is a surrogate pair encoding a single supplementary letter
+ tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f "));
+ assertTokenStreamContents(tokenizer,
+ new String[] {"Åb", "Åbc", "bc", "dé", "déf", "éf", "g\uD801\uDC00", "g\uD801\uDC00f", "\uD801\uDC00f"});
+ // with whitespace/punctuation/symbol included, grams may cross spaces and '!'/'$'
+ settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ tokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ tokenizer.setReader(new StringReader(" a!$ 9"));
+ assertTokenStreamContents(tokenizer,
+ new String[] {" a", " a!", "a!", "a!$", "!$", "!$ ", "$ ", "$ 9", " 9"});
+ }
+
+ @Test
+ public void testPreTokenizationEdge() throws IOException {
+ // Make sure that pretokenization works well and that it can be used even with token chars which are supplementary characters
+ // Same inputs as testPreTokenization, but edge n-grams only keep prefixes of each token.
+ final Index index = new Index("test");
+ final String name = "ngr";
+ final Settings indexSettings = newAnalysisSettingsBuilder().build();
+ Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
+ Tokenizer tokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
+ tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f "));
+ assertTokenStreamContents(tokenizer,
+ new String[] {"Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f"});
+ settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
+ tokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
+ tokenizer.setReader(new StringReader(" a!$ 9"));
+ assertTokenStreamContents(tokenizer,
+ new String[] {" a", " a!"});
+ }
+
+ /**
+ * Index versions on/after 0.90.2 get the current EdgeNGramTokenizer unless an
+ * explicit "version: 4.x" compatibility setting is present, in which case the
+ * Lucene 4.3 back-compat tokenizer is used; older index versions always get the
+ * back-compat tokenizer. Finally, "side: back" must be rejected on the current version.
+ */
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenizer() throws Exception {
+ int iters = scaledRandomIntBetween(20, 100);
+ final Index index = new Index("test");
+ final String name = "ngr";
+ for (int i = 0; i < iters; i++) {
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ // "side" is only legal together with the 4.x compat version
+ builder.put("version", "4." + random().nextInt(3));
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
+ edgeNGramTokenizer.setReader(new StringReader("foo bar"));
+ if (compatVersion) {
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ } else {
+ assertThat(edgeNGramTokenizer, instanceOf(EdgeNGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
+ edgeNGramTokenizer.setReader(new StringReader("foo bar"));
+ assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
+ }
+ }
+ // "side: back" without a compat version must fail on the current index version
+ Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ try {
+ new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
+ fail("should fail side:back is not supported anymore");
+ } catch (IllegalArgumentException ex) {
+ }
+
+ }
+
+ /**
+ * Same version-dispatch check as above, but for the plain NGramTokenizer:
+ * a "version: 4.x" compat setting (or a pre-0.90.2 index) selects Lucene43NGramTokenizer.
+ */
+ @Test
+ public void testBackwardsCompatibilityNgramTokenizer() throws Exception {
+ int iters = scaledRandomIntBetween(20, 100);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ nGramTokenizer.setReader(new StringReader("foo bar"));
+ if (compatVersion) {
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ } else {
+ assertThat(nGramTokenizer, instanceOf(NGramTokenizer.class));
+ }
+
+ } else {
+ Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer nGramTokenizer = new NGramTokenizerFactory(index, indexSettings, name, settings).create();
+ nGramTokenizer.setReader(new StringReader("foo bar"));
+ assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
+ }
+ }
+ }
+
+ /**
+ * Version dispatch for the edge n-gram token FILTER: "side: back" wraps the stream
+ * in a ReverseStringFilter; otherwise a "version: 4.x" compat setting (or a
+ * pre-0.90.2 index) selects the Lucene 4.3 back-compat filter.
+ */
+ @Test
+ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception {
+ int iters = scaledRandomIntBetween(20, 100);
+ for (int i = 0; i < iters; i++) {
+ final Index index = new Index("test");
+ final String name = "ngr";
+ Version v = randomVersion(random());
+ if (v.onOrAfter(Version.V_0_90_2)) {
+ Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3);
+ boolean compatVersion = false;
+ if ((compatVersion = random().nextBoolean())) {
+ builder.put("version", "4." + random().nextInt(3));
+ }
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer tokenizer = new MockTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(tokenizer);
+ if (reverse) {
+ assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class));
+ } else if (compatVersion) {
+ assertThat(edgeNGramTokenFilter, instanceOf(Lucene43EdgeNGramTokenFilter.class));
+ } else {
+ assertThat(edgeNGramTokenFilter, instanceOf(EdgeNGramTokenFilter.class));
+ }
+
+ } else {
+ Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3);
+ boolean reverse = random().nextBoolean();
+ if (reverse) {
+ builder.put("side", "back");
+ }
+ Settings settings = builder.build();
+ Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
+ Tokenizer tokenizer = new MockTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory(index, indexSettings, name, settings).create(tokenizer);
+ if (reverse) {
+ assertThat(edgeNGramTokenFilter, instanceOf(ReverseStringFilter.class));
+ } else {
+ assertThat(edgeNGramTokenFilter, instanceOf(Lucene43EdgeNGramTokenFilter.class));
+ }
+ }
+ }
+ }
+
+
+ /**
+ * Picks a random released Version by reflecting over the static V_* constants
+ * declared on the Version class.
+ */
+ private Version randomVersion(Random random) throws IllegalArgumentException, IllegalAccessException {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ List<Field> versionFields = new ArrayList<>();
+ for (Field field : declaredFields) {
+ if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getName().startsWith("V_") && field.getType() == Version.class) {
+ versionFields.add(field);
+ }
+ }
+ return (Version) versionFields.get(random.nextInt(versionFields.size())).get(Version.class);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
new file mode 100644
index 0000000000..7f7b363cf9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/NumericAnalyzerTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class NumericAnalyzerTests extends ElasticsearchTestCase {
+
+ /**
+ * Verifies that NumericDoubleAnalyzer emits a token stream equivalent to a plain
+ * Lucene NumericTokenStream configured with the same precision step: token for
+ * token, the raw values, shifts and position increments must match.
+ */
+ @Test
+ public void testAttributeEqual() throws IOException {
+ final int precisionStep = 8;
+ final double value = randomDouble();
+ NumericDoubleAnalyzer analyzer = new NumericDoubleAnalyzer(precisionStep);
+
+ final TokenStream ts1 = analyzer.tokenStream("dummy", String.valueOf(value));
+ final NumericTokenStream ts2 = new NumericTokenStream(precisionStep);
+ ts2.setDoubleValue(value);
+ // BUGFIX: the second attribute of each pair must come from ts2. Previously both
+ // numTerm2 and posInc2 were fetched from ts1, so every assertion below compared
+ // ts1 against itself and the test could never detect a divergence.
+ final NumericTermAttribute numTerm1 = ts1.addAttribute(NumericTermAttribute.class);
+ final NumericTermAttribute numTerm2 = ts2.addAttribute(NumericTermAttribute.class);
+ final PositionIncrementAttribute posInc1 = ts1.addAttribute(PositionIncrementAttribute.class);
+ final PositionIncrementAttribute posInc2 = ts2.addAttribute(PositionIncrementAttribute.class);
+ ts1.reset();
+ ts2.reset();
+ while (ts1.incrementToken()) {
+ // both streams must produce the same number of tokens in lock-step
+ assertThat(ts2.incrementToken(), is(true));
+ assertThat(posInc1, equalTo(posInc2));
+ // can't use equalTo directly on the numeric attribute cause it doesn't implement equals (LUCENE-5070)
+ assertThat(numTerm1.getRawValue(), equalTo(numTerm2.getRawValue()));
+ // BUGFIX: compare the shift across the two streams (was numTerm2 vs numTerm2,
+ // a tautology that always passed)
+ assertThat(numTerm1.getShift(), equalTo(numTerm2.getShift()));
+ }
+ // ts2 must be exhausted at the same time as ts1
+ assertThat(ts2.incrementToken(), is(false));
+ ts1.end();
+ ts2.end();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTest.java b/core/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTest.java
new file mode 100644
index 0000000000..98197a15c0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTest.java
@@ -0,0 +1,154 @@
+package org.elasticsearch.index.analysis;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.IOException;
+import java.lang.Thread.UncaughtExceptionHandler;
+import java.util.Arrays;
+import java.util.regex.Pattern;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+
+/**
+ * Verifies the behavior of PatternAnalyzer.
+ */
+public class PatternAnalyzerTest extends ElasticsearchTokenStreamTestCase {
+
+ /**
+ * Test PatternAnalyzer when it is configured with a non-word pattern.
+ */
+ public void testNonWordPattern() throws IOException {
+ // Split on non-letter pattern, do not lowercase, no stopwords
+ PatternAnalyzer a = new PatternAnalyzer(Pattern.compile("\\W+"), false, null);
+ assertAnalyzesTo(a, "The quick brown Fox,the abcd1234 (56.78) dc.",
+ new String[] { "The", "quick", "brown", "Fox", "the", "abcd1234", "56", "78", "dc" });
+
+ // split on non-letter pattern, lowercase, english stopwords
+ PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\W+"), true,
+ StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
+ new String[] { "quick", "brown", "fox", "abcd1234", "56", "78", "dc" });
+ }
+
+ /**
+ * Test PatternAnalyzer when it is configured with a whitespace pattern.
+ * Behavior can be similar to WhitespaceAnalyzer (depending upon options)
+ */
+ public void testWhitespacePattern() throws IOException {
+ // Split on whitespace patterns, do not lowercase, no stopwords
+ PatternAnalyzer a = new PatternAnalyzer(Pattern.compile("\\s+"), false, null);
+ assertAnalyzesTo(a, "The quick brown Fox,the abcd1234 (56.78) dc.",
+ new String[] { "The", "quick", "brown", "Fox,the", "abcd1234", "(56.78)", "dc." });
+
+ // Split on whitespace patterns, lowercase, english stopwords
+ PatternAnalyzer b = new PatternAnalyzer(Pattern.compile("\\s+"), true,
+ StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ assertAnalyzesTo(b, "The quick brown Fox,the abcd1234 (56.78) dc.",
+ new String[] { "quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
+ }
+
+ /**
+ * Test PatternAnalyzer when it is configured with a custom pattern. In this
+ * case, text is tokenized on the comma ","
+ */
+ public void testCustomPattern() throws IOException {
+ // Split on comma, do not lowercase, no stopwords
+ PatternAnalyzer a = new PatternAnalyzer(Pattern.compile(","), false, null);
+ assertAnalyzesTo(a, "Here,Are,some,Comma,separated,words,",
+ new String[] { "Here", "Are", "some", "Comma", "separated", "words" });
+
+ // split on comma, lowercase, english stopwords
+ PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true,
+ StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ assertAnalyzesTo(b, "Here,Are,some,Comma,separated,words,",
+ new String[] { "here", "some", "comma", "separated", "words" });
+ }
+
+ /**
+ * Test PatternAnalyzer against a large document.
+ */
+ public void testHugeDocument() throws IOException {
+ StringBuilder document = new StringBuilder();
+ // 5000 a's
+ char largeWord[] = new char[5000];
+ Arrays.fill(largeWord, 'a');
+ document.append(largeWord);
+
+ // a space
+ document.append(' ');
+
+ // 2000 b's
+ char largeWord2[] = new char[2000];
+ Arrays.fill(largeWord2, 'b');
+ document.append(largeWord2);
+
+ // Split on whitespace patterns, do not lowercase, no stopwords
+ PatternAnalyzer a = new PatternAnalyzer(Pattern.compile("\\s+"), false, null);
+ assertAnalyzesTo(a, document.toString(),
+ new String[] { new String(largeWord), new String(largeWord2) });
+ }
+
+ /** blast some random strings through the analyzer */
+ public void testRandomStrings() throws Exception {
+ Analyzer a = new PatternAnalyzer(Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+
+ // dodge jre bug http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=7104012
+ // A crash inside RuleBasedBreakIterator surfaces as an uncaught AIOOBE on a
+ // background thread; intercept it and skip the test instead of failing.
+ final UncaughtExceptionHandler savedHandler = Thread.getDefaultUncaughtExceptionHandler();
+ Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+ @Override
+ public void uncaughtException(Thread thread, Throwable throwable) {
+ assumeTrue("not failing due to jre bug ", !isJREBug7104012(throwable));
+ // otherwise its some other bug, pass to default handler
+ savedHandler.uncaughtException(thread, throwable);
+ }
+ });
+
+ try {
+ // NOTE(review): this call's return value is discarded; it appears to have no
+ // effect — presumably leftover from debugging, confirm before removing.
+ Thread.getDefaultUncaughtExceptionHandler();
+ checkRandomData(random(), a, 10000*RANDOM_MULTIPLIER);
+ } catch (ArrayIndexOutOfBoundsException ex) {
+ assumeTrue("not failing due to jre bug ", !isJREBug7104012(ex));
+ throw ex; // otherwise rethrow
+ } finally {
+ // always restore the original handler so other tests are unaffected
+ Thread.setDefaultUncaughtExceptionHandler(savedHandler);
+ }
+ }
+
+ // Heuristic: does this throwable look like JDK bug 7104012 (AIOOBE thrown from
+ // RuleBasedBreakIterator)? Used above to skip rather than fail the random test.
+ static boolean isJREBug7104012(Throwable t) {
+ if (!(t instanceof ArrayIndexOutOfBoundsException)) {
+ // BaseTokenStreamTestCase now wraps exc in a new RuntimeException:
+ t = t.getCause();
+ if (!(t instanceof ArrayIndexOutOfBoundsException)) {
+ return false;
+ }
+ }
+ StackTraceElement trace[] = t.getStackTrace();
+ for (StackTraceElement st : trace) {
+ // NOTE(review): '&&' binds tighter than '||', so any frame in
+ // java.text.RuleBasedBreakIterator matches regardless of method name, while
+ // the sun.util.locale.provider class additionally requires lookupBackwardState.
+ // This mirrors the upstream Lucene workaround — confirm the asymmetry is intended.
+ if ("java.text.RuleBasedBreakIterator".equals(st.getClassName()) ||
+ "sun.util.locale.provider.RuleBasedBreakIterator".equals(st.getClassName())
+ && "lookupBackwardState".equals(st.getMethodName())) {
+ return true;
+ }
+ }
+ return false;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
new file mode 100644
index 0000000000..11a4f8745c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ /**
+ * Wires up a real AnalysisService from the pattern_capture.json classpath config and
+ * checks the three configured analyzers: "single" (one capture group), "multi"
+ * (several groups) and "preserve" (preserve_original disabled, so the full token
+ * "foobarbaz" is not emitted).
+ */
+ @Test
+ public void testPatternCaptureTokenFilter() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .put("path.home", createTempDir())
+ .loadFromClasspath("org/elasticsearch/index/analysis/pattern_capture.json")
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .build();
+ // parent injector provides node-level services; child injector adds the index-level analysis module
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("single");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "foobarbaz"), new String[]{"foobarbaz","foobar","foo"});
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("multi");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "abc123def"), new String[]{"abc123def","abc","123","def"});
+
+ NamedAnalyzer analyzer3 = analysisService.analyzer("preserve");
+
+ assertTokenStreamContents(analyzer3.tokenStream("test", "foobarbaz"), new String[]{"foobar","foo"});
+ }
+
+
+ /**
+ * Constructing the factory without any "patterns" setting must fail: a "pattern"
+ * (singular) key alone is not recognized.
+ */
+ @Test(expected=IllegalArgumentException.class)
+ public void testNoPatterns() {
+ new PatternCaptureGroupTokenFilterFactory(new Index("test"), settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), "pattern_capture", settingsBuilder().put("pattern", "foobar").build());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
new file mode 100644
index 0000000000..78b1bb9d67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactoryTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerProviderFactoryTests extends ElasticsearchTestCase {
+
+ /**
+ * The factory must hand out different provider instances for indices created on
+ * different Elasticsearch versions (0.90.0 vs. current), i.e. the created analyzer
+ * provider is version-dependent.
+ */
+ @Test
+ public void testVersioningInFactoryProvider() throws Exception {
+ PreBuiltAnalyzerProviderFactory factory = new PreBuiltAnalyzerProviderFactory("default", AnalyzerScope.INDEX, PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT));
+
+ AnalyzerProvider former090AnalyzerProvider = factory.create("default", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build());
+ AnalyzerProvider currentAnalyzerProviderReference = factory.create("default", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ // would love to access the version inside of the lucene analyzer, but that is not possible...
+ assertThat(currentAnalyzerProviderReference, is(not(former090AnalyzerProvider)));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
new file mode 100644
index 0000000000..cf9f09204d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class PreBuiltAnalyzerTests extends ElasticsearchSingleNodeTest {
+
+ /**
+ * DEFAULT and STANDARD pre-built analyzers must be the very same instance on the
+ * current version.
+ */
+ @Test
+ public void testThatDefaultAndStandardAnalyzerAreTheSameInstance() {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.CURRENT);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.CURRENT);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ }
+
+ /**
+ * The DEFAULT analyzer changed in 1.0.0.Beta1: instances are cached per version,
+ * and the token output differs before/after that version ("This is it Dude" keeps
+ * all four lowercased tokens on/after Beta1, only "dude" before).
+ */
+ @Test
+ public void testThatDefaultAndStandardAnalyzerChangedIn10Beta1() throws IOException {
+ Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_1_0_0_Beta1);
+ Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
+
+ // special case, these two are the same instance
+ assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
+ // NOTE(review): return value discarded — presumably warms the per-version cache; confirm.
+ PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
+ final int n = scaledRandomIntBetween(10, 100);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_Beta1)) {
+ assertThat(currentDefaultAnalyzer, is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version)));
+ } else {
+ assertThat(currentDefaultAnalyzer, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
+ }
+ Analyzer analyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
+ assertThat(list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.close();
+ version = randomVersion(random());
+ }
+ }
+
+ /**
+ * PATTERN and STANDARD_HTML_STRIP analyzers changed in 1.0.0.RC1: same
+ * caching/token-output checks as the Beta1 test above.
+ */
+ @Test
+ public void testAnalyzerChangedIn10RC1() throws IOException {
+ Analyzer pattern = PreBuiltAnalyzers.PATTERN.getAnalyzer(Version.V_1_0_0_RC1);
+ Analyzer standardHtml = PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(Version.V_1_0_0_RC1);
+ final int n = scaledRandomIntBetween(10, 100);
+ Version version = Version.CURRENT;
+ for(int i = 0; i < n; i++) {
+ if (version.equals(Version.V_1_0_0_RC1)) {
+ assertThat(pattern, is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version)));
+ assertThat(standardHtml, is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version)));
+ } else {
+ // NOTE(review): these compare against DEFAULT, which is a different analyzer
+ // family and so is trivially unequal — looks like they should compare against
+ // PATTERN and STANDARD_HTML_STRIP respectively; confirm intent.
+ assertThat(pattern, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
+ assertThat(standardHtml, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
+ }
+ Analyzer analyzer = randomBoolean() ? PreBuiltAnalyzers.PATTERN.getAnalyzer(version) : PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version);
+ TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
+ ts.reset();
+ CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
+ List<String> list = new ArrayList<>();
+ while(ts.incrementToken()) {
+ list.add(charTermAttribute.toString());
+ }
+ if (version.onOrAfter(Version.V_1_0_0_RC1)) {
+ assertThat(list.toString(), list.size(), is(4));
+ assertThat(list, contains("this", "is", "it", "dude"));
+
+ } else {
+ assertThat(list.size(), is(1));
+ assertThat(list, contains("dude"));
+ }
+ ts.close();
+ version = randomVersion(random());
+ }
+ }
+
+ /**
+ * KEYWORD never changed across versions, so all versions share one instance.
+ */
+ @Test
+ public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
+ assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ /**
+ * Repeated lookups for the same version must return the cached instance.
+ */
+ @Test
+ public void testThatInstancesAreCachedAndReused() {
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)));
+ assertThat(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0),
+ is(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_0_18_0)));
+ }
+
+ @Test
+ public void testThatInstancesWithSameLuceneVersionAreReused() {
+ // both are lucene 4.4 and should return the same instance
+ assertThat(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_4),
+ is(PreBuiltAnalyzers.CATALAN.getAnalyzer(Version.V_0_90_5)));
+ }
+
+ /**
+ * A pre-built analyzer referenced by name in a mapping must resolve to the same
+ * underlying Lucene analyzer instance as the version-matched pre-built provider.
+ */
+ @Test
+ public void testThatAnalyzersAreUsedInMapping() throws IOException {
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers randomPreBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String analyzerName = randomPreBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion(random());
+ Settings indexSettings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
+
+ NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", analyzerName).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+
+ FieldMapper fieldMapper = docMapper.mappers().getMapper("field");
+ assertThat(fieldMapper.fieldType().searchAnalyzer(), instanceOf(NamedAnalyzer.class));
+ NamedAnalyzer fieldMapperNamedAnalyzer = (NamedAnalyzer) fieldMapper.fieldType().searchAnalyzer();
+
+ assertThat(fieldMapperNamedAnalyzer.analyzer(), is(namedAnalyzer.analyzer()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
new file mode 100644
index 0000000000..5690315f7a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltCharFilterFactoryFactoryTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltCharFilters;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Tests that {@link PreBuiltCharFilterFactoryFactory} reuses char filter factory instances across index versions.
+ */
+public class PreBuiltCharFilterFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltCharFilterFactoryFactory factory = new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT));
+
+ CharFilterFactory former090TokenizerFactory = factory.create("html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build()); // pre-1.0 index version
+ CharFilterFactory former090TokenizerFactoryCopy = factory.create("html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_0).build()); // same version requested again
+ CharFilterFactory currentTokenizerFactory = factory.create("html_strip", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(currentTokenizerFactory, is(former090TokenizerFactory)); // html_strip: one shared instance regardless of version
+ assertThat(currentTokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
new file mode 100644
index 0000000000..8531dc81c3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenFilterFactoryFactoryTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenFilters;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ * Tests version-based instance caching in {@link PreBuiltTokenFilterFactoryFactory}.
+ */
+public class PreBuiltTokenFilterFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatCachingWorksForCachingStrategyOne() {
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.WORD_DELIMITER.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory former090TokenizerFactory = factory.create("word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build()); // pre-1.0 index version
+ TokenFilterFactory former090TokenizerFactoryCopy = factory.create("word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build()); // a different pre-1.0 version
+ TokenFilterFactory currentTokenizerFactory = factory.create("word_delimiter", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(currentTokenizerFactory, is(former090TokenizerFactory)); // word_delimiter: single shared instance across all versions
+ assertThat(currentTokenizerFactory, is(former090TokenizerFactoryCopy));
+ }
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltTokenFilterFactoryFactory factory = new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.STOP.getTokenFilterFactory(Version.CURRENT));
+
+ TokenFilterFactory former090TokenizerFactory = factory.create("stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenFilterFactory former090TokenizerFactoryCopy = factory.create("stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenFilterFactory currentTokenizerFactory = factory.create("stop", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(currentTokenizerFactory, is(not(former090TokenizerFactory))); // stop: distinct instances for distinct Lucene versions
+ assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy)); // but 0.90.1 and 0.90.2 map to the same Lucene version
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
new file mode 100644
index 0000000000..d60155e0e0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltTokenizerFactoryFactoryTests.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.PreBuiltTokenizers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.*;
+
+/**
+ * Tests that {@link PreBuiltTokenizerFactoryFactory} caches tokenizer factories per Lucene version.
+ */
+public class PreBuiltTokenizerFactoryFactoryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatDifferentVersionsCanBeLoaded() {
+ PreBuiltTokenizerFactoryFactory factory = new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.STANDARD.getTokenizerFactory(Version.CURRENT));
+
+ // different es versions, same lucene version, thus cached
+ TokenizerFactory former090TokenizerFactory = factory.create("standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_1).build());
+ TokenizerFactory former090TokenizerFactoryCopy = factory.create("standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_0_90_2).build());
+ TokenizerFactory currentTokenizerFactory = factory.create("standard", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+
+ assertThat(currentTokenizerFactory, is(not(former090TokenizerFactory))); // current Lucene version differs from the 0.90 one
+ assertThat(currentTokenizerFactory, is(not(former090TokenizerFactoryCopy)));
+ assertThat(former090TokenizerFactory, is(former090TokenizerFactoryCopy)); // same Lucene version -> same cached instance
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..866aad321f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactoryTests.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+@ThreadLeakScope(Scope.NONE)
+public class ShingleTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ private static final String RESOURCE = "org/elasticsearch/index/analysis/shingle_analysis.json"; // analysis settings defining the shingle filters under test
+
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle");
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the", "the quick", "quick", "quick brown", "brown", "brown fox", "fox"}; // unigrams interleaved with bigrams
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMapping() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick brown fox";
+ String[] expected = new String[]{"the_quick_brown", "quick_brown_fox"}; // shingles only, "_"-joined — presumably output_unigrams=false in the json; confirm against resource
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testInverseMappingNoShingles() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_inverse");
+ assertThat(tokenFilter, instanceOf(ShingleTokenFilterFactory.class));
+ String source = "the quick"; // too short to form a shingle of the configured size
+ String[] expected = new String[]{"the", "quick"}; // falls back to the original tokens
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ @Test
+ public void testFillerToken() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromClassPath(createTempDir(), RESOURCE);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("shingle_filler");
+ String source = "simon the sorcerer";
+ String[] expected = new String[]{"simon FILLER", "simon FILLER sorcerer", "FILLER sorcerer"}; // "FILLER" stands in for the removed stopword position
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ TokenStream stream = new StopFilter(tokenizer, StopFilter.makeStopSet("the")); // drop "the" so the shingle filter sees a position hole
+ assertTokenStreamContents(tokenFilter.create(stream), expected);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/SnowballAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/SnowballAnalyzerTests.java
new file mode 100644
index 0000000000..a34a5c674a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/SnowballAnalyzerTests.java
@@ -0,0 +1,59 @@
+package org.elasticsearch.index.analysis;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+
+public class SnowballAnalyzerTests extends ElasticsearchTokenStreamTestCase { // JUnit3-style: methods run via the test* naming convention
+
+ public void testEnglish() throws Exception {
+ Analyzer a = new SnowballAnalyzer("English");
+ assertAnalyzesTo(a, "he abhorred accents", // English snowball stemming
+ new String[]{"he", "abhor", "accent"});
+ }
+
+ public void testStopwords() throws Exception {
+ Analyzer a = new SnowballAnalyzer("English",
+ StandardAnalyzer.STOP_WORDS_SET); // with stopword removal enabled
+ assertAnalyzesTo(a, "the quick brown fox jumped", // "the" removed, remaining terms stemmed
+ new String[]{"quick", "brown", "fox", "jump"});
+ }
+
+ /**
+ * Test turkish lowercasing
+ */
+ public void testTurkish() throws Exception {
+ Analyzer a = new SnowballAnalyzer("Turkish");
+
+ assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
+ assertAnalyzesTo(a, "AĞACI", new String[] { "ağaç" }); // uppercase dotless I must lowercase correctly for Turkish
+ }
+
+
+ public void testReusableTokenStream() throws Exception {
+ Analyzer a = new SnowballAnalyzer("English");
+ assertAnalyzesTo(a, "he abhorred accents",
+ new String[]{"he", "abhor", "accent"});
+ assertAnalyzesTo(a, "she abhorred him", // second call reuses the analyzer's token stream
+ new String[]{"she", "abhor", "him"});
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..6b5fb9fa34
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactoryTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.analysis.en.PorterStemFilter;
+import org.apache.lucene.analysis.snowball.SnowballFilter;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.scaledRandomIntBetween;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ * Backwards-compatibility tests for {@link StemmerTokenFilterFactory} stemmer selection by index version.
+ */
+public class StemmerTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testEnglishBackwardsCompatibility() throws IOException {
+ int iters = scaledRandomIntBetween(20, 100); // repeat with many random versions
+ for (int i = 0; i < iters; i++) {
+
+ Version v = VersionUtils.randomVersion(random());
+ Settings settings = Settings.settingsBuilder()
+ .put("index.analysis.filter.my_english.type", "stemmer")
+ .put("index.analysis.filter.my_english.language", "english")
+ .put("index.analysis.analyzer.my_english.tokenizer","whitespace")
+ .put("index.analysis.analyzer.my_english.filter","my_english")
+ .put(SETTING_VERSION_CREATED,v)
+ .put("path.home", createTempDir().toString())
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_english");
+ assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream create = tokenFilter.create(tokenizer);
+ NamedAnalyzer analyzer = analysisService.analyzer("my_english");
+
+ if (v.onOrAfter(Version.V_1_3_0)) { // 1.3+: "english" maps to PorterStemFilter
+ assertThat(create, instanceOf(PorterStemFilter.class));
+ assertAnalyzesTo(analyzer, "consolingly", new String[]{"consolingli"});
+ } else { // older indices keep SnowballFilter for bwc
+ assertThat(create, instanceOf(SnowballFilter.class));
+ assertAnalyzesTo(analyzer, "consolingly", new String[]{"consol"});
+ }
+ }
+
+ }
+
+ @Test
+ public void testPorter2BackwardsCompatibility() throws IOException {
+ int iters = scaledRandomIntBetween(20, 100);
+ for (int i = 0; i < iters; i++) {
+
+ Version v = VersionUtils.randomVersion(random());
+ Settings settings = Settings.settingsBuilder()
+ .put("index.analysis.filter.my_porter2.type", "stemmer")
+ .put("index.analysis.filter.my_porter2.language", "porter2")
+ .put("index.analysis.analyzer.my_porter2.tokenizer","whitespace")
+ .put("index.analysis.analyzer.my_porter2.filter","my_porter2")
+ .put(SETTING_VERSION_CREATED,v)
+ .put("path.home", createTempDir().toString())
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_porter2");
+ assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class));
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream create = tokenFilter.create(tokenizer);
+ NamedAnalyzer analyzer = analysisService.analyzer("my_porter2");
+ assertThat(create, instanceOf(SnowballFilter.class)); // porter2 is always a SnowballFilter
+
+ if (v.onOrAfter(Version.V_1_3_0)) { // ...but its output differs by index version
+ assertAnalyzesTo(analyzer, "possibly", new String[]{"possibl"});
+ } else {
+ assertAnalyzesTo(analyzer, "possibly", new String[]{"possibli"});
+ }
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
new file mode 100644
index 0000000000..0b4dc23b79
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StopAnalyzerTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class StopAnalyzerTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test
+ public void testDefaultsCompoundAnalysis() throws Exception {
+ Index index = new Index("test");
+ Settings settings = settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/index/analysis/stop.json") // analyzer definitions under test
+ .put("path.home", createTempDir().toString())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .build();
+ Injector parentInjector = new ModulesBuilder().add(new SettingsModule(settings), new EnvironmentModule(new Environment(settings)), new IndicesAnalysisModule()).createInjector(); // node-level modules (env + shared analysis registry)
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector); // index-level child injector
+
+ AnalysisService analysisService = injector.getInstance(AnalysisService.class);
+
+ NamedAnalyzer analyzer1 = analysisService.analyzer("analyzer1");
+
+ assertTokenStreamContents(analyzer1.tokenStream("test", "to be or not to be"), new String[0]); // every token is a stopword -> empty stream
+
+ NamedAnalyzer analyzer2 = analysisService.analyzer("analyzer2");
+
+ assertTokenStreamContents(analyzer2.tokenStream("test", "to be or not to be"), new String[0]);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
new file mode 100644
index 0000000000..f61a50ea2d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/StopTokenFilterTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.Lucene43StopFilter;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.inject.ProvisionException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+
+public class StopTokenFilterTests extends ElasticsearchTokenStreamTestCase {
+
+ @Test(expected = ProvisionException.class)
+ public void testPositionIncrementSetting() throws IOException {
+ Builder builder = Settings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false); // disabling increments must be rejected for recent Lucene versions
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.version", "5.0"); // explicit modern version, or none at all -> defaults to current
+ }
+ builder.put("path.home", createTempDir().toString());
+ Settings settings = builder.build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ analysisService.tokenFilter("my_stop"); // expected to throw ProvisionException
+ }
+
+ @Test
+ public void testCorrectPositionIncrementSetting() throws IOException {
+ Builder builder = Settings.settingsBuilder().put("index.analysis.filter.my_stop.type", "stop");
+ int thingToDo = random().nextInt(3); // randomly pick one of three valid configurations
+ if (thingToDo == 0) {
+ builder.put("index.analysis.filter.my_stop.version", Version.LATEST);
+ } else if (thingToDo == 1) {
+ builder.put("index.analysis.filter.my_stop.version", Version.LUCENE_4_0); // old Lucene version -> legacy filter
+ if (random().nextBoolean()) {
+ builder.put("index.analysis.filter.my_stop.enable_position_increments", true); // explicitly enabling is always allowed
+ }
+ } else {
+ // don't specify
+ }
+ builder.put("path.home", createTempDir().toString());
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(builder.build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream create = tokenFilter.create(tokenizer);
+ if (thingToDo == 1) {
+ assertThat(create, instanceOf(Lucene43StopFilter.class)); // legacy implementation for Lucene 4.0
+ } else {
+ assertThat(create, instanceOf(StopFilter.class)); // current implementation otherwise
+ }
+ }
+
+ @Test
+ public void testDeprecatedPositionIncrementSettingWithVersions() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.enable_position_increments", false) // still allowed for pre-4.4 Lucene versions
+ .put("index.analysis.filter.my_stop.version", "4.3")
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo bar"));
+ TokenStream create = tokenFilter.create(tokenizer);
+ assertThat(create, instanceOf(Lucene43StopFilter.class)); // old version -> legacy filter honoring the setting
+ }
+
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
+ Settings settings = Settings.settingsBuilder()
+ .put("index.analysis.filter.my_stop.type", "stop")
+ .put("index.analysis.filter.my_stop.remove_trailing", false) // keep trailing stopword -> suggest variant
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_stop");
+ assertThat(tokenFilter, instanceOf(StopTokenFilterFactory.class));
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader("foo an"));
+ TokenStream create = tokenFilter.create(tokenizer);
+ assertThat(create, instanceOf(SuggestStopFilter.class)); // SuggestStopFilter preserves a trailing partial stopword
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..cb67e657dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/WordDelimiterTokenFilterFactoryTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis;
+
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+public class WordDelimiterTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ // Each test builds an AnalysisService from index settings, looks up the
+ // configured "my_word_delimiter" filter, runs it over a whitespace-tokenized
+ // sample, and asserts the exact resulting token sequence.
+
+ // No options: splits on case changes, letter/digit transitions and
+ // non-alphanumerics; the possessive "'s" is dropped (O'Neil's -> O, Neil).
+ @Test
+ public void testDefault() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // catenate_words joins adjacent word parts (wi-fi -> wifi) while
+ // generate_word_parts=false suppresses the individual parts themselves;
+ // number parts ("500", "42") are still emitted separately.
+ @Test
+ public void testCatenateWords() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "500", "42", "wifi", "wifi", "4000", "j", "2", "se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // Mirror of the previous test for digits: catenate_numbers joins adjacent
+ // number runs (500-42 -> 50042) and generate_number_parts=false drops the
+ // separate "500"/"42" tokens; word handling stays at its defaults.
+ @Test
+ public void testCatenateNumbers() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_numbers", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "50042", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // catenate_all with both generate_*_parts disabled: every run of subwords
+ // collapses into a single concatenated token (wi-fi-4000 -> wifi4000).
+ @Test
+ public void testCatenateAll() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
+ .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // split_on_case_change=false leaves camel-cased input intact.
+ @Test
+ public void testSplitOnCaseChange() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot";
+ String[] expected = new String[]{"PowerShot"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // preserve_original=true emits the unmodified input token before its split
+ // parts (e.g. "500-42" followed by "500", "42").
+ @Test
+ public void testPreserveOriginal() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.preserve_original", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"PowerShot", "Power", "Shot", "500-42", "500", "42", "wi-fi", "wi", "fi", "wi-fi-4000", "wi", "fi", "4000", "j2se", "j", "2", "se", "O'Neil's", "O", "Neil"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ // stem_english_possessive=false keeps the "s" from "O'Neil's" as a token
+ // instead of stripping it.
+ @Test
+ public void testStemEnglishPossessive() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
+ String[] expected = new String[]{"Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s"};
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ /** Correct offset order when doing both parts and concatenation: PowerShot is a synonym of Power */
+ @Test
+ public void testPartsAndCatenate() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot";
+ // Note token order: the concatenated "PowerShot" comes between the parts.
+ String[] expected = new String[]{"Power", "PowerShot", "Shot" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+ /** Back compat:
+ * old offset order when doing both parts and concatenation: PowerShot is a synonym of Shot */
+ @Test
+ public void testDeprecatedPartsAndCatenate() throws IOException {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("index.analysis.filter.my_word_delimiter.type", "word_delimiter")
+ .put("index.analysis.filter.my_word_delimiter.catenate_words", "true")
+ .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "true")
+ // Same settings as above, but pinning version "4.7" selects the legacy
+ // filter whose concatenated token comes last.
+ .put("index.analysis.filter.my_word_delimiter.version", "4.7")
+ .build());
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("my_word_delimiter");
+ String source = "PowerShot";
+ String[] expected = new String[]{"Power", "Shot", "PowerShot" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json b/core/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
new file mode 100644
index 0000000000..89a1281473
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/cjk_analysis.json
@@ -0,0 +1,37 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "cjk_all_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "han",
+ "hiragana",
+ "katakana",
+ "hangul",
+ "foobar"
+ ]
+ },
+ "cjk_han_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":false,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_han_unigram_only":{
+ "type":"cjk_bigram",
+ "output_unigrams":true,
+ "ignored_scripts":[
+ "hiragana"
+ ]
+ },
+ "cjk_no_flags":{
+ "type":"cjk_bigram",
+ "output_unigrams":false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
new file mode 100644
index 0000000000..d4bf9058bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.commongrams;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+
+import static org.hamcrest.Matchers.instanceOf;
+public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
+
+ // A common_grams filter with neither common_words nor common_words_path is
+ // invalid: service creation must fail with an IllegalArgumentException cause.
+ @Test
+ public void testDefault() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .put("path.home", createTempDir().toString())
+ .build();
+
+ try {
+ AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Assert.fail("[common_words] or [common_words_path] is set");
+ } catch (Exception e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+ }
+ // When none of the configured common words ("chromosome", "protein") occur
+ // in the input, the output equals the plain whitespace tokens — both with
+ // the default settings and with query_mode explicitly set to false.
+ @Test
+ public void testWithoutCommonWordsMatch() throws IOException {
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .put("path.home", createTempDir().toString())
+ .build();
+
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_default.type", "common_grams")
+ .put("index.analysis.filter.common_grams_default.query_mode", false)
+ .put("path.home", createTempDir().toString())
+ .putArray("index.analysis.filter.common_grams_default.common_words", "chromosome", "protein")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ {
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_default");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "quick", "brown", "is", "a", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+ }
+
+ // Index-time behaviour: bigrams ("the_quick") are emitted alongside the
+ // original unigrams. Three configs: ignore_case=true matches regardless of
+ // case, ignore_case=false matches exact case only, and the default
+ // (lowercase word list) behaves case-sensitively.
+ @Test
+ public void testSettings() throws IOException {
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .put("path.home", createTempDir().toString())
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1")
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "fox_or", "or", "or_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .put("path.home", createTempDir().toString())
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ // "or" is not a common word here ("Or" is, and case matters), so no
+ // "fox_or"/"or_why" grams are produced around it.
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "or", "why", "why_noT", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the", "the_quick", "quick", "brown", "brown_is", "is", "is_a", "a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ // Full-analyzer wiring from commongrams.json: the inline common_words list
+ // and the common_words_path file variant must tokenize identically.
+ @Test
+ public void testCommonGramsAnalysis() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams.json")
+ .put("path.home", createTempDir().toString())
+ .build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick", "quick_brown", "brown", "brown_is", "is", "a", "a_fox", "fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+ // Query-time behaviour (query_mode=true): where a bigram is emitted, the
+ // standalone common-word unigram is dropped (e.g. "the_quick" with no
+ // separate "the"), shrinking the query. Same case-sensitivity matrix as
+ // testSettings.
+ @Test
+ public void testQueryModeSettings() throws IOException {
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_1.type", "common_grams")
+ .put("index.analysis.filter.common_grams_1.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_1.common_words", "the", "Or", "Not", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_1.ignore_case", true)
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_1");
+ String source = "the quick brown is a fox or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox_or", "or_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_2.type", "common_grams")
+ .put("index.analysis.filter.common_grams_2.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_2.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .put("index.analysis.filter.common_grams_2.ignore_case", false)
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_2");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_3.type", "common_grams")
+ .put("index.analysis.filter.common_grams_3.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_3.common_words", "the", "Or", "noT", "a", "is", "an", "they", "are")
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_3");
+ String source = "the quick brown is a fox or why noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "or", "why_noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ {
+ Settings settings = Settings.settingsBuilder().put("index.analysis.filter.common_grams_4.type", "common_grams")
+ .put("index.analysis.filter.common_grams_4.query_mode", true)
+ .putArray("index.analysis.filter.common_grams_4.common_words", "the", "or", "not", "a", "is", "an", "they", "are")
+ .put("path.home", createTempDir().toString())
+ .build();
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ TokenFilterFactory tokenFilter = analysisService.tokenFilter("common_grams_4");
+ String source = "the quick brown is a fox Or noT";
+ String[] expected = new String[] { "the_quick", "quick", "brown_is", "is_a", "a_fox", "fox", "Or", "noT" };
+ Tokenizer tokenizer = new WhitespaceTokenizer();
+ tokenizer.setReader(new StringReader(source));
+ assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
+ }
+ }
+
+ // Query-mode analyzer wiring from commongrams_query_mode.json; inline word
+ // list and word file must again produce identical output.
+ @Test
+ public void testQueryModeCommonGramsAnalysis() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .loadFromClasspath("org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json")
+ .put("path.home", createTempDir().toString())
+ .build();
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ {
+ AnalysisService analysisService = AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
+ Analyzer analyzer = analysisService.analyzer("commongramsAnalyzer_file").analyzer();
+ String source = "the quick brown is a fox or not";
+ String[] expected = new String[] { "the", "quick_brown", "brown_is", "is", "a_fox", "fox_or", "or", "not" };
+ assertTokenStreamContents(analyzer.tokenStream("test", source), expected);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
new file mode 100644
index 0000000000..f97b799c4d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/common_words.txt
@@ -0,0 +1,2 @@
+brown
+fox
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
new file mode 100644
index 0000000000..6db49fc87c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams.json
@@ -0,0 +1,29 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
new file mode 100644
index 0000000000..6f0c015570
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/commongrams/commongrams_query_mode.json
@@ -0,0 +1,31 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "commongramsAnalyzer":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams" ]
+ },
+ "commongramsAnalyzer_file":{
+ "tokenizer":"whitespace",
+ "filter":[ "common_grams_file" ]
+ }
+ },
+ "filter":{
+ "common_grams":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words":[
+ "brown",
+ "fox"
+ ]
+ },
+ "common_grams_file":{
+ "type":"common_grams",
+ "query_mode" : true,
+ "common_words_path":"org/elasticsearch/index/analysis/commongrams/common_words.txt"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java b/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
new file mode 100644
index 0000000000..47bf1bb810
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/filter1/MyFilterTokenFilterFactory.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.analysis.filter1;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.StopAnalyzer;
+import org.apache.lucene.analysis.core.StopFilter;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public class MyFilterTokenFilterFactory extends AbstractTokenFilterFactory {
+
+ // Test-only token filter factory (package "filter1"), wired up from test
+ // settings to verify that custom factories can be registered with the
+ // analysis module. Constructed via Guice with empty filter settings.
+ @Inject
+ public MyFilterTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, String name) {
+ super(index, indexSettings, name, Settings.Builder.EMPTY_SETTINGS);
+ }
+
+ // Wraps the stream in a StopFilter using Lucene's default English stop set.
+ @Override
+ public TokenStream create(TokenStream tokenStream) {
+ return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json b/core/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
new file mode 100644
index 0000000000..233d6f3e3d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/keep_analysis.json
@@ -0,0 +1,19 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "my_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "keep_words_case" : true
+ },
+ "my_case_sensitive_keep_filter":{
+ "type":"keep",
+ "keep_words" : ["Hello", "worlD"],
+ "enable_position_increments" : false,
+ "version" : "4.2"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json b/core/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
new file mode 100644
index 0000000000..d82fb987e6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/pattern_capture.json
@@ -0,0 +1,46 @@
+{
+ "index": {
+ "number_of_shards": 1,
+ "number_of_replicas": 0,
+ "analysis": {
+ "filter": {
+ "single": {
+ "type": "pattern_capture",
+ "patterns": "((...)...)"
+ },
+ "multi": {
+ "type": "pattern_capture",
+ "patterns": [
+ "(\\d+)",
+ "([a-z]+)"
+ ]
+ },
+ "preserve": {
+ "type": "pattern_capture",
+ "preserve_original": false,
+ "patterns": "((...)...)"
+ }
+ },
+ "analyzer": {
+ "single": {
+ "tokenizer": "keyword",
+ "filter": [
+ "single"
+ ]
+ },
+ "multi": {
+ "tokenizer": "keyword",
+ "filter": [
+ "multi"
+ ]
+ },
+ "preserve": {
+ "tokenizer": "keyword",
+ "filter": [
+ "preserve"
+ ]
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json b/core/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
new file mode 100644
index 0000000000..33c09fe8db
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/shingle_analysis.json
@@ -0,0 +1,23 @@
+{
+ "index":{
+ "analysis":{
+ "filter":{
+ "shingle_inverse":{
+ "type":"shingle",
+ "max_shingle_size" : 3,
+ "min_shingle_size" : 3,
+ "output_unigrams" : false,
+ "output_unigrams_if_no_shingles" : true,
+ "token_separator" : "_"
+ },
+ "shingle_filler":{
+ "type":"shingle",
+ "max_shingle_size" : 3,
+ "min_shingle_size" : 2,
+ "output_unigrams" : false,
+ "filler_token" : "FILLER"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/stop.json b/core/src/test/java/org/elasticsearch/index/analysis/stop.json
new file mode 100644
index 0000000000..717c9fdee5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/stop.json
@@ -0,0 +1,18 @@
+{
+ "index":{
+ "number_of_shards":1,
+ "number_of_replicas":0,
+ "analysis":{
+ "analyzer":{
+ "analyzer1":{
+ "type":"stop",
+ "stopwords":["_english_"]
+ },
+ "analyzer2":{
+ "type":"stop",
+ "stopwords":"_english_"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
new file mode 100644
index 0000000000..289c1a20b7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/SynonymsAnalysisTest.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.analysis.synonyms;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
+import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SynonymsAnalysisTest extends ElasticsearchTestCase {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+ private AnalysisService analysisService;
+
+ @Test
+ public void testSynonymsAnalysis() throws IOException {
+ Settings settings = settingsBuilder().
+ loadFromClasspath("org/elasticsearch/index/analysis/synonyms/synonyms.json")
+ .put("path.home", createTempDir().toString())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+
+ Index index = new Index("test");
+
+ Injector parentInjector = new ModulesBuilder().add(
+ new SettingsModule(settings),
+ new EnvironmentModule(new Environment(settings)),
+ new IndicesAnalysisModule())
+ .createInjector();
+ Injector injector = new ModulesBuilder().add(
+ new IndexSettingsModule(index, settings),
+ new IndexNameModule(index),
+ new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class)))
+ .createChildInjector(parentInjector);
+
+ analysisService = injector.getInstance(AnalysisService.class);
+
+ match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
+ match("synonymAnalyzerWordnet", "abstain", "abstain refrain desist");
+ match("synonymAnalyzerWordnet_file", "abstain", "abstain refrain desist");
+ match("synonymAnalyzerWithsettings", "kimchy", "sha hay");
+
+ }
+
+ private void match(String analyzerName, String source, String target) throws IOException {
+
+ Analyzer analyzer = analysisService.analyzer(analyzerName).analyzer();
+
+ AllEntries allEntries = new AllEntries();
+ allEntries.addText("field", source, 1.0f);
+ allEntries.reset();
+
+ TokenStream stream = AllTokenStream.allTokenStream("_all", allEntries, analyzer);
+ stream.reset();
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
+
+ StringBuilder sb = new StringBuilder();
+ while (stream.incrementToken()) {
+ sb.append(termAtt.toString()).append(" ");
+ }
+
+ MatcherAssert.assertThat(target, equalTo(sb.toString().trim()));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
new file mode 100644
index 0000000000..84898af429
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.json
@@ -0,0 +1,72 @@
+{
+ "index":{
+ "analysis":{
+ "analyzer":{
+ "synonymAnalyzer":{
+ "tokenizer":"standard",
+ "filter":[ "synonym" ]
+ },
+ "synonymAnalyzer_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonym_file" ]
+ },
+ "synonymAnalyzerWordnet":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet" ]
+ },
+ "synonymAnalyzerWordnet_file":{
+ "tokenizer":"standard",
+ "filter":[ "synonymWordnet_file" ]
+ },
+ "synonymAnalyzerWithsettings":{
+ "tokenizer":"trigram",
+ "filter":["synonymWithTokenizerSettings"]
+ }
+ },
+ "tokenizer":{
+ "trigram" : {
+ "type" : "ngram",
+ "min_gram" : 3,
+ "max_gram" : 3
+ }
+ },
+ "filter":{
+ "synonym":{
+ "type":"synonym",
+ "synonyms":[
+ "kimchy => shay",
+ "dude => elasticsearch",
+ "abides => man!"
+ ]
+ },
+ "synonym_file":{
+ "type":"synonym",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms.txt"
+ },
+ "synonymWordnet":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms":[
+ "s(100000001,1,'abstain',v,1,0).",
+ "s(100000001,2,'refrain',v,1,0).",
+ "s(100000001,3,'desist',v,1,0)."
+ ]
+ },
+ "synonymWordnet_file":{
+ "type":"synonym",
+ "format":"wordnet",
+ "synonyms_path":"org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt"
+ },
+ "synonymWithTokenizerSettings":{
+ "type":"synonym",
+ "synonyms":[
+ "kimchy => shay"
+ ],
+ "tokenizer" : "trigram",
+ "min_gram" : 3,
+ "max_gram" : 3
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
new file mode 100644
index 0000000000..ef4b225627
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms.txt
@@ -0,0 +1,3 @@
+kimchy => shay
+dude => elasticsearch
+abides => man!
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
new file mode 100644
index 0000000000..f7b68e399e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/synonyms/synonyms_wordnet.txt
@@ -0,0 +1,3 @@
+s(100000001,1,'abstain',v,1,0).
+s(100000001,2,'refrain',v,1,0).
+s(100000001,3,'desist',v,1,0).
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/test1.json b/core/src/test/java/org/elasticsearch/index/analysis/test1.json
new file mode 100644
index 0000000000..69be6db8f8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/test1.json
@@ -0,0 +1,84 @@
+{
+ "index":{
+ "analysis":{
+ "tokenizer":{
+ "standard":{
+ "type":"standard"
+ }
+ },
+ "char_filter":{
+ "my_html":{
+ "type":"html_strip",
+ "escaped_tags":["xxx", "yyy"],
+ "read_ahead":1024
+ },
+ "my_pattern":{
+ "type":"pattern_replace",
+ "pattern":"sample(.*)",
+ "replacement":"replacedSample $1"
+ },
+ "my_mapping":{
+ "type":"mapping",
+ "mappings":["ph=>f", "qu=>q"]
+ }
+ },
+ "filter":{
+ "stop":{
+ "type":"stop",
+ "stopwords":["test-stop"]
+ },
+ "stop2":{
+ "type":"stop",
+ "stopwords":["stop2-1", "stop2-2"]
+ },
+ "my":{
+ "type":"org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory"
+ },
+ "dict_dec":{
+ "type":"dictionary_decompounder",
+ "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"]
+ }
+ },
+ "analyzer":{
+ "standard":{
+ "alias":"alias1,alias2",
+ "type":"standard",
+ "stopwords":["test1", "test2", "test3"]
+ },
+ "custom1":{
+ "alias":["alias4", "alias5"],
+ "tokenizer":"standard",
+ "filter":["stop", "stop2"]
+ },
+ "custom2":{
+ "tokenizer":"standard",
+ "char_filter":["html_strip", "my_html"]
+ },
+ "custom3":{
+ "tokenizer":"standard",
+ "char_filter":["my_pattern"]
+ },
+ "custom4":{
+ "tokenizer":"standard",
+ "filter":["my"]
+ },
+ "custom5":{
+ "tokenizer":"standard",
+ "char_filter":["my_mapping"]
+ },
+ "custom6":{
+ "tokenizer":"standard",
+ "position_offset_gap": 256
+ },
+ "czechAnalyzerWithStemmer":{
+ "tokenizer":"standard",
+ "filter":["standard", "lowercase", "stop", "czech_stem"]
+ },
+ "decompoundingAnalyzer":{
+ "tokenizer":"standard",
+ "filter":["dict_dec"]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/analysis/test1.yml b/core/src/test/java/org/elasticsearch/index/analysis/test1.yml
new file mode 100644
index 0000000000..81ef235310
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/analysis/test1.yml
@@ -0,0 +1,62 @@
+index :
+ analysis :
+ tokenizer :
+ standard :
+ type : standard
+ char_filter :
+ my_html :
+ type : html_strip
+ escaped_tags : [xxx, yyy]
+ read_ahead : 1024
+ my_pattern :
+ type: pattern_replace
+ pattern: sample(.*)
+ replacement: replacedSample $1
+ my_mapping :
+ type : mapping
+ mappings : [ph=>f, qu=>q]
+ filter :
+ stop :
+ type : stop
+ stopwords : [test-stop]
+ stop2 :
+ type : stop
+ stopwords : [stop2-1, stop2-2]
+ my :
+ type : org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory
+ dict_dec :
+ type : dictionary_decompounder
+ word_list : [donau, dampf, schiff, spargel, creme, suppe]
+ analyzer :
+ standard :
+ alias: alias1,alias2
+ type : standard
+ stopwords : [test1, test2, test3]
+ custom1 :
+ alias : [alias4, alias5]
+ tokenizer : standard
+ filter : [stop, stop2]
+ custom2 :
+ tokenizer : standard
+ char_filter : [html_strip, my_html]
+ custom3 :
+ tokenizer : standard
+ char_filter : [my_pattern]
+ custom4 :
+ tokenizer : standard
+ filter : [my]
+ custom5 :
+ tokenizer : standard
+ char_filter : [my_mapping]
+ custom6 :
+ tokenizer : standard
+ position_offset_gap: 256
+ custom7 :
+ type : standard
+ version: 3.6
+ czechAnalyzerWithStemmer :
+ tokenizer : standard
+ filter : [standard, lowercase, stop, czech_stem]
+ decompoundingAnalyzer :
+ tokenizer : standard
+ filter : [dict_dec]
diff --git a/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java
new file mode 100644
index 0000000000..4125c34790
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.cache.bitset;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class BitSetFilterCacheTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testInvalidateEntries() throws Exception {
+ IndexWriter writer = new IndexWriter(
+ new RAMDirectory(),
+ new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy())
+ );
+ Document document = new Document();
+ document.add(new StringField("field", "value", Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ document = new Document();
+ document.add(new StringField("field", "value", Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ document = new Document();
+ document.add(new StringField("field", "value", Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ IndexReader reader = DirectoryReader.open(writer, false);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ BitsetFilterCache cache = new BitsetFilterCache(new Index("test"), Settings.EMPTY);
+ BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "value"))));
+ TopDocs docs = searcher.search(new ConstantScoreQuery(filter), 1);
+ assertThat(docs.totalHits, equalTo(3));
+
+ // now cached
+ docs = searcher.search(new ConstantScoreQuery(filter), 1);
+ assertThat(docs.totalHits, equalTo(3));
+ // There are 3 segments
+ assertThat(cache.getLoadedFilters().size(), equalTo(3l));
+
+ writer.forceMerge(1);
+ reader.close();
+ reader = DirectoryReader.open(writer, false);
+ searcher = new IndexSearcher(reader);
+
+ docs = searcher.search(new ConstantScoreQuery(filter), 1);
+ assertThat(docs.totalHits, equalTo(3));
+
+ // now cached
+ docs = searcher.search(new ConstantScoreQuery(filter), 1);
+ assertThat(docs.totalHits, equalTo(3));
+ // Only one segment now, so the size must be 1
+ assertThat(cache.getLoadedFilters().size(), equalTo(1l));
+
+ reader.close();
+ writer.close();
+ // There is no reference from readers and writer to any segment in the test index, so the size in the fbs cache must be 0
+ assertThat(cache.getLoadedFilters().size(), equalTo(0l));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
new file mode 100644
index 0000000000..f5fc8c9934
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.codecs.lucene41.Lucene41Codec;
+import org.apache.lucene.codecs.lucene410.Lucene410Codec;
+import org.apache.lucene.codecs.lucene42.Lucene42Codec;
+import org.apache.lucene.codecs.lucene45.Lucene45Codec;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
+import org.apache.lucene.codecs.lucene49.Lucene49Codec;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+@SuppressCodecs("*") // we test against default codec so never get a random one here!
+public class CodecTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testResolveDefaultCodecs() throws Exception {
+ CodecService codecService = createCodecService();
+ assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
+ assertThat(codecService.codec("default"), instanceOf(Lucene50Codec.class));
+ assertThat(codecService.codec("Lucene410"), instanceOf(Lucene410Codec.class));
+ assertThat(codecService.codec("Lucene49"), instanceOf(Lucene49Codec.class));
+ assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class));
+ assertThat(codecService.codec("Lucene45"), instanceOf(Lucene45Codec.class));
+ assertThat(codecService.codec("Lucene40"), instanceOf(Lucene40Codec.class));
+ assertThat(codecService.codec("Lucene41"), instanceOf(Lucene41Codec.class));
+ assertThat(codecService.codec("Lucene42"), instanceOf(Lucene42Codec.class));
+ }
+
+ public void testDefault() throws Exception {
+ Codec codec = createCodecService().codec("default");
+ assertCompressionEquals(Mode.BEST_SPEED, codec);
+ }
+
+ public void testBestCompression() throws Exception {
+ Codec codec = createCodecService().codec("best_compression");
+ assertCompressionEquals(Mode.BEST_COMPRESSION, codec);
+ }
+
+ // write some docs with it, inspect .si to see this was the used compression
+ private void assertCompressionEquals(Mode expected, Codec actual) throws Exception {
+ Directory dir = newDirectory();
+ IndexWriterConfig iwc = newIndexWriterConfig(null);
+ iwc.setCodec(actual);
+ IndexWriter iw = new IndexWriter(dir, iwc);
+ iw.addDocument(new Document());
+ iw.commit();
+ iw.close();
+ DirectoryReader ir = DirectoryReader.open(dir);
+ SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader();
+ String v = sr.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
+ assertNotNull(v);
+ assertEquals(expected, Mode.valueOf(v));
+ ir.close();
+ dir.close();
+ }
+
+ private static CodecService createCodecService() {
+ return createCodecService(Settings.Builder.EMPTY_SETTINGS);
+ }
+
+ private static CodecService createCodecService(Settings settings) {
+ IndexService indexService = createIndex("test", settings);
+ return indexService.injector().getInstance(CodecService.class);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java b/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java
new file mode 100644
index 0000000000..444dfbd28b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/codec/postingformat/Elasticsearch090RWPostingsFormat.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingformat;
+
+import com.google.common.base.Predicates;
+import com.google.common.collect.Iterators;
+
+import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.FilterLeafReader;
+import org.apache.lucene.index.SegmentWriteState;
+import org.elasticsearch.common.util.BloomFilter;
+import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat.BloomFilteredFieldsConsumer;
+import org.elasticsearch.index.codec.postingsformat.BloomFilterPostingsFormat;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/** read-write version with blooms for testing */
+public class Elasticsearch090RWPostingsFormat extends Elasticsearch090PostingsFormat {
+ @Override
+ public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+ final PostingsFormat delegate = getDefaultWrapped();
+ final BloomFilteredFieldsConsumer fieldsConsumer = new BloomFilterPostingsFormat(delegate, BloomFilter.Factory.DEFAULT) {
+ @Override
+ public BloomFilteredFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
+ return new BloomFilteredFieldsConsumer(delegate.fieldsConsumer(state), state,delegate);
+ }
+ }.fieldsConsumer(state);
+ return new FieldsConsumer() {
+
+ @Override
+ public void write(Fields fields) throws IOException {
+
+ Fields maskedFields = new FilterLeafReader.FilterFields(fields) {
+ @Override
+ public Iterator<String> iterator() {
+ return Iterators.filter(this.in.iterator(), Predicates.not(UID_FIELD_FILTER));
+ }
+ };
+ fieldsConsumer.getDelegate().write(maskedFields);
+ maskedFields = new FilterLeafReader.FilterFields(fields) {
+ @Override
+ public Iterator<String> iterator() {
+ return Iterators.singletonIterator(UidFieldMapper.NAME);
+ }
+ };
+ // only go through bloom for the UID field
+ fieldsConsumer.write(maskedFields);
+ }
+
+ @Override
+ public void close() throws IOException {
+ fieldsConsumer.close();
+ }
+ };
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/codec/postingformat/ElasticsearchPostingsFormatTest.java b/core/src/test/java/org/elasticsearch/index/codec/postingformat/ElasticsearchPostingsFormatTest.java
new file mode 100644
index 0000000000..58bbe5825f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/codec/postingformat/ElasticsearchPostingsFormatTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.codec.postingformat;
+
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.index.BasePostingsFormatTestCase;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+/** Runs elasticsearch postings format against lucene's standard postings format tests */
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@TimeoutSuite(millis = TimeUnits.HOUR)
+@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
+public class ElasticsearchPostingsFormatTest extends BasePostingsFormatTestCase {
+
+ @Override
+ protected Codec getCodec() {
+ return TestUtil.alwaysPostingsFormat(new Elasticsearch090RWPostingsFormat());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java b/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
new file mode 100644
index 0000000000..cf21ea84b9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotDeletionPolicyTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.lucene.index.DirectoryReader.listCommits;
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * A set of tests for {@link org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy}.
+ */
+public class SnapshotDeletionPolicyTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ private RAMDirectory dir;
+ private SnapshotDeletionPolicy deletionPolicy;
+ private IndexWriter indexWriter;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ dir = new RAMDirectory();
+ deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
+ indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
+ .setIndexDeletionPolicy(deletionPolicy)
+ .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ indexWriter.close();
+ dir.close();
+ }
+
+ private Document testDocument() {
+ Document document = new Document();
+ document.add(new TextField("test", "1", Field.Store.YES));
+ return document;
+ }
+
+ @Test
+ public void testSimpleSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the commit, add a document and commit, now we should be back to one commit point
+ snapshot.close();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiSnapshot() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // take two snapshots
+ SnapshotIndexCommit snapshot1 = deletionPolicy.snapshot();
+ SnapshotIndexCommit snapshot2 = deletionPolicy.snapshot();
+
+ // we should have two commits points
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release one snapshot, we should still have two commit points
+ snapshot1.close();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // release the second snapshot, we should be back to one commit
+ snapshot2.close();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+
+ @Test
+ public void testMultiReleaseException() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and release it twice, the seconds should throw an exception
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ snapshot.close();
+ snapshot.close();
+ }
+
+ @Test
+ public void testSimpleSnapshots() throws Exception {
+ // add a document and commit, resulting in one commit point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // add another document and commit, resulting again in one commint point
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+
+ // snapshot the last commit, and then add a document and commit, now we should have two commit points
+ SnapshotIndexCommit snapshot = deletionPolicy.snapshot();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(2));
+
+ // now, take a snapshot of all the commits
+ SnapshotIndexCommits snapshots = deletionPolicy.snapshots();
+ assertThat(snapshots.size(), equalTo(2));
+
+ // release the snapshot, add a document and commit
+ // we should have 3 commits points since we are holding onto the first two with snapshots
+ // and we are using the keep only last
+ snapshot.close();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(3));
+
+ // now release the snapshots, we should be back to a single commit point
+ snapshots.close();
+ indexWriter.addDocument(testDocument());
+ indexWriter.commit();
+ assertThat(listCommits(dir).size(), equalTo(1));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java b/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
new file mode 100644
index 0000000000..6ef7974d17
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/deletionpolicy/SnapshotIndexCommitExistsMatcher.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.deletionpolicy;
+
+import com.google.common.collect.Sets;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+
+/**
+ *
+ */
+public class SnapshotIndexCommitExistsMatcher extends TypeSafeMatcher<SnapshotIndexCommit> {
+
+ @Override
+ public boolean matchesSafely(SnapshotIndexCommit snapshotIndexCommit) {
+ try {
+ HashSet<String> files = Sets.newHashSet(snapshotIndexCommit.getDirectory().listAll());
+ for (String fileName : snapshotIndexCommit.getFiles()) {
+ if (files.contains(fileName) == false) {
+ return false;
+ }
+ }
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ return true;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("an index commit existence");
+ }
+
+ public static Matcher<SnapshotIndexCommit> snapshotIndexCommitExists() {
+ return new SnapshotIndexCommitExistsMatcher();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java b/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java
new file mode 100644
index 0000000000..75aa4ed9b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/CommitStatsTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.index.SegmentInfos;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+
+
+public class CommitStatsTests extends ElasticsearchTestCase {
+ public void testStreamingWithNullId() throws IOException {
+ SegmentInfos segmentInfos = new SegmentInfos();
+ CommitStats commitStats = new CommitStats(segmentInfos);
+ org.elasticsearch.Version targetNodeVersion = randomVersion(random());
+
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ out.setVersion(targetNodeVersion);
+ commitStats.writeTo(out);
+
+ ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
+ in.setVersion(targetNodeVersion);
+ CommitStats readCommitStats = CommitStats.readCommitStatsFrom(in);
+ assertNull(readCommitStats.getId());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
new file mode 100644
index 0000000000..583b0f5d55
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/EngineSearcherTotalHitsMatcher.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public final class EngineSearcherTotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> {
+
+ private final Query query;
+
+ private final int totalHits;
+ private int count;
+
+ public EngineSearcherTotalHitsMatcher(Query query, int totalHits) {
+ this.query = query;
+ this.totalHits = totalHits;
+ }
+
+ @Override
+ public boolean matchesSafely(Engine.Searcher searcher) {
+ try {
+ this.count = (int) Lucene.count(searcher.searcher(), query);
+ return count == totalHits;
+ } catch (IOException e) {
+ return false;
+ }
+ }
+
+ @Override
+ protected void describeMismatchSafely(Engine.Searcher item, Description mismatchDescription) {
+ mismatchDescription.appendText("was ").appendValue(count);
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("total hits of size ").appendValue(totalHits).appendText(" with query ").appendValue(query);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(Query query, int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(query, totalHits);
+ }
+
+ public static Matcher<Engine.Searcher> engineSearcherTotalHits(int totalHits) {
+ return new EngineSearcherTotalHitsMatcher(Queries.newMatchAllQuery(), totalHits);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineIntegrationTest.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineIntegrationTest.java
new file mode 100644
index 0000000000..9f46af7626
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineIntegrationTest.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+public class InternalEngineIntegrationTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSetIndexCompoundOnFlush() {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("number_of_replicas", 0).put("number_of_shards", 1)).get();
+ ensureGreen();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 1, "test");
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, false)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(1, 2, "test");
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, true)).get();
+ client().prepareIndex("test", "foo").setSource("field", "foo").get();
+ refresh();
+ assertTotalCompoundSegments(2, 3, "test");
+ }
+
+ private void assertTotalCompoundSegments(int i, int t, String index) {
+ IndicesSegmentResponse indicesSegmentResponse = client().admin().indices().prepareSegments(index).get();
+ assertNotNull("indices segments response should contain indices", indicesSegmentResponse.getIndices());
+ IndexSegments indexSegments = indicesSegmentResponse.getIndices().get(index);
+ assertNotNull(indexSegments);
+ assertNotNull(indexSegments.getShards());
+ Collection<IndexShardSegments> values = indexSegments.getShards().values();
+ int compounds = 0;
+ int total = 0;
+ for (IndexShardSegments indexShardSegments : values) {
+ for (ShardSegments s : indexShardSegments) {
+ for (Segment segment : s) {
+ if (segment.isSearch() && segment.getNumDocs() > 0) {
+ if (segment.isCompound()) {
+ compounds++;
+ }
+ total++;
+ }
+ }
+ }
+ }
+ assertThat(compounds, Matchers.equalTo(i));
+ assertThat(total, Matchers.equalTo(t));
+ }
+
+ private Set<Segment> segments(IndexSegments segments) {
+ Set<Segment> segmentSet = new HashSet<>();
+ for (IndexShardSegments s : segments) {
+ for (ShardSegments shardSegments : s) {
+ segmentSet.addAll(shardSegments.getSegments());
+ }
+ }
+ return segmentSet;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeTests.java
new file mode 100644
index 0000000000..b7325c350d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.merge.policy.LogDocMergePolicyProvider;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+/**
+ */
+@ClusterScope(numDataNodes = 1, scope = Scope.SUITE)
+public class InternalEngineMergeTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testMergesHappening() throws InterruptedException, IOException, ExecutionException {
+ final int numOfShards = randomIntBetween(1,5);
+ // some settings to keep num segments low
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numOfShards)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(LogDocMergePolicyProvider.MIN_MERGE_DOCS_KEY, 10)
+ .put(LogDocMergePolicyProvider.MERGE_FACTORY_KEY, 5)
+ .put(LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB, 0.5)
+ .build()));
+ long id = 0;
+ final int rounds = scaledRandomIntBetween(50, 300);
+ logger.info("Starting rounds [{}] ", rounds);
+ for (int i = 0; i < rounds; ++i) {
+ final int numDocs = scaledRandomIntBetween(100, 1000);
+ BulkRequestBuilder request = client().prepareBulk();
+ for (int j = 0; j < numDocs; ++j) {
+ request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++)).source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
+ }
+ BulkResponse response = request.execute().actionGet();
+ refresh();
+ assertNoFailures(response);
+ IndicesStatsResponse stats = client().admin().indices().prepareStats("test").setSegments(true).setMerge(true).get();
+ logger.info("index round [{}] - segments {}, total merges {}, current merge {}", i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ }
+ final long upperNumberSegments = 2 * numOfShards * 10;
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
+ logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ long current = stats.getPrimaries().getMerge().getCurrent();
+ long count = stats.getPrimaries().getSegments().getCount();
+ return count < upperNumberSegments && current == 0;
+ }
+ });
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
+ logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
+ long count = stats.getPrimaries().getSegments().getCount();
+ assertThat(count, Matchers.lessThanOrEqualTo(upperNumberSegments));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
new file mode 100644
index 0000000000..856a275a5b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.index.LiveIndexWriterConfig;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
+
+ public void testSettingsUpdate() {
+ final IndexService service = createIndex("foo");
+ // INDEX_COMPOUND_ON_FLUSH
+ InternalEngine engine = ((InternalEngine)engine(service));
+ assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true));
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, false).build()).get();
+ assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(false));
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, true).build()).get();
+ assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true));
+
+
+ // VERSION MAP SIZE
+ long indexBufferSize = engine.config().getIndexingBufferSize().bytes();
+ long versionMapSize = engine.config().getVersionMapSize().bytes();
+ assertThat(versionMapSize, equalTo((long) (indexBufferSize * 0.25)));
+
+ final int iters = between(1, 20);
+ for (int i = 0; i < iters; i++) {
+ boolean compoundOnFlush = randomBoolean();
+
+ // Tricky: TimeValue.parseTimeValue casts this long to a double, which steals 11 of the 64 bits for exponent, so we can't use
+ // the full long range here else the assert below fails:
+ long gcDeletes = random().nextLong() & (Long.MAX_VALUE >> 11);
+
+ boolean versionMapAsPercent = randomBoolean();
+ double versionMapPercent = randomIntBetween(0, 100);
+ long versionMapSizeInMB = randomIntBetween(10, 20);
+ String versionMapString = versionMapAsPercent ? versionMapPercent + "%" : versionMapSizeInMB + "mb";
+
+ Settings build = Settings.builder()
+ .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush)
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, gcDeletes, TimeUnit.MILLISECONDS)
+ .put(EngineConfig.INDEX_VERSION_MAP_SIZE, versionMapString)
+ .build();
+ assertEquals(gcDeletes, build.getAsTime(EngineConfig.INDEX_GC_DELETES_SETTING, null).millis());
+
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(build).get();
+ LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
+ assertEquals(engine.config().isCompoundOnFlush(), compoundOnFlush);
+ assertEquals(currentIndexWriterConfig.getUseCompoundFile(), compoundOnFlush);
+
+
+ assertEquals(engine.config().getGcDeletesInMillis(), gcDeletes);
+ assertEquals(engine.getGcDeletesInMillis(), gcDeletes);
+
+ indexBufferSize = engine.config().getIndexingBufferSize().bytes();
+ versionMapSize = engine.config().getVersionMapSize().bytes();
+ if (versionMapAsPercent) {
+ assertThat(versionMapSize, equalTo((long) (indexBufferSize * (versionMapPercent / 100))));
+ } else {
+ assertThat(versionMapSize, equalTo(1024 * 1024 * versionMapSizeInMB));
+ }
+ }
+
+ Settings settings = Settings.builder()
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
+ .build();
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ assertEquals(engine.getGcDeletesInMillis(), 1000);
+ assertTrue(engine.config().isEnableGcDeletes());
+
+
+ settings = Settings.builder()
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, "0ms")
+ .build();
+
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ assertEquals(engine.getGcDeletesInMillis(), 0);
+ assertTrue(engine.config().isEnableGcDeletes());
+
+ settings = Settings.builder()
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
+ .build();
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ assertEquals(engine.getGcDeletesInMillis(), 1000);
+ assertTrue(engine.config().isEnableGcDeletes());
+
+ settings = Settings.builder()
+ .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "sdfasfd")
+ .build();
+ try {
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ fail("settings update didn't fail, but should have");
+ } catch (IllegalArgumentException e) {
+ // good
+ }
+
+ settings = Settings.builder()
+ .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "-12%")
+ .build();
+ try {
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ fail("settings update didn't fail, but should have");
+ } catch (IllegalArgumentException e) {
+ // good
+ }
+
+ settings = Settings.builder()
+ .put(EngineConfig.INDEX_VERSION_MAP_SIZE, "130%")
+ .build();
+ try {
+ client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
+ fail("settings update didn't fail, but should have");
+ } catch (IllegalArgumentException e) {
+ // good
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
new file mode 100644
index 0000000000..c8234e8f4b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -0,0 +1,1897 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.engine.Engine.Searcher;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.object.RootObjectMapper;
+import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexDynamicSettingsModule;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
+import org.elasticsearch.index.similarity.SimilarityLookupService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
+import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
+import static org.hamcrest.Matchers.*;
+
+public class InternalEngineTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected ThreadPool threadPool;
+
+ private Store store;
+ private Store storeReplica;
+
+ protected InternalEngine engine;
+ protected InternalEngine replicaEngine;
+
+ private Settings defaultSettings;
+ private int indexConcurrency;
+ private String codecName;
+ private Path primaryTranslogDir;
+ private Path replicaTranslogDir;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+
+ CodecService codecService = new CodecService(shardId.index());
+ indexConcurrency = randomIntBetween(1, 20);
+ String name = Codec.getDefault().getName();
+ if (Arrays.asList(codecService.availableCodecs()).contains(name)) {
+ // some codecs are read-only, so we only keep the codec if it is both available in the
+ // CodecService and was randomly selected by the Lucene test framework; otherwise fall back
+ codecName = name;
+ } else {
+ codecName = "default";
+ }
+ defaultSettings = Settings.builder()
+ .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
+ .put(EngineConfig.INDEX_CODEC_SETTING, codecName)
+ .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency)
+ .build(); // TODO randomize more settings
+ threadPool = new ThreadPool(getClass().getName());
+ store = createStore();
+ storeReplica = createStore();
+ Lucene.cleanLuceneIndex(store.directory());
+ Lucene.cleanLuceneIndex(storeReplica.directory());
+ primaryTranslogDir = createTempDir("translog-primary");
+ engine = createEngine(store, primaryTranslogDir);
+ LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
+
+ assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName());
+ if (randomBoolean()) {
+ engine.config().setEnableGcDeletes(false);
+ }
+ replicaTranslogDir = createTempDir("translog-replica");
+ replicaEngine = createEngine(storeReplica, replicaTranslogDir);
+ currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig();
+
+ assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName());
+ if (randomBoolean()) {
+ engine.config().setEnableGcDeletes(false);
+ }
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ IOUtils.close(
+ replicaEngine, storeReplica,
+ engine, store);
+ terminate(threadPool);
+ }
+
+
+ private Document testDocumentWithTextField() {
+ Document document = testDocument();
+ document.add(new TextField("value", "test", Field.Store.YES));
+ return document;
+ }
+
+ private Document testDocument() {
+ return new Document();
+ }
+
+
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, Document document, BytesReference source, Mapping mappingUpdate) {
+ Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field versionField = new NumericDocValuesField("_version", 0);
+ document.add(uidField);
+ document.add(versionField);
+ return new ParsedDocument(uidField, versionField, id, type, routing, timestamp, ttl, Arrays.asList(document), source, mappingUpdate);
+ }
+
+ protected Store createStore() throws IOException {
+ return createStore(newDirectory());
+ }
+
+ protected Store createStore(final Directory directory) throws IOException {
+ final DirectoryService directoryService = new DirectoryService(shardId, EMPTY_SETTINGS) {
+ @Override
+ public Directory newDirectory() throws IOException {
+ return directory;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+ };
+ return new Store(shardId, EMPTY_SETTINGS, directoryService, new DummyShardLock(shardId));
+ }
+
+ protected Translog createTranslog() throws IOException {
+ return createTranslog(primaryTranslogDir);
+ }
+
+ protected Translog createTranslog(Path translogPath) throws IOException {
+ TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, EMPTY_SETTINGS, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
+ return new Translog(translogConfig);
+ }
+
+ protected Translog createTranslogReplica() throws IOException {
+ return createTranslog(replicaTranslogDir);
+ }
+
+ protected IndexDeletionPolicy createIndexDeletionPolicy() {
+ return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS);
+ }
+
+ protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
+ return new SnapshotDeletionPolicy(createIndexDeletionPolicy());
+ }
+
+ protected MergePolicyProvider<?> createMergePolicy() {
+ return new LogByteSizeMergePolicyProvider(store, new IndexSettingsService(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ protected MergeSchedulerProvider createMergeScheduler(IndexSettingsService indexSettingsService) {
+ return new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, indexSettingsService);
+ }
+
+ protected InternalEngine createEngine(Store store, Path translogPath) {
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+ return createEngine(indexSettingsService, store, translogPath, createMergeScheduler(indexSettingsService));
+ }
+
+ protected InternalEngine createEngine(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) {
+ return new InternalEngine(config(indexSettingsService, store, translogPath, mergeSchedulerProvider), false);
+ }
+
+ public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) {
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettingsService.getSettings(), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
+
+ EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService
+ , null, store, createSnapshotDeletionPolicy(), createMergePolicy(), mergeSchedulerProvider,
+ iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(shardId.index()), new Engine.FailedEngineListener() {
+ @Override
+ public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
+ // we don't need to notify anybody in this test
+ }
+ }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
+
+ return config;
+ }
+
+ protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
+ protected static final BytesReference B_2 = new BytesArray(new byte[]{2});
+ protected static final BytesReference B_3 = new BytesArray(new byte[]{3});
+
+ @Test
+ public void testSegments() throws Exception {
+ List<Segment> segments = engine.segments(false);
+ assertThat(segments.isEmpty(), equalTo(true));
+ assertThat(engine.segmentsStats().getCount(), equalTo(0l));
+ assertThat(engine.segmentsStats().getMemoryInBytes(), equalTo(0l));
+ final boolean defaultCompound = defaultSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, true);
+
+ // create a doc and refresh
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.refresh("test");
+
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(1));
+ SegmentsStats stats = engine.segmentsStats();
+ assertThat(stats.getCount(), equalTo(1l));
+ assertThat(stats.getTermsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getNormsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0l));
+ assertThat(segments.get(0).isCommitted(), equalTo(false));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+ assertThat(segments.get(0).ramTree, nullValue());
+
+ engine.flush();
+
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(1));
+ assertThat(engine.segmentsStats().getCount(), equalTo(1l));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ engine.config().setCompoundOnFlush(false);
+
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ engine.refresh("test");
+
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(engine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
+ assertThat(engine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
+ assertThat(engine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(engine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
+ assertThat(engine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh("test");
+
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ assertThat(engine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ engine.config().setCompoundOnFlush(true);
+ ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ engine.create(new Engine.Create(null, newUid("4"), doc4));
+ engine.refresh("test");
+
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(3));
+ assertThat(engine.segmentsStats().getCount(), equalTo(3l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ assertThat(segments.get(2).isCommitted(), equalTo(false));
+ assertThat(segments.get(2).isSearch(), equalTo(true));
+ assertThat(segments.get(2).getNumDocs(), equalTo(1));
+ assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(2).isCompound(), equalTo(true));
+ }
+
+ public void testVerboseSegments() throws Exception {
+ List<Segment> segments = engine.segments(true);
+ assertThat(segments.isEmpty(), equalTo(true));
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ engine.refresh("test");
+
+ segments = engine.segments(true);
+ assertThat(segments.size(), equalTo(1));
+ assertThat(segments.get(0).ramTree, notNullValue());
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
+ engine.create(new Engine.Create(null, newUid("2"), doc2));
+ engine.refresh("test");
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ engine.create(new Engine.Create(null, newUid("3"), doc3));
+ engine.refresh("test");
+
+ segments = engine.segments(true);
+ assertThat(segments.size(), equalTo(3));
+ assertThat(segments.get(0).ramTree, notNullValue());
+ assertThat(segments.get(1).ramTree, notNullValue());
+ assertThat(segments.get(2).ramTree, notNullValue());
+
+ }
+
+
+ @Test
+ public void testSegmentsWithMergeFlag() throws Exception {
+ ConcurrentMergeSchedulerProvider mergeSchedulerProvider = new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, new IndexSettingsService(shardId.index(), EMPTY_SETTINGS));
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+ try (Store store = createStore();
+ Engine engine = createEngine(indexSettingsService, store, createTempDir(), mergeSchedulerProvider)) {
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ engine.flush();
+ assertThat(engine.segments(false).size(), equalTo(1));
+ index = new Engine.Index(null, newUid("2"), doc);
+ engine.index(index);
+ engine.flush();
+ List<Segment> segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ for (Segment segment : segments) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+ index = new Engine.Index(null, newUid("3"), doc);
+ engine.index(index);
+ engine.flush();
+ segments = engine.segments(false);
+ assertThat(segments.size(), equalTo(3));
+ for (Segment segment : segments) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ index = new Engine.Index(null, newUid("4"), doc);
+ engine.index(index);
+ engine.flush();
+ final long gen1 = store.readLastCommittedSegmentsInfo().getGeneration();
+ // now, optimize and wait for merges, see that we have no merge flag
+ engine.forceMerge(true);
+
+ for (Segment segment : engine.segments(false)) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+ // we could have multiple underlying merges, so the generation may increase more than once
+ assertTrue(store.readLastCommittedSegmentsInfo().getGeneration() > gen1);
+
+ final boolean flush = randomBoolean();
+ final long gen2 = store.readLastCommittedSegmentsInfo().getGeneration();
+ engine.forceMerge(flush);
+ for (Segment segment : engine.segments(false)) {
+ assertThat(segment.getMergeId(), nullValue());
+ }
+
+ if (flush) {
+ // we should have had just 1 merge, so last generation should be exact
+ assertEquals(gen2 + 1, store.readLastCommittedSegmentsInfo().getLastGeneration());
+ }
+ }
+ }
+
+ public void testCommitStats() {
+ Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ CommitStats stats1 = engine.commitStats();
+ assertThat(stats1.getGeneration(), greaterThan(0l));
+ assertThat(stats1.getId(), notNullValue());
+ assertThat(stats1.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
+
+ engine.flush(true, true);
+ CommitStats stats2 = engine.commitStats();
+ assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration()));
+ assertThat(stats2.getId(), notNullValue());
+ assertThat(stats2.getId(), not(equalTo(stats1.getId())));
+ assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
+ assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));
+ assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))));
+ assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY)))
+ ;
+ }
+
+ @Test
+ public void testSimpleOperations() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.close();
+
+ // create a document
+ Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ // but, we can still get it (in realtime)
+ Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // but it is not visible to a non-realtime get
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+ // refresh and it should be there
+ engine.refresh("test");
+
+ // now its there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+
+ // also in non realtime
+ getResult = engine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ document.add(new Field(SourceFieldMapper.NAME, B_2.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, null);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // its not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // but, we can still get it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_2.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // refresh and it should be updated
+ engine.refresh("test");
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // now delete
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+
+ // its not deleted yet
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // but, get should not see it (in realtime)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // refresh and it should be deleted
+ engine.refresh("test");
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // add it back
+ document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // refresh and it should be there
+ engine.refresh("test");
+
+ // now its there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // now flush
+ engine.flush();
+
+ // and, verify get (in real time)
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // make sure we can still work with the engine
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ engine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // its not updated yet...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // refresh and it should be updated
+ engine.refresh("test");
+
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+ }
+
+ // Verifies that an acquired searcher pins a point-in-time view: a searcher
+ // held open across a delete + refresh must keep seeing the pre-delete
+ // document until it is closed, while a freshly acquired searcher sees the delete.
+ @Test
+ public void testSearchResultRelease() throws Exception {
+ Engine.Searcher searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.close();
+
+ // create a document
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there... (no refresh yet, so new searchers don't see it either)
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ // refresh and it should be there
+ engine.refresh("test");
+
+ // now its there...
+ searchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ // don't release the search result yet...
+
+ // delete, refresh and do a new search, it should not be there
+ engine.delete(new Engine.Delete("test", "1", newUid("1")));
+ engine.refresh("test");
+ Engine.Searcher updateSearchResult = engine.acquireSearcher("test");
+ MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ updateSearchResult.close();
+
+ // the non release search result should not see the deleted yet...
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+ }
+
+ // Synced flush: a sync id is only committed when the supplied commit id
+ // matches the last commit (else COMMIT_MISMATCH) and there are no pending
+ // unflushed operations (else PENDING_OPERATIONS); on SUCCESS the sync id is
+ // stored in the commit user data.
+ public void testSyncedFlush() throws IOException {
+ final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ Engine.CommitId commitID = engine.flush();
+ assertThat(commitID, equalTo(new Engine.CommitId(store.readLastCommittedSegmentsInfo().getId())));
+ // flip the first byte so the id no longer matches the last commit
+ byte[] wrongBytes = Base64.decode(commitID.toString());
+ wrongBytes[0] = (byte) ~wrongBytes[0];
+ Engine.CommitId wrongId = new Engine.CommitId(wrongBytes);
+ assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId),
+ Engine.SyncedFlushResult.COMMIT_MISMATCH);
+ engine.create(new Engine.Create(null, newUid("2"), doc));
+ assertEquals("should fail to sync flush with right id but pending doc", engine.syncFlush(syncId + "2", commitID),
+ Engine.SyncedFlushResult.PENDING_OPERATIONS);
+ commitID = engine.flush();
+ assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
+ Engine.SyncedFlushResult.SUCCESS);
+ // the sync id must be present both in the on-disk commit and in memory
+ assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ }
+
+ // A committed sync id must survive closing and reopening the engine,
+ // whether closed plainly or via flushAndClose.
+ // NOTE(review): "Sycned" is a typo for "Synced" in this method name (and the
+ // next); renaming test methods is left as a separate change.
+ public void testSycnedFlushSurvivesEngineRestart() throws IOException {
+ final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ final Engine.CommitId commitID = engine.flush();
+ assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
+ Engine.SyncedFlushResult.SUCCESS);
+ assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ EngineConfig config = engine.config();
+ if (randomBoolean()) {
+ engine.close();
+ } else {
+ engine.flushAndClose();
+ }
+ engine = new InternalEngine(config, randomBoolean());
+ assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ }
+
+ // If there is an operation to replay on recovery, the sync id must be
+ // dropped when the engine restarts (the commit no longer matches the data).
+ public void testSycnedFlushVanishesOnReplay() throws IOException {
+ final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ final Engine.CommitId commitID = engine.flush();
+ assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID),
+ Engine.SyncedFlushResult.SUCCESS);
+ assertEquals(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId);
+ // leave an unflushed doc behind so the restart has something to replay
+ doc = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), new BytesArray("{}"), null);
+ engine.create(new Engine.Create(null, newUid("2"), doc));
+ EngineConfig config = engine.config();
+ engine.close();
+ final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (directory != null) {
+ // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
+ // this so we have to disable the check explicitly
+ directory.setPreventDoubleWrite(false);
+ }
+ engine = new InternalEngine(config, false);
+ assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+
+ // Internal versioning: a fresh create gets version 1 on the primary, and the
+ // replica replays the same operation keeping version 1.
+ @Test
+ public void testVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(1l));
+ }
+
+ // External versioning: the engine keeps the caller-supplied version (12)
+ // on the primary and the replica replays it unchanged.
+ @Test
+ public void testExternalVersioningNewCreate() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc, 12, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, 0);
+ engine.create(create);
+ assertThat(create.version(), equalTo(12l));
+
+ create = new Engine.Create(null, newUid("1"), doc, create.version(), create.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.create(create);
+ assertThat(create.version(), equalTo(12l));
+ }
+
+ // Same as testVersioningNewCreate, but for index operations.
+ @Test
+ public void testVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+ }
+
+ // Same as testExternalVersioningNewCreate, but for index operations.
+ @Test
+ public void testExternalVersioningNewIndex() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(12l));
+ }
+
+ // Internal versioning conflicts: with the document at version 2, indexing
+ // with a stale version (1) or a future version (3) must both throw
+ // VersionConflictEngineException.
+ @Test
+ public void testVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ index = new Engine.Index(null, newUid("1"), doc, 1l, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work as well
+ index = new Engine.Index(null, newUid("1"), doc, 3l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // External versioning conflicts: a version lower than the current one (13 < 14)
+ // must be rejected with VersionConflictEngineException.
+ @Test
+ public void testExternalVersioningIndexConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ index = new Engine.Index(null, newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // Same internal-versioning conflict scenario, repeated after a flush —
+ // presumably to exercise version resolution once operations are committed
+ // rather than only in the live version map; confirm against Engine internals.
+ @Test
+ public void testVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush();
+
+ index = new Engine.Index(null, newUid("1"), doc, 1l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work as well
+ index = new Engine.Index(null, newUid("1"), doc, 3l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // External-versioning conflict scenario, repeated after a flush.
+ @Test
+ public void testExternalVersioningIndexConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc, 12, VersionType.EXTERNAL, PRIMARY, 0);
+ engine.index(index);
+ assertThat(index.version(), equalTo(12l));
+
+ index = new Engine.Index(null, newUid("1"), doc, 14, VersionType.EXTERNAL, PRIMARY, 0);
+ engine.index(index);
+ assertThat(index.version(), equalTo(14l));
+
+ engine.flush();
+
+ index = new Engine.Index(null, newUid("1"), doc, 13, VersionType.EXTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // Force-merges to a single segment, then checks delete handling: with
+ // expunge-deletes the removed doc is reclaimed (maxDoc shrinks to numDocs-1);
+ // without it the tombstone remains counted in maxDoc.
+ public void testForceMerge() {
+ int numDocs = randomIntBetween(10, 100);
+ for (int i = 0; i < numDocs; i++) {
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid(Integer.toString(i)), doc);
+ engine.index(index);
+ engine.refresh("test");
+ }
+ try (Engine.Searcher test = engine.acquireSearcher("test")) {
+ assertEquals(numDocs, test.reader().numDocs());
+ }
+ engine.forceMerge(true, 1, false, false, false);
+ assertEquals(engine.segments(true).size(), 1);
+
+ ParsedDocument doc = testParsedDocument(Integer.toString(0), Integer.toString(0), "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid(Integer.toString(0)), doc);
+ engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
+ engine.forceMerge(true, 10, true, false, false); //expunge deletes
+
+ assertEquals(engine.segments(true).size(), 1);
+ try (Engine.Searcher test = engine.acquireSearcher("test")) {
+ assertEquals(numDocs - 1, test.reader().numDocs());
+ // the deleted doc was reclaimed, so maxDoc shrank too
+ assertEquals(numDocs - 1, test.reader().maxDoc());
+ }
+
+ doc = testParsedDocument(Integer.toString(1), Integer.toString(1), "test", null, -1, -1, testDocument(), B_1, null);
+ index = new Engine.Index(null, newUid(Integer.toString(1)), doc);
+ engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
+ engine.forceMerge(true, 10, false, false, false); // NOTE(review): third flag is false here, unlike the call above — the assertions below expect the delete NOT to be reclaimed (maxDoc stays numDocs-1)
+
+ assertEquals(engine.segments(true).size(), 1);
+ try (Engine.Searcher test = engine.acquireSearcher("test")) {
+ assertEquals(numDocs - 2, test.reader().numDocs());
+ assertEquals(numDocs - 1, test.reader().maxDoc());
+ }
+ }
+
+ // Races force-merge against a concurrent indexing thread and then closes the
+ // engine while the thread is still looping; the close must surface as
+ // ForceMergeFailedEngineException, AlreadyClosedException or
+ // EngineClosedException in the worker rather than anything worse.
+ public void testForceMergeAndClose() throws IOException, InterruptedException {
+ int numIters = randomIntBetween(2, 10);
+ for (int j = 0; j < numIters; j++) {
+ try (Store store = createStore()) {
+ final InternalEngine engine = createEngine(store, createTempDir());
+ final CountDownLatch startGun = new CountDownLatch(1);
+ final CountDownLatch indexed = new CountDownLatch(1);
+
+ Thread thread = new Thread() {
+ public void run() {
+ try {
+ try {
+ startGun.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ int i = 0;
+ // loop forever: the only exits are the exceptions below,
+ // triggered once the main thread closes the engine
+ while (true) {
+ int numDocs = randomIntBetween(1, 20);
+ for (int j = 0; j < numDocs; j++) {
+ i++;
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid(Integer.toString(i)), doc);
+ engine.index(index);
+ }
+ engine.refresh("test");
+ indexed.countDown();
+ try {
+ engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean());
+ } catch (ForceMergeFailedEngineException ex) {
+ // ok
+ return;
+ }
+ }
+ } catch (AlreadyClosedException | EngineClosedException ex) {
+ // fine
+ }
+ }
+ };
+
+ thread.start();
+ startGun.countDown();
+ int someIters = randomIntBetween(1, 10);
+ for (int i = 0; i < someIters; i++) {
+ engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean());
+ }
+ // make sure at least one batch was indexed before we tear the engine down
+ indexed.await();
+ IOUtils.close(engine);
+ }
+ }
+
+ }
+
+ // Delete conflicts under internal versioning: stale (1) and future (3)
+ // versioned deletes conflict while the doc is at version 2; the matching
+ // version (2) deletes and bumps to 3; afterwards versioned index/create
+ // against the tombstone must also conflict.
+ @Test
+ public void testVersioningDeleteConflict() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1l, VersionType.INTERNAL, PRIMARY, 0, false);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work as well
+ delete = new Engine.Delete("test", "1", newUid("1"), 3l, VersionType.INTERNAL, PRIMARY, 0, false);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1"), 2l, VersionType.INTERNAL, PRIMARY, 0, false);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // now check if we can index to a delete doc with version
+ index = new Engine.Index(null, newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // we shouldn't be able to create as well
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.create(create);
+ fail(); // BUGFIX(review): was missing — without it the test silently passes when no conflict is thrown
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // Same delete-conflict scenario with flushes interleaved, so version
+ // resolution also runs against committed state.
+ @Test
+ public void testVersioningDeleteConflictWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ engine.flush();
+
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"), 1l, VersionType.INTERNAL, PRIMARY, 0, false);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // future versions should not work as well
+ delete = new Engine.Delete("test", "1", newUid("1"), 3l, VersionType.INTERNAL, PRIMARY, 0, false);
+ try {
+ engine.delete(delete);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ engine.flush();
+
+ // now actually delete
+ delete = new Engine.Delete("test", "1", newUid("1"), 2l, VersionType.INTERNAL, PRIMARY, 0, false);
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ engine.flush();
+
+ // now check if we can index to a delete doc with version
+ index = new Engine.Index(null, newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // we shouldn't be able to create as well
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc, 2l, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.create(create);
+ fail(); // BUGFIX(review): was missing — without it the test silently passes when no conflict is thrown
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // Creating an already-existing document with MATCH_ANY must throw
+ // DocumentAlreadyExistsException.
+ @Test
+ public void testVersioningCreateExistsException() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ create = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
+ // Same as above, but with a flush between the two creates so the existing
+ // doc is looked up from committed state.
+ @Test
+ public void testVersioningCreateExistsExceptionWithFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Create create = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0);
+ engine.create(create);
+ assertThat(create.version(), equalTo(1l));
+
+ engine.flush();
+
+ create = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, 0);
+ try {
+ engine.create(create);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // all is well
+ }
+ }
+
+ // Replica-side versioning: replaying the latest version works, replaying an
+ // older version conflicts, and re-replaying the current version is tolerated.
+ @Test
+ public void testVersioningReplicaConflict1() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // apply the second index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc, index.version(), VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now, the old one should not work
+ index = new Engine.Index(null, newUid("1"), doc, 1l, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ try {
+ replicaEngine.index(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // second version on replica should fail as well
+ // NOTE(review): there is no fail() after replicaEngine.index below, so this
+ // block passes whether or not a conflict is thrown; the assert inside the
+ // try suggests success may be tolerated — intent is ambiguous, confirm.
+ try {
+ index = new Engine.Index(null, newUid("1"), doc, 2l
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(2l));
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+ // Replica-side versioning with an out-of-order delete: the replica applies
+ // the delete at version 3 (skipping the intermediate index at version 2);
+ // afterwards both a repeat of the delete and the skipped index op must
+ // conflict on the replica.
+ @Test
+ public void testVersioningReplicaConflict2() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // apply the first index to the replica, should work fine
+ index = new Engine.Index(null, newUid("1"), doc, 1l
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // index it again
+ index = new Engine.Index(null, newUid("1"), doc);
+ engine.index(index);
+ assertThat(index.version(), equalTo(2l));
+
+ // now delete it
+ Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"));
+ engine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // apply the delete on the replica (skipping the second index)
+ delete = new Engine.Delete("test", "1", newUid("1"), 3l
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false);
+ replicaEngine.delete(delete);
+ assertThat(delete.version(), equalTo(3l));
+
+ // second time delete with same version should fail
+ try {
+ delete = new Engine.Delete("test", "1", newUid("1"), 3l
+ , VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0, false);
+ replicaEngine.delete(delete);
+ fail("expected VersionConflictEngineException to be thrown"); // BUGFIX(review): message typo "excepted" corrected
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+
+ // now do the second index on the replica, it should fail
+ try {
+ index = new Engine.Index(null, newUid("1"), doc, 2l, VersionType.INTERNAL.versionTypeForReplicationAndRecovery(), REPLICA, 0);
+ replicaEngine.index(index);
+ fail("expected VersionConflictEngineException to be thrown"); // BUGFIX(review): message typo "excepted" corrected
+ } catch (VersionConflictEngineException e) {
+ // all is well
+ }
+ }
+
+
+ // engine.index(...) returns the "created" flag: true for a first index,
+ // false for an update, and true again after the doc was deleted.
+ @Test
+ public void testBasicCreatedFlag() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ assertTrue(engine.index(index));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ assertFalse(engine.index(index));
+
+ // NOTE(review): type is null here while other deletes pass "test" —
+ // presumably matched by uid only; confirm against Engine.Delete semantics
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ assertTrue(engine.index(index));
+ }
+
+ // The "created" flag must still be true when the delete happened before a
+ // flush (i.e. the tombstone only lives in committed state).
+ @Test
+ public void testCreatedFlagAfterFlush() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ Engine.Index index = new Engine.Index(null, newUid("1"), doc);
+ assertTrue(engine.index(index));
+
+ engine.delete(new Engine.Delete(null, "1", newUid("1")));
+
+ engine.flush();
+
+ index = new Engine.Index(null, newUid("1"), doc);
+ assertTrue(engine.index(index));
+ }
+
+ // Log4j appender used by the infoStream tests below: records whether TRACE
+ // messages for shard [index][1] were seen from the IndexWriter logger
+ // ("...lucene.iw") and from the IndexFileDeleter logger ("...lucene.iw.ifd").
+ private static class MockAppender extends AppenderSkeleton {
+ // set when a "lucene.iw" TRACE message about applying deletes is seen
+ public boolean sawIndexWriterMessage;
+
+ // set when any "lucene.iw.ifd" TRACE message is seen
+ public boolean sawIndexWriterIFDMessage;
+
+ @Override
+ protected void append(LoggingEvent event) {
+ if (event.getLevel() == Level.TRACE && event.getMessage().toString().contains("[index][1] ")) {
+ if (event.getLoggerName().endsWith("lucene.iw") &&
+ event.getMessage().toString().contains("IW: apply all deletes during flush")) {
+ sawIndexWriterMessage = true;
+ }
+ if (event.getLoggerName().endsWith("lucene.iw.ifd")) {
+ sawIndexWriterIFDMessage = true;
+ }
+ }
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ // this appender only inspects events; it never formats them
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+ }
+
+ // #5891: make sure IndexWriter's infoStream output is
+ // sent to lucene.iw with log level TRACE:
+
+ @Test
+ public void testIndexWriterInfoStream() {
+ // under -verbose the infoStream is redirected and this check is meaningless
+ assumeFalse("who tests the tester?", VERBOSE);
+ MockAppender mockAppender = new MockAppender();
+
+ Logger rootLogger = Logger.getRootLogger();
+ Level savedLevel = rootLogger.getLevel();
+ rootLogger.addAppender(mockAppender);
+ rootLogger.setLevel(Level.DEBUG);
+
+ try {
+ // First, with DEBUG, which should NOT log IndexWriter output:
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ engine.flush();
+ assertFalse(mockAppender.sawIndexWriterMessage);
+
+ // Again, with TRACE, which should log IndexWriter output:
+ rootLogger.setLevel(Level.TRACE);
+ engine.create(new Engine.Create(null, newUid("2"), doc));
+ engine.flush();
+ assertTrue(mockAppender.sawIndexWriterMessage);
+
+ } finally {
+ // always restore global logging state for other tests
+ rootLogger.removeAppender(mockAppender);
+ rootLogger.setLevel(savedLevel);
+ }
+ }
+
+ // #8603: make sure we can separately log IFD's messages
+ public void testIndexWriterIFDInfoStream() {
+ assumeFalse("who tests the tester?", VERBOSE);
+ MockAppender mockAppender = new MockAppender();
+
+ // Works when running this test inside Intellij:
+ Logger iwIFDLogger = LogManager.exists("org.elasticsearch.index.engine.lucene.iw.ifd");
+ if (iwIFDLogger == null) {
+ // Works when running this test from command line:
+ iwIFDLogger = LogManager.exists("index.engine.lucene.iw.ifd");
+ assertNotNull(iwIFDLogger);
+ }
+
+ iwIFDLogger.addAppender(mockAppender);
+ iwIFDLogger.setLevel(Level.DEBUG);
+
+ try {
+ // First, with DEBUG, which should NOT log IndexWriter output:
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ engine.create(new Engine.Create(null, newUid("1"), doc));
+ engine.flush();
+ assertFalse(mockAppender.sawIndexWriterMessage);
+ assertFalse(mockAppender.sawIndexWriterIFDMessage);
+
+ // Again, with TRACE, which should only log IndexWriter IFD output:
+ iwIFDLogger.setLevel(Level.TRACE);
+ engine.create(new Engine.Create(null, newUid("2"), doc));
+ engine.flush();
+ // only the IFD logger was raised to TRACE, so iw output must stay silent
+ assertFalse(mockAppender.sawIndexWriterMessage);
+ assertTrue(mockAppender.sawIndexWriterIFDMessage);
+
+ } finally {
+ iwIFDLogger.removeAppender(mockAppender);
+ iwIFDLogger.setLevel(null);
+ }
+ }
+
+ // With GC of deletes disabled, delete tombstones must be kept alive so that
+ // too-old external versions still raise conflicts even after time passes
+ // (hence @Slow: the test sleeps to give the pruning logic a chance to run).
+ @Slow
+ @Test
+ public void testEnableGcDeletes() throws Exception {
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+ try (Store store = createStore();
+ Engine engine = new InternalEngine(config(indexSettingsService, store, createTempDir(), createMergeScheduler(indexSettingsService)), false)) {
+ engine.config().setEnableGcDeletes(false);
+
+ // Add document
+ Document document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, null);
+ engine.index(new Engine.Index(null, newUid("1"), doc, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+
+ // Delete document we just added:
+ engine.delete(new Engine.Delete("test", "1", newUid("1"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+
+ // Get should not find the document
+ Engine.GetResult getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+
+ // Give the gc pruning logic a chance to kick in
+ Thread.sleep(1000);
+
+ if (randomBoolean()) {
+ engine.refresh("test");
+ }
+
+ // Delete non-existent document
+ engine.delete(new Engine.Delete("test", "2", newUid("2"), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+
+ // Get should not find the document (we never indexed uid=2):
+ getResult = engine.get(new Engine.Get(true, newUid("2")));
+ assertThat(getResult.exists(), equalTo(false));
+
+ // Try to index uid=1 with a too-old version, should fail:
+ try {
+ engine.index(new Engine.Index(null, newUid("1"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ fail("did not hit expected exception");
+ } catch (VersionConflictEngineException vcee) {
+ // expected
+ }
+
+ // Get should still not find the document
+ getResult = engine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+
+ // Try to index uid=2 with a too-old version, should fail:
+ try {
+ engine.index(new Engine.Index(null, newUid("2"), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ fail("did not hit expected exception");
+ } catch (VersionConflictEngineException vcee) {
+ // expected
+ }
+
+ // Get should not find the document
+ getResult = engine.get(new Engine.Get(true, newUid("2")));
+ assertThat(getResult.exists(), equalTo(false));
+ }
+ }
+
+ // Builds the "_uid" term used throughout these tests to address a document.
+ protected Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+
+ // ShardUtils.extractShardId must recover the engine's shard id from any
+ // reader obtained through the engine's searcher.
+ @Test
+ public void testExtractShardId() {
+ try (Engine.Searcher test = this.engine.acquireSearcher("test")) {
+ ShardId shardId = ShardUtils.extractShardId(test.reader());
+ assertNotNull(shardId);
+ assertEquals(shardId, engine.config().getShardId());
+ }
+ }
+
+ /**
+ * Random test that throws random exception and ensures all references are
+ * counted down / released and resources are closed.
+ */
+ @Test
+ public void testFailStart() throws IOException {
+ // this test fails if any reader, searcher or directory is not closed - MDW FTW
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ MockDirectoryWrapper wrapper = newMockDirectory();
+ wrapper.setFailOnOpenInput(randomBoolean());
+ wrapper.setAllowRandomFileNotFoundException(randomBoolean());
+ wrapper.setRandomIOExceptionRate(randomDouble());
+ wrapper.setRandomIOExceptionRateOnOpen(randomDouble());
+ final Path translogPath = createTempDir("testFailStart");
+ try (Store store = createStore(wrapper)) {
+ int refCount = store.refCount();
+ assertTrue("refCount: " + store.refCount(), store.refCount() > 0);
+ InternalEngine holder;
+ try {
+ holder = createEngine(store, translogPath);
+ } catch (EngineCreationFailureException ex) {
+ assertEquals(store.refCount(), refCount);
+ continue;
+ }
+ assertEquals(store.refCount(), refCount + 1);
+ final int numStarts = scaledRandomIntBetween(1, 5);
+ for (int j = 0; j < numStarts; j++) {
+ try {
+ assertEquals(store.refCount(), refCount + 1);
+ holder.close();
+ holder = createEngine(store, translogPath);
+ assertEquals(store.refCount(), refCount + 1);
+ } catch (EngineCreationFailureException ex) {
+ // all is fine
+ assertEquals(store.refCount(), refCount);
+ break;
+ }
+ }
+ holder.close();
+ assertEquals(store.refCount(), refCount);
+ }
+ }
+ }
+
+ // Engine config values (codec, index concurrency) must be reflected in the
+ // live IndexWriter config, and the dynamic-settings module must expose the
+ // engine's tunable settings.
+ @Test
+ public void testSettings() {
+ CodecService codecService = new CodecService(shardId.index());
+ LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
+
+ assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(engine.config().getIndexConcurrency(), indexConcurrency);
+ assertEquals(currentIndexWriterConfig.getMaxThreadStates(), indexConcurrency);
+
+
+ IndexDynamicSettingsModule settings = new IndexDynamicSettingsModule();
+ assertTrue(settings.containsSetting(EngineConfig.INDEX_COMPOUND_ON_FLUSH));
+ assertTrue(settings.containsSetting(EngineConfig.INDEX_GC_DELETES_SETTING));
+ }
+
+ // Auto-generated-id retry semantics: a retried create (canHaveDuplicates=true)
+ // on the primary must not duplicate the doc, and the replica replay of the
+ // retry may conflict but must still leave exactly one copy.
+ @Test
+ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException {
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ boolean canHaveDuplicates = false;
+ boolean autoGeneratedId = true;
+
+ Engine.Create index = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(index);
+ assertThat(index.version(), equalTo(1l));
+
+ index = new Engine.Create(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ replicaEngine.create(index);
+ assertThat(index.version(), equalTo(1l));
+
+ // simulate the retry: same uid, now flagged as possibly duplicated
+ canHaveDuplicates = true;
+ index = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(index);
+ assertThat(index.version(), equalTo(1l));
+ engine.refresh("test");
+ Engine.Searcher searcher = engine.acquireSearcher("test");
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ // still exactly one copy on the primary
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ index = new Engine.Create(null, newUid("1"), doc, index.version(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ try {
+ replicaEngine.create(index);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // we ignore version conflicts on replicas, see TransportReplicationAction.ignoreReplicaException
+ }
+ replicaEngine.refresh("test");
+ Engine.Searcher replicaSearcher = replicaEngine.acquireSearcher("test");
+ topDocs = replicaSearcher.searcher().search(new MatchAllDocsQuery(), 10);
+ // and exactly one copy on the replica as well
+ assertThat(topDocs.totalHits, equalTo(1));
+ // NOTE(review): the searchers are not closed in a finally block; an earlier
+ // assertion failure would leak them — acceptable in tests, but worth noting
+ searcher.close();
+ replicaSearcher.close();
+ }
+
+ @Test
+ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException {
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
+ boolean canHaveDuplicates = true;
+ boolean autoGeneratedId = true;
+
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+
+ Engine.Create firstIndexRequestReplica = new Engine.Create(null, newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ replicaEngine.create(firstIndexRequestReplica);
+ assertThat(firstIndexRequestReplica.version(), equalTo(1l));
+
+ canHaveDuplicates = false;
+ Engine.Create secondIndexRequest = new Engine.Create(null, newUid("1"), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ try {
+ engine.create(secondIndexRequest);
+ fail();
+ } catch (DocumentAlreadyExistsException e) {
+ // we can ignore the exception. In case this happens because the retry request arrived first then this error will not be sent back anyway.
+ // in any other case this is an actual error
+ }
+ engine.refresh("test");
+ Engine.Searcher searcher = engine.acquireSearcher("test");
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertThat(topDocs.totalHits, equalTo(1));
+
+ Engine.Create secondIndexRequestReplica = new Engine.Create(null, newUid("1"), doc, firstIndexRequest.version(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ try {
+ replicaEngine.create(secondIndexRequestReplica);
+ fail();
+ } catch (VersionConflictEngineException e) {
+ // we ignore version conflicts on replicas, see TransportReplicationAction.ignoreReplicaException.
+ }
+ replicaEngine.refresh("test");
+ Engine.Searcher replicaSearcher = replicaEngine.acquireSearcher("test");
+ topDocs = replicaSearcher.searcher().search(new MatchAllDocsQuery(), 10);
+ assertThat(topDocs.totalHits, equalTo(1));
+ searcher.close();
+ replicaSearcher.close();
+ }
+
+ // #10312
+ @Test
+ public void testDeletesAloneCanTriggerRefresh() throws Exception {
+ // Tiny indexing buffer:
+ Settings indexSettings = Settings.builder().put(defaultSettings)
+ .put(EngineConfig.INDEX_BUFFER_SIZE_SETTING, "1kb").build();
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), indexSettings);
+ try (Store store = createStore();
+ Engine engine = new InternalEngine(config(indexSettingsService, store, createTempDir(), createMergeScheduler(indexSettingsService)),
+ false)) {
+ for (int i = 0; i < 100; i++) {
+ String id = Integer.toString(i);
+ ParsedDocument doc = testParsedDocument(id, id, "test", null, -1, -1, testDocument(), B_1, null);
+ engine.index(new Engine.Index(null, newUid(id), doc, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()));
+ }
+
+ // Force merge so we know all merges are done before we start deleting:
+ engine.forceMerge(true, 1, false, false, false);
+
+ Searcher s = engine.acquireSearcher("test");
+ final long version1 = ((DirectoryReader) s.reader()).getVersion();
+ s.close();
+ for (int i = 0; i < 100; i++) {
+ String id = Integer.toString(i);
+ engine.delete(new Engine.Delete("test", id, newUid(id), 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), false));
+ }
+
+ // We must assertBusy because refresh due to version map being full is done in background (REFRESH) thread pool:
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ Searcher s2 = engine.acquireSearcher("test");
+ long version2 = ((DirectoryReader) s2.reader()).getVersion();
+ s2.close();
+
+ // 100 buffered deletes will easily exceed 25% of our 1 KB indexing buffer so it should have forced a refresh:
+ assertThat(version2, greaterThan(version1));
+ }
+ });
+ }
+ }
+
+ public void testMissingTranslog() throws IOException {
+ // test that we can force start the engine , even if the translog is missing.
+ engine.close();
+ // fake a new translog, causing the engine to point to a missing one.
+ Translog translog = createTranslog();
+ long id = translog.currentFileGeneration();
+ translog.close();
+ IOUtils.rm(translog.location().resolve(Translog.getFilename(id)));
+ try {
+ engine = createEngine(store, primaryTranslogDir);
+ fail("engine shouldn't start without a valid translog id");
+ } catch (EngineCreationFailureException ex) {
+ // expected
+ }
+ // now it should be OK.
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings)
+ .put(EngineConfig.INDEX_FORCE_NEW_TRANSLOG, true).build());
+ engine = createEngine(indexSettingsService, store, primaryTranslogDir, createMergeScheduler(indexSettingsService));
+ }
+
+ public void testTranslogReplayWithFailure() throws IOException {
+ boolean canHaveDuplicates = true;
+ boolean autoGeneratedId = true;
+ final int numDocs = randomIntBetween(1, 10);
+ for (int i = 0; i < numDocs; i++) {
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+ }
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ engine.close();
+ boolean recoveredButFailed = false;
+ final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (directory != null) {
+ // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
+ // this so we have to disable the check explicitly
+ directory.setPreventDoubleWrite(false);
+ boolean started = false;
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ directory.setRandomIOExceptionRateOnOpen(randomDouble());
+ directory.setRandomIOExceptionRate(randomDouble());
+ directory.setFailOnOpenInput(randomBoolean());
+ directory.setAllowRandomFileNotFoundException(randomBoolean());
+ try {
+ engine = createEngine(store, primaryTranslogDir);
+ started = true;
+ break;
+ } catch (EngineCreationFailureException ex) {
+ }
+ }
+
+ directory.setRandomIOExceptionRateOnOpen(0.0);
+ directory.setRandomIOExceptionRate(0.0);
+ directory.setFailOnOpenInput(false);
+ directory.setAllowRandomFileNotFoundException(false);
+ if (started == false) {
+ engine = createEngine(store, primaryTranslogDir);
+ }
+ } else {
+ // no mock directory, no fun.
+ engine = createEngine(store, primaryTranslogDir);
+ }
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ }
+
+ @Test
+ public void testSkipTranslogReplay() throws IOException {
+ boolean canHaveDuplicates = true;
+ boolean autoGeneratedId = true;
+ final int numDocs = randomIntBetween(1, 10);
+ for (int i = 0; i < numDocs; i++) {
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+ }
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (directory != null) {
+ // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
+ // this so we have to disable the check explicitly
+ directory.setPreventDoubleWrite(false);
+ }
+ engine.close();
+ engine = new InternalEngine(engine.config(), true);
+
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(0));
+ }
+
+ }
+
+ private Mapping dynamicUpdate() {
+ BuilderContext context = new BuilderContext(Settings.EMPTY, new ContentPath());
+ final RootObjectMapper root = MapperBuilders.rootObject("some_type").build(context);
+ return new Mapping(Version.CURRENT, root, new RootMapper[0], new Mapping.SourceTransform[0], ImmutableMap.<String, Object>of());
+ }
+
+ public void testTranslogReplay() throws IOException {
+ boolean canHaveDuplicates = true;
+ boolean autoGeneratedId = true;
+ final int numDocs = randomIntBetween(1, 10);
+ for (int i = 0; i < numDocs; i++) {
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+ }
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (directory != null) {
+ // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
+ // this so we have to disable the check explicitly
+ directory.setPreventDoubleWrite(false);
+ }
+
+ TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
+ parser.mappingUpdate = dynamicUpdate();
+
+ engine.close();
+ engine = new InternalEngine(engine.config(), false); // we need to reuse the engine config unless the parser.mappingModified won't work
+
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
+ assertEquals(numDocs, parser.recoveredOps.get());
+ if (parser.mappingUpdate != null) {
+ assertEquals(1, parser.getRecoveredTypes().size());
+ assertTrue(parser.getRecoveredTypes().containsKey("test"));
+ } else {
+ assertEquals(0, parser.getRecoveredTypes().size());
+ }
+
+ engine.close();
+ engine = createEngine(store, primaryTranslogDir);
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
+ assertEquals(0, parser.recoveredOps.get());
+
+ final boolean flush = randomBoolean();
+ int randomId = randomIntBetween(numDocs + 1, numDocs + 10);
+ String uuidValue = "test#" + Integer.toString(randomId);
+ ParsedDocument doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid(uuidValue), doc, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+ if (flush) {
+ engine.flush();
+ }
+
+ doc = testParsedDocument(uuidValue, Integer.toString(randomId), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Index idxRequest = new Engine.Index(null, newUid(uuidValue), doc, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime());
+ engine.index(idxRequest);
+ engine.refresh("test");
+ assertThat(idxRequest.version(), equalTo(2l));
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
+ assertThat(topDocs.totalHits, equalTo(numDocs + 1));
+ }
+
+ engine.close();
+ engine = createEngine(store, primaryTranslogDir);
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs + 1);
+ assertThat(topDocs.totalHits, equalTo(numDocs + 1));
+ }
+ parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
+ assertEquals(flush ? 1 : 2, parser.recoveredOps.get());
+ engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(uuidValue)));
+ if (randomBoolean()) {
+ engine.refresh("test");
+ } else {
+ engine.close();
+ engine = createEngine(store, primaryTranslogDir);
+ }
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), numDocs);
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ }
+
+ public static class TranslogHandler extends TranslogRecoveryPerformer {
+
+ private final DocumentMapper docMapper;
+ public Mapping mappingUpdate = null;
+
+ public final AtomicInteger recoveredOps = new AtomicInteger(0);
+
+ public TranslogHandler(String indexName) {
+ super(null, new MapperAnalyzer(null), null, null, null);
+ Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ RootObjectMapper.Builder rootBuilder = new RootObjectMapper.Builder("test");
+ Index index = new Index(indexName);
+ AnalysisService analysisService = new AnalysisService(index, settings);
+ SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
+ MapperService mapperService = new MapperService(index, settings, analysisService, null, similarityLookupService, null);
+ DocumentMapper.Builder b = new DocumentMapper.Builder(indexName, settings, rootBuilder);
+ DocumentMapperParser parser = new DocumentMapperParser(index, settings, mapperService, analysisService, similarityLookupService, null);
+ this.docMapper = b.build(mapperService, parser);
+
+ }
+
+ @Override
+ protected Tuple<DocumentMapper, Mapping> docMapper(String type) {
+ return new Tuple<>(docMapper, mappingUpdate);
+ }
+
+ @Override
+ protected void operationProcessed() {
+ recoveredOps.incrementAndGet();
+ }
+ }
+
+ public void testRecoverFromForeignTranslog() throws IOException {
+ boolean canHaveDuplicates = true;
+ boolean autoGeneratedId = true;
+ final int numDocs = randomIntBetween(1, 10);
+ for (int i = 0; i < numDocs; i++) {
+ ParsedDocument doc = testParsedDocument(Integer.toString(i), Integer.toString(i), "test", null, -1, -1, testDocument(), new BytesArray("{}"), null);
+ Engine.Create firstIndexRequest = new Engine.Create(null, newUid(Integer.toString(i)), doc, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), canHaveDuplicates, autoGeneratedId);
+ engine.create(firstIndexRequest);
+ assertThat(firstIndexRequest.version(), equalTo(1l));
+ }
+ engine.refresh("test");
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (directory != null) {
+ // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
+ // this so we have to disable the check explicitly
+ directory.setPreventDoubleWrite(false);
+ }
+ Translog.TranslogGeneration generation = engine.getTranslog().getGeneration();
+ engine.close();
+
+ Translog translog = new Translog(new TranslogConfig(shardId, createTempDir(), Settings.EMPTY, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool));
+ translog.add(new Translog.Create("test", "SomeBogusId", "{}".getBytes(Charset.forName("UTF-8"))));
+ assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
+ translog.close();
+
+ EngineConfig config = engine.config();
+ /* create a TranslogConfig that has been created with a different UUID */
+ TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
+
+ EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettingsService()
+ , null, store, createSnapshotDeletionPolicy(), createMergePolicy(), config.getMergeScheduler(),
+ config.getAnalyzer(), config.getSimilarity(), new CodecService(shardId.index()), config.getFailedEngineListener()
+ , config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
+
+ try {
+ new InternalEngine(brokenConfig, false);
+ fail("translog belongs to a different engine");
+ } catch (EngineCreationFailureException ex) {
+ }
+
+ engine = createEngine(store, primaryTranslogDir); // and recover again!
+ try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
+ TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
+ assertThat(topDocs.totalHits, equalTo(numDocs));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
new file mode 100644
index 0000000000..29783391a1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
@@ -0,0 +1,978 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LiveIndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.indexing.ShardIndexingService;
+import org.elasticsearch.index.indexing.slowlog.ShardSlowLogIndexingService;
+import org.elasticsearch.index.mapper.Mapping;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider;
+import org.elasticsearch.index.merge.policy.MergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerProvider;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardUtils;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.DirectoryUtils;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.hamcrest.MatcherAssert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * TODO: document me!
+ */
+public class ShadowEngineTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected ThreadPool threadPool;
+
+ private Store store;
+ private Store storeReplica;
+
+
+ protected Engine primaryEngine;
+ protected Engine replicaEngine;
+
+ private Settings defaultSettings;
+ private int indexConcurrency;
+ private String codecName;
+ private Path dirPath;
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ CodecService codecService = new CodecService(shardId.index());
+ indexConcurrency = randomIntBetween(1, 20);
+ String name = Codec.getDefault().getName();
+ if (Arrays.asList(codecService.availableCodecs()).contains(name)) {
+ // some codecs are read only so we only take the ones that we have in the service and randomly
+ // selected by lucene test case.
+ codecName = name;
+ } else {
+ codecName = "default";
+ }
+ defaultSettings = Settings.builder()
+ .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean())
+ .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
+ .put(EngineConfig.INDEX_CODEC_SETTING, codecName)
+ .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency)
+ .build(); // TODO randomize more settings
+ threadPool = new ThreadPool(getClass().getName());
+ dirPath = createTempDir();
+ store = createStore(dirPath);
+ storeReplica = createStore(dirPath);
+ Lucene.cleanLuceneIndex(store.directory());
+ Lucene.cleanLuceneIndex(storeReplica.directory());
+ primaryEngine = createInternalEngine(store, createTempDir("translog-primary"));
+ LiveIndexWriterConfig currentIndexWriterConfig = ((InternalEngine)primaryEngine).getCurrentIndexWriterConfig();
+
+ assertEquals(primaryEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName());
+ if (randomBoolean()) {
+ primaryEngine.config().setEnableGcDeletes(false);
+ }
+
+ replicaEngine = createShadowEngine(storeReplica);
+
+ assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ if (randomBoolean()) {
+ replicaEngine.config().setEnableGcDeletes(false);
+ }
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ replicaEngine.close();
+ storeReplica.close();
+ primaryEngine.close();
+ store.close();
+ terminate(threadPool);
+ }
+
+ private ParseContext.Document testDocumentWithTextField() {
+ ParseContext.Document document = testDocument();
+ document.add(new TextField("value", "test", Field.Store.YES));
+ return document;
+ }
+
+ private ParseContext.Document testDocument() {
+ return new ParseContext.Document();
+ }
+
+
+ private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) {
+ Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
+ Field versionField = new NumericDocValuesField("_version", 0);
+ document.add(uidField);
+ document.add(versionField);
+ return new ParsedDocument(uidField, versionField, id, type, routing, timestamp, ttl, Arrays.asList(document), source, mappingsUpdate);
+ }
+
+ protected Store createStore(Path p) throws IOException {
+ return createStore(newMockFSDirectory(p));
+ }
+
+ protected Store createStore(final Directory directory) throws IOException {
+ final DirectoryService directoryService = new DirectoryService(shardId, EMPTY_SETTINGS) {
+ @Override
+ public Directory newDirectory() throws IOException {
+ return directory;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+ };
+ return new Store(shardId, EMPTY_SETTINGS, directoryService, new DummyShardLock(shardId));
+ }
+
+ protected IndexDeletionPolicy createIndexDeletionPolicy() {
+ return new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS);
+ }
+
+ protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() {
+ return new SnapshotDeletionPolicy(createIndexDeletionPolicy());
+ }
+
+ protected MergePolicyProvider<?> createMergePolicy() {
+ return new LogByteSizeMergePolicyProvider(store, new IndexSettingsService(new Index("test"), EMPTY_SETTINGS));
+ }
+
+ protected MergeSchedulerProvider createMergeScheduler(IndexSettingsService indexSettingsService) {
+ return new ConcurrentMergeSchedulerProvider(shardId, EMPTY_SETTINGS, threadPool, indexSettingsService);
+ }
+
+ protected ShadowEngine createShadowEngine(Store store) {
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+ return createShadowEngine(indexSettingsService, store, createMergeScheduler(indexSettingsService));
+ }
+
+ protected InternalEngine createInternalEngine(Store store, Path translogPath) {
+ IndexSettingsService indexSettingsService = new IndexSettingsService(shardId.index(), Settings.builder().put(defaultSettings).put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build());
+ return createInternalEngine(indexSettingsService, store, translogPath, createMergeScheduler(indexSettingsService));
+ }
+
+ protected ShadowEngine createShadowEngine(IndexSettingsService indexSettingsService, Store store, MergeSchedulerProvider mergeSchedulerProvider) {
+ return new ShadowEngine(config(indexSettingsService, store, null, mergeSchedulerProvider));
+ }
+
+ protected InternalEngine createInternalEngine(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) {
+ return new InternalEngine(config(indexSettingsService, store, translogPath, mergeSchedulerProvider), true);
+ }
+
+ public EngineConfig config(IndexSettingsService indexSettingsService, Store store, Path translogPath, MergeSchedulerProvider mergeSchedulerProvider) {
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettingsService.getSettings(), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
+ EngineConfig config = new EngineConfig(shardId, threadPool, new ShardIndexingService(shardId, EMPTY_SETTINGS, new ShardSlowLogIndexingService(shardId, EMPTY_SETTINGS, indexSettingsService)), indexSettingsService
+ , null, store, createSnapshotDeletionPolicy(), createMergePolicy(), mergeSchedulerProvider,
+ iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(shardId.index()), new Engine.FailedEngineListener() {
+ @Override
+ public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
+ // we don't need to notify anybody in this test
+ }}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
+ return config;
+ }
+
+ protected Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+
+ protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
+ protected static final BytesReference B_2 = new BytesArray(new byte[]{2});
+ protected static final BytesReference B_3 = new BytesArray(new byte[]{3});
+
+ public void testCommitStats() {
+ // create a doc and refresh
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+
+ CommitStats stats1 = replicaEngine.commitStats();
+ assertThat(stats1.getGeneration(), greaterThan(0l));
+ assertThat(stats1.getId(), notNullValue());
+ assertThat(stats1.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
+
+ // flush the primary engine
+ primaryEngine.flush();
+ // flush on replica to make flush visible
+ replicaEngine.flush();
+
+ CommitStats stats2 = replicaEngine.commitStats();
+ assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration()));
+ assertThat(stats2.getId(), notNullValue());
+ assertThat(stats2.getId(), not(equalTo(stats1.getId())));
+ assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
+ assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));
+ assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))));
+ assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY)));
+ }
+
+
+ @Test
+ public void testSegments() throws Exception {
+ List<Segment> segments = primaryEngine.segments(false);
+ assertThat(segments.isEmpty(), equalTo(true));
+ assertThat(primaryEngine.segmentsStats().getCount(), equalTo(0l));
+ assertThat(primaryEngine.segmentsStats().getMemoryInBytes(), equalTo(0l));
+ final boolean defaultCompound = defaultSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, true);
+
+ // create a doc and refresh
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
+ primaryEngine.create(new Engine.Create(null, newUid("2"), doc2));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(false);
+ assertThat(segments.size(), equalTo(1));
+ SegmentsStats stats = primaryEngine.segmentsStats();
+ assertThat(stats.getCount(), equalTo(1l));
+ assertThat(stats.getTermsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getNormsMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0l));
+ assertThat(segments.get(0).isCommitted(), equalTo(false));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+ assertThat(segments.get(0).ramTree, nullValue());
+
+ // Check that the replica sees nothing
+ segments = replicaEngine.segments(false);
+ assertThat(segments.size(), equalTo(0));
+ stats = replicaEngine.segmentsStats();
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getTermsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getStoredFieldsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getNormsMemoryInBytes(), equalTo(0l));
+ assertThat(stats.getDocValuesMemoryInBytes(), equalTo(0l));
+ assertThat(segments.size(), equalTo(0));
+
+ // flush the primary engine
+ primaryEngine.flush();
+ // refresh the replica
+ replicaEngine.refresh("tests");
+
+ // Check that the primary AND replica sees segments now
+ segments = primaryEngine.segments(false);
+ assertThat(segments.size(), equalTo(1));
+ assertThat(primaryEngine.segmentsStats().getCount(), equalTo(1l));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ segments = replicaEngine.segments(false);
+ assertThat(segments.size(), equalTo(1));
+ assertThat(replicaEngine.segmentsStats().getCount(), equalTo(1l));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+
+ primaryEngine.config().setCompoundOnFlush(false);
+
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ primaryEngine.create(new Engine.Create(null, newUid("3"), doc3));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(primaryEngine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
+ assertThat(primaryEngine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
+ assertThat(primaryEngine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(primaryEngine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
+ assertThat(primaryEngine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+ assertThat(segments.get(1).isCommitted(), equalTo(false));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ // Make visible to shadow replica
+ primaryEngine.flush();
+ replicaEngine.refresh("test");
+
+ segments = replicaEngine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ assertThat(replicaEngine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(replicaEngine.segmentsStats().getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes()));
+ assertThat(replicaEngine.segmentsStats().getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes()));
+ assertThat(replicaEngine.segmentsStats().getTermVectorsMemoryInBytes(), equalTo(0l));
+ assertThat(replicaEngine.segmentsStats().getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes()));
+ assertThat(replicaEngine.segmentsStats().getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes()));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(2));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+ assertThat(segments.get(1).isCommitted(), equalTo(true));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(false);
+ assertThat(segments.size(), equalTo(2));
+ assertThat(primaryEngine.segmentsStats().getCount(), equalTo(2l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+ assertThat(segments.get(1).isCommitted(), equalTo(true));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ // Make visible to shadow replica
+ primaryEngine.flush();
+ replicaEngine.refresh("test");
+
+ primaryEngine.config().setCompoundOnFlush(true);
+ ParsedDocument doc4 = testParsedDocument("4", "4", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ primaryEngine.create(new Engine.Create(null, newUid("4"), doc4));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(false);
+ assertThat(segments.size(), equalTo(3));
+ assertThat(primaryEngine.segmentsStats().getCount(), equalTo(3l));
+ assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+ assertThat(segments.get(0).isCommitted(), equalTo(true));
+ assertThat(segments.get(0).isSearch(), equalTo(true));
+ assertThat(segments.get(0).getNumDocs(), equalTo(1));
+ assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+ assertThat(segments.get(0).isCompound(), equalTo(defaultCompound));
+
+ assertThat(segments.get(1).isCommitted(), equalTo(true));
+ assertThat(segments.get(1).isSearch(), equalTo(true));
+ assertThat(segments.get(1).getNumDocs(), equalTo(1));
+ assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(1).isCompound(), equalTo(false));
+
+ assertThat(segments.get(2).isCommitted(), equalTo(false));
+ assertThat(segments.get(2).isSearch(), equalTo(true));
+ assertThat(segments.get(2).getNumDocs(), equalTo(1));
+ assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+ assertThat(segments.get(2).isCompound(), equalTo(true));
+ }
+
+ @Test
+ public void testVerboseSegments() throws Exception {
+ List<Segment> segments = primaryEngine.segments(true);
+ assertThat(segments.isEmpty(), equalTo(true));
+
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(true);
+ assertThat(segments.size(), equalTo(1));
+ assertThat(segments.get(0).ramTree, notNullValue());
+
+ ParsedDocument doc2 = testParsedDocument("2", "2", "test", null, -1, -1, testDocumentWithTextField(), B_2, null);
+ primaryEngine.create(new Engine.Create(null, newUid("2"), doc2));
+ primaryEngine.refresh("test");
+ ParsedDocument doc3 = testParsedDocument("3", "3", "test", null, -1, -1, testDocumentWithTextField(), B_3, null);
+ primaryEngine.create(new Engine.Create(null, newUid("3"), doc3));
+ primaryEngine.refresh("test");
+
+ segments = primaryEngine.segments(true);
+ assertThat(segments.size(), equalTo(3));
+ assertThat(segments.get(0).ramTree, notNullValue());
+ assertThat(segments.get(1).ramTree, notNullValue());
+ assertThat(segments.get(2).ramTree, notNullValue());
+
+ // Now make the changes visible to the replica
+ primaryEngine.flush();
+ replicaEngine.refresh("test");
+
+ segments = replicaEngine.segments(true);
+ assertThat(segments.size(), equalTo(3));
+ assertThat(segments.get(0).ramTree, notNullValue());
+ assertThat(segments.get(1).ramTree, notNullValue());
+ assertThat(segments.get(2).ramTree, notNullValue());
+
+ }
+
+ @Test
+ public void testShadowEngineIgnoresWriteOperations() throws Exception {
+ // create a document
+ ParseContext.Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ try {
+ replicaEngine.create(new Engine.Create(null, newUid("1"), doc));
+ fail("should have thrown an exception");
+ } catch (UnsupportedOperationException e) {}
+ replicaEngine.refresh("test");
+
+ // its not there...
+ Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+ Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // index a document
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ try {
+ replicaEngine.index(new Engine.Index(null, newUid("1"), doc));
+ fail("should have thrown an exception");
+ } catch (UnsupportedOperationException e) {}
+ replicaEngine.refresh("test");
+
+ // its still not there...
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+ getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // Now, add a document to the primary so we can test shadow engine deletes
+ document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+ primaryEngine.flush();
+ replicaEngine.refresh("test");
+
+ // Now the replica can see it
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+
+ // And the replica can retrieve it
+ getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // try to delete it on the replica
+ try {
+ replicaEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ fail("should have thrown an exception");
+ } catch (UnsupportedOperationException e) {}
+ replicaEngine.flush();
+ replicaEngine.refresh("test");
+ primaryEngine.refresh("test");
+
+ // it's still there!
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+ getResult = replicaEngine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // it's still there on the primary also!
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+ getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+ }
+
+ @Test
+ public void testSimpleOperations() throws Exception {
+ Engine.Searcher searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.close();
+
+ // create a document
+ ParseContext.Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ // not on the replica either...
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ // but, we can still get it (in realtime)
+ Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_1.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // can't get it from the replica, because it's not in the translog for a shadow replica
+ getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // but, not there non realtime
+ getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+ // refresh and it should be there
+ primaryEngine.refresh("test");
+
+ // now its there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+
+ // also in non realtime
+ getResult = primaryEngine.get(new Engine.Get(false, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // still not in the replica because no flush
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ document.add(new Field(SourceFieldMapper.NAME, B_2.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_2, null);
+ primaryEngine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // its not updated yet...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // but, we can still get it (in realtime)
+ getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source().source.toBytesArray(), equalTo(B_2.toBytesArray()));
+ assertThat(getResult.docIdAndVersion(), nullValue());
+ getResult.release();
+
+ // refresh and it should be updated
+ primaryEngine.refresh("test");
+
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // flush, now shadow replica should have the files
+ primaryEngine.flush();
+
+ // still not in the replica because the replica hasn't refreshed
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+ replicaEngine.refresh("test");
+
+ // the replica finally sees it because primary has flushed and replica refreshed
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // now delete
+ primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+
+ // its not deleted yet
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // but, get should not see it (in realtime)
+ getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(false));
+ getResult.release();
+
+ // refresh and it should be deleted
+ primaryEngine.refresh("test");
+
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // add it back
+ document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // refresh and it should be there
+ primaryEngine.refresh("test");
+
+ // now its there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // now flush
+ primaryEngine.flush();
+
+ // and, verify get (in real time)
+ getResult = primaryEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // the replica should see it if we refresh too!
+ replicaEngine.refresh("test");
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+ getResult = replicaEngine.get(new Engine.Get(true, newUid("1")));
+ assertThat(getResult.exists(), equalTo(true));
+ assertThat(getResult.source(), nullValue());
+ assertThat(getResult.docIdAndVersion(), notNullValue());
+ getResult.release();
+
+ // make sure we can still work with the engine
+ // now do an update
+ document = testDocument();
+ document.add(new TextField("value", "test1", Field.Store.YES));
+ doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ primaryEngine.index(new Engine.Index(null, newUid("1"), doc));
+
+ // its not updated yet...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0));
+ searchResult.close();
+
+ // refresh and it should be updated
+ primaryEngine.refresh("test");
+
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+
+ // Make visible to shadow replica
+ primaryEngine.flush();
+ replicaEngine.refresh("test");
+
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1));
+ searchResult.close();
+ }
+
+ @Test
+ public void testSearchResultRelease() throws Exception {
+ Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ searchResult.close();
+
+ // create a document
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+
+ // its not there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0));
+ searchResult.close();
+
+        // flush & refresh and it should be everywhere
+ primaryEngine.flush();
+ primaryEngine.refresh("test");
+ replicaEngine.refresh("test");
+
+ // now its there...
+ searchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+
+ searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ // don't release the replica search result yet...
+
+ // delete, refresh and do a new search, it should not be there
+ primaryEngine.delete(new Engine.Delete("test", "1", newUid("1")));
+ primaryEngine.flush();
+ primaryEngine.refresh("test");
+ replicaEngine.refresh("test");
+ Engine.Searcher updateSearchResult = primaryEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
+ updateSearchResult.close();
+
+ // the non released replica search result should not see the deleted yet...
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+ }
+
+ @Test
+ public void testFailEngineOnCorruption() {
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+ primaryEngine.flush();
+ MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class);
+ leaf.setRandomIOExceptionRate(1.0);
+ leaf.setRandomIOExceptionRateOnOpen(1.0);
+ try {
+ replicaEngine.refresh("foo");
+ fail("exception expected");
+ } catch (Exception ex) {
+
+ }
+ try {
+ Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
+ MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
+ searchResult.close();
+ fail("exception expected");
+ } catch (EngineClosedException ex) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testExtractShardId() {
+ try (Engine.Searcher test = replicaEngine.acquireSearcher("test")) {
+ ShardId shardId = ShardUtils.extractShardId(test.reader());
+ assertNotNull(shardId);
+ assertEquals(shardId, replicaEngine.config().getShardId());
+ }
+ }
+
+ /**
+     * Random test that throws random exceptions and ensures all references are
+ * counted down / released and resources are closed.
+ */
+ @Test
+ public void testFailStart() throws IOException {
+ // Need a commit point for this
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);
+ primaryEngine.create(new Engine.Create(null, newUid("1"), doc));
+ primaryEngine.flush();
+
+ // this test fails if any reader, searcher or directory is not closed - MDW FTW
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ MockDirectoryWrapper wrapper = newMockFSDirectory(dirPath);
+ wrapper.setFailOnOpenInput(randomBoolean());
+ wrapper.setAllowRandomFileNotFoundException(randomBoolean());
+ wrapper.setRandomIOExceptionRate(randomDouble());
+ wrapper.setRandomIOExceptionRateOnOpen(randomDouble());
+ try (Store store = createStore(wrapper)) {
+ int refCount = store.refCount();
+ assertTrue("refCount: "+ store.refCount(), store.refCount() > 0);
+ ShadowEngine holder;
+ try {
+ holder = createShadowEngine(store);
+ } catch (EngineCreationFailureException ex) {
+ assertEquals(store.refCount(), refCount);
+ continue;
+ }
+ assertEquals(store.refCount(), refCount+1);
+ final int numStarts = scaledRandomIntBetween(1, 5);
+ for (int j = 0; j < numStarts; j++) {
+ try {
+ assertEquals(store.refCount(), refCount + 1);
+ holder.close();
+ holder = createShadowEngine(store);
+ assertEquals(store.refCount(), refCount + 1);
+ } catch (EngineCreationFailureException ex) {
+ // all is fine
+ assertEquals(store.refCount(), refCount);
+ break;
+ }
+ }
+ holder.close();
+ assertEquals(store.refCount(), refCount);
+ }
+ }
+ }
+
+ @Test
+ public void testSettings() {
+ CodecService codecService = new CodecService(shardId.index());
+ assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
+ assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency);
+ }
+
+ @Test
+ public void testShadowEngineCreationRetry() throws Exception {
+ final Path srDir = createTempDir();
+ final Store srStore = createStore(srDir);
+ Lucene.cleanLuceneIndex(srStore.directory());
+
+ final AtomicBoolean succeeded = new AtomicBoolean(false);
+ final CountDownLatch latch = new CountDownLatch(1);
+
+ // Create a shadow Engine, which will freak out because there is no
+ // index yet
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ // ignore interruptions
+ }
+ try (ShadowEngine srEngine = createShadowEngine(srStore)) {
+ succeeded.set(true);
+ } catch (Exception e) {
+ fail("should have been able to create the engine!");
+ }
+ }
+ });
+ t.start();
+
+ // count down latch
+ // now shadow engine should try to be created
+ latch.countDown();
+
+ // Create an InternalEngine, which creates the index so the shadow
+ // replica will handle it correctly
+ Store pStore = createStore(srDir);
+ InternalEngine pEngine = createInternalEngine(pStore, createTempDir("translog-primary"));
+
+ // create a document
+ ParseContext.Document document = testDocumentWithTextField();
+ document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+ ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+ pEngine.create(new Engine.Create(null, newUid("1"), doc));
+ pEngine.flush(true, true);
+
+ t.join();
+ assertTrue("ShadowEngine should have been able to be created", succeeded.get());
+ // (shadow engine is already shut down in the try-with-resources)
+ IOUtils.close(srStore, pEngine, pStore);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
new file mode 100644
index 0000000000..fe5bf83a29
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTests.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public abstract class AbstractFieldDataImplTests extends AbstractFieldDataTests {
+
+ protected String one() {
+ return "1";
+ }
+
+ protected String two() {
+ return "2";
+ }
+
+ protected String three() {
+ return "3";
+ }
+
+ protected String four() {
+ return "4";
+ }
+
+ protected String toString(Object value) {
+ if (value instanceof BytesRef) {
+ return ((BytesRef) value).utf8ToString();
+ }
+ return value.toString();
+ }
+
+ protected abstract void fillSingleValueAllSet() throws Exception;
+
+ protected abstract void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception;
+
+ protected long minRamBytesUsed() {
+ // minimum number of bytes that this fielddata instance is expected to require
+ return 1;
+ }
+
+ @Test
+ public void testDeletedDocs() throws Exception {
+ add2SingleValuedDocumentsAndDeleteOneOfThem();
+ IndexFieldData indexFieldData = getForField("value");
+ LeafReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ SortedBinaryDocValues values = fieldData.getBytesValues();
+ for (int i = 0; i < readerContext.reader().maxDoc(); ++i) {
+ values.setDocument(i);
+ assertThat(values.count(), greaterThanOrEqualTo(1));
+ }
+ }
+
+ @Test
+ public void testSingleValueAllSet() throws Exception {
+ fillSingleValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ LeafReaderContext readerContext = refreshReader();
+ AtomicFieldData fieldData = indexFieldData.load(readerContext);
+ assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
+
+ SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
+
+ bytesValues.setDocument(0);
+ assertThat(bytesValues.count(), equalTo(1));
+ assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(two())));
+ bytesValues.setDocument(1);
+ assertThat(bytesValues.count(), equalTo(1));
+ assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(one())));
+ bytesValues.setDocument(2);
+ assertThat(bytesValues.count(), equalTo(1));
+ assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(three())));
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one()));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[1]).fields[0]), equalTo(two()));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three()));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ protected abstract void fillSingleValueWithMissing() throws Exception;
+
+ public void assertValues(SortedBinaryDocValues values, int docId, BytesRef... actualValues) {
+ values.setDocument(docId);
+ assertThat(values.count(), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.valueAt(i), equalTo(actualValues[i]));
+ }
+ }
+
+ public void assertValues(SortedBinaryDocValues values, int docId, String... actualValues) {
+ values.setDocument(docId);
+ assertThat(values.count(), equalTo(actualValues.length));
+ for (int i = 0; i < actualValues.length; i++) {
+ assertThat(values.valueAt(i), equalTo(new BytesRef(actualValues[i])));
+ }
+ }
+
+
+ @Test
+ public void testSingleValueWithMissing() throws Exception {
+ fillSingleValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
+
+ SortedBinaryDocValues bytesValues = fieldData
+ .getBytesValues();
+
+ assertValues(bytesValues, 0, two());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, three());
+ }
+
+ protected abstract void fillMultiValueAllSet() throws Exception;
+
+ @Test
+ public void testMultiValueAllSet() throws Exception {
+ fillMultiValueAllSet();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
+
+ SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, one());
+ assertValues(bytesValues, 2, three());
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs.length, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ protected abstract void fillMultiValueWithMissing() throws Exception;
+
+ @Test
+ public void testMultiValueWithMissing() throws Exception {
+ fillMultiValueWithMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
+
+ SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
+
+ assertValues(bytesValues, 0, two(), four());
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, three());
+ }
+
+ public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexFieldData indexFieldData = getForField("value");
+ AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+ // Some impls (FST) return size 0 and some (PagedBytes) do take size in the case no actual data is loaded
+ assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(0l));
+
+ SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
+
+ assertValues(bytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(bytesValues, 2, Strings.EMPTY_ARRAY);
+ SortedBinaryDocValues hashedBytesValues = fieldData.getBytesValues();
+
+ assertValues(hashedBytesValues, 0, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 1, Strings.EMPTY_ARRAY);
+ assertValues(hashedBytesValues, 2, Strings.EMPTY_ARRAY);
+ }
+
+ protected abstract void fillAllMissing() throws Exception;
+
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("!08"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("02"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(XFieldComparatorSource.MAX_TERM));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(XFieldComparatorSource.MAX_TERM));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("10"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("08"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("06"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("04"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("03"));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("!10"));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+ assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+ assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+ }
+
+ protected abstract void fillExtendedMvSet() throws Exception;
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
new file mode 100644
index 0000000000..9f9e58853b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTests.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.After;
+import org.junit.Before;
+
+import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+
+public abstract class AbstractFieldDataTests extends ElasticsearchSingleNodeTest {
+
+ protected IndexService indexService;
+ protected IndexFieldDataService ifdService;
+ protected MapperService mapperService;
+ protected IndexWriter writer;
+ protected LeafReaderContext readerContext;
+ protected IndexReader topLevelReader;
+ protected IndicesFieldDataCache indicesFieldDataCache;
+
+ protected abstract FieldDataType getFieldDataType();
+
+ protected boolean hasDocValues() {
+ return false;
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(String fieldName) {
+ return getForField(getFieldDataType(), fieldName, hasDocValues());
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(FieldDataType type, String fieldName) {
+ return getForField(type, fieldName, hasDocValues());
+ }
+
+ public <IFD extends IndexFieldData<?>> IFD getForField(FieldDataType type, String fieldName, boolean docValues) {
+ final FieldMapper mapper;
+ final BuilderContext context = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+ if (type.getType().equals("string")) {
+ mapper = MapperBuilders.stringField(fieldName).tokenized(false).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("float")) {
+ mapper = MapperBuilders.floatField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("double")) {
+ mapper = MapperBuilders.doubleField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("long")) {
+ mapper = MapperBuilders.longField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("int")) {
+ mapper = MapperBuilders.integerField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("short")) {
+ mapper = MapperBuilders.shortField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("byte")) {
+ mapper = MapperBuilders.byteField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("geo_point")) {
+ mapper = MapperBuilders.geoPointField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else if (type.getType().equals("_parent")) {
+ mapper = MapperBuilders.parent().type(fieldName).build(context);
+ } else if (type.getType().equals("binary")) {
+ mapper = MapperBuilders.binaryField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context);
+ } else {
+ throw new UnsupportedOperationException(type.getType());
+ }
+ return ifdService.getForField(mapper);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ Settings settings = Settings.builder().put("index.fielddata.cache", "none").build();
+ indexService = createIndex("test", settings);
+ mapperService = indexService.mapperService();
+ indicesFieldDataCache = indexService.injector().getInstance(IndicesFieldDataCache.class);
+ ifdService = indexService.fieldData();
+ // LogByteSizeMP to preserve doc ID order
+ writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
+ }
+
+ protected LeafReaderContext refreshReader() throws Exception {
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ LeafReader reader = SlowCompositeReaderWrapper.wrap(topLevelReader = DirectoryReader.open(writer, true));
+ readerContext = reader.getContext();
+ return readerContext;
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ if (readerContext != null) {
+ readerContext.reader().close();
+ }
+ writer.close();
+ }
+
+ protected Nested createNested(Filter parentFilter, Filter childFilter) {
+ BitsetFilterCache s = indexService.bitsetFilterCache();
+ return new Nested(s.getBitDocIdSetFilter(parentFilter), s.getBitDocIdSetFilter(childFilter));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
new file mode 100644
index 0000000000..271a0424d9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractNumericFieldDataTests.java
@@ -0,0 +1,512 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.search.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public abstract class AbstractNumericFieldDataTests extends AbstractFieldDataImplTests {
+
+ @Override
+ protected abstract FieldDataType getFieldDataType();
+
+ protected Settings.Builder getFieldDataSettings() {
+ Settings.Builder builder = Settings.builder();
+ IndexFieldData.CommonSettings.MemoryStorageFormat[] formats = IndexFieldData.CommonSettings.MemoryStorageFormat.values();
+ int i = randomInt(formats.length);
+ if (i < formats.length) {
+ builder.put(IndexFieldData.CommonSettings.SETTING_MEMORY_STORAGE_HINT, formats[i].name().toLowerCase(Locale.ROOT));
+ }
+ return builder;
+ }
+
+ @Test
+ public void testSingleValueAllSetNumber() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ SortedNumericDocValues longValues = fieldData.getLongValues();
+
+ assertThat(FieldData.isMultiValued(longValues), equalTo(false));
+
+ longValues.setDocument(0);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(2l));
+
+ longValues.setDocument(1);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(1l));
+
+ longValues.setDocument(2);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(3l));
+
+ SortedNumericDoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(FieldData.isMultiValued(doubleValues), equalTo(false));
+
+ doubleValues.setDocument(0);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(2d));
+
+ doubleValues.setDocument(1);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(1d));
+
+ doubleValues.setDocument(2);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testSingleValueWithMissingNumber() throws Exception {
+ fillSingleValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ SortedNumericDocValues longValues = fieldData.getLongValues();
+
+ assertThat(FieldData.isMultiValued(longValues), equalTo(false));
+
+ longValues.setDocument(0);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(2l));
+
+ longValues.setDocument(1);
+ assertThat(longValues.count(), equalTo(0));
+
+ longValues.setDocument(2);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(3l));
+
+ SortedNumericDoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(FieldData.isMultiValued(doubleValues), equalTo(false));
+
+ doubleValues.setDocument(0);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(2d));
+
+ doubleValues.setDocument(1);
+ assertThat(0, equalTo(doubleValues.count()));
+
+ doubleValues.setDocument(2);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(3d));
+
+ IndexSearcher searcher = new IndexSearcher(readerContext.reader());
+ TopFieldDocs topDocs;
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("1", MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(3));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(1));
+ }
+
+ @Test
+ public void testMultiValueAllSetNumber() throws Exception {
+ fillMultiValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ SortedNumericDocValues longValues = fieldData.getLongValues();
+
+ assertThat(FieldData.isMultiValued(longValues), equalTo(true));
+
+ longValues.setDocument(0);
+ assertThat(longValues.count(), equalTo(2));
+ assertThat(longValues.valueAt(0), equalTo(2l));
+ assertThat(longValues.valueAt(1), equalTo(4l));
+
+ longValues.setDocument(1);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(1l));
+
+ longValues.setDocument(2);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(3l));
+
+ SortedNumericDoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(FieldData.isMultiValued(doubleValues), equalTo(true));
+
+ doubleValues.setDocument(0);
+ assertThat(2, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(2d));
+ assertThat(doubleValues.valueAt(1), equalTo(4d));
+
+ doubleValues.setDocument(1);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(1d));
+
+ doubleValues.setDocument(2);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(3d));
+ }
+
+ @Test
+ public void testMultiValueWithMissingNumber() throws Exception {
+ fillMultiValueWithMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ SortedNumericDocValues longValues = fieldData.getLongValues();
+
+ assertThat(FieldData.isMultiValued(longValues), equalTo(true));
+
+ longValues.setDocument(0);
+ assertThat(longValues.count(), equalTo(2));
+ assertThat(longValues.valueAt(0), equalTo(2l));
+ assertThat(longValues.valueAt(1), equalTo(4l));
+
+ longValues.setDocument(1);
+ assertThat(longValues.count(), equalTo(0));
+
+ longValues.setDocument(2);
+ assertThat(longValues.count(), equalTo(1));
+ assertThat(longValues.valueAt(0), equalTo(3l));
+
+ SortedNumericDoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(FieldData.isMultiValued(doubleValues), equalTo(true));
+
+ doubleValues.setDocument(0);
+ assertThat(2, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(2d));
+ assertThat(doubleValues.valueAt(1), equalTo(4d));
+
+ doubleValues.setDocument(1);
+ assertThat(0, equalTo(doubleValues.count()));
+
+ doubleValues.setDocument(2);
+ assertThat(1, equalTo(doubleValues.count()));
+ assertThat(doubleValues.valueAt(0), equalTo(3d));
+
+ }
+
+ @Override
+ @Test
+ public void testMissingValueForAll() throws Exception {
+ fillAllMissing();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ // long values
+
+ SortedNumericDocValues longValues = fieldData.getLongValues();
+
+ assertThat(FieldData.isMultiValued(longValues), equalTo(false));
+
+ for (int i = 0; i < 3; ++i) {
+ longValues.setDocument(0);
+ assertThat(longValues.count(), equalTo(0));
+ }
+
+ // double values
+
+ SortedNumericDoubleValues doubleValues = fieldData.getDoubleValues();
+
+ assertThat(FieldData.isMultiValued(doubleValues), equalTo(false));
+
+ doubleValues.setDocument(0);
+ assertThat(0, equalTo(doubleValues.count()));
+
+ doubleValues.setDocument(1);
+ assertThat(0, equalTo(doubleValues.count()));
+
+ doubleValues.setDocument(2);
+ assertThat(0, equalTo(doubleValues.count()));
+ }
+
+
+ @Override
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ @Test
+ public void testSortMultiValuesFields() throws Exception {
+ fillExtendedMvSet();
+ IndexFieldData indexFieldData = getForField("value");
+
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-8));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.SUM, null)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.SUM, null), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(27));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(21));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(15));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(6));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-27));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.AVG, null)))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.AVG, null), true))); // defaults to _last
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).intValue(), equalTo(-9));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(1));
+// assertThat(((FieldDoc) topDocs.scoreDocs[6]).fields[0], equalTo(null));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(5));
+// assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("_first", MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("-9", MultiValueMode.MIN, null))));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(6));
+
+ topDocs = searcher.search(new MatchAllDocsQuery(), 10,
+ new Sort(new SortField("value", indexFieldData.comparatorSource("9", MultiValueMode.MAX, null), true)));
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(8));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(6));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(5));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(4));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(topDocs.scoreDocs[5].doc, equalTo(0));
+ assertThat(topDocs.scoreDocs[6].doc, equalTo(2));
+ assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
new file mode 100644
index 0000000000..e9e5839bc0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTests.java
@@ -0,0 +1,630 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomAccessOrds;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopFieldDocs;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalsIndexFieldData;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ */
+public abstract class AbstractStringFieldDataTests extends AbstractFieldDataImplTests {
+
+ private void addField(Document d, String name, String value) {
+ d.add(new StringField(name, value, Field.Store.YES));
+ d.add(new SortedSetDocValuesField(name, new BytesRef(value)));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "2");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "1");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "3");
+ addField(d, "value", "3");
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "2");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ addField(d, "value", "4");
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "2");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "3");
+ addField(d, "value", "3");
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "2");
+ addField(d, "value", "4");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ addField(d, "value", "1");
+ writer.addDocument(d);
+ writer.commit(); // TODO: Have tests with more docs for sorting
+
+ d = new Document();
+ addField(d, "_id", "3");
+ addField(d, "value", "3");
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "2");
+ addField(d, "value", "4");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "3");
+ addField(d, "value", "3");
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillAllMissing() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "3");
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ addField(d, "_id", "1");
+ addField(d, "value", "02");
+ addField(d, "value", "04");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "2");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "3");
+ addField(d, "value", "03");
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ addField(d, "_id", "4");
+ addField(d, "value", "04");
+ addField(d, "value", "05");
+ addField(d, "value", "06");
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "5");
+ addField(d, "value", "06");
+ addField(d, "value", "07");
+ addField(d, "value", "08");
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ addField(d, "_id", "7");
+ addField(d, "value", "08");
+ addField(d, "value", "09");
+ addField(d, "value", "10");
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ addField(d, "_id", "8");
+ addField(d, "value", "!08");
+ addField(d, "value", "!09");
+ addField(d, "value", "!10");
+ writer.addDocument(d);
+ }
+
+ public void testActualMissingValue() throws IOException {
+ testActualMissingValue(false);
+ }
+
+ public void testActualMissingValueReverse() throws IOException {
+ testActualMissingValue(true);
+ }
+
+ public void testActualMissingValue(boolean reverse) throws IOException {
+ // missing value is set to an actual value
+ final String[] values = new String[randomIntBetween(2, 30)];
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = scaledRandomIntBetween(10, 10000);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ Document d = new Document();
+ addField(d, "value", value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+
+ final IndexFieldData indexFieldData = getForField("value");
+ final String missingValue = values[1];
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, MultiValueMode.MIN, null);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
+ BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ final BytesRef value = new BytesRef(docValue == null ? missingValue : docValue);
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ public void testSortMissingFirst() throws IOException {
+ testSortMissing(true, false);
+ }
+
+ public void testSortMissingFirstReverse() throws IOException {
+ testSortMissing(true, true);
+ }
+
+ public void testSortMissingLast() throws IOException {
+ testSortMissing(false, false);
+ }
+
+ public void testSortMissingLastReverse() throws IOException {
+ testSortMissing(false, true);
+ }
+
+ public void testSortMissing(boolean first, boolean reverse) throws IOException {
+ final String[] values = new String[randomIntBetween(2, 10)];
+ for (int i = 1; i < values.length; ++i) {
+ values[i] = TestUtil.randomUnicodeString(getRandom());
+ }
+ final int numDocs = scaledRandomIntBetween(10, 10000);
+ for (int i = 0; i < numDocs; ++i) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value == null) {
+ writer.addDocument(new Document());
+ } else {
+ Document d = new Document();
+ addField(d, "value", value);
+ writer.addDocument(d);
+ }
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+ final IndexFieldData indexFieldData = getForField("value");
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", MultiValueMode.MIN, null);
+ TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse)));
+ assertEquals(numDocs, topDocs.totalHits);
+ BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef();
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final String docValue = searcher.doc(topDocs.scoreDocs[i].doc).get("value");
+ if (first && docValue == null) {
+ assertNull(previousValue);
+ } else if (!first && docValue != null) {
+ assertNotNull(previousValue);
+ }
+ final BytesRef value = docValue == null ? null : new BytesRef(docValue);
+ if (previousValue != null && value != null) {
+ if (reverse) {
+ assertTrue(previousValue.compareTo(value) >= 0);
+ } else {
+ assertTrue(previousValue.compareTo(value) <= 0);
+ }
+ }
+ previousValue = value;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ public void testNestedSortingMin() throws IOException {
+ testNestedSorting(MultiValueMode.MIN);
+ }
+
+ public void testNestedSortingMax() throws IOException {
+ testNestedSorting(MultiValueMode.MAX);
+ }
+
+ public void testNestedSorting(MultiValueMode sortMode) throws IOException {
+ final String[] values = new String[randomIntBetween(2, 20)];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = TestUtil.randomSimpleString(getRandom());
+ }
+ final int numParents = scaledRandomIntBetween(10, 10000);
+ List<Document> docs = new ArrayList<>();
+ FixedBitSet parents = new FixedBitSet(64);
+ for (int i = 0; i < numParents; ++i) {
+ docs.clear();
+ final int numChildren = randomInt(4);
+ for (int j = 0; j < numChildren; ++j) {
+ final Document child = new Document();
+ final int numValues = randomInt(3);
+ for (int k = 0; k < numValues; ++k) {
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ addField(child, "text", value);
+ }
+ docs.add(child);
+ }
+ final Document parent = new Document();
+ parent.add(new StringField("type", "parent", Store.YES));
+ final String value = RandomPicks.randomFrom(getRandom(), values);
+ if (value != null) {
+ addField(parent, "text", value);
+ }
+ docs.add(parent);
+ int bit = parents.prevSetBit(parents.length() - 1) + docs.size();
+ parents = FixedBitSet.ensureCapacity(parents, bit);
+ parents.set(bit);
+ writer.addDocuments(docs);
+ if (randomInt(10) == 0) {
+ writer.commit();
+ }
+ }
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+ IndexFieldData<?> fieldData = getForField("text");
+ final Object missingValue;
+ switch (randomInt(4)) {
+ case 0:
+ missingValue = "_first";
+ break;
+ case 1:
+ missingValue = "_last";
+ break;
+ case 2:
+ missingValue = new BytesRef(RandomPicks.randomFrom(getRandom(), values));
+ break;
+ default:
+ missingValue = new BytesRef(TestUtil.randomSimpleString(getRandom()));
+ break;
+ }
+ Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "parent")));
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ Nested nested = createNested(parentFilter, childFilter);
+ BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode, nested);
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("text", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort);
+ assertTrue(topDocs.scoreDocs.length > 0);
+ BytesRef previous = null;
+ for (int i = 0; i < topDocs.scoreDocs.length; ++i) {
+ final int docID = topDocs.scoreDocs[i].doc;
+ assertTrue("expected " + docID + " to be a parent", parents.get(docID));
+ BytesRef cmpValue = null;
+ for (int child = parents.prevSetBit(docID - 1) + 1; child < docID; ++child) {
+ String[] sVals = searcher.doc(child).getValues("text");
+ final BytesRef[] vals;
+ if (sVals.length == 0) {
+ vals = new BytesRef[0];
+ } else {
+ vals = new BytesRef[sVals.length];
+ for (int j = 0; j < vals.length; ++j) {
+ vals[j] = new BytesRef(sVals[j]);
+ }
+ }
+ for (BytesRef value : vals) {
+ if (cmpValue == null) {
+ cmpValue = value;
+ } else if (sortMode == MultiValueMode.MIN && value.compareTo(cmpValue) < 0) {
+ cmpValue = value;
+ } else if (sortMode == MultiValueMode.MAX && value.compareTo(cmpValue) > 0) {
+ cmpValue = value;
+ }
+ }
+ }
+ if (cmpValue == null) {
+ if ("_first".equals(missingValue)) {
+ cmpValue = new BytesRef();
+ } else if ("_last".equals(missingValue)) {
+ cmpValue = XFieldComparatorSource.MAX_TERM;
+ } else {
+ cmpValue = (BytesRef) missingValue;
+ }
+ }
+ if (previous != null) {
+ assertTrue(previous.utf8ToString() + " / " + cmpValue.utf8ToString(), previous.compareTo(cmpValue) <= 0);
+ }
+ previous = cmpValue;
+ }
+ searcher.getIndexReader().close();
+ }
+
+ private void assertIteratorConsistentWithRandomAccess(RandomAccessOrds ords, int maxDoc) {
+ for (int doc = 0; doc < maxDoc; ++doc) {
+ ords.setDocument(doc);
+ final int cardinality = ords.cardinality();
+ for (int i = 0; i < cardinality; ++i) {
+ assertEquals(ords.nextOrd(), ords.ordAt(i));
+ }
+ for (int i = 0; i < 3; ++i) {
+ assertEquals(ords.nextOrd(), -1);
+ }
+ }
+ }
+
+ @Test
+ public void testGlobalOrdinals() throws Exception {
+ fillExtendedMvSet();
+ refreshReader();
+ FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("global_values", "fixed"));
+ IndexOrdinalsFieldData ifd = getForField(fieldDataType, "value", hasDocValues());
+ IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
+ assertThat(topLevelReader.leaves().size(), equalTo(3));
+
+ // First segment
+ assertThat(globalOrdinals, instanceOf(GlobalOrdinalsIndexFieldData.class));
+ LeafReaderContext leaf = topLevelReader.leaves().get(0);
+ AtomicOrdinalsFieldData afd = globalOrdinals.load(leaf);
+ RandomAccessOrds values = afd.getOrdinalsValues();
+ assertIteratorConsistentWithRandomAccess(values, leaf.reader().maxDoc());
+ values.setDocument(0);
+ assertThat(values.cardinality(), equalTo(2));
+ long ord = values.nextOrd();
+ assertThat(ord, equalTo(3l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("02"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(5l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("04"));
+ values.setDocument(1);
+ assertThat(values.cardinality(), equalTo(0));
+ values.setDocument(2);
+ assertThat(values.cardinality(), equalTo(1));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(4l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("03"));
+
+ // Second segment
+ leaf = topLevelReader.leaves().get(1);
+ afd = globalOrdinals.load(leaf);
+ values = afd.getOrdinalsValues();
+ assertIteratorConsistentWithRandomAccess(values, leaf.reader().maxDoc());
+ values.setDocument(0);
+ assertThat(values.cardinality(), equalTo(3));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(5l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("04"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(6l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("05"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(7l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("06"));
+ values.setDocument(1);
+ assertThat(values.cardinality(), equalTo(3));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(7l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("06"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(8l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("07"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(9l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("08"));
+ values.setDocument(2);
+ assertThat(values.cardinality(), equalTo(0));
+ values.setDocument(3);
+ assertThat(values.cardinality(), equalTo(3));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(9l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("08"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(10l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("09"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(11l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("10"));
+
+ // Third segment
+ leaf = topLevelReader.leaves().get(2);
+ afd = globalOrdinals.load(leaf);
+ values = afd.getOrdinalsValues();
+ assertIteratorConsistentWithRandomAccess(values, leaf.reader().maxDoc());
+ values.setDocument(0);
+ values.setDocument(0);
+ assertThat(values.cardinality(), equalTo(3));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(0l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!08"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(1l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!09"));
+ ord = values.nextOrd();
+ assertThat(ord, equalTo(2l));
+ assertThat(values.lookupOrd(ord).utf8ToString(), equalTo("!10"));
+ }
+
+ @Test
+ public void testTermsEnum() throws Exception {
+ fillExtendedMvSet();
+ LeafReaderContext atomicReaderContext = refreshReader();
+
+ IndexOrdinalsFieldData ifd = getForField("value");
+ AtomicOrdinalsFieldData afd = ifd.load(atomicReaderContext);
+
+ TermsEnum termsEnum = afd.getOrdinalsValues().termsEnum();
+ int size = 0;
+ while (termsEnum.next() != null) {
+ size++;
+ }
+ assertThat(size, equalTo(12));
+
+ assertThat(termsEnum.seekExact(new BytesRef("10")), is(true));
+ assertThat(termsEnum.term().utf8ToString(), equalTo("10"));
+ assertThat(termsEnum.next(), nullValue());
+
+ assertThat(termsEnum.seekExact(new BytesRef("08")), is(true));
+ assertThat(termsEnum.term().utf8ToString(), equalTo("08"));
+ size = 0;
+ while (termsEnum.next() != null) {
+ size++;
+ }
+ assertThat(size, equalTo(2));
+
+ termsEnum.seekExact(8);
+ assertThat(termsEnum.term().utf8ToString(), equalTo("07"));
+ size = 0;
+ while (termsEnum.next() != null) {
+ size++;
+ }
+ assertThat(size, equalTo(3));
+ }
+
+ @Test
+ public void testGlobalOrdinalsGetRemovedOnceIndexReaderCloses() throws Exception {
+ fillExtendedMvSet();
+ refreshReader();
+ FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("global_values", "fixed").put("cache", "node"));
+ IndexOrdinalsFieldData ifd = getForField(fieldDataType, "value", hasDocValues());
+ IndexOrdinalsFieldData globalOrdinals = ifd.loadGlobal(topLevelReader);
+ assertThat(ifd.loadGlobal(topLevelReader), sameInstance(globalOrdinals));
+ // 3 b/c 1 segment level caches and 1 top level cache
+ // in case of doc values, we don't cache atomic FD, so only the top-level cache is there
+ assertThat(indicesFieldDataCache.getCache().size(), equalTo(hasDocValues() ? 1L : 4L));
+
+ IndexOrdinalsFieldData cachedInstance = null;
+ for (Accountable ramUsage : indicesFieldDataCache.getCache().asMap().values()) {
+ if (ramUsage instanceof IndexOrdinalsFieldData) {
+ cachedInstance = (IndexOrdinalsFieldData) ramUsage;
+ break;
+ }
+ }
+ assertThat(cachedInstance, sameInstance(globalOrdinals));
+ topLevelReader.close();
+ // Now only 3 segment level entries, only the toplevel reader has been closed, but the segment readers are still used by IW
+ assertThat(indicesFieldDataCache.getCache().size(), equalTo(hasDocValues() ? 0L : 3L));
+
+ refreshReader();
+ assertThat(ifd.loadGlobal(topLevelReader), not(sameInstance(globalOrdinals)));
+
+ ifdService.clear();
+ assertThat(indicesFieldDataCache.getCache().size(), equalTo(0l));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java
new file mode 100644
index 0000000000..dc683381d8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class BinaryDVFieldDataTests extends AbstractFieldDataTests {
+
+    @Override
+    protected boolean hasDocValues() {
+        // binary field data in this test is always backed by doc values
+        return true;
+    }
+
+    @Test
+    public void testDocValue() throws Exception {
+        // Mapping: a single "binary" field whose fielddata format is doc_values.
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("test")
+                .startObject("properties")
+                .startObject("field")
+                .field("type", "binary")
+                .startObject("fielddata").field("format", "doc_values").endObject()
+                .endObject()
+                .endObject()
+                .endObject().endObject().string();
+
+        final DocumentMapper mapper = mapperService.documentMapperParser().parse(mapping);
+
+        // doc 0: two random binary values
+        ObjectArrayList<byte[]> firstDocValues = new ObjectArrayList<>(2);
+        firstDocValues.add(randomBytes());
+        firstDocValues.add(randomBytes());
+        XContentBuilder source = XContentFactory.jsonBuilder().startObject()
+                .startArray("field").value(firstDocValues.get(0)).value(firstDocValues.get(1)).endArray()
+                .endObject();
+        ParsedDocument parsed = mapper.parse("test", "1", source.bytes());
+        writer.addDocument(parsed.rootDoc());
+
+        // doc 1: one binary value
+        byte[] singleValue = randomBytes();
+        source = XContentFactory.jsonBuilder().startObject().field("field", singleValue).endObject();
+        parsed = mapper.parse("test", "2", source.bytes());
+        writer.addDocument(parsed.rootDoc());
+
+        // doc 2: no value at all
+        source = XContentFactory.jsonBuilder().startObject().endObject();
+        parsed = mapper.parse("test", "3", source.bytes());
+        writer.addDocument(parsed.rootDoc());
+
+        // doc 3: repeats the first value — duplicates must be dropped on load
+        ObjectArrayList<byte[]> dupDocValues = new ObjectArrayList<>(2);
+        dupDocValues.add(randomBytes());
+        dupDocValues.add(randomBytes());
+        source = XContentFactory.jsonBuilder().startObject()
+                .startArray("field").value(dupDocValues.get(0)).value(dupDocValues.get(1)).value(dupDocValues.get(0)).endArray()
+                .endObject();
+        parsed = mapper.parse("test", "4", source.bytes());
+        writer.addDocument(parsed.rootDoc());
+
+        LeafReaderContext leaf = refreshReader();
+        IndexFieldData<?> ifd = getForField("field");
+        AtomicFieldData atomic = ifd.load(leaf);
+
+        SortedBinaryDocValues values = atomic.getBytesValues();
+
+        // the loaded values come back sorted and de-duplicated, so compare against the same view
+        CollectionUtils.sortAndDedup(firstDocValues);
+        values.setDocument(0);
+        assertThat(values.count(), equalTo(2));
+        assertThat(values.valueAt(0), equalTo(new BytesRef(firstDocValues.get(0))));
+        assertThat(values.valueAt(1), equalTo(new BytesRef(firstDocValues.get(1))));
+
+        values.setDocument(1);
+        assertThat(values.count(), equalTo(1));
+        assertThat(values.valueAt(0), equalTo(new BytesRef(singleValue)));
+
+        values.setDocument(2);
+        assertThat(values.count(), equalTo(0));
+
+        CollectionUtils.sortAndDedup(dupDocValues);
+        values.setDocument(3);
+        assertThat(values.count(), equalTo(2));
+        assertThat(values.valueAt(0), equalTo(new BytesRef(dupDocValues.get(0))));
+        assertThat(values.valueAt(1), equalTo(new BytesRef(dupDocValues.get(1))));
+    }
+
+    // Random payload of 10 to 1000 bytes.
+    private byte[] randomBytes() {
+        byte[] bytes = new byte[randomIntBetween(10, 1000)];
+        getRandom().nextBytes(bytes);
+        return bytes;
+    }
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        return new FieldDataType("binary", Settings.builder().put("format", "doc_values"));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
new file mode 100644
index 0000000000..9124f6c450
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/DisabledFieldDataFormatTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class DisabledFieldDataFormatTests extends ElasticsearchSingleNodeTest {
+
+    public void test() throws Exception {
+        createIndex("test", Settings.EMPTY, "type", "s", "type=string");
+        logger.info("indexing data start");
+        for (int i = 0; i < 10; ++i) {
+            client().prepareIndex("test", "type", Integer.toString(i)).setSource("s", "value" + i).execute().actionGet();
+        }
+        logger.info("indexing data end");
+
+        final int searchCycles = 1;
+
+        client().admin().indices().prepareRefresh().execute().actionGet();
+
+        // disable field data
+        updateFormat("disabled");
+
+        SubAggCollectionMode mode = randomFrom(SubAggCollectionMode.values());
+        // try to run something that relies on field data and make sure that it fails
+        for (int i = 0; i < searchCycles; i++) {
+            expectTermsAggFailure(i, mode);
+        }
+
+        // enable it again
+        updateFormat("paged_bytes");
+        // the same aggregation must now succeed
+        for (int i = 0; i < searchCycles; i++) {
+            assertNoFailures(runTermsAgg(i, mode));
+        }
+
+        // disable it again
+        updateFormat("disabled");
+        // this time, it should work because segments are already loaded
+        for (int i = 0; i < searchCycles; i++) {
+            assertNoFailures(runTermsAgg(i, mode));
+        }
+
+        // but add more docs and the new segment won't be loaded
+        client().prepareIndex("test", "type", "-1").setSource("s", "value").execute().actionGet();
+        client().admin().indices().prepareRefresh().execute().actionGet();
+        for (int i = 0; i < searchCycles; i++) {
+            expectTermsAggFailure(i, mode);
+        }
+    }
+
+    // Runs a terms aggregation on field "s", routing via the given preference value.
+    private SearchResponse runTermsAgg(int preference, SubAggCollectionMode mode) {
+        return client().prepareSearch("test").setPreference(Integer.toString(preference))
+                .addAggregation(AggregationBuilders.terms("t").field("s").collectMode(mode))
+                .execute().actionGet();
+    }
+
+    // Expects the terms aggregation to fail, either via an exception or via shard failures.
+    private void expectTermsAggFailure(int preference, SubAggCollectionMode mode) {
+        try {
+            assertFailures(runTermsAgg(preference, mode));
+        } catch (SearchPhaseExecutionException e) {
+            // expected
+        }
+    }
+
+    // Switches the fielddata format of field "s" through a put-mapping request.
+    private void updateFormat(final String format) throws Exception {
+        logger.info(">> put mapping start {}", format);
+        assertAcked(client().admin().indices().preparePutMapping("test").setType("type").setSource(
+                XContentFactory.jsonBuilder().startObject().startObject("type")
+                        .startObject("properties")
+                        .startObject("s")
+                        .field("type", "string")
+                        .startObject("fielddata")
+                        .field("format", format)
+                        .endObject()
+                        .endObject()
+                        .endObject()
+                        .endObject()
+                        .endObject()).get());
+        logger.info(">> put mapping end {}", format);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
new file mode 100644
index 0000000000..242e01475d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/DoubleFieldDataTests.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/**
+ */
+public class DoubleFieldDataTests extends AbstractNumericFieldDataTests {
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        return new FieldDataType("double", getFieldDataSettings());
+    }
+
+    // Expected sort values, as strings, consumed by the shared numeric tests.
+    @Override
+    protected String one() {
+        return "1.0";
+    }
+
+    @Override
+    protected String two() {
+        return "2.0";
+    }
+
+    @Override
+    protected String three() {
+        return "3.0";
+    }
+
+    @Override
+    protected String four() {
+        return "4.0";
+    }
+
+    @Override
+    protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        writer.commit();
+
+        writer.deleteDocuments(new Term("_id", "1"));
+    }
+
+    @Override
+    protected void fillSingleValueAllSet() throws Exception {
+        // docs 1..3 carry exactly one value each: 2.0, 1.0, 3.0
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+        writer.addDocument(d);
+    }
+
+    @Override
+    protected void fillSingleValueWithMissing() throws Exception {
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+        writer.addDocument(d);
+    }
+
+    @Override
+    protected void fillMultiValueAllSet() throws Exception {
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        d.add(new DoubleField("value", 1.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+        writer.addDocument(d);
+    }
+
+    @Override
+    protected void fillMultiValueWithMissing() throws Exception {
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        //d.add(new StringField("value", one(), Field.Store.NO)); // MISSING
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO)); // was 3.0f: same value, but keep double literals consistent
+        writer.addDocument(d);
+    }
+
+    @Override
+    protected void fillExtendedMvSet() throws Exception {
+        // 8 docs across 3 segments (commits after doc 3 and doc 7); docs 2 and 6 have no values
+        Document d = new Document();
+        d.add(new StringField("_id", "1", Field.Store.NO));
+        d.add(new DoubleField("value", 2.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "2", Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "3", Field.Store.NO));
+        d.add(new DoubleField("value", 3.0d, Field.Store.NO));
+        writer.addDocument(d);
+        writer.commit();
+
+        d = new Document();
+        d.add(new StringField("_id", "4", Field.Store.NO));
+        d.add(new DoubleField("value", 4.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 5.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 6.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "5", Field.Store.NO));
+        d.add(new DoubleField("value", 6.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 7.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 8.0d, Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "6", Field.Store.NO));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField("_id", "7", Field.Store.NO));
+        d.add(new DoubleField("value", 8.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 9.0d, Field.Store.NO));
+        d.add(new DoubleField("value", 10.0d, Field.Store.NO));
+        writer.addDocument(d);
+        writer.commit();
+
+        d = new Document();
+        d.add(new StringField("_id", "8", Field.Store.NO));
+        d.add(new DoubleField("value", -8.0d, Field.Store.NO));
+        d.add(new DoubleField("value", -9.0d, Field.Store.NO));
+        d.add(new DoubleField("value", -10.0d, Field.Store.NO));
+        writer.addDocument(d);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
new file mode 100644
index 0000000000..1b3168af98
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/DuelFieldDataTests.java
@@ -0,0 +1,639 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.English;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.junit.Test;
+
+import java.util.*;
+import java.util.Map.Entry;
+
+import static org.hamcrest.Matchers.*;
+
+public class DuelFieldDataTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ // NOTE(review): returns null — the duel tests build explicit FieldDataType
+ // instances per format instead; confirm the base class tolerates null here.
+ return null;
+ }
+
+    @Test
+    public void testDuelAllTypesSingleValue() throws Exception {
+        // Index the same numeric-looking value into several field types, then
+        // duel every pair of field data implementations against each other.
+        final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("bytes").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .endObject().endObject().endObject().string();
+        final DocumentMapper mapper = mapperService.documentMapperParser().parse(mapping);
+        Random random = getRandom();
+        int atLeast = scaledRandomIntBetween(1000, 1500);
+        for (int i = 0; i < atLeast; i++) {
+            String s = Integer.toString(randomByte());
+
+            XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+            for (String fieldName : Arrays.asList("bytes", "byte", "short", "integer", "long", "float", "double")) {
+                doc = doc.field(fieldName, s);
+            }
+
+            doc = doc.endObject();
+
+            final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+            writer.addDocument(d.rootDoc());
+
+            // occasionally cut a new segment so the duel sees multiple leaves
+            if (random.nextInt(10) == 0) {
+                refreshReader();
+            }
+        }
+        LeafReaderContext context = refreshReader();
+        Map<FieldDataType, Type> typeMap = new HashMap<>();
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "fst")), Type.Bytes);
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "paged_bytes")), Type.Bytes);
+        typeMap.put(new FieldDataType("byte", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("short", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("int", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("long", Settings.builder().put("format", "array")), Type.Long);
+        typeMap.put(new FieldDataType("double", Settings.builder().put("format", "array")), Type.Double);
+        typeMap.put(new FieldDataType("float", Settings.builder().put("format", "array")), Type.Float);
+        typeMap.put(new FieldDataType("byte", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("short", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("int", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("long", Settings.builder().put("format", "doc_values")), Type.Long);
+        typeMap.put(new FieldDataType("double", Settings.builder().put("format", "doc_values")), Type.Double);
+        typeMap.put(new FieldDataType("float", Settings.builder().put("format", "doc_values")), Type.Float);
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "doc_values")), Type.Bytes);
+        ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<>(typeMap.entrySet());
+        Preprocessor pre = new ToDoublePreprocessor();
+        while (!list.isEmpty()) {
+            Entry<FieldDataType, Type> left;
+            Entry<FieldDataType, Type> right;
+            if (list.size() > 1) {
+                // pick two random distinct implementations to duel
+                left = list.remove(random.nextInt(list.size()));
+                right = list.remove(random.nextInt(list.size()));
+            } else {
+                right = left = list.remove(0);
+            }
+
+            ifdService.clear();
+            IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+            ifdService.clear();
+            IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+            duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+            duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+            // also duel on a per-segment basis
+            DirectoryReader perSegment = DirectoryReader.open(writer, true);
+            CompositeReaderContext composite = perSegment.getContext();
+            List<LeafReaderContext> leaves = composite.leaves();
+            for (LeafReaderContext atomicReaderContext : leaves) {
+                duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+            }
+            perSegment.close(); // was leaked: the NRT reader must be closed, as testDuelStrings does
+        }
+    }
+
+
+    @Test
+    public void testDuelIntegers() throws Exception {
+        // Index de-duplicated random byte values into all integer-ish field
+        // types and duel every pair of field data implementations.
+        final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("byte").field("type", "byte").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("short").field("type", "short").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("integer").field("type", "integer").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .startObject("long").field("type", "long").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .endObject().endObject().endObject().string();
+
+        final DocumentMapper mapper = mapperService.documentMapperParser().parse(mapping);
+        Random random = getRandom();
+        int atLeast = scaledRandomIntBetween(1000, 1500);
+        final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+        byte[] values = new byte[maxNumValues];
+        for (int i = 0; i < atLeast; i++) {
+            int numValues = randomInt(maxNumValues);
+            // FD loses values if they are duplicated, so we must deduplicate for this test
+            Set<Byte> vals = new HashSet<Byte>();
+            for (int j = 0; j < numValues; ++j) {
+                vals.add(randomByte());
+            }
+
+            numValues = vals.size();
+            int upto = 0;
+            for (Byte bb : vals) {
+                values[upto++] = bb.byteValue();
+            }
+
+            XContentBuilder doc = XContentFactory.jsonBuilder().startObject();
+            for (String fieldName : Arrays.asList("byte", "short", "integer", "long")) {
+                doc = doc.startArray(fieldName);
+                for (int j = 0; j < numValues; ++j) {
+                    doc = doc.value(values[j]);
+                }
+                doc = doc.endArray();
+            }
+            doc = doc.endObject();
+
+            final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+            writer.addDocument(d.rootDoc());
+            // occasionally cut a new segment so the duel sees multiple leaves
+            if (random.nextInt(10) == 0) {
+                refreshReader();
+            }
+        }
+        LeafReaderContext context = refreshReader();
+        Map<FieldDataType, Type> typeMap = new HashMap<>();
+        typeMap.put(new FieldDataType("byte", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("short", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("int", Settings.builder().put("format", "array")), Type.Integer);
+        typeMap.put(new FieldDataType("long", Settings.builder().put("format", "array")), Type.Long);
+        typeMap.put(new FieldDataType("byte", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("short", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("int", Settings.builder().put("format", "doc_values")), Type.Integer);
+        typeMap.put(new FieldDataType("long", Settings.builder().put("format", "doc_values")), Type.Long);
+        ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<>(typeMap.entrySet());
+        while (!list.isEmpty()) {
+            Entry<FieldDataType, Type> left;
+            Entry<FieldDataType, Type> right;
+            if (list.size() > 1) {
+                // pick two random distinct implementations to duel
+                left = list.remove(random.nextInt(list.size()));
+                right = list.remove(random.nextInt(list.size()));
+            } else {
+                right = left = list.remove(0);
+            }
+            ifdService.clear();
+            IndexNumericFieldData leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+            ifdService.clear();
+            IndexNumericFieldData rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+            duelFieldDataLong(random, context, leftFieldData, rightFieldData);
+            duelFieldDataLong(random, context, rightFieldData, leftFieldData);
+
+            // also duel on a per-segment basis
+            DirectoryReader perSegment = DirectoryReader.open(writer, true);
+            CompositeReaderContext composite = perSegment.getContext();
+            List<LeafReaderContext> leaves = composite.leaves();
+            for (LeafReaderContext atomicReaderContext : leaves) {
+                duelFieldDataLong(random, atomicReaderContext, leftFieldData, rightFieldData);
+            }
+            perSegment.close(); // was leaked: the NRT reader must be closed, as testDuelStrings does
+        }
+
+    }
+
+ @Test
+ public void testDuelDoubles() throws Exception {
+ final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("float").field("type", "float").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .startObject("double").field("type", "double").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ final DocumentMapper mapper = mapperService.documentMapperParser().parse(mapping);
+ Random random = getRandom();
+ int atLeast = scaledRandomIntBetween(1000, 1500);
+ final int maxNumValues = randomBoolean() ? 1 : randomIntBetween(2, 40);
+ float[] values = new float[maxNumValues];
+ for (int i = 0; i < atLeast; i++) {
+ int numValues = randomInt(maxNumValues);
+ float def = randomBoolean() ? randomFloat() : Float.NaN;
+ // FD loses values if they are duplicated, so we must deduplicate for this test
+ Set<Float> vals = new HashSet<Float>();
+ for (int j = 0; j < numValues; ++j) {
+ if (randomBoolean()) {
+ vals.add(def);
+ } else {
+ vals.add(randomFloat());
+ }
+ }
+ numValues = vals.size();
+ int upto = 0;
+ for (Float f : vals) {
+ values[upto++] = f.floatValue();
+ }
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("float");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().startArray("double");
+ for (int j = 0; j < numValues; ++j) {
+ doc = doc.value(values[j]);
+ }
+ doc = doc.endArray().endObject();
+
+ final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+ writer.addDocument(d.rootDoc());
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ LeafReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<>();
+ typeMap.put(new FieldDataType("double", Settings.builder().put("format", "array")), Type.Double);
+ typeMap.put(new FieldDataType("float", Settings.builder().put("format", "array")), Type.Float);
+ typeMap.put(new FieldDataType("double", Settings.builder().put("format", "doc_values")), Type.Double);
+ typeMap.put(new FieldDataType("float", Settings.builder().put("format", "doc_values")), Type.Float);
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<>(typeMap.entrySet());
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexNumericFieldData leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexNumericFieldData rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataDouble(random, context, leftFieldData, rightFieldData);
+ duelFieldDataDouble(random, context, rightFieldData, leftFieldData);
+
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<LeafReaderContext> leaves = composite.leaves();
+ for (LeafReaderContext atomicReaderContext : leaves) {
+ duelFieldDataDouble(random, atomicReaderContext, leftFieldData, rightFieldData);
+ }
+ }
+
+ }
+
+
+ @Test
+ public void testDuelStrings() throws Exception {
+ // Duels the string field data implementations (fst, paged_bytes, doc_values)
+ // against each other on randomly generated English-number strings.
+ Random random = getRandom();
+ int atLeast = scaledRandomIntBetween(1000, 1500);
+ for (int i = 0; i < atLeast; i++) {
+ Document d = new Document();
+ d.add(new StringField("_id", "" + i, Field.Store.NO));
+ // ~1 in 15 documents gets no values at all
+ if (random.nextInt(15) != 0) {
+ int[] numbers = getNumbers(random, Integer.MAX_VALUE);
+ for (int j : numbers) {
+ final String s = English.longToEnglish(j);
+ // index the same value both as an indexed term and as a doc value
+ d.add(new StringField("bytes", s, Field.Store.NO));
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef(s)));
+ }
+ // occasionally add an empty string as an edge case
+ if (random.nextInt(10) == 0) {
+ d.add(new StringField("bytes", "", Field.Store.NO));
+ d.add(new SortedSetDocValuesField("bytes", new BytesRef()));
+ }
+ }
+ writer.addDocument(d);
+ // occasionally cut a new segment so the duel sees multiple leaves
+ if (random.nextInt(10) == 0) {
+ refreshReader();
+ }
+ }
+ LeafReaderContext context = refreshReader();
+ Map<FieldDataType, Type> typeMap = new HashMap<>();
+ typeMap.put(new FieldDataType("string", Settings.builder().put("format", "fst")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", Settings.builder().put("format", "paged_bytes")), Type.Bytes);
+ typeMap.put(new FieldDataType("string", Settings.builder().put("format", "doc_values")), Type.Bytes);
+ // TODO add filters
+ ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<>(typeMap.entrySet());
+ Preprocessor pre = new Preprocessor();
+ while (!list.isEmpty()) {
+ Entry<FieldDataType, Type> left;
+ Entry<FieldDataType, Type> right;
+ if (list.size() > 1) {
+ // pick two random distinct implementations to duel
+ left = list.remove(random.nextInt(list.size()));
+ right = list.remove(random.nextInt(list.size()));
+ } else {
+ right = left = list.remove(0);
+ }
+ ifdService.clear();
+ IndexFieldData<?> leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+ ifdService.clear();
+ IndexFieldData<?> rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+ duelFieldDataBytes(random, context, leftFieldData, rightFieldData, pre);
+ duelFieldDataBytes(random, context, rightFieldData, leftFieldData, pre);
+
+ // also duel on a per-segment basis
+ DirectoryReader perSegment = DirectoryReader.open(writer, true);
+ CompositeReaderContext composite = perSegment.getContext();
+ List<LeafReaderContext> leaves = composite.leaves();
+ for (LeafReaderContext atomicReaderContext : leaves) {
+ duelFieldDataBytes(random, atomicReaderContext, leftFieldData, rightFieldData, pre);
+ }
+ perSegment.close();
+ }
+
+ }
+
+    // Verifies that segment-local ordinals and global ordinals resolve to the
+    // same term values for every string fielddata format.
+    public void testDuelGlobalOrdinals() throws Exception {
+        Random random = getRandom();
+        final int numDocs = scaledRandomIntBetween(10, 1000);
+        final int numValues = scaledRandomIntBetween(10, 500);
+        final String[] values = new String[numValues];
+        for (int i = 0; i < numValues; ++i) {
+            values[i] = new String(RandomStrings.randomAsciiOfLength(random, 10));
+        }
+        for (int i = 0; i < numDocs; i++) {
+            Document d = new Document();
+            final int numVals = randomInt(3);
+            for (int j = 0; j < numVals; ++j) {
+                final String value = RandomPicks.randomFrom(random, Arrays.asList(values));
+                d.add(new StringField("string", value, Field.Store.NO));
+                d.add(new SortedSetDocValuesField("bytes", new BytesRef(value)));
+            }
+            writer.addDocument(d);
+            if (randomInt(10) == 0) {
+                refreshReader();
+            }
+        }
+        refreshReader();
+
+        // use the diamond operator instead of spelling out the type arguments
+        // (matches the sibling duel tests in this class)
+        Map<FieldDataType, Type> typeMap = new HashMap<>();
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "fst")), Type.Bytes);
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "paged_bytes")), Type.Bytes);
+        typeMap.put(new FieldDataType("string", Settings.builder().put("format", "doc_values")), Type.Bytes);
+
+        for (Map.Entry<FieldDataType, Type> entry : typeMap.entrySet()) {
+            ifdService.clear();
+            IndexOrdinalsFieldData fieldData = getForField(entry.getKey(), entry.getValue().name().toLowerCase(Locale.ROOT));
+            // segment-local ordinals ...
+            RandomAccessOrds left = fieldData.load(readerContext).getOrdinalsValues();
+            fieldData.clear();
+            // ... versus the global ordinals of the sole leaf
+            RandomAccessOrds right = fieldData.loadGlobal(topLevelReader).load(topLevelReader.leaves().get(0)).getOrdinalsValues();
+            assertEquals(left.getValueCount(), right.getValueCount());
+            for (long ord = 0; ord < left.getValueCount(); ++ord) {
+                assertEquals(left.lookupOrd(ord), right.lookupOrd(ord));
+            }
+        }
+    }
+
+    // Indexes random geo points (with deliberate duplicates via a default
+    // lat/lon) and duels the array, compressed and doc_values geo-point formats
+    // against each other, tolerating the lossy "compressed" encoding up to
+    // 'precision'.
+    public void testDuelGeoPoints() throws Exception {
+        final String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("geopoint").field("type", "geo_point").startObject("fielddata").field("format", "doc_values").endObject().endObject()
+                .endObject().endObject().endObject().string();
+
+        final DocumentMapper mapper = mapperService.documentMapperParser().parse(mapping);
+
+        Random random = getRandom();
+        int atLeast = scaledRandomIntBetween(1000, 1500);
+        int maxValuesPerDoc = randomBoolean() ? 1 : randomIntBetween(2, 40);
+        // to test deduplication
+        double defaultLat = randomDouble() * 180 - 90;
+        double defaultLon = randomDouble() * 360 - 180;
+        for (int i = 0; i < atLeast; i++) {
+            final int numValues = randomInt(maxValuesPerDoc);
+            XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("geopoint");
+            for (int j = 0; j < numValues; ++j) {
+                // half the values reuse the default point, the rest are random
+                if (randomBoolean()) {
+                    doc.startObject().field("lat", defaultLat).field("lon", defaultLon).endObject();
+                } else {
+                    doc.startObject().field("lat", randomDouble() * 180 - 90).field("lon", randomDouble() * 360 - 180).endObject();
+                }
+            }
+            doc = doc.endArray().endObject();
+            final ParsedDocument d = mapper.parse("type", Integer.toString(i), doc.bytes());
+
+            writer.addDocument(d.rootDoc());
+            // flush into a new segment now and then
+            if (random.nextInt(10) == 0) {
+                refreshReader();
+            }
+        }
+        LeafReaderContext context = refreshReader();
+        Map<FieldDataType, Type> typeMap = new HashMap<>();
+        // tolerance used when comparing against the lossy "compressed" format
+        final Distance precision = new Distance(1, randomFrom(DistanceUnit.values()));
+        typeMap.put(new FieldDataType("geo_point", Settings.builder().put("format", "array")), Type.GeoPoint);
+        typeMap.put(new FieldDataType("geo_point", Settings.builder().put("format", "compressed").put("precision", precision)), Type.GeoPoint);
+        typeMap.put(new FieldDataType("geo_point", Settings.builder().put("format", "doc_values")), Type.GeoPoint);
+
+        ArrayList<Entry<FieldDataType, Type>> list = new ArrayList<>(typeMap.entrySet());
+        // duel randomly drawn pairs of formats until none are left
+        while (!list.isEmpty()) {
+            Entry<FieldDataType, Type> left;
+            Entry<FieldDataType, Type> right;
+            if (list.size() > 1) {
+                left = list.remove(random.nextInt(list.size()));
+                right = list.remove(random.nextInt(list.size()));
+            } else {
+                // odd count: duel the last format against itself
+                right = left = list.remove(0);
+            }
+            ifdService.clear();
+            IndexGeoPointFieldData leftFieldData = getForField(left.getKey(), left.getValue().name().toLowerCase(Locale.ROOT));
+
+            ifdService.clear();
+            IndexGeoPointFieldData rightFieldData = getForField(right.getKey(), right.getValue().name().toLowerCase(Locale.ROOT));
+
+            // compare in both directions on the top-level view ...
+            duelFieldDataGeoPoint(random, context, leftFieldData, rightFieldData, precision);
+            duelFieldDataGeoPoint(random, context, rightFieldData, leftFieldData, precision);
+
+            // ... and once more per segment
+            DirectoryReader perSegment = DirectoryReader.open(writer, true);
+            CompositeReaderContext composite = perSegment.getContext();
+            List<LeafReaderContext> leaves = composite.leaves();
+            for (LeafReaderContext atomicReaderContext : leaves) {
+                duelFieldDataGeoPoint(random, atomicReaderContext, leftFieldData, rightFieldData, precision);
+            }
+            perSegment.close();
+        }
+    }
+
+ private int[] getNumbers(Random random, int margin) {
+ if (random.nextInt(20) == 0) {
+ int[] num = new int[1 + random.nextInt(10)];
+ for (int i = 0; i < num.length; i++) {
+ int v = (random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin));
+ num[i] = v;
+ }
+ return num;
+ }
+ return new int[]{(random.nextBoolean() ? -1 * random.nextInt(margin) : random.nextInt(margin))};
+ }
+
+
+    // Asserts that two fielddata implementations expose identical byte values
+    // for every document: same per-doc value count, equal values after
+    // preprocessing, and values in strictly increasing order within a document.
+    private static void duelFieldDataBytes(Random random, LeafReaderContext context, IndexFieldData<?> left, IndexFieldData<?> right, Preprocessor pre) throws Exception {
+        // randomly exercise both the cached and the direct loading paths
+        AtomicFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+        AtomicFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+        int numDocs = context.reader().maxDoc();
+        SortedBinaryDocValues leftBytesValues = leftData.getBytesValues();
+        SortedBinaryDocValues rightBytesValues = rightData.getBytesValues();
+        BytesRefBuilder leftSpare = new BytesRefBuilder();
+        BytesRefBuilder rightSpare = new BytesRefBuilder();
+
+        for (int i = 0; i < numDocs; i++) {
+            leftBytesValues.setDocument(i);
+            rightBytesValues.setDocument(i);
+            int numValues = leftBytesValues.count();
+            assertThat(numValues, equalTo(rightBytesValues.count()));
+            BytesRef previous = null;
+            for (int j = 0; j < numValues; j++) {
+                // copy into spares: valueAt() may return a reused instance
+                rightSpare.copyBytes(rightBytesValues.valueAt(j));
+                leftSpare.copyBytes(leftBytesValues.valueAt(j));
+                if (previous != null) {
+                    // values within a doc must be sorted in strictly increasing order
+                    assertThat(pre.compare(previous, rightSpare.get()), lessThan(0));
+                }
+                previous = BytesRef.deepCopyOf(rightSpare.get());
+                pre.toString(rightSpare.get());
+                pre.toString(leftSpare.get());
+                assertThat(pre.toString(leftSpare.get()), equalTo(pre.toString(rightSpare.get())));
+            }
+        }
+    }
+
+
+    // Asserts that two numeric fielddata implementations expose identical
+    // doubles per document: same count, approximately equal values (NaN-aware),
+    // and strictly increasing order within a document.
+    private static void duelFieldDataDouble(Random random, LeafReaderContext context, IndexNumericFieldData left, IndexNumericFieldData right) throws Exception {
+        // randomly exercise both the cached and the direct loading paths
+        AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+        AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+        int numDocs = context.reader().maxDoc();
+        SortedNumericDoubleValues leftDoubleValues = leftData.getDoubleValues();
+        SortedNumericDoubleValues rightDoubleValues = rightData.getDoubleValues();
+        for (int i = 0; i < numDocs; i++) {
+            leftDoubleValues.setDocument(i);
+            rightDoubleValues.setDocument(i);
+            int numValues = leftDoubleValues.count();
+            assertThat(numValues, equalTo(rightDoubleValues.count()));
+            double previous = 0;
+            for (int j = 0; j < numValues; j++) {
+                double current = rightDoubleValues.valueAt(j);
+                if (Double.isNaN(current)) {
+                    // NaN != NaN, so compare NaN-ness explicitly
+                    assertTrue(Double.isNaN(leftDoubleValues.valueAt(j)));
+                } else {
+                    // small tolerance: formats may store with different precision
+                    assertThat(leftDoubleValues.valueAt(j), closeTo(current, 0.0001));
+                }
+                if (j > 0) {
+                    // values within a doc must be sorted in strictly increasing order
+                    assertThat(Double.compare(previous,current), lessThan(0));
+                }
+                previous = current;
+            }
+        }
+    }
+
+    // Asserts that two numeric fielddata implementations expose identical longs
+    // per document: same count, exactly equal values, strictly increasing order.
+    private static void duelFieldDataLong(Random random, LeafReaderContext context, IndexNumericFieldData left, IndexNumericFieldData right) throws Exception {
+        // randomly exercise both the cached and the direct loading paths
+        AtomicNumericFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+        AtomicNumericFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+        int numDocs = context.reader().maxDoc();
+        SortedNumericDocValues leftLongValues = leftData.getLongValues();
+        SortedNumericDocValues rightLongValues = rightData.getLongValues();
+        for (int i = 0; i < numDocs; i++) {
+            leftLongValues.setDocument(i);
+            rightLongValues.setDocument(i);
+            int numValues = leftLongValues.count();
+            long previous = 0;
+            assertThat(numValues, equalTo(rightLongValues.count()));
+            for (int j = 0; j < numValues; j++) {
+                long current;
+                // compare and capture the right-hand value in one step
+                assertThat(leftLongValues.valueAt(j), equalTo(current = rightLongValues.valueAt(j)));
+                if (j > 0) {
+                    // values within a doc must be sorted in strictly increasing order
+                    assertThat(previous, lessThan(current));
+                }
+                previous = current;
+            }
+        }
+    }
+
+    // Asserts that two geo-point fielddata implementations agree per document:
+    // same value count and, up to 'precision', the same set of points
+    // (order-insensitive, symmetric containment check).
+    private static void duelFieldDataGeoPoint(Random random, LeafReaderContext context, IndexGeoPointFieldData left, IndexGeoPointFieldData right, Distance precision) throws Exception {
+        // randomly exercise both the cached and the direct loading paths
+        AtomicGeoPointFieldData leftData = random.nextBoolean() ? left.load(context) : left.loadDirect(context);
+        AtomicGeoPointFieldData rightData = random.nextBoolean() ? right.load(context) : right.loadDirect(context);
+
+        int numDocs = context.reader().maxDoc();
+        MultiGeoPointValues leftValues = leftData.getGeoPointValues();
+        MultiGeoPointValues rightValues = rightData.getGeoPointValues();
+        for (int i = 0; i < numDocs; ++i) {
+            leftValues.setDocument(i);
+            final int numValues = leftValues.count();
+            rightValues.setDocument(i); // fixed: stray empty statement (";;") removed
+            assertEquals(numValues, rightValues.count());
+            // copy the points because valueAt() may return reused instances
+            List<GeoPoint> leftPoints = Lists.newArrayList();
+            List<GeoPoint> rightPoints = Lists.newArrayList();
+            for (int j = 0; j < numValues; ++j) {
+                GeoPoint l = leftValues.valueAt(j);
+                leftPoints.add(new GeoPoint(l.getLat(), l.getLon()));
+                GeoPoint r = rightValues.valueAt(j);
+                rightPoints.add(new GeoPoint(r.getLat(), r.getLon()));
+            }
+            for (GeoPoint l : leftPoints) {
+                assertTrue("Couldn't find " + l + " among " + rightPoints, contains(l, rightPoints, precision));
+            }
+            for (GeoPoint r : rightPoints) {
+                assertTrue("Couldn't find " + r + " among " + leftPoints, contains(r, leftPoints, precision));
+            }
+        }
+    }
+
+    // True if any point in 'set' lies within 'precision' of 'point', using
+    // planar distance in meters.
+    private static boolean contains(GeoPoint point, List<GeoPoint> set, Distance precision) {
+        for (GeoPoint candidate : set) {
+            final double meters = GeoDistance.PLANE.calculate(
+                    point.getLat(), point.getLon(), candidate.getLat(), candidate.getLon(), DistanceUnit.METERS);
+            final boolean withinPrecision = new Distance(meters, DistanceUnit.METERS).compareTo(precision) <= 0;
+            if (withinPrecision) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Normalizes values before comparison in the byte duels; this base class is
+    // an identity transform over the UTF-8 string form.
+    private static class Preprocessor {
+
+        public String toString(BytesRef ref) {
+            return ref.utf8ToString();
+        }
+
+        public int compare(BytesRef a, BytesRef b) {
+            return a.compareTo(b);
+        }
+    }
+
+    // Interprets the raw bytes as decimal numbers so that differently formatted
+    // encodings of the same double (e.g. "2" vs "2.0") compare equal.
+    private static class ToDoublePreprocessor extends Preprocessor {
+
+        @Override
+        public String toString(BytesRef ref) {
+            assertTrue(ref.length > 0);
+            return Double.toString(Double.parseDouble(super.toString(ref)));
+        }
+
+        @Override
+        public int compare(BytesRef a, BytesRef b) {
+            final Double first = Double.parseDouble(super.toString(a));
+            final double second = Double.parseDouble(super.toString(b));
+            return first.compareTo(second);
+        }
+    }
+
+
+    // Logical value type of a field under test; the lowercased enum name doubles
+    // as the document field name passed to getForField(...).
+    private static enum Type {
+        Float, Double, Integer, Long, Bytes, GeoPoint;
+    }
+
+}
+
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
new file mode 100644
index 0000000000..a64ab8d98b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FSTPackedBytesStringFieldDataTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+
+/**
+ * Runs the shared string fielddata test suite against the "fst" format,
+ * randomly forcing multi-valued ordinals to cover both ordinal layouts.
+ */
+public class FSTPackedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        return new FieldDataType("string", Settings.builder().put("format", "fst").put(OrdinalsBuilder.FORCE_MULTI_ORDINALS, randomBoolean()));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
new file mode 100644
index 0000000000..eeab72920e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataFilterIntegrationTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+/**
+ * Verifies that a fielddata regex filter restricts the terms visible to
+ * aggregations while an unfiltered sibling field still sees all terms.
+ */
+public class FieldDataFilterIntegrationTests extends ElasticsearchIntegrationTest {
+
+    @Override
+    protected int numberOfReplicas() {
+        // a single copy of each shard is enough for this test
+        return 0;
+    }
+
+    @Test
+    public void testRegexpFilter() throws IOException {
+        CreateIndexRequestBuilder builder = prepareCreate("test");
+        // "name" carries a fielddata regex filter that only keeps terms
+        // starting with "bac"; "not_filtered" is a plain string field
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("name")
+                .field("type", "string")
+                .startObject("fielddata")
+                .startObject("filter")
+                .startObject("regex")
+                .field("pattern", "^bac.*")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject()
+                .startObject("not_filtered")
+                .field("type", "string")
+                .endObject()
+                .endObject()
+                .endObject().endObject();
+        assertAcked(builder.addMapping("type", mapping));
+        ensureGreen();
+        int numDocs = scaledRandomIntBetween(5, 50);
+        for (int i = 0; i < numDocs; i++) {
+            // BUGFIX: use distinct ids ("" + i); the original used "" + 0, so
+            // every iteration overwrote the same single document
+            client().prepareIndex("test", "type", "" + i).setSource("name", "bacon bastards", "not_filtered", "bacon bastards").get();
+        }
+        refresh();
+        SearchResponse searchResponse = client().prepareSearch()
+                .setSize(0)
+                .setQuery(matchAllQuery())
+                .addAggregation(terms("name").field("name"))
+                .addAggregation(terms("not_filtered").field("not_filtered")).get();
+        Aggregations aggs = searchResponse.getAggregations();
+        // filtered field: only the term matching ^bac.* ("bacon") remains
+        Terms nameAgg = aggs.get("name");
+        assertThat(nameAgg.getBuckets().size(), Matchers.equalTo(1));
+        assertThat(nameAgg.getBuckets().iterator().next().getKeyAsString(), Matchers.equalTo("bacon"));
+
+        // unfiltered field: both analyzed terms remain, in either order
+        Terms notFilteredAgg = aggs.get("not_filtered");
+        assertThat(notFilteredAgg.getBuckets().size(), Matchers.equalTo(2));
+        assertThat(notFilteredAgg.getBuckets().get(0).getKeyAsString(), Matchers.isOneOf("bacon", "bastards"));
+        assertThat(notFilteredAgg.getBuckets().get(1).getKeyAsString(), Matchers.isOneOf("bacon", "bastards"));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingTests.java
new file mode 100644
index 0000000000..b3b820be0f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataLoadingTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Integration tests for the fielddata "loading" mapping option: eagerly loaded
+ * fielddata (and eagerly built global ordinals) must be reflected in the
+ * cluster-stats fielddata memory after a refresh, without any query touching
+ * the field.
+ */
+public class FieldDataLoadingTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testEagerFieldDataLoading() throws Exception {
+        assertAcked(prepareCreate("test")
+                .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+                        .startObject("name")
+                        .field("type", "string")
+                        .startObject("fielddata").field("loading", "eager").endObject()
+                        .endObject()
+                        .endObject().endObject().endObject()));
+        ensureGreen();
+
+        client().prepareIndex("test", "type", "1").setSource("name", "name").get();
+        client().admin().indices().prepareRefresh("test").get();
+
+        // the refresh alone must have loaded fielddata
+        // (use 0L instead of the easily misread lowercase literal 0l)
+        ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
+    }
+
+    @Test
+    public void testEagerGlobalOrdinalsFieldDataLoading() throws Exception {
+        assertAcked(prepareCreate("test")
+                .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+                        .startObject("name")
+                        .field("type", "string")
+                        .startObject("fielddata").field("loading", "eager_global_ordinals").endObject()
+                        .endObject()
+                        .endObject().endObject().endObject()));
+        ensureGreen();
+
+        client().prepareIndex("test", "type", "1").setSource("name", "name").get();
+        client().admin().indices().prepareRefresh("test").get();
+
+        ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+        assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(0L));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataTests.java
new file mode 100644
index 0000000000..46d3ce955e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FieldDataTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.util.NumericUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+public class FieldDataTests extends ElasticsearchTestCase {
+
+    // Round-trips a random double through the sortable-long-bits wrappers and
+    // checks that (a) the singleton optimization survives wrapping and
+    // (b) wrapping then unwrapping yields the very same underlying instance.
+    public void testSortableLongBitsToDoubles() {
+        final double value = randomDouble();
+        final long valueBits = NumericUtils.doubleToSortableLong(value);
+
+        // single-valued source: every doc yields the same encoded double
+        NumericDocValues values = new NumericDocValues() {
+            @Override
+            public long get(int docID) {
+                return valueBits;
+            }
+        };
+
+        SortedNumericDoubleValues asMultiDoubles = FieldData.sortableLongBitsToDoubles(DocValues.singleton(values, null));
+        NumericDoubleValues asDoubles = FieldData.unwrapSingleton(asMultiDoubles);
+        assertNotNull(asDoubles); // singleton-ness must survive the wrapping
+        assertEquals(value, asDoubles.get(0), 0);
+
+        // converting back must unwrap to the original instance, not a copy
+        NumericDocValues backToLongs = DocValues.unwrapSingleton(FieldData.toSortableLongBits(asMultiDoubles));
+        assertSame(values, backToLongs);
+
+        // multi-valued source: same round-trip guarantees
+        SortedNumericDocValues multiValues = new SortedNumericDocValues() {
+
+            @Override
+            public long valueAt(int index) {
+                return valueBits;
+            }
+
+            @Override
+            public void setDocument(int doc) {
+            }
+
+            @Override
+            public int count() {
+                return 1;
+            }
+        };
+
+        asMultiDoubles = FieldData.sortableLongBitsToDoubles(multiValues);
+        assertEquals(value, asMultiDoubles.valueAt(0), 0);
+        assertSame(multiValues, FieldData.toSortableLongBits(asMultiDoubles));
+    }
+
+    // Mirror of the test above, going from doubles to sortable long bits.
+    public void testDoublesToSortableLongBits() {
+        final double value = randomDouble();
+        final long valueBits = NumericUtils.doubleToSortableLong(value);
+
+        // single-valued source
+        NumericDoubleValues values = new NumericDoubleValues() {
+            @Override
+            public double get(int docID) {
+                return value;
+            }
+        };
+
+        SortedNumericDocValues asMultiLongs = FieldData.toSortableLongBits(FieldData.singleton(values, null));
+        NumericDocValues asLongs = DocValues.unwrapSingleton(asMultiLongs);
+        assertNotNull(asLongs); // singleton-ness must survive the wrapping
+        assertEquals(valueBits, asLongs.get(0));
+
+        // multi-valued source
+        SortedNumericDoubleValues multiValues = new SortedNumericDoubleValues() {
+            @Override
+            public double valueAt(int index) {
+                return value;
+            }
+
+            @Override
+            public void setDocument(int doc) {
+            }
+
+            @Override
+            public int count() {
+                return 1;
+            }
+        };
+
+        asMultiLongs = FieldData.toSortableLongBits(multiValues);
+        assertEquals(valueBits, asMultiLongs.valueAt(0));
+        assertSame(multiValues, FieldData.sortableLongBitsToDoubles(asMultiLongs));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
new file mode 100644
index 0000000000..3c13999eb3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FilterFieldDataTest.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomAccessOrds;
+import org.elasticsearch.common.settings.Settings;
+import org.junit.Test;
+
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests fielddata term filtering by document frequency and by regex over a
+ * fixed corpus of 1000 docs where term "100" occurs in 10 docs, "10" in 100
+ * docs and "5" in 200 docs.
+ */
+public class FilterFieldDataTest extends AbstractFieldDataTests {
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        // unused: every test below builds its own FieldDataType with explicit filter settings
+        return null;
+    }
+
+    @Test
+    public void testFilterByFrequency() throws Exception {
+        Random random = getRandom();
+        for (int i = 0; i < 1000; i++) {
+            Document d = new Document();
+            d.add(new StringField("id", "" + i, Field.Store.NO));
+            if (i % 100 == 0) {
+                d.add(new StringField("high_freq", "100", Field.Store.NO));
+                d.add(new StringField("low_freq", "100", Field.Store.NO));
+                d.add(new StringField("med_freq", "100", Field.Store.NO));
+            }
+            if (i % 10 == 0) {
+                d.add(new StringField("high_freq", "10", Field.Store.NO));
+                d.add(new StringField("med_freq", "10", Field.Store.NO));
+            }
+            if (i % 5 == 0) {
+                d.add(new StringField("high_freq", "5", Field.Store.NO));
+            }
+            writer.addDocument(d);
+        }
+        // frequency filtering is per segment; force a single segment for determinism
+        writer.forceMerge(1, true);
+        LeafReaderContext context = refreshReader();
+        String[] formats = new String[] { "fst", "paged_bytes"};
+
+        for (String format : formats) {
+            {
+                // frequency window [0, 100 docs | 50%]: drops "5" (200 docs)
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", 0.0d).put("filter.frequency.max", random.nextBoolean() ? 100 : 0.5d));
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "high_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(2L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("10"));
+                assertThat(bytesValues.lookupOrd(1).utf8ToString(), equalTo("100"));
+            }
+            {
+                // min frequency above the counts of "10" and "100": only "5" survives
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.frequency.min_segment_size", 100).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d).put("filter.frequency.max", 201));
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "high_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(1L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("5"));
+            }
+
+            {
+                // min_segment_size above the number of docs with a value disables
+                // the frequency filter, so both terms of med_freq remain.
+                // NOTE: a second byte-identical copy of this scenario was removed
+                // here — the duplication added no coverage.
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.frequency.min_segment_size", 101).put("filter.frequency.min", random.nextBoolean() ? 101 : 101d/200.0d));
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "med_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(2L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("10"));
+                assertThat(bytesValues.lookupOrd(1).utf8ToString(), equalTo("100"));
+            }
+
+            {
+                // regex filter (allows 10 & 100) combined with a frequency window
+                // that finally keeps only "100"
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.regex.pattern", "\\d{2,3}") // allows 10 & 100
+                        .put("filter.frequency.min_segment_size", 0)
+                        .put("filter.frequency.min", random.nextBoolean() ? 2 : 1d/200.0d) // 100, 10, 5
+                        .put("filter.frequency.max", random.nextBoolean() ? 99 : 99d/200.0d)); // 100
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "high_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(1L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("100"));
+            }
+        }
+
+    }
+
+    @Test
+    public void testFilterByRegExp() throws Exception {
+
+        int hundred = 0;
+        int ten = 0;
+        int five = 0;
+        for (int i = 0; i < 1000; i++) {
+            Document d = new Document();
+            d.add(new StringField("id", "" + i, Field.Store.NO));
+            if (i % 100 == 0) {
+                hundred++;
+                d.add(new StringField("high_freq", "100", Field.Store.NO));
+            }
+            if (i % 10 == 0) {
+                ten++;
+                d.add(new StringField("high_freq", "10", Field.Store.NO));
+            }
+            if (i % 5 == 0) {
+                five++;
+                d.add(new StringField("high_freq", "5", Field.Store.NO));
+
+            }
+            writer.addDocument(d);
+        }
+        logger.debug(hundred + " " + ten + " " + five);
+        writer.forceMerge(1, true);
+        LeafReaderContext context = refreshReader();
+        String[] formats = new String[] { "fst", "paged_bytes"};
+        for (String format : formats) {
+            {
+                // single-digit pattern: only "5" matches
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.regex.pattern", "\\d"));
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "high_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(1L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("5"));
+            }
+            {
+                // one-or-two digits: "5" and "10" match, "100" is filtered out
+                ifdService.clear();
+                FieldDataType fieldDataType = new FieldDataType("string", Settings.builder().put("format", format)
+                        .put("filter.regex.pattern", "\\d{1,2}"));
+                IndexOrdinalsFieldData fieldData = getForField(fieldDataType, "high_freq");
+                AtomicOrdinalsFieldData loadDirect = fieldData.loadDirect(context);
+                RandomAccessOrds bytesValues = loadDirect.getOrdinalsValues();
+                assertThat(2L, equalTo(bytesValues.getValueCount()));
+                assertThat(bytesValues.lookupOrd(0).utf8ToString(), equalTo("10"));
+                assertThat(bytesValues.lookupOrd(1).utf8ToString(), equalTo("5"));
+            }
+        }
+
+    }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
new file mode 100644
index 0000000000..b81a8cdf17
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/FloatFieldDataTests.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.Term;
+
+/** Field-data tests for the "float" field data type, driven by the shared AbstractNumericFieldDataTests harness.
+ */
+public class FloatFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float", getFieldDataSettings());
+ }
+
+ @Override
+ protected String one() {
+ return "1.0";
+ }
+
+ @Override
+ protected String two() {
+ return "2.0";
+ }
+
+ @Override
+ protected String three() {
+ return "3.0";
+ }
+
+ @Override
+ protected String four() {
+ return "4.0";
+ }
+
+ @Override
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ // doc "2" deliberately has no "value" field: exercises the missing-value path
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new FloatField("value", 1.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2.0f, Field.Store.NO));
+ d.add(new FloatField("value", 4.0f, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ // doc "2" deliberately has no "value" field: exercises the missing-value path
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3.0f, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new FloatField("value", 2, Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new FloatField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new FloatField("value", 4, Field.Store.NO));
+ d.add(new FloatField("value", 5, Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new FloatField("value", 6, Field.Store.NO));
+ d.add(new FloatField("value", 7, Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new FloatField("value", 8, Field.Store.NO));
+ d.add(new FloatField("value", 9, Field.Store.NO));
+ d.add(new FloatField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new FloatField("value", -8, Field.Store.NO));
+ d.add(new FloatField("value", -9, Field.Store.NO));
+ d.add(new FloatField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
new file mode 100644
index 0000000000..38a557c4ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.analysis.core.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.plain.*;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MapperBuilders;
+import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.instanceOf;
+
+public class IndexFieldDataServiceTests extends ElasticsearchSingleNodeTest {
+ // fielddata settings selecting the doc_values format; used to verify later settings override them
+ private static Settings DOC_VALUES_SETTINGS = Settings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+ // verifies the default IndexFieldData implementation chosen for each mapper type, with and without doc values
+ public void testGetForFieldDefaults() {
+ final IndexService indexService = createIndex("test");
+ final IndexFieldDataService ifdService = indexService.fieldData();
+ for (boolean docValues : Arrays.asList(true, false)) {
+ final BuilderContext ctx = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+ final StringFieldMapper stringMapper = new StringFieldMapper.Builder("string").tokenized(false).docValues(docValues).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedSetDVOrdinalsIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PagedBytesIndexFieldData);
+ }
+
+ for (FieldMapper mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").docValues(docValues).build(ctx),
+ new ShortFieldMapper.Builder("int").docValues(docValues).build(ctx),
+ new IntegerFieldMapper.Builder("int").docValues(docValues).build(ctx),
+ new LongFieldMapper.Builder("long").docValues(docValues).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedNumericDVIndexFieldData);
+ } else {
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+ }
+
+ final FloatFieldMapper floatMapper = new FloatFieldMapper.Builder("float").docValues(docValues).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedNumericDVIndexFieldData);
+ } else {
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+ }
+
+ final DoubleFieldMapper doubleMapper = new DoubleFieldMapper.Builder("double").docValues(docValues).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ if (docValues) {
+ assertTrue(fd instanceof SortedNumericDVIndexFieldData);
+ } else {
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+ }
+ }
+ // verifies that an explicitly-set fielddata "format" wins over earlier doc_values settings (last fieldDataSettings call applies)
+ @SuppressWarnings("unchecked")
+ public void testByPassDocValues() {
+ final IndexService indexService = createIndex("test");
+ final IndexFieldDataService ifdService = indexService.fieldData();
+ final BuilderContext ctx = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+ final StringFieldMapper stringMapper = MapperBuilders.stringField("string").tokenized(false).fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(Settings.builder().put("format", "fst").build()).build(ctx);
+ ifdService.clear();
+ IndexFieldData<?> fd = ifdService.getForField(stringMapper);
+ assertTrue(fd instanceof FSTBytesIndexFieldData);
+
+ final Settings fdSettings = Settings.builder().put("format", "array").build();
+ for (FieldMapper mapper : Arrays.asList(
+ new ByteFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new ShortFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new IntegerFieldMapper.Builder("int").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx),
+ new LongFieldMapper.Builder("long").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx)
+ )) {
+ ifdService.clear();
+ fd = ifdService.getForField(mapper);
+ assertTrue(fd instanceof PackedArrayIndexFieldData);
+ }
+
+ final FloatFieldMapper floatMapper = MapperBuilders.floatField("float").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(floatMapper);
+ assertTrue(fd instanceof FloatArrayIndexFieldData);
+
+ final DoubleFieldMapper doubleMapper = MapperBuilders.doubleField("double").fieldDataSettings(DOC_VALUES_SETTINGS).fieldDataSettings(fdSettings).build(ctx);
+ ifdService.clear();
+ fd = ifdService.getForField(doubleMapper);
+ assertTrue(fd instanceof DoubleArrayIndexFieldData);
+ }
+ // verifies that changing the fielddata format applies to newly-loaded segments while already-loaded segments keep the old format
+ public void testChangeFieldDataFormat() throws Exception {
+ final IndexService indexService = createIndex("test");
+ final IndexFieldDataService ifdService = indexService.fieldData();
+ final BuilderContext ctx = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+ final StringFieldMapper mapper1 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx);
+ final IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
+ Document doc = new Document();
+ doc.add(new StringField("s", "thisisastring", Store.NO));
+ writer.addDocument(doc);
+ final IndexReader reader1 = DirectoryReader.open(writer, true);
+ IndexFieldData<?> ifd = ifdService.getForField(mapper1);
+ assertThat(ifd, instanceOf(PagedBytesIndexFieldData.class));
+ Set<LeafReader> oldSegments = Collections.newSetFromMap(new IdentityHashMap<LeafReader, Boolean>());
+ for (LeafReaderContext arc : reader1.leaves()) {
+ oldSegments.add(arc.reader());
+ AtomicFieldData afd = ifd.load(arc);
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ }
+ // add a second segment so reader2 mixes an already-loaded segment with a fresh one
+ writer.addDocument(doc);
+ final IndexReader reader2 = DirectoryReader.open(writer, true);
+ final StringFieldMapper mapper2 = MapperBuilders.stringField("s").tokenized(false).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "fst").build()).build(ctx);
+ ifdService.onMappingUpdate();
+ ifd = ifdService.getForField(mapper2);
+ assertThat(ifd, instanceOf(FSTBytesIndexFieldData.class));
+ for (LeafReaderContext arc : reader2.leaves()) {
+ AtomicFieldData afd = ifd.load(arc);
+ if (oldSegments.contains(arc.reader())) {
+ assertThat(afd, instanceOf(PagedBytesAtomicFieldData.class));
+ } else {
+ assertThat(afd, instanceOf(FSTBytesAtomicFieldData.class));
+ }
+ }
+ reader1.close();
+ reader2.close();
+ writer.close();
+ writer.getDirectory().close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
new file mode 100644
index 0000000000..09a24a4283
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/LongFieldDataTests.java
@@ -0,0 +1,431 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.cursors.LongCursor;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.Term;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+
+/**
+ * Field-data tests covering all integer types (byte, short, int, long); the field type is pinned to "long" so no narrower type is chosen.
+ */
+public class LongFieldDataTests extends AbstractNumericFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ // we don't want to optimize the type so it will always be a long...
+ return new FieldDataType("long", getFieldDataSettings());
+ }
+
+ @Override
+ protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ writer.commit();
+
+ writer.deleteDocuments(new Term("_id", "1"));
+ }
+
+ @Test
+ public void testOptimizeTypeLong() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", Integer.MAX_VALUE + 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", Integer.MIN_VALUE - 1l, Field.Store.NO));
+ writer.addDocument(d);
+
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+ assertThat(getFirst(fieldData.getLongValues(), 0), equalTo((long) Integer.MAX_VALUE + 1l));
+ assertThat(getFirst(fieldData.getLongValues(), 1), equalTo((long) Integer.MIN_VALUE - 1l));
+ }
+ // asserts the doc holds exactly one value and returns it
+ private static long getFirst(SortedNumericDocValues values, int docId) {
+ values.setDocument(docId);
+ final int numValues = values.count();
+ assertThat(numValues, is(1));
+ return values.valueAt(0);
+ }
+
+ private static double getFirst(SortedNumericDoubleValues values, int docId) {
+ values.setDocument(docId);
+ final int numValues = values.count();
+ assertThat(numValues, is(1));
+ return values.valueAt(0);
+ }
+
+ @Test
+ public void testDateScripts() throws Exception {
+ fillSingleValueAllSet();
+ IndexNumericFieldData indexFieldData = getForField("value");
+ AtomicNumericFieldData fieldData = indexFieldData.load(refreshReader());
+
+ ScriptDocValues.Longs scriptValues = (ScriptDocValues.Longs) fieldData.getScriptValues();
+ scriptValues.setNextDocId(0);
+ assertThat(scriptValues.getValue(), equalTo(2l));
+ assertThat(scriptValues.getDate().getMillis(), equalTo(2l));
+ assertThat(scriptValues.getDate().getZone(), equalTo(DateTimeZone.UTC));
+ }
+
+ @Override
+ protected void fillSingleValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillSingleValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ // doc "2" deliberately has no "value" field: exercises the missing-value path
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueAllSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ d.add(new LongField("value", 1, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillMultiValueWithMissing() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ // doc "2" deliberately has no "value" field: exercises the missing-value path
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ }
+
+ @Override
+ protected void fillExtendedMvSet() throws Exception {
+ Document d = new Document();
+ d.add(new StringField("_id", "1", Field.Store.NO));
+ d.add(new LongField("value", 2, Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "2", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "3", Field.Store.NO));
+ d.add(new LongField("value", 3, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "4", Field.Store.NO));
+ d.add(new LongField("value", 4, Field.Store.NO));
+ d.add(new LongField("value", 5, Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "5", Field.Store.NO));
+ d.add(new LongField("value", 6, Field.Store.NO));
+ d.add(new LongField("value", 7, Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "6", Field.Store.NO));
+ writer.addDocument(d);
+
+ d = new Document();
+ d.add(new StringField("_id", "7", Field.Store.NO));
+ d.add(new LongField("value", 8, Field.Store.NO));
+ d.add(new LongField("value", 9, Field.Store.NO));
+ d.add(new LongField("value", 10, Field.Store.NO));
+ writer.addDocument(d);
+ writer.commit();
+
+ d = new Document();
+ d.add(new StringField("_id", "8", Field.Store.NO));
+ d.add(new LongField("value", -8, Field.Store.NO));
+ d.add(new LongField("value", -9, Field.Store.NO));
+ d.add(new LongField("value", -10, Field.Store.NO));
+ writer.addDocument(d);
+ }
+ // seconds in a non-leap year; used below to synthesize plausible date-like long values
+ private static final int SECONDS_PER_YEAR = 60 * 60 * 24 * 365;
+
+ // TODO: use random() when migrating to Junit
+ public static enum Data {
+ SINGLE_VALUED_DENSE_ENUM {
+ @Override
+ public int numValues(Random r) {
+ return 1;
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ return 1 + r.nextInt(16);
+ }
+ },
+ SINGLE_VALUED_DENSE_DATE {
+ @Override
+ public int numValues(Random r) {
+ return 1;
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_DATE {
+ @Override
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ // somewhere in-between 2010 and 2012
+ return 1000L * (40L * SECONDS_PER_YEAR + r.nextInt(2 * SECONDS_PER_YEAR));
+ }
+ },
+ MULTI_VALUED_ENUM {
+ @Override
+ public int numValues(Random r) {
+ return r.nextInt(3);
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ return 3 + r.nextInt(8);
+ }
+ },
+ SINGLE_VALUED_SPARSE_RANDOM {
+ @Override
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.01 ? 1 : 0;
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_SPARSE_RANDOM {
+ @Override
+ public int numValues(Random r) {
+ return r.nextFloat() < 0.01f ? 1 + r.nextInt(5) : 0;
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ },
+ MULTI_VALUED_DENSE_RANDOM {
+ @Override
+ public int numValues(Random r) {
+ return 1 + r.nextInt(3);
+ }
+
+ @Override
+ public long nextValue(Random r) {
+ return r.nextLong();
+ }
+ };
+
+ public abstract int numValues(Random r);
+
+ public abstract long nextValue(Random r);
+ }
+ // indexes the given per-doc value sets and verifies the long and double views of the loaded field data agree with them
+ private void test(List<LongHashSet> values) throws Exception {
+ StringField id = new StringField("_id", "", Field.Store.NO);
+
+ for (int i = 0; i < values.size(); ++i) {
+ Document doc = new Document();
+ id.setStringValue("" + i);
+ doc.add(id);
+ final LongHashSet v = values.get(i);
+ for (LongCursor c : v) {
+ LongField value = new LongField("value", c.value, Field.Store.NO);
+ doc.add(value);
+ }
+ writer.addDocument(doc);
+ }
+ writer.forceMerge(1, true);
+
+ final IndexNumericFieldData indexFieldData = getForField("value");
+ final AtomicNumericFieldData atomicFieldData = indexFieldData.load(refreshReader());
+ final SortedNumericDocValues data = atomicFieldData.getLongValues();
+ final SortedNumericDoubleValues doubleData = atomicFieldData.getDoubleValues();
+ final LongHashSet set = new LongHashSet();
+ final LongHashSet doubleSet = new LongHashSet();
+ for (int i = 0; i < values.size(); ++i) {
+ final LongHashSet v = values.get(i);
+
+ data.setDocument(i);
+ assertThat(data.count() > 0, equalTo(!v.isEmpty()));
+ doubleData.setDocument(i);
+ assertThat(doubleData.count() > 0, equalTo(!v.isEmpty()));
+
+ set.clear();
+ data.setDocument(i);
+ int numValues = data.count();
+ for (int j = 0; j < numValues; j++) {
+ set.add(data.valueAt(j));
+ }
+ assertThat(set, equalTo(v));
+
+ final LongHashSet doubleV = new LongHashSet();
+ for (LongCursor c : v) {
+ doubleV.add(Double.doubleToLongBits(c.value));
+ }
+ doubleSet.clear();
+ doubleData.setDocument(i);
+ numValues = doubleData.count();
+ double prev = 0;
+ for (int j = 0; j < numValues; j++) {
+ double current = doubleData.valueAt(j);
+ doubleSet.add(Double.doubleToLongBits(current));
+ if (j > 0) {
+ assertThat(prev, lessThan(current));
+ }
+ prev = current;
+ }
+ assertThat(doubleSet, equalTo(doubleV));
+ }
+ }
+ // generates random per-doc value sets for the given distribution and runs the main verification
+ private void test(Data data) throws Exception {
+ Random r = getRandom();
+ final int numDocs = 1000 + r.nextInt(19000);
+ final List<LongHashSet> values = new ArrayList<>(numDocs);
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = data.numValues(r);
+ final LongHashSet vals = new LongHashSet(numValues);
+ for (int j = 0; j < numValues; ++j) {
+ vals.add(data.nextValue(r));
+ }
+ values.add(vals);
+ }
+ test(values);
+ }
+
+ public void testSingleValuedDenseEnum() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_ENUM);
+ }
+
+ public void testSingleValuedDenseDate() throws Exception {
+ test(Data.SINGLE_VALUED_DENSE_DATE);
+ }
+
+ public void testSingleValuedSparseRandom() throws Exception {
+ test(Data.SINGLE_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDate() throws Exception {
+ test(Data.MULTI_VALUED_DATE);
+ }
+
+ public void testMultiValuedEnum() throws Exception {
+ test(Data.MULTI_VALUED_ENUM);
+ }
+
+ public void testMultiValuedSparseRandom() throws Exception {
+ test(Data.MULTI_VALUED_SPARSE_RANDOM);
+ }
+
+ public void testMultiValuedDenseRandom() throws Exception {
+ test(Data.MULTI_VALUED_DENSE_RANDOM);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
new file mode 100644
index 0000000000..ceeb448586
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.IndexReader;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.mapper.MappedFieldType.Names;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+/** Wraps paged-bytes field data in a delegate that hides the WithOrdinals interface, in order to exercise the ordinal-free code paths;
+ * e.g. BytesRefFieldComparatorSource branches on whether the field data implements WithOrdinals. */
+public class NoOrdinalsStringFieldDataTests extends PagedBytesStringFieldDataTests {
+ // forwarding wrapper exposing the delegate only through the plain IndexFieldData interface
+ public static IndexFieldData<AtomicFieldData> hideOrdinals(final IndexFieldData<?> in) {
+ return new IndexFieldData<AtomicFieldData>() {
+
+ @Override
+ public Index index() {
+ return in.index();
+ }
+
+ @Override
+ public Names getFieldNames() {
+ return in.getFieldNames();
+ }
+
+ @Override
+ public FieldDataType getFieldDataType() {
+ return in.getFieldDataType();
+ }
+
+ @Override
+ public AtomicFieldData load(LeafReaderContext context) {
+ return in.load(context);
+ }
+
+ @Override
+ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception {
+ return in.loadDirect(context);
+ }
+
+ @Override
+ public XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) {
+ return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);
+ }
+
+ @Override
+ public void clear() {
+ in.clear();
+ }
+
+ @Override
+ public void clear(IndexReader reader) {
+ in.clear(reader);
+ }
+
+ };
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public IndexFieldData<AtomicFieldData> getForField(String fieldName) {
+ return hideOrdinals(super.getForField(fieldName));
+ }
+
+ @Test
+ @Override
+ public void testTermsEnum() throws Exception {
+ // We can't test this, since the returned IFD instance doesn't implement IndexFieldData.WithOrdinals
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
new file mode 100644
index 0000000000..1b8909ea63
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/PagedBytesStringFieldDataTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+
+/**
+ * Runs the shared string field data tests against the "paged_bytes" format,
+ * randomly forcing multi-valued ordinals to cover both ordinal encodings.
+ */
+public class PagedBytesStringFieldDataTests extends AbstractStringFieldDataTests {
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        return new FieldDataType("string", Settings.builder().put("format", "paged_bytes").put(OrdinalsBuilder.FORCE_MULTI_ORDINALS, randomBoolean()));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java
new file mode 100644
index 0000000000..2d6beac58b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests field data for the {@code _parent} field: the join values that link
+ * parent, child and grand-child documents together.
+ */
+public class ParentChildFieldDataTests extends AbstractFieldDataTests {
+
+    private final String parentType = "parent";
+    private final String childType = "child";
+    private final String grandChildType = "grand-child";
+
+    /**
+     * Indexes a small corpus (doc ids in insertion order):
+     * 0: parent#1, 1: child#2 -> parent#1, 2: child#3 -> parent#1, 3: parent#2,
+     * 4: child#4 -> parent#2, 5: child#5 -> parent#1, 6: grand-child#6 -> child#2,
+     * 7: other-type#1 (no join fields at all).
+     */
+    @Before
+    public void before() throws Exception {
+        mapperService.merge(
+                childType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType).string()), true
+        );
+        mapperService.merge(
+                grandChildType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(grandChildType, "_parent", "type=" + childType).string()), true
+        );
+
+        Document d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
+        d.add(createJoinField(parentType, "1"));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "2"), Field.Store.NO));
+        d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
+        d.add(createJoinField(parentType, "1"));
+        d.add(createJoinField(childType, "2"));
+        writer.addDocument(d);
+        writer.commit();
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "3"), Field.Store.NO));
+        d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
+        d.add(createJoinField(parentType, "1"));
+        d.add(createJoinField(childType, "3"));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(parentType, "2"), Field.Store.NO));
+        d.add(createJoinField(parentType, "2"));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "4"), Field.Store.NO));
+        d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "2"), Field.Store.NO));
+        d.add(createJoinField(parentType, "2"));
+        d.add(createJoinField(childType, "4"));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(childType, "5"), Field.Store.NO));
+        d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(parentType, "1"), Field.Store.NO));
+        d.add(createJoinField(parentType, "1"));
+        d.add(createJoinField(childType, "5"));
+        writer.addDocument(d);
+        writer.commit();
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid(grandChildType, "6"), Field.Store.NO));
+        d.add(new StringField(ParentFieldMapper.NAME, Uid.createUid(childType, "2"), Field.Store.NO));
+        d.add(createJoinField(childType, "2"));
+        writer.addDocument(d);
+
+        d = new Document();
+        d.add(new StringField(UidFieldMapper.NAME, Uid.createUid("other-type", "1"), Field.Store.NO));
+        writer.addDocument(d);
+    }
+
+    // A join value is a sorted doc-values field keyed by type, holding the joined doc's id.
+    private SortedDocValuesField createJoinField(String parentType, String id) {
+        return new SortedDocValuesField(ParentFieldMapper.joinField(parentType), new BytesRef(id));
+    }
+
+    // Each doc should expose the ids of every join value it participates in,
+    // matching the corpus laid out in before().
+    @Test
+    public void testGetBytesValues() throws Exception {
+        IndexFieldData indexFieldData = getForField(childType);
+        AtomicFieldData fieldData = indexFieldData.load(refreshReader());
+
+        SortedBinaryDocValues bytesValues = fieldData.getBytesValues();
+        bytesValues.setDocument(0);
+        assertThat(bytesValues.count(), equalTo(1));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("1"));
+
+        bytesValues.setDocument(1);
+        assertThat(bytesValues.count(), equalTo(2));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("1"));
+        assertThat(bytesValues.valueAt(1).utf8ToString(), equalTo("2"));
+
+        bytesValues.setDocument(2);
+        assertThat(bytesValues.count(), equalTo(2));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("1"));
+        assertThat(bytesValues.valueAt(1).utf8ToString(), equalTo("3"));
+
+        bytesValues.setDocument(3);
+        assertThat(bytesValues.count(), equalTo(1));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("2"));
+
+        bytesValues.setDocument(4);
+        assertThat(bytesValues.count(), equalTo(2));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("2"));
+        assertThat(bytesValues.valueAt(1).utf8ToString(), equalTo("4"));
+
+        bytesValues.setDocument(5);
+        assertThat(bytesValues.count(), equalTo(2));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("1"));
+        assertThat(bytesValues.valueAt(1).utf8ToString(), equalTo("5"));
+
+        bytesValues.setDocument(6);
+        assertThat(bytesValues.count(), equalTo(1));
+        assertThat(bytesValues.valueAt(0).utf8ToString(), equalTo("2"));
+
+        // doc 7 has no join fields, so no values.
+        bytesValues.setDocument(7);
+        assertThat(bytesValues.count(), equalTo(0));
+    }
+
+    // Sorting by parent id with missing value "_last": the doc without a parent
+    // (doc 7) sorts last in both directions (MAX_TERM ascending, null descending).
+    @Test
+    public void testSorting() throws Exception {
+        IndexFieldData indexFieldData = getForField(childType);
+        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, true));
+        IndexFieldData.XFieldComparatorSource comparator = indexFieldData.comparatorSource("_last", MultiValueMode.MIN, null);
+
+        TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.NAME, comparator, false)));
+        assertThat(topDocs.totalHits, equalTo(8));
+        assertThat(topDocs.scoreDocs.length, equalTo(8));
+        assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[1].doc, equalTo(1));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[2].doc, equalTo(2));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[3].doc, equalTo(5));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[5].doc, equalTo(4));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[6].doc, equalTo(6));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0]), equalTo(XFieldComparatorSource.MAX_TERM));
+
+        topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.NAME, comparator, true)));
+        assertThat(topDocs.totalHits, equalTo(8));
+        assertThat(topDocs.scoreDocs.length, equalTo(8));
+        assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[1].doc, equalTo(4));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[2].doc, equalTo(6));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("2"));
+        assertThat(topDocs.scoreDocs[3].doc, equalTo(0));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[4].doc, equalTo(1));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[5].doc, equalTo(2));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[5]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[6].doc, equalTo(5));
+        assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[6]).fields[0]).utf8ToString(), equalTo("1"));
+        assertThat(topDocs.scoreDocs[7].doc, equalTo(7));
+        assertThat(((FieldDoc) topDocs.scoreDocs[7]).fields[0], nullValue());
+    }
+
+    // Hammers the global parent/child ordinals from several threads and checks
+    // that per-segment lookups match a snapshot taken before the threads start.
+    // NOTE(review): no @Test annotation here — presumably the runner picks up
+    // public void test* methods; confirm this method actually executes.
+    public void testThreads() throws Exception {
+        final ParentChildIndexFieldData indexFieldData = getForField(childType);
+        final DirectoryReader reader = DirectoryReader.open(writer, true);
+        final IndexParentChildFieldData global = indexFieldData.loadGlobal(reader);
+        final AtomicReference<Exception> error = new AtomicReference<>();
+        final int numThreads = scaledRandomIntBetween(3, 8);
+        final Thread[] threads = new Thread[numThreads];
+        final CountDownLatch latch = new CountDownLatch(1);
+
+        // Snapshot the expected parent ids per segment (keyed by core cache key).
+        final Map<Object, BytesRef[]> expected = new HashMap<>();
+        for (LeafReaderContext context : reader.leaves()) {
+            AtomicParentChildFieldData leafData = global.load(context);
+            SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
+            final BytesRef[] ids = new BytesRef[parentIds.getValueCount()];
+            for (int j = 0; j < parentIds.getValueCount(); ++j) {
+                final BytesRef id = parentIds.lookupOrd(j);
+                if (id != null) {
+                    // deep-copy: lookupOrd may reuse its returned BytesRef.
+                    ids[j] = BytesRef.deepCopyOf(id);
+                }
+            }
+            expected.put(context.reader().getCoreCacheKey(), ids);
+        }
+
+        for (int i = 0; i < numThreads; ++i) {
+            threads[i] = new Thread() {
+                @Override
+                public void run() {
+                    try {
+                        latch.await();
+                        for (int i = 0; i < 100000; ++i) {
+                            for (LeafReaderContext context : reader.leaves()) {
+                                AtomicParentChildFieldData leafData = global.load(context);
+                                SortedDocValues parentIds = leafData.getOrdinalsValues(parentType);
+                                final BytesRef[] expectedIds = expected.get(context.reader().getCoreCacheKey());
+                                for (int j = 0; j < parentIds.getValueCount(); ++j) {
+                                    final BytesRef id = parentIds.lookupOrd(j);
+                                    assertEquals(expectedIds[j], id);
+                                }
+                            }
+                        }
+                    } catch (Exception e) {
+                        // Keep only the first failure; later ones are ignored.
+                        error.compareAndSet(null, e);
+                    }
+                }
+            };
+            threads[i].start();
+        }
+        latch.countDown();
+        for (Thread thread : threads) {
+            thread.join();
+        }
+        if (error.get() != null) {
+            throw error.get();
+        }
+    }
+
+    @Override
+    protected FieldDataType getFieldDataType() {
+        return new FieldDataType("_parent");
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesTests.java
new file mode 100644
index 0000000000..a51af09f76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+/**
+ * Unit tests for the geo-point flavor of {@code ScriptDocValues}.
+ */
+public class ScriptDocValuesTests extends ElasticsearchTestCase {
+
+    /**
+     * Fake MultiGeoPointValues that exposes {@code points} for doc 0 only;
+     * every other doc appears empty, and valueAt fails the test if touched there.
+     */
+    private static MultiGeoPointValues wrap(final GeoPoint... points) {
+        return new MultiGeoPointValues() {
+            int docID = -1;
+
+            @Override
+            public GeoPoint valueAt(int i) {
+                if (docID != 0) {
+                    fail();
+                }
+                return points[i];
+            }
+
+            @Override
+            public void setDocument(int docId) {
+                this.docID = docId;
+            }
+
+            @Override
+            public int count() {
+                if (docID != 0) {
+                    return 0;
+                }
+                return points.length;
+            }
+        };
+    }
+
+    // Random latitude in [-90, 90).
+    private static double randomLat() {
+        return randomDouble() * 180 - 90;
+    }
+
+    // Random longitude in [-180, 180).
+    private static double randomLon() {
+        return randomDouble() * 360 - 180;
+    }
+
+    public void testGeoGetLatLon() {
+        final double lat1 = randomLat();
+        final double lat2 = randomLat();
+        final double lon1 = randomLon();
+        final double lon2 = randomLon();
+        final MultiGeoPointValues values = wrap(new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2));
+        final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values);
+        // doc 1 has no values; doc 0 exposes both points in order.
+        script.setNextDocId(1);
+        assertEquals(true, script.isEmpty());
+        script.setNextDocId(0);
+        assertEquals(false, script.isEmpty());
+        assertEquals(new GeoPoint(lat1, lon1), script.getValue());
+        assertEquals(Arrays.asList(new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2)), script.getValues());
+        assertEquals(lat1, script.getLat(), 0);
+        assertEquals(lon1, script.getLon(), 0);
+        assertTrue(Arrays.equals(new double[] {lat1, lat2}, script.getLats()));
+        assertTrue(Arrays.equals(new double[] {lon1, lon2}, script.getLons()));
+    }
+
+    public void testGeoDistance() {
+        final double lat = randomLat();
+        final double lon = randomLon();
+        final MultiGeoPointValues values = wrap(new GeoPoint(lat, lon));
+        final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values);
+        script.setNextDocId(0);
+
+        final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(wrap());
+        emptyScript.setNextDocId(0);
+
+        final double otherLat = randomLat();
+        final double otherLon = randomLon();
+
+        // arcDistance* must agree with GeoDistance.ARC; the *WithDefault variants
+        // fall back to the supplied default when the doc has no values.
+        assertEquals(GeoDistance.ARC.calculate(lat, lon, otherLat, otherLon, DistanceUnit.KILOMETERS),
+                script.arcDistanceInKm(otherLat, otherLon), 0.01);
+        assertEquals(GeoDistance.ARC.calculate(lat, lon, otherLat, otherLon, DistanceUnit.KILOMETERS),
+                script.arcDistanceInKmWithDefault(otherLat, otherLon, 42), 0.01);
+        assertEquals(42, emptyScript.arcDistanceInKmWithDefault(otherLat, otherLon, 42), 0);
+
+        // distance* (plain) must agree with GeoDistance.PLANE.
+        assertEquals(GeoDistance.PLANE.calculate(lat, lon, otherLat, otherLon, DistanceUnit.KILOMETERS),
+                script.distanceInKm(otherLat, otherLon), 0.01);
+        assertEquals(GeoDistance.PLANE.calculate(lat, lon, otherLat, otherLon, DistanceUnit.KILOMETERS),
+                script.distanceInKmWithDefault(otherLat, otherLon, 42), 0.01);
+        assertEquals(42, emptyScript.distanceInKmWithDefault(otherLat, otherLon, 42), 0);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/SortedSetDVStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/SortedSetDVStringFieldDataTests.java
new file mode 100644
index 0000000000..013a7ec989
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/SortedSetDVStringFieldDataTests.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
+
+public class SortedSetDVStringFieldDataTests extends AbstractStringFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", Settings.builder().put("format", "doc_values").put(OrdinalsBuilder.FORCE_MULTI_ORDINALS, randomBoolean()));
+ }
+
+ @Override
+ protected boolean hasDocValues() {
+ return true;
+ }
+
+ @Override
+ protected long minRamBytesUsed() {
+ return 0;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java
new file mode 100644
index 0000000000..08a960e769
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/fieldcomparator/ReplaceMissingTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.fieldcomparator;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+/**
+ * Tests {@code BytesRefFieldComparatorSource.ReplaceMissing}: a SortedDocValues
+ * view that substitutes a given term for documents that have no value.
+ */
+public class ReplaceMissingTests extends ElasticsearchTestCase {
+
+    public void test() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriterConfig iwc = newIndexWriterConfig(null);
+        iwc.setMergePolicy(newLogMergePolicy());
+        IndexWriter iw = new IndexWriter(dir, iwc);
+
+        // doc 0 = "cat", doc 1 = <no value>, doc 2 = "dog"; forceMerge(1) keeps
+        // everything in a single segment so doc ids stay stable.
+        Document doc = new Document();
+        doc.add(new SortedDocValuesField("field", new BytesRef("cat")));
+        iw.addDocument(doc);
+
+        doc = new Document();
+        iw.addDocument(doc);
+
+        doc = new Document();
+        doc.add(new SortedDocValuesField("field", new BytesRef("dog")));
+        iw.addDocument(doc);
+        iw.forceMerge(1);
+        iw.close();
+
+        DirectoryReader reader = DirectoryReader.open(dir);
+        LeafReader ar = getOnlySegmentReader(reader);
+        SortedDocValues raw = ar.getSortedDocValues("field");
+        assertEquals(2, raw.getValueCount());
+
+        // existing values: no new ord is introduced, the missing doc (doc 1)
+        // simply maps to the existing term's ord.
+        SortedDocValues dv = new BytesRefFieldComparatorSource.ReplaceMissing(raw, new BytesRef("cat"));
+        assertEquals(2, dv.getValueCount());
+        assertEquals("cat", dv.lookupOrd(0).utf8ToString());
+        assertEquals("dog", dv.lookupOrd(1).utf8ToString());
+
+        assertEquals(0, dv.getOrd(0));
+        assertEquals(0, dv.getOrd(1));
+        assertEquals(1, dv.getOrd(2));
+
+        dv = new BytesRefFieldComparatorSource.ReplaceMissing(raw, new BytesRef("dog"));
+        assertEquals(2, dv.getValueCount());
+        assertEquals("cat", dv.lookupOrd(0).utf8ToString());
+        assertEquals("dog", dv.lookupOrd(1).utf8ToString());
+
+        assertEquals(0, dv.getOrd(0));
+        assertEquals(1, dv.getOrd(1));
+        assertEquals(1, dv.getOrd(2));
+
+        // non-existing values: the missing term is inserted at its sorted
+        // position and every ord at or above it shifts up by one.
+
+        // "apple" < "cat": inserted as ord 0.
+        dv = new BytesRefFieldComparatorSource.ReplaceMissing(raw, new BytesRef("apple"));
+        assertEquals(3, dv.getValueCount());
+        assertEquals("apple", dv.lookupOrd(0).utf8ToString());
+        assertEquals("cat", dv.lookupOrd(1).utf8ToString());
+        assertEquals("dog", dv.lookupOrd(2).utf8ToString());
+
+        assertEquals(1, dv.getOrd(0));
+        assertEquals(0, dv.getOrd(1));
+        assertEquals(2, dv.getOrd(2));
+
+        // "cat" < "company" < "dog": inserted as ord 1.
+        dv = new BytesRefFieldComparatorSource.ReplaceMissing(raw, new BytesRef("company"));
+        assertEquals(3, dv.getValueCount());
+        assertEquals("cat", dv.lookupOrd(0).utf8ToString());
+        assertEquals("company", dv.lookupOrd(1).utf8ToString());
+        assertEquals("dog", dv.lookupOrd(2).utf8ToString());
+
+        assertEquals(0, dv.getOrd(0));
+        assertEquals(1, dv.getOrd(1));
+        assertEquals(2, dv.getOrd(2));
+
+        // "ebay" > "dog": inserted as ord 2 (the end).
+        dv = new BytesRefFieldComparatorSource.ReplaceMissing(raw, new BytesRef("ebay"));
+        assertEquals(3, dv.getValueCount());
+        assertEquals("cat", dv.lookupOrd(0).utf8ToString());
+        assertEquals("dog", dv.lookupOrd(1).utf8ToString());
+        assertEquals("ebay", dv.lookupOrd(2).utf8ToString());
+
+        assertEquals(0, dv.getOrd(0));
+        assertEquals(2, dv.getOrd(1));
+        assertEquals(1, dv.getOrd(2));
+
+        reader.close();
+        dir.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
new file mode 100644
index 0000000000..dd84f1515f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinalsTests.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.index.RandomAccessOrds;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.util.packed.PackedInts;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.search.MultiValueMode;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for multi-valued ordinals produced by {@link OrdinalsBuilder}.
+ */
+public class MultiOrdinalsTests extends ElasticsearchTestCase {
+
+    protected final Ordinals creationMultiOrdinals(OrdinalsBuilder builder) {
+        return this.creationMultiOrdinals(builder, Settings.builder());
+    }
+
+
+    // Hook so subclasses can build the Ordinals with different settings.
+    protected Ordinals creationMultiOrdinals(OrdinalsBuilder builder, Settings.Builder settings) {
+        return builder.build(settings.build());
+    }
+
+
+    @Test
+    public void testRandomValues() throws IOException {
+        Random random = getRandom();
+        int numDocs = 100 + random.nextInt(1000);
+        int numOrdinals = 1 + random.nextInt(200);
+        int numValues = 100 + random.nextInt(100000);
+        OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+        Set<OrdAndId> ordsAndIdSet = new HashSet<>();
+        for (int i = 0; i < numValues; i++) {
+            ordsAndIdSet.add(new OrdAndId(random.nextInt(numOrdinals), random.nextInt(numDocs)));
+        }
+        List<OrdAndId> ordsAndIds = new ArrayList<>(ordsAndIdSet);
+        // The builder is fed ordinal-by-ordinal, so sort by (ord, id) first.
+        Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+            @Override
+            public int compare(OrdAndId o1, OrdAndId o2) {
+                if (o1.ord < o2.ord) {
+                    return -1;
+                }
+                if (o1.ord == o2.ord) {
+                    if (o1.id < o2.id) {
+                        return -1;
+                    }
+                    if (o1.id > o2.id) {
+                        return 1;
+                    }
+                    return 0;
+                }
+                return 1;
+            }
+        });
+        long lastOrd = -1;
+        for (OrdAndId ordAndId : ordsAndIds) {
+            if (lastOrd != ordAndId.ord) {
+                lastOrd = ordAndId.ord;
+                builder.nextOrdinal();
+            }
+            ordAndId.ord = builder.currentOrdinal(); // remap the ordinals in case we have gaps?
+            builder.addDoc(ordAndId.id);
+        }
+
+        // Verification walks per document, so re-sort by (id, ord).
+        Collections.sort(ordsAndIds, new Comparator<OrdAndId>() {
+
+            @Override
+            public int compare(OrdAndId o1, OrdAndId o2) {
+                if (o1.id < o2.id) {
+                    return -1;
+                }
+                if (o1.id == o2.id) {
+                    if (o1.ord < o2.ord) {
+                        return -1;
+                    }
+                    if (o1.ord > o2.ord) {
+                        return 1;
+                    }
+                    return 0;
+                }
+                return 1;
+            }
+        });
+        Ordinals ords = creationMultiOrdinals(builder);
+        RandomAccessOrds docs = ords.ordinals();
+        final SortedDocValues singleOrds = MultiValueMode.MIN.select(docs);
+        int docId = ordsAndIds.get(0).id;
+        List<Long> docOrds = new ArrayList<>();
+        for (OrdAndId ordAndId : ordsAndIds) {
+            if (docId == ordAndId.id) {
+                docOrds.add(ordAndId.ord);
+            } else {
+                if (!docOrds.isEmpty()) {
+                    // MIN.select must yield the doc's first (smallest) ord.
+                    assertThat((long) singleOrds.getOrd(docId), equalTo(docOrds.get(0)));
+
+                    docs.setDocument(docId);
+                    final int numOrds = docs.cardinality();
+                    assertThat(numOrds, equalTo(docOrds.size()));
+                    for (int i = 0; i < numOrds; i++) {
+                        assertThat(docs.nextOrd(), equalTo(docOrds.get(i)));
+                    }
+                    final long[] array = new long[docOrds.size()];
+                    for (int i = 0; i < array.length; i++) {
+                        array[i] = docOrds.get(i);
+                    }
+                    assertIter(docs, docId, array);
+                }
+                // Docs strictly between the previous and current id have no values.
+                for (int i = docId + 1; i < ordAndId.id; i++) {
+                    assertThat((long) singleOrds.getOrd(i), equalTo(RandomAccessOrds.NO_MORE_ORDS));
+                }
+                docId = ordAndId.id;
+                docOrds.clear();
+                docOrds.add(ordAndId.ord);
+
+            }
+        }
+        // NOTE(review): the ords accumulated for the final doc are never verified —
+        // the loop only flushes when the doc id changes.
+
+    }
+
+    // Mutable (ord, id) pair; ord is remapped after feeding the builder.
+    public static class OrdAndId {
+        long ord;
+        final int id;
+
+        public OrdAndId(long ord, int id) {
+            this.ord = ord;
+            this.id = id;
+        }
+
+        @Override
+        public int hashCode() {
+            final int prime = 31;
+            int result = 1;
+            result = prime * result + id;
+            // ord is narrowed to int purely for hashing.
+            result = prime * result + (int) ord;
+            return result;
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (this == obj) {
+                return true;
+            }
+            if (obj == null) {
+                return false;
+            }
+            if (getClass() != obj.getClass()) {
+                return false;
+            }
+            OrdAndId other = (OrdAndId) obj;
+            if (id != other.id) {
+                return false;
+            }
+            if (ord != other.ord) {
+                return false;
+            }
+            return true;
+        }
+    }
+
+    @Test
+    public void testOrdinals() throws Exception {
+        int maxDoc = 7;
+        long maxOrds = 32;
+        OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+        builder.nextOrdinal(); // 0
+        builder.addDoc(1).addDoc(4).addDoc(5).addDoc(6);
+        builder.nextOrdinal(); // 1
+        builder.addDoc(0).addDoc(5).addDoc(6);
+        builder.nextOrdinal(); // 2
+        builder.addDoc(2).addDoc(4).addDoc(5).addDoc(6);
+        builder.nextOrdinal(); // 3
+        builder.addDoc(0).addDoc(4).addDoc(5).addDoc(6);
+        builder.nextOrdinal(); // 4
+        builder.addDoc(4).addDoc(5).addDoc(6);
+        builder.nextOrdinal(); // 5
+        builder.addDoc(4).addDoc(5).addDoc(6);
+        // Fill the remaining ordinals up to maxOrds with docs 5 and 6 only.
+        while (builder.getValueCount() < maxOrds) {
+            builder.nextOrdinal();
+            builder.addDoc(5).addDoc(6);
+        }
+
+        // Expected ords per document (row index == doc id).
+        long[][] ordinalPlan = new long[][]{
+                {1, 3},
+                {0},
+                {2},
+                {},
+                {0, 2, 3, 4, 5},
+                {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+                {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
+        };
+
+        Ordinals ordinals = creationMultiOrdinals(builder);
+        RandomAccessOrds docs = ordinals.ordinals();
+        assertEquals(docs, ordinalPlan);
+    }
+
+    // Asserts that iterating a doc's ords (cardinality + nextOrd) matches exactly.
+    protected static void assertIter(RandomAccessOrds docs, int docId, long... expectedOrdinals) {
+        docs.setDocument(docId);
+        assertThat(docs.cardinality(), equalTo(expectedOrdinals.length));
+        for (long expectedOrdinal : expectedOrdinals) {
+            assertThat(docs.nextOrd(), equalTo(expectedOrdinal));
+        }
+    }
+
+    @Test
+    public void testMultiValuesDocsWithOverlappingStorageArrays() throws Exception {
+        int maxDoc = 7;
+        long maxOrds = 15;
+        OrdinalsBuilder builder = new OrdinalsBuilder(maxDoc);
+        for (int i = 0; i < maxOrds; i++) {
+            builder.nextOrdinal();
+            if (i < 10) {
+                builder.addDoc(0);
+            }
+            builder.addDoc(1);
+            if (i == 0) {
+                builder.addDoc(2);
+            }
+            if (i < 5) {
+                builder.addDoc(3);
+
+            }
+            if (i < 6) {
+                builder.addDoc(4);
+
+            }
+            if (i == 1) {
+                builder.addDoc(5);
+            }
+            if (i < 10) {
+                builder.addDoc(6);
+            }
+        }
+
+        // Expected ords per document (row index == doc id).
+        long[][] ordinalPlan = new long[][]{
+                {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+                {0,1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
+                {0},
+                {0, 1, 2, 3, 4},
+                {0, 1, 2, 3, 4, 5},
+                {1},
+                {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+        };
+
+        Ordinals ordinals = new MultiOrdinals(builder, PackedInts.FASTEST);
+        RandomAccessOrds docs = ordinals.ordinals();
+        assertEquals(docs, ordinalPlan);
+    }
+
+    // Checks value count, multi-valuedness and random-access ords against the plan.
+    private void assertEquals(RandomAccessOrds docs, long[][] ordinalPlan) {
+        long maxOrd = 0;
+        for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+            if (ordinalPlan[doc].length > 0) {
+                maxOrd = Math.max(maxOrd, 1 + ordinalPlan[doc][ordinalPlan[doc].length - 1]);
+            }
+        }
+        assertThat(docs.getValueCount(), equalTo(maxOrd));
+        assertThat(FieldData.isMultiValued(docs), equalTo(true));
+        for (int doc = 0; doc < ordinalPlan.length; ++doc) {
+            long[] ords = ordinalPlan[doc];
+            docs.setDocument(doc);
+            assertThat(docs.cardinality(), equalTo(ords.length));
+            for (int i = 0; i < ords.length; ++i) {
+                assertThat(docs.ordAt(i), equalTo(ords[i]));
+            }
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
new file mode 100644
index 0000000000..ebb8eb361f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/ordinals/SingleOrdinalsTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.fielddata.ordinals;
+
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.RandomAccessOrds;
+import org.apache.lucene.index.SortedDocValues;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class SingleOrdinalsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSvValues() throws IOException {
+ int numDocs = 1000000;
+ int numOrdinals = numDocs / 4;
+ Map<Integer, Long> controlDocToOrdinal = new HashMap<>();
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ long ordinal = builder.currentOrdinal();
+ for (int doc = 0; doc < numDocs; doc++) {
+ if (doc % numOrdinals == 0) {
+ ordinal = builder.nextOrdinal();
+ }
+ controlDocToOrdinal.put(doc, ordinal);
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(Settings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+ RandomAccessOrds docs = ords.ordinals();
+ final SortedDocValues singleOrds = DocValues.unwrapSingleton(docs);
+ assertNotNull(singleOrds);
+
+ for (Map.Entry<Integer, Long> entry : controlDocToOrdinal.entrySet()) {
+ assertThat(entry.getValue(), equalTo((long) singleOrds.getOrd(entry.getKey())));
+ }
+ }
+
+ @Test
+ public void testMvOrdinalsTrigger() throws IOException {
+ int numDocs = 1000000;
+ OrdinalsBuilder builder = new OrdinalsBuilder(numDocs);
+ builder.nextOrdinal();
+ for (int doc = 0; doc < numDocs; doc++) {
+ builder.addDoc(doc);
+ }
+
+ Ordinals ords = builder.build(Settings.EMPTY);
+ assertThat(ords, instanceOf(SinglePackedOrdinals.class));
+
+ builder.nextOrdinal();
+ builder.addDoc(0);
+ ords = builder.build(Settings.EMPTY);
+ assertThat(ords, not(instanceOf(SinglePackedOrdinals.class)));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java
new file mode 100644
index 0000000000..48c95aa3f6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.fielddata.plain;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ */
+public class ParentChildFilteredTermsEnumTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimple_twoFieldEachUniqueValue() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ for (int i = 1; i <= 10000; i++) {
+ Document document = new Document();
+ String fieldName = i % 2 == 0 ? "field1" : "field2";
+ document.add(new StringField(fieldName, format(i), Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
+ TermsEnum[] compoundTermsEnums = new TermsEnum[]{
+ new ParentChildIntersectTermsEnum(SlowCompositeReaderWrapper.wrap(indexReader), "field1", "field2")
+ };
+ for (TermsEnum termsEnum : compoundTermsEnums) {
+ int expected = 0;
+ for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ ++expected;
+ assertThat(term.utf8ToString(), equalTo(format(expected)));
+ PostingsEnum docsEnum = termsEnum.postings(null, null);
+ assertThat(docsEnum, notNullValue());
+ int docId = docsEnum.nextDoc();
+ assertThat(docId, not(equalTo(-1)));
+ assertThat(docId, not(equalTo(DocIdSetIterator.NO_MORE_DOCS)));
+ assertThat(docsEnum.nextDoc(), equalTo(DocIdSetIterator.NO_MORE_DOCS));
+ }
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ @Test
+ public void testDocument_twoFieldsEachSharingValues() throws Exception {
+ Directory directory = newDirectory();
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+ for (int i = 1; i <= 1000; i++) {
+ Document document = new Document();
+ document.add(new StringField("field1", format(i), Field.Store.NO));
+ indexWriter.addDocument(document);
+
+ for (int j = 0; j < 10; j++) {
+ document = new Document();
+ document.add(new StringField("field2", format(i), Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+ }
+
+ IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
+ TermsEnum[] compoundTermsEnums = new TermsEnum[]{
+ new ParentChildIntersectTermsEnum(SlowCompositeReaderWrapper.wrap(indexReader), "field1", "field2")
+ };
+ for (TermsEnum termsEnum : compoundTermsEnums) {
+ int expected = 0;
+ for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
+ ++expected;
+ assertThat(term.utf8ToString(), equalTo(format(expected)));
+ PostingsEnum docsEnum = termsEnum.postings(null, null);
+ assertThat(docsEnum, notNullValue());
+ int numDocs = 0;
+ for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+ numDocs++;
+ }
+ assertThat(numDocs, equalTo(11));
+ }
+ }
+
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+ static String format(int i) {
+ return String.format(Locale.ROOT, "%06d", i);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java b/core/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
new file mode 100644
index 0000000000..f3182fa7a6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/gateway/CommitPointsTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.gateway;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class CommitPointsTests extends ElasticsearchTestCase {
+
+ private final ESLogger logger = Loggers.getLogger(CommitPointsTests.class);
+
+ @Test
+ public void testCommitPointXContent() throws Exception {
+ ArrayList<CommitPoint.FileInfo> indexFiles = Lists.newArrayList();
+ indexFiles.add(new CommitPoint.FileInfo("file1", "file1_p", 100, "ck1"));
+ indexFiles.add(new CommitPoint.FileInfo("file2", "file2_p", 200, "ck2"));
+
+ ArrayList<CommitPoint.FileInfo> translogFiles = Lists.newArrayList();
+ translogFiles.add(new CommitPoint.FileInfo("t_file1", "t_file1_p", 100, null));
+ translogFiles.add(new CommitPoint.FileInfo("t_file2", "t_file2_p", 200, null));
+
+ CommitPoint commitPoint = new CommitPoint(1, "test", CommitPoint.Type.GENERATED, indexFiles, translogFiles);
+
+ byte[] serialized = CommitPoints.toXContent(commitPoint);
+ logger.info("serialized commit_point {}", new String(serialized, Charsets.UTF_8));
+
+ CommitPoint desCp = CommitPoints.fromXContent(serialized);
+ assertThat(desCp.version(), equalTo(commitPoint.version()));
+ assertThat(desCp.name(), equalTo(commitPoint.name()));
+
+ assertThat(desCp.indexFiles().size(), equalTo(commitPoint.indexFiles().size()));
+ for (int i = 0; i < desCp.indexFiles().size(); i++) {
+ assertThat(desCp.indexFiles().get(i).name(), equalTo(commitPoint.indexFiles().get(i).name()));
+ assertThat(desCp.indexFiles().get(i).physicalName(), equalTo(commitPoint.indexFiles().get(i).physicalName()));
+ assertThat(desCp.indexFiles().get(i).length(), equalTo(commitPoint.indexFiles().get(i).length()));
+ assertThat(desCp.indexFiles().get(i).checksum(), equalTo(commitPoint.indexFiles().get(i).checksum()));
+ }
+
+ assertThat(desCp.translogFiles().size(), equalTo(commitPoint.translogFiles().size()));
+ for (int i = 0; i < desCp.indexFiles().size(); i++) {
+ assertThat(desCp.translogFiles().get(i).name(), equalTo(commitPoint.translogFiles().get(i).name()));
+ assertThat(desCp.translogFiles().get(i).physicalName(), equalTo(commitPoint.translogFiles().get(i).physicalName()));
+ assertThat(desCp.translogFiles().get(i).length(), equalTo(commitPoint.translogFiles().get(i).length()));
+ assertThat(desCp.translogFiles().get(i).checksum(), nullValue());
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java
new file mode 100644
index 0000000000..32da43da40
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+
+public class DynamicMappingIntegrationTests extends ElasticsearchIntegrationTest {
+
+ public void testConflictingDynamicMappings() {
+ // we don't use indexRandom because the order of requests is important here
+ createIndex("index");
+ client().prepareIndex("index", "type", "1").setSource("foo", 3).get();
+ try {
+ client().prepareIndex("index", "type", "2").setSource("foo", "bar").get();
+ fail("Indexing request should have failed!");
+ } catch (MapperParsingException e) {
+ // expected
+ }
+ }
+
+ public void testConflictingDynamicMappingsBulk() {
+ // we don't use indexRandom because the order of requests is important here
+ createIndex("index");
+ client().prepareIndex("index", "type", "1").setSource("foo", 3).get();
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("index", "type", "1").setSource("foo", 3)).get();
+ assertFalse(bulkResponse.hasFailures());
+ bulkResponse = client().prepareBulk().add(client().prepareIndex("index", "type", "2").setSource("foo", "bar")).get();
+ assertTrue(bulkResponse.hasFailures());
+ }
+
+ private static void assertMappingsHaveField(GetMappingsResponse mappings, String index, String type, String field) throws IOException {
+ ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.getMappings().get("index");
+ assertNotNull(indexMappings);
+ MappingMetaData typeMappings = indexMappings.get(type);
+ assertNotNull(typeMappings);
+ Map<String, Object> typeMappingsMap = typeMappings.getSourceAsMap();
+ Map<String, Object> properties = (Map<String, Object>) typeMappingsMap.get("properties");
+ assertTrue("Could not find [" + field + "] in " + typeMappingsMap.toString(), properties.containsKey(field));
+ }
+
+ public void testMappingsPropagatedToMasterNodeImmediately() throws IOException {
+ createIndex("index");
+
+ // works when the type has been dynamically created
+ client().prepareIndex("index", "type", "1").setSource("foo", 3).get();
+ GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get();
+ assertMappingsHaveField(mappings, "index", "type", "foo");
+
+ // works if the type already existed
+ client().prepareIndex("index", "type", "1").setSource("bar", "baz").get();
+ mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get();
+ assertMappingsHaveField(mappings, "index", "type", "bar");
+
+ // works if we indexed an empty document
+ client().prepareIndex("index", "type2", "1").setSource().get();
+ mappings = client().admin().indices().prepareGetMappings("index").setTypes("type2").get();
+ assertTrue(mappings.getMappings().get("index").toString(), mappings.getMappings().get("index").containsKey("type2"));
+ }
+
+ public void testConcurrentDynamicUpdates() throws Throwable {
+ createIndex("index");
+ final Thread[] indexThreads = new Thread[32];
+ final CountDownLatch startLatch = new CountDownLatch(1);
+ final AtomicReference<Throwable> error = new AtomicReference<>();
+ for (int i = 0; i < indexThreads.length; ++i) {
+ final String id = Integer.toString(i);
+ indexThreads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ startLatch.await();
+ assertTrue(client().prepareIndex("index", "type", id).setSource("field" + id, "bar").get().isCreated());
+ } catch (Throwable t) {
+ error.compareAndSet(null, t);
+ }
+ }
+ });
+ indexThreads[i].start();
+ }
+ startLatch.countDown();
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ if (error.get() != null) {
+ throw error.get();
+ }
+ Thread.sleep(2000);
+ GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type").get();
+ for (int i = 0; i < indexThreads.length; ++i) {
+ assertMappingsHaveField(mappings, "index", "type", "field" + i);
+ }
+ for (int i = 0; i < indexThreads.length; ++i) {
+ assertTrue(client().prepareGet("index", "type", Integer.toString(i)).get().isExists());
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
new file mode 100644
index 0000000000..81bce4489b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
@@ -0,0 +1,363 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DynamicMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testDynamicTrue() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "true")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+
+ public void testDynamicFalse() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("field2"), nullValue());
+ }
+
+
+ public void testDynamicStrict() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+ fail();
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", (String) null)
+ .bytes());
+ fail();
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+
+ public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "false")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("obj1.field2"), nullValue());
+ }
+
+ public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .field("dynamic", "strict")
+ .startObject("properties")
+ .startObject("obj1").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ try {
+ defaultMapper.parse("type", "1", jsonBuilder()
+ .startObject().startObject("obj1")
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject()
+ .bytes());
+ fail();
+ } catch (StrictDynamicMappingException e) {
+ // all is well
+ }
+ }
+
+ public void testDynamicMappingOnEmptyString() throws Exception {
+ IndexService service = createIndex("test");
+ client().prepareIndex("test", "type").setSource("empty_field", "").get();
+ FieldMapper mapper = service.mapperService().fullName("empty_field");
+ assertNotNull(mapper);
+ }
+
+ public void testTypeNotCreatedOnIndexFailure() throws IOException, InterruptedException {
+ XContentBuilder mapping = jsonBuilder().startObject().startObject("_default_")
+ .field("dynamic", "strict")
+ .endObject().endObject();
+
+ IndexService indexService = createIndex("test", Settings.EMPTY, "_default_", mapping);
+
+ try {
+ client().prepareIndex().setIndex("test").setType("type").setSource(jsonBuilder().startObject().field("test", "test").endObject()).get();
+ fail();
+ } catch (StrictDynamicMappingException e) {
+
+ }
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ assertNull(getMappingsResponse.getMappings().get("test").get("type"));
+ }
+
+ private String serialize(ToXContent mapper) throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ mapper.toXContent(builder, new ToXContent.MapParams(ImmutableMap.<String, String>of()));
+ return builder.endObject().string();
+ }
+
+ private Mapper parse(DocumentMapper mapper, DocumentMapperParser parser, XContentBuilder builder) throws Exception {
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
+ ParseContext.InternalParseContext ctx = new ParseContext.InternalParseContext("test", settings, parser, mapper, new ContentPath(0));
+ SourceToParse source = SourceToParse.source(builder.bytes());
+ ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source);
+ assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken());
+ ctx.parser().nextToken();
+ return DocumentParser.parseObject(ctx, mapper.root());
+ }
+
+ public void testDynamicMappingsNotNeeded() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("foo").field("type", "string").endObject().endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject());
+ // foo is already defined in the mappings
+ assertNull(update);
+ }
+
+ public void testField() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"string\"}}}}", serialize(update));
+ }
+
+ public void testIncremental() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ // Make sure that mapping updates are incremental, this is important for performance otherwise
+ // every new field introduction runs in linear time with the total number of fields
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("foo").field("type", "string").endObject().endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ // foo is NOT in the update
+ .startObject("bar").field("type", "string").endObject()
+ .endObject().endObject().string(), serialize(update));
+ }
+
+ public void testIntroduceTwoFields() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().field("foo", "bar").field("bar", "baz").endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("bar").field("type", "string").endObject()
+ .startObject("foo").field("type", "string").endObject()
+ .endObject().endObject().string(), serialize(update));
+ }
+
+ public void testObject() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
+ .endObject().endObject().endObject().string(), serialize(update));
+ }
+
+ public void testArray() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo").value("bar").value("baz").endArray().endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .endObject().endObject().endObject().string(), serialize(update));
+ }
+
+ public void testInnerDynamicMapping() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties")
+ .startObject("foo").field("type", "object").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startObject("foo").startObject("bar").field("baz", "foo").endObject().endObject().endObject());
+ assertNotNull(update);
+ // original mapping not modified
+ assertEquals(mapping, serialize(mapper));
+ // but we have an update
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("foo").startObject("properties").startObject("bar").startObject("properties").startObject("baz").field("type", "string").endObject().endObject().endObject().endObject().endObject()
+ .endObject().endObject().endObject().string(), serialize(update));
+ }
+
+ public void testComplexArray() throws Exception {
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").endObject()
+ .endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ assertEquals(mapping, serialize(mapper));
+
+ Mapper update = parse(mapper, parser, XContentFactory.jsonBuilder().startObject().startArray("foo")
+ .startObject().field("bar", "baz").endObject()
+ .startObject().field("baz", 3).endObject()
+ .endArray().endObject());
+ assertEquals(mapping, serialize(mapper));
+ assertEquals(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("foo").startObject("properties")
+ .startObject("bar").field("type", "string").endObject()
+ .startObject("baz").field("type", "long").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject().string(), serialize(update));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java
new file mode 100644
index 0000000000..ab336cf7da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import org.apache.lucene.document.FieldType;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+public class FieldMappersLookupTests extends ElasticsearchTestCase {
+
+ public void testEmpty() {
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ assertNull(lookup.fullName("foo"));
+ assertNull(lookup.indexName("foo"));
+ Collection<String> names = lookup.simpleMatchToFullName("foo");
+ assertNotNull(names);
+ assertTrue(names.isEmpty());
+ names = lookup.simpleMatchToFullName("foo");
+ assertNotNull(names);
+ assertTrue(names.isEmpty());
+ assertNull(lookup.smartName("foo"));
+ assertNull(lookup.smartNameFieldMapper("foo"));
+ assertNull(lookup.get("foo"));
+ Iterator<FieldMapper> itr = lookup.iterator();
+ assertNotNull(itr);
+ assertFalse(itr.hasNext());
+ }
+
+ public void testNewField() {
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ FakeFieldMapper f = new FakeFieldMapper("foo", "bar");
+ FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f));
+ assertNull(lookup.fullName("foo"));
+ assertNull(lookup.indexName("bar"));
+
+ FieldMappers mappers = lookup2.fullName("foo");
+ assertNotNull(mappers);
+ assertEquals(1, mappers.mappers().size());
+ assertEquals(f, mappers.mapper());
+ mappers = lookup2.indexName("bar");
+ assertNotNull(mappers);
+ assertEquals(1, mappers.mappers().size());
+ assertEquals(f, mappers.mapper());
+ assertEquals(1, Iterators.size(lookup2.iterator()));
+ }
+
+ public void testExtendField() {
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ FakeFieldMapper f = new FakeFieldMapper("foo", "bar");
+ FakeFieldMapper other = new FakeFieldMapper("blah", "blah");
+ lookup = lookup.copyAndAddAll(newList(f, other));
+ FakeFieldMapper f2 = new FakeFieldMapper("foo", "bar");
+ FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f2));
+
+ FieldMappers mappers = lookup2.fullName("foo");
+ assertNotNull(mappers);
+ assertEquals(2, mappers.mappers().size());
+
+ mappers = lookup2.indexName("bar");
+ assertNotNull(mappers);
+ assertEquals(2, mappers.mappers().size());
+ assertEquals(3, Iterators.size(lookup2.iterator()));
+ }
+
+ public void testIndexName() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "foo");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1));
+
+ FieldMappers mappers = lookup.indexName("foo");
+ assertNotNull(mappers);
+ assertEquals(1, mappers.mappers().size());
+ assertEquals(f1, mappers.mapper());
+ }
+
+ public void testSimpleMatchIndexNames() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
+ FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1, f2));
+ Collection<String> names = lookup.simpleMatchToIndexNames("b*");
+ assertTrue(names.contains("baz"));
+ assertTrue(names.contains("boo"));
+ }
+
+ public void testSimpleMatchFullNames() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
+ FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1, f2));
+ Collection<String> names = lookup.simpleMatchToFullName("b*");
+ assertTrue(names.contains("foo"));
+ assertTrue(names.contains("bar"));
+ }
+
+ public void testSmartName() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "realfoo");
+ FakeFieldMapper f2 = new FakeFieldMapper("foo", "realbar");
+ FakeFieldMapper f3 = new FakeFieldMapper("baz", "realfoo");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1, f2, f3));
+
+ assertNotNull(lookup.smartName("foo"));
+ assertEquals(2, lookup.smartName("foo").mappers().size());
+ assertNotNull(lookup.smartName("realfoo"));
+ assertEquals(f1, lookup.smartNameFieldMapper("foo"));
+ assertEquals(f2, lookup.smartNameFieldMapper("realbar"));
+ }
+
+ public void testIteratorImmutable() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1));
+
+ try {
+ Iterator<FieldMapper> itr = lookup.iterator();
+ assertTrue(itr.hasNext());
+ assertEquals(f1, itr.next());
+ itr.remove();
+ fail("remove should have failed");
+ } catch (UnsupportedOperationException e) {
+ // expected
+ }
+ }
+
+ public void testGetMapper() {
+ FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar");
+ FieldMappersLookup lookup = new FieldMappersLookup();
+ lookup = lookup.copyAndAddAll(newList(f1));
+
+ assertEquals(f1, lookup.get("foo"));
+ assertNull(lookup.get("bar")); // get is only by full name
+ FakeFieldMapper f2 = new FakeFieldMapper("foo", "foo");
+ lookup = lookup.copyAndAddAll(newList(f2));
+ try {
+ lookup.get("foo");
+ fail("get should have enforced foo is unique");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ }
+
+ static List<FieldMapper> newList(FieldMapper... mapper) {
+ return Lists.newArrayList(mapper);
+ }
+
+    // this sucks how much must be overridden just to get a dummy field mapper...
+ static class FakeFieldMapper extends AbstractFieldMapper {
+ static Settings dummySettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
+ public FakeFieldMapper(String fullName, String indexName) {
+ super(makeFieldType(fullName, indexName), null, null, dummySettings, null, null);
+ }
+ static MappedFieldType makeFieldType(String fullName, String indexName) {
+ MappedFieldType fieldType = Defaults.FIELD_TYPE.clone();
+ fieldType.setNames(new MappedFieldType.Names(fullName, indexName, indexName, fullName));
+ return fieldType;
+ }
+ @Override
+ public MappedFieldType defaultFieldType() { return null; }
+ @Override
+ public FieldDataType defaultFieldDataType() { return null; }
+ @Override
+ protected String contentType() { return null; }
+ @Override
+ protected void parseCreateField(ParseContext context, List list) throws IOException {}
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java b/core/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
new file mode 100644
index 0000000000..443ce1b39f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/TransformOnIndexMapperIntegrationTest.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.search.suggest.SuggestBuilders;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertExists;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion;
+import static org.hamcrest.Matchers.both;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.not;
+
+/**
+ * Tests for transforming the source document before indexing.
+ */
+@SuppressCodecs("*") // requires custom completion format
+public class TransformOnIndexMapperIntegrationTest extends ElasticsearchIntegrationTest {
+ @Test
+ public void searchOnTransformed() throws Exception {
+ setup(true);
+
+        // Searching by the field created in the transform finds the entry
+ SearchResponse response = client().prepareSearch("test").setQuery(termQuery("destination", "findme")).get();
+ assertSearchHits(response, "righttitle");
+ // The field built in the transform isn't in the source but source is,
+ // even though we didn't index it!
+ assertRightTitleSourceUntransformed(response.getHits().getAt(0).sourceAsMap());
+
+ // Can't find by a field removed from the document by the transform
+ response = client().prepareSearch("test").setQuery(termQuery("content", "findme")).get();
+ assertHitCount(response, 0);
+ }
+
+ @Test
+ public void getTransformed() throws Exception {
+ setup(getRandom().nextBoolean());
+ GetResponse response = client().prepareGet("test", "test", "righttitle").get();
+ assertExists(response);
+ assertRightTitleSourceUntransformed(response.getSource());
+
+ response = client().prepareGet("test", "test", "righttitle").setTransformSource(true).get();
+ assertExists(response);
+ assertRightTitleSourceTransformed(response.getSource());
+ }
+
+ // TODO: the completion suggester currently returns payloads with no reencoding so this test
+ // exists to make sure that _source transformation and completion work well together. If we
+ // ever fix the completion suggester to reencode the payloads then we can remove this test.
+ @Test
+ public void contextSuggestPayloadTransformed() throws Exception {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder.startObject("properties");
+ builder.startObject("suggest").field("type", "completion").field("payloads", true).endObject();
+ builder.endObject();
+ builder.startObject("transform");
+ builder.field("script", "ctx._source.suggest = ['input': ctx._source.text];ctx._source.suggest.payload = ['display': ctx._source.text, 'display_detail': 'on the fly']");
+ builder.field("lang", GroovyScriptEngineService.NAME);
+ builder.endObject();
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("test", builder));
+ // Payload is stored using original source format (json, smile, yaml, whatever)
+ XContentType type = XContentType.values()[between(0, XContentType.values().length - 1)];
+ XContentBuilder source = XContentFactory.contentBuilder(type);
+ source.startObject().field("text", "findme").endObject();
+ indexRandom(true, client().prepareIndex("test", "test", "findme").setSource(source));
+ SuggestResponse response = client().prepareSuggest("test").addSuggestion(
+ SuggestBuilders.completionSuggestion("test").field("suggest").text("findme")).get();
+ assertSuggestion(response.getSuggest(), 0, 0, "test", "findme");
+ CompletionSuggestion.Entry.Option option = (CompletionSuggestion.Entry.Option)response.getSuggest().getSuggestion("test").getEntries().get(0).getOptions().get(0);
+ // And it comes back in exactly that way.
+ XContentBuilder expected = XContentFactory.contentBuilder(type);
+ expected.startObject().field("display", "findme").field("display_detail", "on the fly").endObject();
+ assertEquals(expected.string(), option.getPayloadAsString());
+ }
+
+ /**
+ * Setup an index with some source transforms. Randomly picks the number of
+ * transforms but all but one of the transforms is a noop. The other is a
+ * script that fills the 'destination' field with the 'content' field only
+ * if the 'title' field starts with 't' and then always removes the
+     * 'content' field regardless of the contents of 't'. The actual script
+ * randomly uses parameters or not.
+ *
+ * @param forceRefresh
+ * should the data be flushed to disk? Set to false to test real
+ * time fetching
+ */
+ private void setup(boolean forceRefresh) throws IOException, InterruptedException, ExecutionException {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder.field("transform");
+ if (getRandom().nextBoolean()) {
+ // Single transform
+ builder.startObject();
+ buildTransformScript(builder);
+ builder.field("lang", randomFrom(null, GroovyScriptEngineService.NAME));
+ builder.endObject();
+ } else {
+ // Multiple transforms
+ int total = between(1, 10);
+ int actual = between(0, total - 1);
+ builder.startArray();
+ for (int s = 0; s < total; s++) {
+ builder.startObject();
+ if (s == actual) {
+ buildTransformScript(builder);
+ } else {
+ builder.field("script", "true");
+ }
+ builder.field("lang", randomFrom(null, GroovyScriptEngineService.NAME));
+ builder.endObject();
+ }
+ builder.endArray();
+ }
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("test", builder));
+
+ indexRandom(forceRefresh, client().prepareIndex("test", "test", "notitle").setSource("content", "findme"),
+ client().prepareIndex("test", "test", "badtitle").setSource("content", "findme", "title", "cat"),
+ client().prepareIndex("test", "test", "righttitle").setSource("content", "findme", "title", "table"));
+ }
+
+ private void buildTransformScript(XContentBuilder builder) throws IOException {
+ String script = "if (ctx._source['title']?.startsWith('t')) { ctx._source['destination'] = ctx._source[sourceField] }; ctx._source.remove(sourceField);";
+ if (getRandom().nextBoolean()) {
+ script = script.replace("sourceField", "'content'");
+ } else {
+ builder.field("params", ImmutableMap.of("sourceField", "content"));
+ }
+ builder.field("script", script);
+ }
+
+ private void assertRightTitleSourceUntransformed(Map<String, Object> source) {
+ assertThat(source, both(hasEntry("content", (Object) "findme")).and(not(hasKey("destination"))));
+ }
+
+ private void assertRightTitleSourceTransformed(Map<String, Object> source) {
+ assertThat(source, both(hasEntry("destination", (Object) "findme")).and(not(hasKey("content"))));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/UidTests.java b/core/src/test/java/org/elasticsearch/index/mapper/UidTests.java
new file mode 100644
index 0000000000..1d2be4592f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/UidTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class UidTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCreateAndSplitId() {
+ BytesRef createUid = Uid.createUidAsBytes("foo", "bar");
+ BytesRef[] splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(createUid);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
+ // split also with an offset
+ BytesRef ref = new BytesRef(createUid.length+10);
+ ref.offset = 9;
+ ref.length = createUid.length;
+ System.arraycopy(createUid.bytes, createUid.offset, ref.bytes, ref.offset, ref.length);
+ splitUidIntoTypeAndId = Uid.splitUidIntoTypeAndId(ref);
+ assertThat("foo", equalTo(splitUidIntoTypeAndId[0].utf8ToString()));
+ assertThat("bar", equalTo(splitUidIntoTypeAndId[1].utf8ToString()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
new file mode 100644
index 0000000000..b0ff89f414
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java
@@ -0,0 +1,467 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.all;
+
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.all.AllEntries;
+import org.elasticsearch.common.lucene.all.AllField;
+import org.elasticsearch.common.lucene.all.AllTermQuery;
+import org.elasticsearch.common.lucene.all.AllTokenStream;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.Engine.Searcher;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.index.mapper.internal.SizeFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.empty;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class SimpleAllMapperTests extends ElasticsearchSingleNodeTest {
+
+ public void testSimpleAllMappers() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ // One field is boosted so we should see AllTokenStream used:
+ assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.instanceOf(AllTokenStream.class));
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ AllFieldMapper mapper = docMapper.allFieldMapper();
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+ }
+
+ public void testAllMappersNoBoost() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
+ IndexService index = createIndex("test");
+ DocumentMapper docMapper = index.mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ public void testAllMappersTermQuery() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ AllFieldMapper mapper = docMapper.allFieldMapper();
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+
+ }
+
+ // #6187: make sure we see AllTermQuery even when offsets are indexed in the _all field:
+ public void testAllMappersWithOffsetsTermQuery() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ // _all field indexes positions, and mapping has boosts, so we should see AllTokenStream:
+ assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.instanceOf(AllTokenStream.class));
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ AllFieldMapper mapper = docMapper.allFieldMapper();
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ assertThat(mapper.queryStringTermQuery(new Term("_all", "foobar")), Matchers.instanceOf(AllTermQuery.class));
+ }
+
+ // #6187: if _all doesn't index positions then we never use AllTokenStream, even if some fields have boost
+ public void testBoostWithOmitPositions() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ // _all field omits positions, so we should not get AllTokenStream even though fields are boosted
+ assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+ }
+
+ // #6187: if no fields were boosted, we shouldn't use AllTokenStream
+ public void testNoBoost() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ // no fields have boost, so we should not see AllTokenStream:
+ assertThat(field.tokenStream(docMapper.mappers().indexAnalyzer(), null), Matchers.not(Matchers.instanceOf(AllTokenStream.class)));
+ }
+
+ public void testSimpleAllMappersWithReparse() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mapping.json");
+ DocumentMapper docMapper = parser.parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = parser.parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().toString(), allEntries.fields().size(), equalTo(3));
+ assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+ assertThat(field.fieldType().omitNorms(), equalTo(true));
+ }
+
+ public void testSimpleAllMappersWithStore() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = docMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ public void testSimpleAllMappersWithReparseWithStore() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/store-mapping.json");
+ DocumentMapper docMapper = parser.parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = parser.parse(builtMapping);
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
+ Document doc = builtDocMapper.parse("person", "1", new BytesArray(json)).rootDoc();
+
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("name.last"), equalTo(true));
+ assertThat(allEntries.fields().contains("simple1"), equalTo(true));
+
+ String text = field.stringValue();
+ assertThat(text, equalTo(allEntries.buildText()));
+ assertThat(field.fieldType().omitNorms(), equalTo(false));
+ }
+
+ public void testRandom() throws Exception {
+ boolean omitNorms = false;
+ boolean stored = false;
+ boolean enabled = true;
+ boolean tv_stored = false;
+ boolean tv_payloads = false;
+ boolean tv_offsets = false;
+ boolean tv_positions = false;
+ String similarity = null;
+ boolean fieldData = false;
+ XContentBuilder mappingBuilder = jsonBuilder();
+ mappingBuilder.startObject().startObject("test");
+ List<Tuple<String, Boolean>> booleanOptionList = new ArrayList<>();
+ boolean allDefault = true;
+ if (frequently()) {
+ allDefault = false;
+ mappingBuilder.startObject("_all");
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("omit_norms", omitNorms = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("store", stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("store_term_vectors", tv_stored = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("enabled", enabled = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("store_term_vector_offsets", tv_offsets = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("store_term_vector_positions", tv_positions = randomBoolean()));
+ }
+ if (randomBoolean()) {
+ booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean()));
+ }
+ Collections.shuffle(booleanOptionList, getRandom());
+ for (Tuple<String, Boolean> option : booleanOptionList) {
+ mappingBuilder.field(option.v1(), option.v2().booleanValue());
+ }
+ tv_stored |= tv_positions || tv_payloads || tv_offsets;
+ if (randomBoolean()) {
+ mappingBuilder.field("similarity", similarity = randomBoolean() ? "BM25" : "TF/IDF");
+ }
+ if (randomBoolean()) {
+ fieldData = true;
+ mappingBuilder.startObject("fielddata");
+ mappingBuilder.field("foo", "bar");
+ mappingBuilder.endObject();
+ }
+ mappingBuilder.endObject();
+ }
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping = mappingBuilder.endObject().endObject().bytes().toUtf8();
+ logger.info(mapping);
+ DocumentMapper docMapper = parser.parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+ // reparse it
+ DocumentMapper builtDocMapper = parser.parse(builtMapping);
+
+ byte[] json = jsonBuilder().startObject()
+ .field("foo", "bar")
+ .field("foobar", "foobar")
+ .endObject().bytes().toBytes();
+ Document doc = builtDocMapper.parse("test", "1", new BytesArray(json)).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ if (enabled) {
+ assertThat(field.fieldType().omitNorms(), equalTo(omitNorms));
+ assertThat(field.fieldType().stored(), equalTo(stored));
+ assertThat(field.fieldType().storeTermVectorOffsets(), equalTo(tv_offsets));
+ assertThat(field.fieldType().storeTermVectorPayloads(), equalTo(tv_payloads));
+ assertThat(field.fieldType().storeTermVectorPositions(), equalTo(tv_positions));
+ assertThat(field.fieldType().storeTermVectors(), equalTo(tv_stored));
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields().size(), equalTo(2));
+ assertThat(allEntries.fields().contains("foobar"), equalTo(true));
+ assertThat(allEntries.fields().contains("foo"), equalTo(true));
+ if (!stored) {
+ assertThat(field.stringValue(), nullValue());
+ }
+ String text = stored ? field.stringValue() : "bar foobar";
+ assertThat(text.trim(), equalTo(allEntries.buildText().trim()));
+ } else {
+ assertThat(field, nullValue());
+ }
+ if (similarity == null || similarity.equals("TF/IDF")) {
+ assertThat(builtDocMapper.allFieldMapper().fieldType().similarity(), nullValue());
+ } else {
+ assertThat(similarity, equalTo(builtDocMapper.allFieldMapper().fieldType().similarity().name()));
+ }
+ assertThat(builtMapping.contains("fielddata"), is(fieldData));
+ if (allDefault) {
+ BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(0);
+ XContentBuilder b = new XContentBuilder(XContentType.JSON.xContent(), bytesStreamOutput);
+ XContentBuilder xContentBuilder = builtDocMapper.allFieldMapper().toXContent(b, ToXContent.EMPTY_PARAMS);
+ xContentBuilder.flush();
+ assertThat(bytesStreamOutput.size(), equalTo(0));
+ }
+
+ }
+
+ public void testMultiField_includeInAllSetToFalse() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject()
+ .field("foo")
+ .startObject()
+ .field("bar", "Elasticsearch rules!")
+ .endObject()
+ .endObject();
+
+ Document doc = docMapper.parse("test", "1", builder.bytes()).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields(), empty());
+ }
+
+ public void testMultiField_defaults() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/multifield-mapping_default.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject()
+ .field("foo")
+ .startObject()
+ .field("bar", "Elasticsearch rules!")
+ .endObject()
+ .endObject();
+
+ Document doc = docMapper.parse("test", "1", builder.bytes()).rootDoc();
+ AllField field = (AllField) doc.getField("_all");
+ AllEntries allEntries = field.getAllEntries();
+ assertThat(allEntries.fields(), hasSize(1));
+ assertThat(allEntries.fields(), hasItem("foo.bar"));
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void testMisplacedTypeInRoot() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_type_in_root.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("test", mapping);
+ }
+
+ // related to https://github.com/elasticsearch/elasticsearch/issues/5864
+ @Test(expected = MapperParsingException.class)
+ public void testMistypedTypeInRoot() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("test", mapping);
+ }
+
+ // issue https://github.com/elasticsearch/elasticsearch/issues/5864
+ @Test(expected = MapperParsingException.class)
+ public void testMisplacedMappingAsRoot() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("test", mapping);
+ }
+
+ // issue https://github.com/elasticsearch/elasticsearch/issues/5864
+ // test that RootObjectMapping still works
+ public void testRootObjectMapperPropertiesDoNotCauseException() throws IOException {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json");
+ parser.parse("test", mapping);
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json");
+ parser.parse("test", mapping);
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json");
+ parser.parse("test", mapping);
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/type_numeric_detection_mapping.json");
+ parser.parse("test", mapping);
+ }
+
+ // issue https://github.com/elasticsearch/elasticsearch/issues/5864
+ public void testRootMappersStillWorking() {
+ String mapping = "{";
+ Map<String, String> rootTypes = new HashMap<>();
+        //just pick some examples from DocumentMapperParser.rootTypeParsers
+ rootTypes.put(SizeFieldMapper.NAME, "{\"enabled\" : true}");
+ rootTypes.put(IndexFieldMapper.NAME, "{\"enabled\" : true}");
+ rootTypes.put("include_in_all", "true");
+ rootTypes.put("dynamic_date_formats", "[\"yyyy-MM-dd\", \"dd-MM-yyyy\"]");
+ rootTypes.put("numeric_detection", "true");
+ rootTypes.put("dynamic_templates", "[]");
+ for (String key : rootTypes.keySet()) {
+ mapping += "\"" + key+ "\"" + ":" + rootTypes.get(key) + ",\n";
+ }
+ mapping += "\"properties\":{}}" ;
+ createIndex("test").mapperService().documentMapperParser().parse("test", mapping);
+ }
+
+ public void testDocValuesNotAllowed() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .startObject("_all")
+ .field("doc_values", true)
+ .endObject().endObject().endObject().string();
+ try {
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values"));
+ }
+
+
+ mapping = jsonBuilder().startObject().startObject("type")
+ .startObject("_all")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject().endObject().endObject().endObject().string();
+ Settings legacySettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ try {
+ createIndex("test_old", legacySettings).mapperService().documentMapperParser().parse(mapping);
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values"));
+ }
+ }
+
+ public void testAutoBoost() throws Exception {
+ for (boolean boost : new boolean[] {false, true}) {
+ String index = "test_" + boost;
+ IndexService indexService = createIndex(index, client().admin().indices().prepareCreate(index).addMapping("type", "foo", "type=string" + (boost ? ",boost=2" : "")));
+ client().prepareIndex(index, "type").setSource("foo", "bar").get();
+ client().admin().indices().prepareRefresh(index).get();
+ Query query = indexService.mapperService().documentMapper("type").allFieldMapper().termQuery("bar", null);
+ try (Searcher searcher = indexService.shard(0).acquireSearcher("tests")) {
+ query = searcher.searcher().rewrite(query);
+ final Class<?> expected = boost ? AllTermQuery.class : TermQuery.class;
+ assertThat(query, Matchers.instanceOf(expected));
+ }
+ }
+ }
+
+ public void testIncludeInObjectBackcompat() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().field("_all", "foo").endObject().bytes());
+
+ assertNull(doc.rootDoc().get("_all"));
+ AllField field = (AllField) doc.rootDoc().getField("_all");
+        // the backcompat behavior is to ignore a directly specified _all field
+ assertFalse(field.getAllEntries().fields().iterator().hasNext());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
new file mode 100644
index 0000000000..f956b84f95
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "omit_norms":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json
new file mode 100644
index 0000000000..452ef9f083
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_boost_omit_positions_on_all.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled": true ,
+ "index_options" : "freqs"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost": 2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json
new file mode 100644
index 0000000000..f6b0699bee
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_offsets_on_all.json
@@ -0,0 +1,56 @@
+{
+ "person":{
+ "_all":{
+ "enabled": true ,
+ "index_options" : "offsets"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost": 2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
new file mode 100644
index 0000000000..f8e418ce8e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/mapping_omit_positions_on_all.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "_all":{
+ "enabled": true ,
+ "index_options" : "freqs"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json b/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json
new file mode 100644
index 0000000000..f08757a9e9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json
@@ -0,0 +1,11 @@
+{
+ "mapping": {
+ "test": {
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_type_in_root.json b/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_type_in_root.json
new file mode 100644
index 0000000000..f4b325c6c1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/misplaced_type_in_root.json
@@ -0,0 +1,8 @@
+{
+ "type": "string",
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json b/core/src/test/java/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json
new file mode 100644
index 0000000000..19edf59767
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json
@@ -0,0 +1,9 @@
+{
+ "testX": {
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_default.json b/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_default.json
new file mode 100644
index 0000000000..6a5f044b12
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_default.json
@@ -0,0 +1,21 @@
+{
+ "test": {
+ "properties": {
+ "foo": {
+ "type": "nested",
+ "properties": {
+ "bar": {
+ "type": "string",
+ "index": "not_analyzed",
+ "fields": {
+ "lower": {
+ "analyzer": "standard",
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json b/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json
new file mode 100644
index 0000000000..5a0ad92afa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/multifield-mapping_include_in_all_set_to_false.json
@@ -0,0 +1,23 @@
+{
+ "test": {
+ "properties": {
+ "foo": {
+ "type": "nested",
+ "include_in_all": false,
+ "properties": {
+ "bar": {
+ "type": "string",
+ "index": "not_analyzed",
+ "include_in_all": false,
+ "fields": {
+ "lower": {
+ "analyzer": "standard",
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
new file mode 100644
index 0000000000..799a3ab460
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/noboost-mapping.json
@@ -0,0 +1,54 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "include_in_all":true
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
new file mode 100644
index 0000000000..8f653a3484
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/store-mapping.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "_all":{
+ "enabled":true,
+ "store":"yes"
+ },
+ "properties":{
+ "name":{
+ "type":"object",
+ "dynamic":false,
+ "properties":{
+ "first":{
+ "type":"string",
+ "store":"yes",
+ "include_in_all":false
+ },
+ "last":{
+ "type":"string",
+ "index":"not_analyzed",
+ "boost":2.0
+ }
+ }
+ },
+ "address":{
+ "type":"object",
+ "include_in_all":false,
+ "properties":{
+ "first":{
+ "properties":{
+ "location":{
+ "type":"string",
+ "store":"yes"
+ }
+ }
+ },
+ "last":{
+ "properties":{
+ "location":{
+ "type":"string"
+ }
+ }
+ }
+ }
+ },
+ "simple1":{
+ "type":"long",
+ "include_in_all":true
+ },
+ "simple2":{
+ "type":"long",
+ "include_in_all":false
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/test1.json b/core/src/test/java/org/elasticsearch/index/mapper/all/test1.json
new file mode 100644
index 0000000000..4437d3f798
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/test1.json
@@ -0,0 +1,16 @@
+{
+ "name":{
+ "first":"shay",
+ "last":"banon"
+ },
+ "address":{
+ "first":{
+ "location":"first location"
+ },
+ "last":{
+ "location":"last location"
+ }
+ },
+ "simple1":1,
+ "simple2":2
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json
new file mode 100644
index 0000000000..c2db712ced
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/type_date_detection_mapping.json
@@ -0,0 +1,8 @@
+{
+ "date_detection" : false,
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json
new file mode 100644
index 0000000000..7e6afd397c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_date_formats_mapping.json
@@ -0,0 +1,8 @@
+{
+ "dynamic_date_formats" : ["yyyy-MM-dd", "dd-MM-yyyy"],
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json
new file mode 100644
index 0000000000..b155fb7204
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/type_dynamic_template_mapping.json
@@ -0,0 +1,17 @@
+{
+ "dynamic_templates" : [
+ {
+ "dynamic_template_name" : {
+ "match" : "*",
+ "mapping" : {
+ "store" : true
+ }
+ }
+ }
+ ],
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/type_numeric_detection_mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/all/type_numeric_detection_mapping.json
new file mode 100644
index 0000000000..4729354600
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/all/type_numeric_detection_mapping.json
@@ -0,0 +1,8 @@
+{
+ "numeric_detection" : false,
+ "properties": {
+ "foo": {
+ "type": "string"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
new file mode 100644
index 0000000000..efe22e0eaf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/binary/BinaryMappingTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.binary;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ */
+public class BinaryMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testDefaultMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "binary")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field");
+ assertThat(fieldMapper, instanceOf(BinaryFieldMapper.class));
+ assertThat(fieldMapper.fieldType().stored(), equalTo(false));
+ }
+
+ public void testStoredValue() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "binary")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ // case 1: a simple binary value
+ final byte[] binaryValue1 = new byte[100];
+ binaryValue1[56] = 1;
+
+ // case 2: a value that looks compressed: this used to fail in 1.x
+ BytesStreamOutput out = new BytesStreamOutput();
+ try (StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out)) {
+ new BytesArray(binaryValue1).writeTo(compressed);
+ }
+ final byte[] binaryValue2 = out.bytes().toBytes();
+ assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue2)));
+
+ for (byte[] value : Arrays.asList(binaryValue1, binaryValue2)) {
+ ParsedDocument doc = mapper.parse("type", "id", XContentFactory.jsonBuilder().startObject().field("field", value).endObject().bytes());
+ BytesRef indexedValue = doc.rootDoc().getBinaryValue("field");
+ assertEquals(new BytesRef(value), indexedValue);
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field");
+ Object originalValue = fieldMapper.valueForSearch(indexedValue);
+ assertEquals(new BytesArray(value), originalValue);
+ }
+ }
+
+ public void testCompressedBackCompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field")
+ .field("type", "binary")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_5_0).build();
+ DocumentMapper mapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ final byte[] original = new byte[100];
+ original[56] = 1;
+ BytesStreamOutput out = new BytesStreamOutput();
+ try (StreamOutput compressed = CompressorFactory.defaultCompressor().streamOutput(out)) {
+ new BytesArray(original).writeTo(compressed);
+ }
+ final byte[] binaryValue = out.bytes().toBytes();
+ assertTrue(CompressorFactory.isCompressed(new BytesArray(binaryValue)));
+
+ ParsedDocument doc = mapper.parse("type", "id", XContentFactory.jsonBuilder().startObject().field("field", binaryValue).endObject().bytes());
+ BytesRef indexedValue = doc.rootDoc().getBinaryValue("field");
+ assertEquals(new BytesRef(binaryValue), indexedValue);
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field");
+ Object originalValue = fieldMapper.valueForSearch(indexedValue);
+ assertEquals(new BytesArray(original), originalValue);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
new file mode 100644
index 0000000000..0e1c9a8d5a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/CustomBoostMappingTests.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CustomBoostMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testCustomBoostValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("s_field").field("type", "string").endObject()
+ .startObject("l_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("i_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("sh_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("b_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("d_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("f_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("s_field").field("value", "s_value").field("boost", 2.0f).endObject()
+ .startObject("l_field").field("value", 1l).field("boost", 3.0f).endObject()
+ .startObject("i_field").field("value", 1).field("boost", 4.0f).endObject()
+ .startObject("sh_field").field("value", 1).field("boost", 5.0f).endObject()
+ .startObject("b_field").field("value", 1).field("boost", 6.0f).endObject()
+ .startObject("d_field").field("value", 1).field("boost", 7.0f).endObject()
+ .startObject("f_field").field("value", 1).field("boost", 8.0f).endObject()
+ .startObject("date_field").field("value", "20100101").field("boost", 9.0f).endObject()
+ .endObject().bytes());
+
+ assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f));
+ assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f));
+ assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f));
+ assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f));
+ assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f));
+ assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f));
+ assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f));
+ assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
new file mode 100644
index 0000000000..8b9e9ab052
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/boost/FieldLevelBoostTests.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.boost;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+
+/**
+ */
+public class FieldLevelBoostTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference json = XContentFactory.jsonBuilder().startObject()
+ .startObject("str_field").field("boost", 2.0).field("value", "some name").endObject()
+ .startObject("int_field").field("boost", 3.0).field("value", 10).endObject()
+ .startObject("byte_field").field("boost", 4.0).field("value", 20).endObject()
+ .startObject("date_field").field("boost", 5.0).field("value", "2012-01-10").endObject()
+ .startObject("double_field").field("boost", 6.0).field("value", 30.0).endObject()
+ .startObject("float_field").field("boost", 7.0).field("value", 40.0).endObject()
+ .startObject("long_field").field("boost", 8.0).field("value", 50).endObject()
+ .startObject("short_field").field("boost", 9.0).field("value", 60).endObject()
+ .bytes();
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ IndexableField f = doc.getField("str_field");
+ assertThat((double) f.boost(), closeTo(2.0, 0.001));
+
+ f = doc.getField("int_field");
+ assertThat((double) f.boost(), closeTo(3.0, 0.001));
+
+ f = doc.getField("byte_field");
+ assertThat((double) f.boost(), closeTo(4.0, 0.001));
+
+ f = doc.getField("date_field");
+ assertThat((double) f.boost(), closeTo(5.0, 0.001));
+
+ f = doc.getField("double_field");
+ assertThat((double) f.boost(), closeTo(6.0, 0.001));
+
+ f = doc.getField("float_field");
+ assertThat((double) f.boost(), closeTo(7.0, 0.001));
+
+ f = doc.getField("long_field");
+ assertThat((double) f.boost(), closeTo(8.0, 0.001));
+
+ f = doc.getField("short_field");
+ assertThat((double) f.boost(), closeTo(9.0, 0.001));
+ }
+
+ @Test
+ public void testInvalidFieldLevelBoost() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("str_field").field("type", "string").endObject()
+ .startObject("int_field").field("type", "integer").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("byte_field").field("type", "byte").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("date_field").field("type", "date").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("double_field").field("type", "double").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("float_field").field("type", "float").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("long_field").field("type", "long").startObject("norms").field("enabled", true).endObject().endObject()
+ .startObject("short_field").field("type", "short").startObject("norms").field("enabled", true).endObject().endObject()
+ .string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("str_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("int_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("byte_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("date_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("double_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("float_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("long_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ try {
+ docMapper.parse("person", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("short_field").field("foo", "bar")
+ .endObject().bytes()).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ // Expected
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java b/core/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
new file mode 100644
index 0000000000..ae4298c063
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/camelcase/CamelCaseFieldNameTests.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.camelcase;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+/**
+ *
+ */
+public class CamelCaseFieldNameTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testCamelCaseFieldNameStaysAsIs() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
+ DocumentMapper documentMapper = index.mapperService().documentMapper("type");
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("thisIsCamelCase", "value1")
+ .endObject().bytes());
+
+ assertNotNull(doc.dynamicMappingsUpdate());
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get();
+
+ assertNotNull(documentMapper.mappers().getMapper("thisIsCamelCase"));
+ assertNull(documentMapper.mappers().getMapper("this_is_camel_case"));
+
+ documentMapper = index.mapperService().documentMapperParser().parse(documentMapper.mappingSource().string());
+
+ assertNotNull(documentMapper.mappers().getMapper("thisIsCamelCase"));
+ assertNull(documentMapper.mappers().getMapper("this_is_camel_case"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
new file mode 100644
index 0000000000..dfc9624438
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/completion/CompletionFieldMapperTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.completion;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionFieldMapperTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ assertThat(completionFieldMapper.isStoringPayloads(), is(false));
+ }
+
+ @Test
+ public void testThatSerializationIncludesAllElements() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("analyzer", "simple")
+ .field("search_analyzer", "standard")
+ .field("payloads", true)
+ .field("preserve_separators", false)
+ .field("preserve_position_increments", true)
+ .field("max_input_length", 14)
+
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("analyzer").toString(), is("simple"));
+ assertThat(configMap.get("search_analyzer").toString(), is("standard"));
+ assertThat(Boolean.valueOf(configMap.get("payloads").toString()), is(true));
+ assertThat(Boolean.valueOf(configMap.get("preserve_separators").toString()), is(false));
+ assertThat(Boolean.valueOf(configMap.get("preserve_position_increments").toString()), is(true));
+ assertThat(Integer.valueOf(configMap.get("max_input_length").toString()), is(14));
+ }
+
+ @Test
+ public void testThatSerializationCombinesToOneAnalyzerFieldIfBothAreEqual() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .field("analyzer", "simple")
+ .field("search_analyzer", "simple")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
+ assertThat(fieldMapper, instanceOf(CompletionFieldMapper.class));
+
+ CompletionFieldMapper completionFieldMapper = (CompletionFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ completionFieldMapper.toXContent(builder, null).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> configMap = (Map<String, Object>) serializedMap.get("completion");
+ assertThat(configMap.get("analyzer").toString(), is("simple"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
new file mode 100644
index 0000000000..5dbe80d2a2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/compound/CompoundTypesTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.compound;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompoundTypesTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testStringType() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("field1").field("value", "value1").field("boost", 2.0f).endObject()
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(2.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .bytes());
+
+ assertThat(doc.rootDoc().get("field1"), equalTo("value1"));
+ assertThat((double) doc.rootDoc().getField("field1").boost(), closeTo(1.0d, 0.000001d));
+ assertThat(doc.rootDoc().get("field2"), equalTo("value2"));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
new file mode 100644
index 0000000000..d30fa25a99
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperIntegrationTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class CopyToMapperIntegrationTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testDynamicTemplateCopyTo() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("test-idx")
+ .addMapping("doc", createDynamicTemplateMapping())
+ );
+
+ int recordCount = between(1, 200);
+
+ for (int i = 0; i < recordCount * 2; i++) {
+ client().prepareIndex("test-idx", "doc", Integer.toString(i))
+ .setSource("test_field", "test " + i, "even", i % 2 == 0)
+ .get();
+ }
+ client().admin().indices().prepareRefresh("test-idx").execute().actionGet();
+
+ SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+
+ SearchResponse response = client().prepareSearch("test-idx")
+ .setQuery(QueryBuilders.termQuery("even", true))
+ .addAggregation(AggregationBuilders.terms("test").field("test_field").size(recordCount * 2)
+ .collectMode(aggCollectionMode))
+ .addAggregation(AggregationBuilders.terms("test_raw").field("test_field_raw").size(recordCount * 2)
+ .collectMode(aggCollectionMode))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) recordCount));
+
+ assertThat(((Terms) response.getAggregations().get("test")).getBuckets().size(), equalTo(recordCount + 1));
+ assertThat(((Terms) response.getAggregations().get("test_raw")).getBuckets().size(), equalTo(recordCount));
+
+ }
+
+
+ private XContentBuilder createDynamicTemplateMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startArray("dynamic_templates")
+
+ .startObject().startObject("template_raw")
+ .field("match", "*_raw")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("index", "not_analyzed").endObject()
+ .endObject().endObject()
+
+ .startObject().startObject("template_all")
+ .field("match", "*")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("type", "string").field("copy_to", "{name}_raw").endObject()
+ .endObject().endObject()
+
+ .endArray();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
new file mode 100644
index 0000000000..116bf9cfdf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
@@ -0,0 +1,359 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.copyto;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.startsWith;
+
+/**
+ *
+ */
+public class CopyToMapperTests extends ElasticsearchSingleNodeTest {
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "another_field", "cyclic_test")
+ .endObject()
+
+ .startObject("another_field")
+ .field("type", "string")
+ .endObject()
+
+ .startObject("cyclic_test")
+ .field("type", "string")
+ .array("copy_to", "copy_test")
+ .endObject()
+
+ .startObject("int_to_str_test")
+ .field("type", "integer")
+ .field("doc_values", false)
+ .array("copy_to", "another_field", "new_field")
+ .endObject()
+ .endObject().endObject().endObject().string();
+
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("type1").setSource(mapping).get();
+ DocumentMapper docMapper = index.mapperService().documentMapper("type1");
+ FieldMapper fieldMapper = docMapper.mappers().getMapper("copy_test");
+
+ // Check json serialization
+ StringFieldMapper stringFieldMapper = (StringFieldMapper) fieldMapper;
+ XContentBuilder builder = jsonBuilder().startObject();
+ stringFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ Map<String, Object> copyTestMap = (Map<String, Object>) serializedMap.get("copy_test");
+ assertThat(copyTestMap.get("type").toString(), is("string"));
+ List<String> copyToList = (List<String>) copyTestMap.get("copy_to");
+ assertThat(copyToList.size(), equalTo(2));
+ assertThat(copyToList.get(0).toString(), equalTo("another_field"));
+ assertThat(copyToList.get(1).toString(), equalTo("cyclic_test"));
+
+ // Check data parsing
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .field("cyclic_test", "bar")
+ .field("int_to_str_test", 42)
+ .endObject().bytes();
+
+ ParsedDocument parsedDoc = docMapper.parse("type1", "1", json);
+ ParseContext.Document doc = parsedDoc.rootDoc();
+ assertThat(doc.getFields("copy_test").length, equalTo(2));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("copy_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("another_field").length, equalTo(2));
+ assertThat(doc.getFields("another_field")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("another_field")[1].stringValue(), equalTo("42"));
+
+ assertThat(doc.getFields("cyclic_test").length, equalTo(2));
+ assertThat(doc.getFields("cyclic_test")[0].stringValue(), equalTo("foo"));
+ assertThat(doc.getFields("cyclic_test")[1].stringValue(), equalTo("bar"));
+
+ assertThat(doc.getFields("int_to_str_test").length, equalTo(1));
+ assertThat(doc.getFields("int_to_str_test")[0].numericValue().intValue(), equalTo(42));
+
+ assertThat(doc.getFields("new_field").length, equalTo(2)); // new field has doc values
+ assertThat(doc.getFields("new_field")[0].numericValue().intValue(), equalTo(42));
+
+ assertNotNull(parsedDoc.dynamicMappingsUpdate());
+ client().admin().indices().preparePutMapping("test").setType("type1").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+
+ fieldMapper = docMapper.mappers().getMapper("new_field");
+ assertThat(fieldMapper, instanceOf(LongFieldMapper.class));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .startObject("very")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("inner")
+ .field("type", "object")
+ .endObject()
+ .endObject()
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .startObject("foo").startObject("bar").field("baz", "zoo").endObject().endObject()
+ .endObject().bytes();
+
+ ParseContext.Document doc = docMapper.parse("type1", "1", json).rootDoc();
+ assertThat(doc.getFields("copy_test").length, equalTo(1));
+ assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+
+ assertThat(doc.getFields("very.inner.field").length, equalTo(1));
+ assertThat(doc.getFields("very.inner.field")[0].stringValue(), equalTo("foo"));
+
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testCopyToFieldsNonExistingInnerObjectParsing() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .field("copy_to", "very.inner.field")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference json = jsonBuilder().startObject()
+ .field("copy_test", "foo")
+ .endObject().bytes();
+
+ try {
+ docMapper.parse("type1", "1", json).rootDoc();
+ fail();
+ } catch (MapperParsingException ex) {
+ assertThat(ex.getMessage(), startsWith("attempt to copy value to non-existing object"));
+ }
+ }
+
+ @Test
+ public void testCopyToFieldMerge() throws Exception { // merging a mapping replaces (not unions) the copy_to field list
+
+ String mappingBefore = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "foo", "bar")
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ String mappingAfter = jsonBuilder().startObject().startObject("type1").startObject("properties")
+
+ .startObject("copy_test")
+ .field("type", "string")
+ .array("copy_to", "baz", "bar") // "foo" dropped, "baz" added relative to mappingBefore
+ .endObject()
+
+ .endObject().endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper docMapperBefore = parser.parse(mappingBefore);
+
+ List<String> fields = docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2)); // initial state: [foo, bar]
+ assertThat(fields.get(0), equalTo("foo"));
+ assertThat(fields.get(1), equalTo("bar"));
+
+
+ DocumentMapper docMapperAfter = parser.parse(mappingAfter);
+
+ MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true); // simulate-only merge first
+
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapperBefore.merge(docMapperAfter.mapping(), false); // apply the merge for real
+
+ fields = docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields();
+
+ assertThat(fields.size(), equalTo(2)); // merged state: [baz, bar] — the new list fully replaces the old one
+ assertThat(fields.get(0), equalTo("baz"))
;
+ assertThat(fields.get(1), equalTo("bar"));
+ }
+
+ public void testCopyToNestedField() throws Exception { // copy_to from inside nested docs must route values to the correct enclosing doc level
+ IndexService indexService = createIndex("test");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ for (boolean mapped : new boolean[] {true, false}) { // run once with explicit target mappings, once relying on dynamic mapping
+ XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("target")
+ .field("type", "long")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("n1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("target")
+ .field("type", "long")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("n2")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("target")
+ .field("type", "long")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("source")
+ .field("type", "long")
+ .field("doc_values", false)
+ .startArray("copy_to")
+ .value("target") // should go to the root doc
+ .value("n1.target") // should go to the parent doc
+ .value("n1.n2.target") // should go to the current doc
+ .endArray()
+ .endObject();
+ for (int i = 0; i < 3; ++i) { // close the three nesting levels opened above
+ if (mapped) {
+ mapping = mapping.startObject("target").field("type", "long").field("doc_values", false).endObject();
+ }
+ mapping = mapping.endObject().endObject();
+ }
+ mapping = mapping.endObject();
+
+ DocumentMapper mapper = parser.parse(mapping.string());
+
+ XContentBuilder jsonDoc = XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("n1")
+ .startObject()
+ .startArray("n2")
+ .startObject()
+ .field("source", 3)
+ .endObject()
+ .startObject()
+ .field("source", 5)
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .startArray("n2")
+ .startObject()
+ .field("source", 7)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject();
+
+ ParsedDocument doc = mapper.parse("type", "1", jsonDoc.bytes());
+ assertEquals(6, doc.docs().size()); // 3 n2 docs + 2 n1 docs + 1 root doc
+
+ Document nested = doc.docs().get(0); // innermost (n2) docs carry only n1.n2.target
+ assertFieldValue(nested, "n1.n2.target", 7L);
+ assertFieldValue(nested, "n1.target"); // no-args call asserts the field is absent
+ assertFieldValue(nested, "target");
+
+ nested = doc.docs().get(2);
+ assertFieldValue(nested, "n1.n2.target", 5L);
+ assertFieldValue(nested, "n1.target");
+ assertFieldValue(nested, "target");
+
+ nested = doc.docs().get(3);
+ assertFieldValue(nested, "n1.n2.target", 3L);
+ assertFieldValue(nested, "n1.target");
+ assertFieldValue(nested, "target");
+
+ Document parent = doc.docs().get(1); // n1-level docs carry only n1.target, collected from their n2 children
+ assertFieldValue(parent, "target");
+ assertFieldValue(parent, "n1.target", 7L);
+ assertFieldValue(parent, "n1.n2.target");
+
+ parent = doc.docs().get(4);
+ assertFieldValue(parent, "target");
+ assertFieldValue(parent, "n1.target", 3L, 5L);
+ assertFieldValue(parent, "n1.n2.target");
+
+ Document root = doc.docs().get(5); // root doc collects every source value into target
+ assertFieldValue(root, "target", 3L, 5L, 7L);
+ assertFieldValue(root, "n1.target");
+ assertFieldValue(root, "n1.n2.target");
+ }
+ }
+
+ private void assertFieldValue(Document doc, String field, Number... expected) { // asserts the field holds exactly the expected numeric values; pass none to assert absence
+ IndexableField[] values = doc.getFields(field);
+ if (values == null) {
+ values = new IndexableField[0]; // normalize "field missing" to an empty value list
+ }
+ Number[] actual = new Number[values.length];
+ for (int i = 0; i < values.length; ++i) {
+ actual[i] = values[i].numericValue();
+ }
+ assertArrayEquals(expected, actual); // order-sensitive comparison
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java
new file mode 100644
index 0000000000..16abe80515
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldMapperTests.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Before;
+
+import java.io.IOException;
+
+public class BooleanFieldMapperTests extends ElasticsearchSingleNodeTest { // unit tests for the boolean field type's indexing defaults and mapping serialization
+
+ IndexService indexService;
+ DocumentMapperParser parser;
+
+ @Before
+ public void before() {
+ indexService = createIndex("test");
+ parser = indexService.mapperService().documentMapperParser();
+ }
+
+ public void testDefaults() throws IOException { // a bare {"type": "boolean"} mapping indexes terms and doc values
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "boolean").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", true)
+ .endObject()
+ .bytes());
+
+ try (Directory dir = new RAMDirectory(); // index the parsed doc into a throwaway in-memory index to inspect what was written
+ IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(getRandom())))) {
+ w.addDocuments(doc.docs());
+ try (DirectoryReader reader = DirectoryReader.open(w, true)) {
+ final LeafReader leaf = reader.leaves().get(0).reader();
+ // boolean fields are indexed and have doc values by default
+ assertEquals(new BytesRef("T"), leaf.terms("field").iterator().next()); // true is indexed as the single term "T"
+ SortedNumericDocValues values = leaf.getSortedNumericDocValues("field");
+ assertNotNull(values);
+ values.setDocument(0);
+ assertEquals(1, values.count());
+ assertEquals(1, values.valueAt(0)); // true is stored as numeric doc value 1
+ }
+ }
+ }
+
+ public void testSerialization() throws IOException { // toXContent round-trips only non-default parameters
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "boolean").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().getMapper("field");
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ mapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ assertEquals("{\"field\":{\"type\":\"boolean\"}}", builder.string()); // defaults are omitted from serialization
+
+ // now change some parameters
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "boolean")
+ .field("doc_values", "false") // deliberately a string — the parser must coerce it
+ .field("null_value", true)
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = parser.parse(mapping);
+ mapper = defaultMapper.mappers().getMapper("field");
+ builder = XContentFactory.jsonBuilder().startObject();
+ mapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ assertEquals("{\"field\":{\"type\":\"boolean\",\"doc_values\":false,\"null_value\":true}}", builder.string()); // non-default params are serialized, as proper JSON booleans
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java
new file mode 100644
index 0000000000..d28609e5be
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Before;
+
+public class Murmur3FieldMapperTests extends ElasticsearchSingleNodeTest { // murmur3 fields lock doc_values/index settings on current indices but accept them on pre-2.0 indices
+
+ IndexService indexService;
+ DocumentMapperParser parser;
+
+ @Before
+ public void before() {
+ indexService = createIndex("test");
+ parser = indexService.mapperService().documentMapperParser();
+ }
+
+ public void testDocValuesSettingNotAllowed() throws Exception { // doc_values is fixed for murmur3 on current-version indices
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("doc_values", false)
+ .endObject().endObject().endObject().endObject().string();
+ try {
+ parser.parse(mapping);
+ fail("expected a mapper parsing exception");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified"));
+ }
+
+ // even setting to the default is not allowed, the setting is invalid
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("doc_values", true)
+ .endObject().endObject().endObject().endObject().string();
+ try {
+ parser.parse(mapping);
+ fail("expected a mapper parsing exception");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified"));
+ }
+ }
+
+ public void testIndexSettingNotAllowed() throws Exception { // the index setting is likewise fixed for murmur3
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("index", "not_analyzed")
+ .endObject().endObject().endObject().endObject().string();
+ try {
+ parser.parse(mapping);
+ fail("expected a mapper parsing exception");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("Setting [index] cannot be modified"));
+ }
+
+ // even setting to the default is not allowed, the setting is invalid
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("index", "no")
+ .endObject().endObject().endObject().endObject().string();
+ try {
+ parser.parse(mapping);
+ fail("expected a mapper parsing exception");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("Setting [index] cannot be modified"));
+ }
+ }
+
+ public void testDocValuesSettingBackcompat() throws Exception { // indices created before 2.0 may still override doc_values
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ indexService = createIndex("test_bwc", settings);
+ parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("doc_values", false)
+ .endObject().endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field");
+ assertFalse(mapper.fieldType().hasDocValues()); // the override was honored on the old-version index
+ }
+
+ public void testIndexSettingBackcompat() throws Exception { // same back-compat leniency for the index setting
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ indexService = createIndex("test_bwc", settings);
+ parser = indexService.mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field")
+ .field("type", "murmur3")
+ .field("index", "not_analyzed")
+ .endObject().endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field");
+ assertEquals(IndexOptions.DOCS, mapper.fieldType().indexOptions()); // "not_analyzed" maps to DOCS-only index options
+ }
+
+ // TODO: add more tests
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
new file mode 100644
index 0000000000..ffbe671580
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import com.google.common.collect.ImmutableList;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest { // parameterized over whether the counted string field is stored and whether it is loaded
+ @ParametersFactory
+ public static Iterable<Object[]> buildParameters() { // cartesian product: {store} x {load} = 4 runs
+ List<Object[]> parameters = new ArrayList<>();
+ for (boolean storeCountedFields : new boolean[] { true, false }) {
+ for (boolean loadCountedFields : new boolean[] { true, false }) {
+ parameters.add(new Object[] { storeCountedFields, loadCountedFields });
+ }
+ }
+ return parameters;
+ }
+
+ private final boolean storeCountedFields;
+ private final boolean loadCountedFields;
+
+ public TokenCountFieldMapperIntegrationTests(@Name("storeCountedFields") boolean storeCountedFields,
+ @Name("loadCountedFields") boolean loadCountedFields) {
+ this.storeCountedFields = storeCountedFields;
+ this.loadCountedFields = loadCountedFields;
+ }
+
+ /**
+ * It is possible to get the token count in a search response.
+ */
+ @Test
+ public void searchReturnsTokenCount() throws IOException {
+ init();
+
+ assertSearchReturns(searchById("single"), "single");
+ assertSearchReturns(searchById("bulk1"), "bulk1");
+ assertSearchReturns(searchById("bulk2"), "bulk2");
+ assertSearchReturns(searchById("multi"), "multi");
+ assertSearchReturns(searchById("multibulk1"), "multibulk1");
+ assertSearchReturns(searchById("multibulk2"), "multibulk2");
+ }
+
+ /**
+ * It is possible to search by token count.
+ */
+ @Test
+ public void searchByTokenCount() throws IOException {
+ init();
+
+ assertSearchReturns(searchByNumericRange(4, 4).get(), "single");
+ assertSearchReturns(searchByNumericRange(10, 10).get(), "multibulk2");
+ assertSearchReturns(searchByNumericRange(7, 10).get(), "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(1, 10).get(), "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertSearchReturns(searchByNumericRange(12, 12).get()); // no document has 12 tokens
+ }
+
+ /**
+ * It is possible to aggregate (facet) on the token count.
+ */
+ @Test
+ public void facetByTokenCount() throws IOException {
+ init();
+
+ String facetField = randomFrom(ImmutableList.of(
+ "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values"));
+ SearchResponse result = searchByNumericRange(1, 10)
+ .addAggregation(AggregationBuilders.terms("facet").field(facetField)).get();
+ assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2");
+ assertThat(result.getAggregations().asList().size(), equalTo(1));
+ Terms terms = (Terms) result.getAggregations().asList().get(0);
+ assertThat(terms.getBuckets().size(), equalTo(9)); // 9 distinct token counts across all indexed values
+ }
+
+ private void init() throws IOException { // creates the index and seeds the six documents the tests query for
+ prepareCreate("test").addMapping("test", jsonBuilder().startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("store", storeCountedFields)
+ .field("analyzer", "simple")
+ .endObject()
+ .startObject("token_count")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .field("store", true)
+ .endObject()
+ .startObject("token_count_unstored")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("token_count_with_doc_values")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()).get();
+ ensureGreen();
+
+ assertTrue(prepareIndex("single", "I have four terms").get().isCreated());
+ BulkResponse bulk = client().prepareBulk()
+ .add(prepareIndex("bulk1", "bulk three terms"))
+ .add(prepareIndex("bulk2", "this has five bulk terms")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+ assertTrue(prepareIndex("multi", "two terms", "wow now I have seven lucky terms").get().isCreated()); // multi-valued field: one count per value
+ bulk = client().prepareBulk()
+ .add(prepareIndex("multibulk1", "one", "oh wow now I have eight unlucky terms"))
+ .add(prepareIndex("multibulk2", "six is a bunch of terms", "ten! ten terms is just crazy! too many too count!")).get();
+ assertFalse(bulk.buildFailureMessage(), bulk.hasFailures());
+
+ assertThat(refresh().getFailedShards(), equalTo(0)); // make everything searchable before assertions run
+ }
+
+ private IndexRequestBuilder prepareIndex(String id, String... texts) throws IOException {
+ return client().prepareIndex("test", "test", id).setSource("foo", texts);
+ }
+
+ private SearchResponse searchById(String id) {
+ return prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).get();
+ }
+
+ private SearchRequestBuilder searchByNumericRange(int low, int high) { // range query on a randomly-chosen token_count sub-field (inclusive bounds)
+ return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom(
+ ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")
+ )).gte(low).lte(high));
+ }
+
+ private SearchRequestBuilder prepareSearch() {
+ SearchRequestBuilder request = client().prepareSearch("test").setTypes("test");
+ request.addField("foo.token_count");
+ if (loadCountedFields) {
+ request.addField("foo");
+ }
+ return request;
+ }
+
+ private void assertSearchReturns(SearchResponse result, String... ids) { // asserts exactly these ids are hit and their token counts match the seeded texts
+ assertThat(result.getHits().getTotalHits(), equalTo((long) ids.length));
+ assertThat(result.getHits().hits().length, equalTo(ids.length));
+ List<String> foundIds = new ArrayList<>();
+ for (SearchHit hit : result.getHits()) {
+ foundIds.add(hit.id());
+ }
+ assertThat(foundIds, containsInAnyOrder(ids)); // order-insensitive id comparison
+ for (SearchHit hit : result.getHits()) {
+ String id = hit.id();
+ if (id.equals("single")) {
+ assertSearchHit(hit, 4);
+ } else if (id.equals("bulk1")) {
+ assertSearchHit(hit, 3);
+ } else if (id.equals("bulk2")) {
+ assertSearchHit(hit, 5);
+ } else if (id.equals("multi")) {
+ assertSearchHit(hit, 2, 7);
+ } else if (id.equals("multibulk1")) {
+ assertSearchHit(hit, 1, 8);
+ } else if (id.equals("multibulk2")) {
+ assertSearchHit(hit, 6, 10);
+ } else {
+ throw new ElasticsearchException("Unexpected response!");
+ }
+ }
+ }
+
+ private void assertSearchHit(SearchHit hit, int... termCounts) { // one expected count per value of the multi-valued "foo" field
+ assertThat(hit.field("foo.token_count"), not(nullValue()));
+ assertThat(hit.field("foo.token_count").values().size(), equalTo(termCounts.length));
+ for (int i = 0; i < termCounts.length; i++) {
+ assertThat((Integer) hit.field("foo.token_count").values().get(i), equalTo(termCounts[i]));
+ }
+
+ if (loadCountedFields && storeCountedFields) { // "foo" itself is only retrievable when both stored and requested
+ assertThat(hit.field("foo").values().size(), equalTo(termCounts.length));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
new file mode 100644
index 0000000000..ae1aeccae9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test for {@link TokenCountFieldMapper}.
+ */
+public class TokenCountFieldMapperTests extends ElasticsearchSingleNodeTest {
+ @Test
+ public void testMerge() throws IOException {
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper stage1 = parser.parse(stage1Mapping);
+
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("person")
+ .startObject("properties")
+ .startObject("tc")
+ .field("type", "token_count")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage2 = parser.parse(stage2Mapping);
+
+ MergeResult mergeResult = stage1.merge(stage2.mapping(), true);
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // Just simulated so merge hasn't happened yet
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));
+
+ mergeResult = stage1.merge(stage2.mapping(), false);
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // Just simulated so merge hasn't happened yet
+ assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard"));
+ }
+
+ @Test
+ public void testCountPositions() throws IOException {
+ // We're looking to make sure that we:
+ Token t1 = new Token(); // Don't count tokens without an increment
+ t1.setPositionIncrement(0);
+ Token t2 = new Token();
+ t2.setPositionIncrement(1); // Count normal tokens with one increment
+ Token t3 = new Token();
+ t2.setPositionIncrement(2); // Count funny tokens with more than one increment
+ int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
+ Token[] tokens = new Token[] {t1, t2, t3};
+ Collections.shuffle(Arrays.asList(tokens), getRandom());
+ TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
+ assertThat(TokenCountFieldMapper.countPositions(tokenStream), equalTo(7));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
new file mode 100644
index 0000000000..86d9fc322e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.date;
+
+import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.LocaleUtils;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.TestSearchContext;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.mapper.string.SimpleStringMappingTests.docValuesType;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
+
+    // Dynamic mapping should detect yyyy/MM/dd-style values as dates, while
+    // strings that merely contain digits or slashes fall back to string fields.
+    public void testAutomaticDateParser() throws Exception {
+        // Empty properties: every field below is mapped dynamically.
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field1", "2011/01/22")
+                .field("date_field2", "2011/01/22 00:00:00")
+                .field("wrong_date1", "-4")
+                .field("wrong_date2", "2012/2")
+                .field("wrong_date3", "2012/test")
+                .endObject()
+                .bytes());
+        // Parsing unmapped fields must have produced a dynamic mapping update;
+        // apply it so the mappers below reflect the detected types.
+        assertNotNull(doc.dynamicMappingsUpdate());
+        client().admin().indices().preparePutMapping("test-0").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get();
+
+        // Well-formed date strings become DateFieldMappers ...
+        FieldMapper fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field1");
+        assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+        fieldMapper = defaultMapper.mappers().smartNameFieldMapper("date_field2");
+        assertThat(fieldMapper, instanceOf(DateFieldMapper.class));
+
+        // ... while the malformed ones stay plain strings.
+        fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date1");
+        assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+        fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date2");
+        assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+        fieldMapper = defaultMapper.mappers().smartNameFieldMapper("wrong_date3");
+        assertThat(fieldMapper, instanceOf(StringFieldMapper.class));
+    }
+
+    // LocaleUtils.parse must handle language, language_country and
+    // language_country_variant forms, reject a fourth segment, and map
+    // "" / "ROOT" to Locale.ROOT.
+    public void testParseLocal() {
+        assertThat(Locale.GERMAN, equalTo(LocaleUtils.parse("de")));
+        assertThat(Locale.GERMANY, equalTo(LocaleUtils.parse("de_DE")));
+        assertThat(new Locale("de","DE","DE"), equalTo(LocaleUtils.parse("de_DE_DE")));
+
+        try {
+            LocaleUtils.parse("de_DE_DE_DE"); // four segments is invalid
+            fail();
+        } catch(IllegalArgumentException ex) {
+            // expected
+        }
+        assertThat(Locale.ROOT, equalTo(LocaleUtils.parse("")));
+        assertThat(Locale.ROOT, equalTo(LocaleUtils.parse("ROOT")));
+    }
+
+    // The "locale" mapping option controls how month/day names are parsed:
+    // the same instant written in English and German must index to identical
+    // numeric tokens, and the default locale must accept the English form.
+    public void testLocale() throws IOException {
+        assumeFalse("Locals are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false));
+        String mapping = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("type")
+                .startObject("properties")
+                .startObject("date_field_default")
+                .field("type", "date")
+                .field("format", "E, d MMM yyyy HH:mm:ss Z")
+                .endObject()
+                .startObject("date_field_en")
+                .field("type", "date")
+                .field("format", "E, d MMM yyyy HH:mm:ss Z")
+                .field("locale", "EN")
+                .endObject()
+                .startObject("date_field_de")
+                .field("type", "date")
+                .field("format", "E, d MMM yyyy HH:mm:ss Z")
+                .field("locale", "DE_de")
+                .endObject()
+                .endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field_en", "Wed, 06 Dec 2000 02:55:00 -0800")
+                .field("date_field_de", "Mi, 06 Dez 2000 02:55:00 -0800")
+                .field("date_field_default", "Wed, 06 Dec 2000 02:55:00 -0800") // check default - no exception is a successs!
+                .endObject()
+                .bytes());
+        // Same instant => same indexed numeric token values, regardless of locale.
+        assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_de");
+        assertNumericTokensEqual(doc, defaultMapper, "date_field_en", "date_field_default");
+    }
+
+    @Before
+    public void reset() {
+        // Reset the index-name counter so index names ("test-0", "test-1", ...)
+        // are deterministic within each test method.
+        i = 0;
+    }
+
+    // Counter used by mapper() to derive a fresh index name per call.
+    int i = 0;
+
+    // Creates a new index, registers `mapping` for `type` on it, and returns
+    // the resulting DocumentMapper from the index's mapper service.
+    private DocumentMapper mapper(String type, String mapping) throws IOException {
+        final String indexName = "test-" + (i++);
+        IndexService index = createIndex(indexName);
+        client().admin().indices().preparePutMapping(indexName).setType(type).setSource(mapping).get();
+        return index.mapperService().documentMapper(type);
+    }
+
+    // Asserts that fieldA and fieldB of the parsed document produce identical
+    // sequences of raw numeric token values (i.e. they encode the same instant).
+    // NOTE(review): the token streams are reset but never end()'d/close()'d —
+    // presumably fine for these in-memory fields, but worth confirming.
+    private void assertNumericTokensEqual(ParsedDocument doc, DocumentMapper defaultMapper, String fieldA, String fieldB) throws IOException {
+        assertThat(doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.mappers().indexAnalyzer(), null), notNullValue());
+        assertThat(doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.mappers().indexAnalyzer(), null), notNullValue());
+
+        // Collect all raw numeric values from fieldA's stream ...
+        TokenStream tokenStream = doc.rootDoc().getField(fieldA).tokenStream(defaultMapper.mappers().indexAnalyzer(), null);
+        tokenStream.reset();
+        NumericTermAttribute nta = tokenStream.addAttribute(NumericTermAttribute.class);
+        List<Long> values = new ArrayList<>();
+        while(tokenStream.incrementToken()) {
+            values.add(nta.getRawValue());
+        }
+
+        // ... then walk fieldB's stream and compare value-by-value.
+        tokenStream = doc.rootDoc().getField(fieldB).tokenStream(defaultMapper.mappers().indexAnalyzer(), null);
+        tokenStream.reset();
+        nta = tokenStream.addAttribute(NumericTermAttribute.class);
+        int pos = 0;
+        while(tokenStream.incrementToken()) {
+            assertThat(values.get(pos++), equalTo(nta.getRawValue()));
+        }
+        // Both streams must have yielded the same number of tokens.
+        assertThat(pos, equalTo(values.size()));
+    }
+
+    // A numeric (epoch-millis) value must be accepted by a date field and
+    // produce an indexable token stream.
+    public void testTimestampAsDate() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        long value = System.currentTimeMillis();
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", value)
+                .endObject()
+                .bytes());
+
+        assertThat(doc.rootDoc().getField("date_field").tokenStream(defaultMapper.mappers().indexAnalyzer(), null), notNullValue());
+    }
+
+    // With date_detection disabled, an explicitly mapped date field is still
+    // parsed as a date (stored as epoch millis), while an unmapped field with a
+    // date-looking value stays a plain string.
+    public void testDateDetection() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .field("date_detection", false)
+                .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "2010-01-01")
+                .field("date_field_x", "2010-01-01")
+                .endObject()
+                .bytes());
+
+        // Mapped date field: converted to epoch millis. Unmapped field: raw string.
+        assertThat(doc.rootDoc().get("date_field"), equalTo("1262304000000"));
+        assertThat(doc.rootDoc().get("date_field_x"), equalTo("2010-01-01"));
+    }
+
+    // A time-only format ("HH:mm:ss") must parse relative to the epoch day:
+    // "10:00:00" indexes as 10 hours of millis, and range queries on the field
+    // resolve their bounds the same way.
+    public void testHourFormat() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .field("date_detection", false)
+                .startObject("properties").startObject("date_field").field("type", "date").field("format", "HH:mm:ss").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "10:00:00")
+                .endObject()
+                .bytes());
+        assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(10).millis(), DateTimeZone.UTC).getMillis())));
+
+        NumericRangeQuery<Long> rangeQuery;
+        try {
+            // rangeQuery() needs a current SearchContext; install a stub one.
+            SearchContext.setCurrent(new TestSearchContext());
+            rangeQuery = (NumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").rangeQuery("10:00:00", "11:00:00", true, true, null);
+        } finally {
+            SearchContext.removeCurrent();
+        }
+        assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(11).millis()).getMillis()));
+        assertThat(rangeQuery.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(10).millis()).getMillis()));
+    }
+
+    // A format without a year ("MMM dd HH:mm:ss") must default to the epoch
+    // year: "Jan 02 10:00:00" is 34 hours (1 day + 10 h) past the epoch.
+    public void testDayWithoutYearFormat() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .field("date_detection", false)
+                .startObject("properties").startObject("date_field").field("type", "date").field("format", "MMM dd HH:mm:ss").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "Jan 02 10:00:00")
+                .endObject()
+                .bytes());
+        assertThat(((LongFieldMapper.CustomLongNumericField) doc.rootDoc().getField("date_field")).numericAsString(), equalTo(Long.toString(new DateTime(TimeValue.timeValueHours(34).millis(), DateTimeZone.UTC).getMillis())));
+
+        NumericRangeQuery<Long> rangeQuery;
+        try {
+            // rangeQuery() needs a current SearchContext; install a stub one.
+            SearchContext.setCurrent(new TestSearchContext());
+            rangeQuery = (NumericRangeQuery<Long>) defaultMapper.mappers().smartNameFieldMapper("date_field").rangeQuery("Jan 02 10:00:00", "Jan 02 11:00:00", true, true, null);
+        } finally {
+            SearchContext.removeCurrent();
+        }
+        assertThat(rangeQuery.getMax(), equalTo(new DateTime(TimeValue.timeValueHours(35).millis()).getMillis()));
+        assertThat(rangeQuery.getMin(), equalTo(new DateTime(TimeValue.timeValueHours(34).millis()).getMillis()));
+    }
+
+    // ignore_malformed=true drops unparseable values; ignore_malformed=false
+    // (explicit or default) makes parsing throw; the index-level
+    // index.mapping.ignore_malformed setting supplies the default, but an
+    // explicit per-field false always wins.
+    public void testIgnoreMalformedOption() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("field1").field("type", "date").field("ignore_malformed", true).endObject()
+                .startObject("field2").field("type", "date").field("ignore_malformed", false).endObject()
+                .startObject("field3").field("type", "date").endObject()
+                .endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("field1", "a")
+                .field("field2", "2010-01-01")
+                .endObject()
+                .bytes());
+        // Malformed value on an ignore_malformed=true field is silently dropped.
+        assertThat(doc.rootDoc().getField("field1"), nullValue());
+        assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("field2", "a")
+                    .endObject()
+                    .bytes());
+            // BUG FIX: without this fail() the test silently passed when no
+            // exception was thrown at all.
+            fail("expected MapperParsingException for malformed value on field2");
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+        }
+
+        // Verify that the default is false
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("field3", "a")
+                    .endObject()
+                    .bytes());
+            fail("expected MapperParsingException for malformed value on field3");
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+        }
+
+        // Unless the global ignore_malformed option is set to true
+        Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+        defaultMapper = createIndex("test2", indexSettings).mapperService().documentMapperParser().parse(mapping);
+        doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("field3", "a")
+                .endObject()
+                .bytes());
+        assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+        // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+        try {
+            defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                    .startObject()
+                    .field("field2", "a")
+                    .endObject()
+                    .bytes());
+            fail("expected MapperParsingException for malformed value on field2");
+        } catch (MapperParsingException e) {
+            assertThat(e.getCause(), instanceOf(MapperParsingException.class));
+        }
+    }
+
+    // Merging an updated mapping that appends an extra date format must succeed
+    // without conflicts and the merged mapper must report the new format list.
+    public void testThatMergingWorks() throws Exception {
+        String initialMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("field").field("type", "date")
+                .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")
+                .endObject()
+                .endObject()
+                .endObject().endObject().string();
+
+        // Same as the initial mapping plus one extra "||"-separated format.
+        String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties")
+                .startObject("field")
+                .field("type", "date")
+                .field("format", "EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ")
+                .endObject()
+                .endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", initialMapping);
+        DocumentMapper mergeMapper = mapper("type", updatedMapping);
+
+        assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class)));
+        DateFieldMapper initialDateFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field");
+        Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
+        assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));
+
+        // simulate=false: actually apply the merge.
+        MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false);
+
+        assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false));
+        assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class)));
+
+        DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field");
+        Map<String, String> mergedConfig = getConfigurationViaXContent(mergedFieldMapper);
+        assertThat(mergedConfig.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy||yyyy-MM-dd'T'HH:mm:ss.SSSZZ"));
+    }
+
+    // By default a date field must index SORTED_NUMERIC doc values.
+    public void testDefaultDocValues() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("date_field").field("type", "date").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "2010-01-01")
+                .endObject()
+                .bytes());
+        ParseContext.Document doc = parsedDoc.rootDoc();
+        assertEquals(DocValuesType.SORTED_NUMERIC, docValuesType(doc, "date_field"));
+    }
+
+    // Serializes the given date mapper to JSON and returns the "field" entry's
+    // configuration as a map (so tests can inspect e.g. the "format" option).
+    // Note: the final cast is unchecked; values are assumed to be strings.
+    private Map<String, String> getConfigurationViaXContent(DateFieldMapper dateFieldMapper) throws IOException {
+        XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+        dateFieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+        Map<String, Object> dateFieldMapperMap = JsonXContent.jsonXContent.createParser(builder.string()).mapAndClose();
+        assertThat(dateFieldMapperMap, hasKey("field"));
+        assertThat(dateFieldMapperMap.get("field"), is(instanceOf(Map.class)));
+        return (Map<String, String>) dateFieldMapperMap.get("field");
+    }
+
+ private static long getDateAsMillis(Document doc, String field) {
+ for (IndexableField f : doc.getFields(field)) {
+ if (f.numericValue() != null) {
+ return f.numericValue().longValue();
+ }
+ }
+ throw new AssertionError("missing");
+ }
+
+    // With numeric_resolution=seconds, bare numbers (int or numeric string) are
+    // interpreted as seconds and scaled to millis, while values matching the
+    // date format are still parsed as formatted dates (millis).
+    public void testNumericResolution() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("date_field").field("type", "date").field("format", "date_time").field("numeric_resolution", "seconds").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        // provided as an int
+        ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", 42)
+                .endObject()
+                .bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(42000L));
+
+        // provided as a string
+        doc = defaultMapper.parse("type", "2", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "43")
+                .endObject()
+                .bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(43000L));
+
+        // but formatted dates still parse as milliseconds
+        doc = defaultMapper.parse("type", "2", XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "1970-01-01T00:00:44.000Z")
+                .endObject()
+                .bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(44000L));
+    }
+
+    // With a custom "yyyyMMddHH" format, the string "2015060210" must be parsed
+    // via the format (not as an epoch timestamp), and a plain integer with the
+    // same digits must parse to the same instant — both via the mapper and via
+    // an actual index request.
+    public void testThatEpochCanBeIgnoredWithCustomFormat() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("date_field").field("type", "date").field("format", "yyyyMMddHH").endObject().endObject()
+                .endObject().endObject().string();
+
+        DocumentMapper defaultMapper = mapper("type", mapping);
+
+        XContentBuilder document = XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", "2015060210")
+                .endObject();
+        ParsedDocument doc = defaultMapper.parse("type", "1", document.bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(1433239200000L));
+        IndexResponse indexResponse = client().prepareIndex("test", "test").setSource(document).get();
+        assertThat(indexResponse.isCreated(), is(true));
+
+        // integers should always be parsed as well... cannot be sure it is a unix timestamp only
+        // BUG FIX: build (and index) a document whose value really is an integer;
+        // previously the second index request reused the string-valued builder,
+        // so the integer round-trip was never actually exercised.
+        XContentBuilder integerDocument = XContentFactory.jsonBuilder()
+                .startObject()
+                .field("date_field", 2015060210)
+                .endObject();
+        doc = defaultMapper.parse("type", "1", integerDocument.bytes());
+        assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(1433239200000L));
+        indexResponse = client().prepareIndex("test", "test").setSource(integerDocument).get();
+        assertThat(indexResponse.isCreated(), is(true));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
new file mode 100644
index 0000000000..d1a4a05a67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/GenericStoreDynamicTemplateTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.genericstore;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Checks that a catch-all dynamic template ({@code "match": "*"}) with
+ * {@code "store": "yes"} causes every dynamically mapped field — string or
+ * numeric — to be stored.
+ */
+public class GenericStoreDynamicTemplateTests extends ElasticsearchSingleNodeTest {
+
+    @Test
+    public void testSimple() throws Exception {
+        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json");
+        IndexService index = createIndex("test");
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping).get();
+        DocumentMapper docMapper = index.mapperService().documentMapper("person");
+        byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json");
+        ParsedDocument parsedDoc = docMapper.parse("person", "1", new BytesArray(json));
+        // Apply the dynamic mapping update produced while parsing so the field
+        // mappers queried below exist.
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+        Document doc = parsedDoc.rootDoc();
+
+        // "name" (string) must be stored per the template.
+        IndexableField f = doc.getField("name");
+        assertThat(f.name(), equalTo("name"));
+        assertThat(f.stringValue(), equalTo("some name"));
+        assertThat(f.fieldType().stored(), equalTo(true));
+
+        FieldMapper fieldMapper = docMapper.mappers().getMapper("name");
+        assertThat(fieldMapper.fieldType().stored(), equalTo(true));
+
+        // "age" (numeric) must be stored as well.
+        f = doc.getField("age");
+        assertThat(f.name(), equalTo("age"));
+        assertThat(f.fieldType().stored(), equalTo(true));
+
+        fieldMapper = docMapper.mappers().getMapper("age");
+        assertThat(fieldMapper.fieldType().stored(), equalTo(true));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
new file mode 100644
index 0000000000..b7439dcb9f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json
@@ -0,0 +1,4 @@
+{
+ "name":"some name",
+ "age":1
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
new file mode 100644
index 0000000000..d99067c2b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-mapping.json
@@ -0,0 +1,14 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "match":"*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
new file mode 100644
index 0000000000..c8c159b2df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/PathMatchDynamicTemplateTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.pathmatch;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Checks that {@code path_match} dynamic templates are applied in declaration
+ * order: the most specific path pattern ("obj1.obj2.*" => not stored) wins over
+ * the broader one ("obj1.*" => stored), top-level fields match neither, and a
+ * wildcard-in-the-middle pattern ("*.obj4.*") also matches.
+ * NOTE(review): the FieldMappers and MapperUtils imports appear unused here —
+ * candidates for cleanup.
+ */
+public class PathMatchDynamicTemplateTests extends ElasticsearchSingleNodeTest {
+
+    @Test
+    public void testSimple() throws Exception {
+        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json");
+        IndexService index = createIndex("test");
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping).get();
+        DocumentMapper docMapper = index.mapperService().documentMapper("person");
+        byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json");
+        ParsedDocument parsedDoc = docMapper.parse("person", "1", new BytesArray(json));
+        // Apply the dynamic mapping update produced while parsing so the field
+        // mappers queried below exist.
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+        Document doc = parsedDoc.rootDoc();
+
+        // Top-level "name" matches no template => default (not stored).
+        IndexableField f = doc.getField("name");
+        assertThat(f.name(), equalTo("name"));
+        assertThat(f.stringValue(), equalTo("top_level"));
+        assertThat(f.fieldType().stored(), equalTo(false));
+
+        FieldMapper fieldMapper = docMapper.mappers().getMapper("name");
+        assertThat(fieldMapper.fieldType().stored(), equalTo(false));
+
+        // "obj1.name" matches "obj1.*" => stored.
+        f = doc.getField("obj1.name");
+        assertThat(f.name(), equalTo("obj1.name"));
+        assertThat(f.fieldType().stored(), equalTo(true));
+
+        fieldMapper = docMapper.mappers().getMapper("obj1.name");
+        assertThat(fieldMapper.fieldType().stored(), equalTo(true));
+
+        // "obj1.obj2.name" matches the earlier, more specific "obj1.obj2.*" => not stored.
+        f = doc.getField("obj1.obj2.name");
+        assertThat(f.name(), equalTo("obj1.obj2.name"));
+        assertThat(f.fieldType().stored(), equalTo(false));
+
+        fieldMapper = docMapper.mappers().getMapper("obj1.obj2.name");
+        assertThat(fieldMapper.fieldType().stored(), equalTo(false));
+
+        // verify more complex path_match expressions
+
+        fieldMapper = docMapper.mappers().getMapper("obj3.obj4.prop1");
+        assertNotNull(fieldMapper);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
new file mode 100644
index 0000000000..2e6ec997c4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json
@@ -0,0 +1,14 @@
+{
+ "name":"top_level",
+ "obj1":{
+ "name":"obj1_level",
+ "obj2":{
+ "name":"obj2_level"
+ }
+ },
+ "obj3":{
+ "obj4":{
+ "prop1":"prop1_value"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
new file mode 100644
index 0000000000..dce33dadfc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-mapping.json
@@ -0,0 +1,30 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "template_1":{
+ "path_match":"obj1.obj2.*",
+ "mapping":{
+ "store":"no"
+ }
+ }
+ },
+ {
+ "template_2":{
+ "path_match":"obj1.*",
+ "mapping":{
+ "store":"yes"
+ }
+ }
+ },
+ {
+ "template_3":{
+ "path_match":"*.obj4.*",
+ "mapping":{
+ "type":"string"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
new file mode 100644
index 0000000000..a8b2502471
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/SimpleDynamicTemplatesTests.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.dynamictemplate.simple;
+
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleDynamicTemplatesTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testMatchTypeOnly() throws Exception {
+ XContentBuilder builder = JsonXContent.contentBuilder();
+ builder.startObject().startObject("person").startArray("dynamic_templates").startObject().startObject("test")
+ .field("match_mapping_type", "string")
+ .startObject("mapping").field("index", "no").endObject()
+ .endObject().endObject().endArray().endObject().endObject();
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(builder.string()).get();
+ DocumentMapper docMapper = index.mapperService().documentMapper("person");
+ builder = JsonXContent.contentBuilder();
+ builder.startObject().field("s", "hello").field("l", 1).endObject();
+ ParsedDocument parsedDoc = docMapper.parse("person", "1", builder.bytes());
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+
+ DocumentFieldMappers mappers = docMapper.mappers();
+
+ assertThat(mappers.smartNameFieldMapper("s"), Matchers.notNullValue());
+ assertEquals(IndexOptions.NONE, mappers.smartNameFieldMapper("s").fieldType().indexOptions());
+
+ assertThat(mappers.smartNameFieldMapper("l"), Matchers.notNullValue());
+ assertNotSame(IndexOptions.NONE, mappers.smartNameFieldMapper("l").fieldType().indexOptions());
+
+
+ }
+
+
+ @Test
+ public void testSimple() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping).get();
+ DocumentMapper docMapper = index.mapperService().documentMapper("person");
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ ParsedDocument parsedDoc = docMapper.parse("person", "1", new BytesArray(json));
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+ Document doc = parsedDoc.rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMapper fieldMapper = docMapper.mappers().getMapper("name");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMapper = docMapper.mappers().getMapper("multi1");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMapper = docMapper.mappers().getMapper("multi1.org");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMapper = docMapper.mappers().getMapper("multi2");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMapper = docMapper.mappers().getMapper("multi2.org");
+ assertNotNull(fieldMapper);
+ }
+
+ @Test
+ public void testSimpleWithXContentTraverse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json");
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping).get();
+ DocumentMapper docMapper = index.mapperService().documentMapper("person");
+ byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json");
+ ParsedDocument parsedDoc = docMapper.parse("person", "1", new BytesArray(json));
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(parsedDoc.dynamicMappingsUpdate().toString()).get();
+ Document doc = parsedDoc.rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ FieldMapper fieldMapper = docMapper.mappers().getMapper("name");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi1");
+ assertThat(f.name(), equalTo("multi1"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMapper = docMapper.mappers().getMapper("multi1");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi1.org");
+ assertThat(f.name(), equalTo("multi1.org"));
+ assertThat(f.stringValue(), equalTo("multi 1"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMapper = docMapper.mappers().getMapper("multi1.org");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi2");
+ assertThat(f.name(), equalTo("multi2"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+
+ fieldMapper = docMapper.mappers().getMapper("multi2");
+ assertNotNull(fieldMapper);
+
+ f = doc.getField("multi2.org");
+ assertThat(f.name(), equalTo("multi2.org"));
+ assertThat(f.stringValue(), equalTo("multi 2"));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ assertThat(f.fieldType().tokenized(), equalTo(false));
+
+ fieldMapper = docMapper.mappers().getMapper("multi2.org");
+ assertNotNull(fieldMapper);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
new file mode 100644
index 0000000000..1ed3c50b98
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json
@@ -0,0 +1,6 @@
+{
+ "name":"some name",
+ "age":1,
+ "multi1":"multi 1",
+ "multi2":"multi 2"
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
new file mode 100644
index 0000000000..9c8f8d8e6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/dynamictemplate/simple/test-mapping.json
@@ -0,0 +1,33 @@
+{
+ "person":{
+ "dynamic_templates":[
+ {
+ "tempalte_1":{
+ "match":"multi*",
+ "mapping":{
+ "type":"{dynamic_type}",
+ "index":"analyzed",
+ "store":"yes",
+ "fields":{
+ "org":{
+ "type":"{dynamic_type}",
+ "index":"not_analyzed",
+ "store":"yes"
+ }
+ }
+ }
+ }
+ },
+ {
+ "template_2":{
+ "match":"*",
+ "match_mapping_type":"string",
+ "mapping":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalIndexModule.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalIndexModule.java
new file mode 100755
index 0000000000..bcc6fc055e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalIndexModule.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+/**
+ *
+ */
+public class ExternalIndexModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(RegisterExternalTypes.class).asEagerSingleton();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
new file mode 100755
index 0000000000..45c6322f35
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.spatial4j.core.shape.Point;
+import org.apache.lucene.document.Field;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParseContext;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
+import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
+
+/**
+ * This mapper add a new sub fields
+ * .bin Binary type
+ * .bool Boolean type
+ * .point GeoPoint type
+ * .shape GeoShape type
+ */
+public class ExternalMapper extends AbstractFieldMapper {
+
+ public static class Names {
+ public static final String FIELD_BIN = "bin";
+ public static final String FIELD_BOOL = "bool";
+ public static final String FIELD_POINT = "point";
+ public static final String FIELD_SHAPE = "shape";
+ }
+
+ public static class Builder extends AbstractFieldMapper.Builder<Builder, ExternalMapper> {
+
+ private BinaryFieldMapper.Builder binBuilder = new BinaryFieldMapper.Builder(Names.FIELD_BIN);
+ private BooleanFieldMapper.Builder boolBuilder = new BooleanFieldMapper.Builder(Names.FIELD_BOOL);
+ private GeoPointFieldMapper.Builder pointBuilder = new GeoPointFieldMapper.Builder(Names.FIELD_POINT);
+ private GeoShapeFieldMapper.Builder shapeBuilder = new GeoShapeFieldMapper.Builder(Names.FIELD_SHAPE);
+ private Mapper.Builder stringBuilder;
+ private String generatedValue;
+ private String mapperName;
+
+ public Builder(String name, String generatedValue, String mapperName) {
+ super(name, Defaults.FIELD_TYPE);
+ this.builder = this;
+ this.stringBuilder = stringField(name).store(false);
+ this.generatedValue = generatedValue;
+ this.mapperName = mapperName;
+ }
+
+ public Builder string(Mapper.Builder content) {
+ this.stringBuilder = content;
+ return this;
+ }
+
+ @Override
+ public ExternalMapper build(BuilderContext context) {
+ ContentPath.Type origPathType = context.path().pathType();
+ context.path().pathType(ContentPath.Type.FULL);
+
+ context.path().add(name);
+ BinaryFieldMapper binMapper = binBuilder.build(context);
+ BooleanFieldMapper boolMapper = boolBuilder.build(context);
+ GeoPointFieldMapper pointMapper = pointBuilder.build(context);
+ GeoShapeFieldMapper shapeMapper = shapeBuilder.build(context);
+ FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context);
+ context.path().remove();
+
+ context.path().pathType(origPathType);
+ setupFieldType(context);
+
+ return new ExternalMapper(fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper,
+ context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
+ }
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+
+ private String generatedValue;
+ private String mapperName;
+
+ TypeParser(String mapperName, String generatedValue) {
+ this.mapperName = mapperName;
+ this.generatedValue = generatedValue;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ ExternalMapper.Builder builder = new ExternalMapper.Builder(name, generatedValue, mapperName);
+ parseField(builder, name, node, parserContext);
+ for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
+ Map.Entry<String, Object> entry = iterator.next();
+ String propName = Strings.toUnderscoreCase(entry.getKey());
+ Object propNode = entry.getValue();
+
+ if (parseMultiField(builder, name, parserContext, propName, propNode)) {
+ iterator.remove();
+ }
+ }
+
+ return builder;
+ }
+ }
+
+ private final String generatedValue;
+ private final String mapperName;
+
+ private final BinaryFieldMapper binMapper;
+ private final BooleanFieldMapper boolMapper;
+ private final GeoPointFieldMapper pointMapper;
+ private final GeoShapeFieldMapper shapeMapper;
+ private final FieldMapper stringMapper;
+
+ public ExternalMapper(MappedFieldType fieldType,
+ String generatedValue, String mapperName,
+ BinaryFieldMapper binMapper, BooleanFieldMapper boolMapper, GeoPointFieldMapper pointMapper,
+ GeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+ super(fieldType, false, null, indexSettings,
+ multiFields, copyTo);
+ this.generatedValue = generatedValue;
+ this.mapperName = mapperName;
+ this.binMapper = binMapper;
+ this.boolMapper = boolMapper;
+ this.pointMapper = pointMapper;
+ this.shapeMapper = shapeMapper;
+ this.stringMapper = stringMapper;
+ }
+
+ @Override
+ public MappedFieldType defaultFieldType() {
+ return Defaults.FIELD_TYPE;
+ }
+
+ @Override
+ public FieldDataType defaultFieldDataType() {
+ return null;
+ }
+
+ @Override
+ public Mapper parse(ParseContext context) throws IOException {
+ byte[] bytes = "Hello world".getBytes(Charset.defaultCharset());
+ binMapper.parse(context.createExternalValueContext(bytes));
+
+ boolMapper.parse(context.createExternalValueContext(true));
+
+ // Let's add a Dummy Point
+ Double lat = 42.0;
+ Double lng = 51.0;
+ GeoPoint point = new GeoPoint(lat, lng);
+ pointMapper.parse(context.createExternalValueContext(point));
+
+ // Let's add a Dummy Shape
+ Point shape = ShapeBuilder.newPoint(-100, 45).build();
+ shapeMapper.parse(context.createExternalValueContext(shape));
+
+ context = context.createExternalValueContext(generatedValue);
+
+ // Let's add a Original String
+ stringMapper.parse(context);
+
+ multiFields.parse(this, context);
+ return null;
+ }
+
+ @Override
+ protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+ // ignore this for now
+ }
+
+ @Override
+ public Iterator<Mapper> iterator() {
+ return Iterators.concat(super.iterator(), Lists.newArrayList(binMapper, boolMapper, pointMapper, shapeMapper, stringMapper).iterator());
+ }
+
+ @Override
+ public void close() {
+ binMapper.close();
+ boolMapper.close();
+ pointMapper.close();
+ shapeMapper.close();
+ stringMapper.close();
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject(fieldType().names().shortName());
+ builder.field("type", mapperName);
+ multiFields.toXContent(builder, params);
+ builder.endObject();
+ return builder;
+ }
+
+ @Override
+ protected String contentType() {
+ return mapperName;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapperPlugin.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapperPlugin.java
new file mode 100644
index 0000000000..d9821af638
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapperPlugin.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.plugins.AbstractPlugin;
+
+import java.util.Collection;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+public class ExternalMapperPlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "external-mappers";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "External Mappers Plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> indexModules() {
+ Collection<Class<? extends Module>> modules = newArrayList();
+ modules.add(ExternalIndexModule.class);
+ return modules;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java
new file mode 100644
index 0000000000..9e4d43f518
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.*;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map;
+
+public class ExternalRootMapper implements RootMapper {
+
+ static final String CONTENT_TYPE = "_external_root";
+ static final String FIELD_NAME = "_is_external";
+ static final String FIELD_VALUE = "true";
+
+ @Override
+ public String name() {
+ return CONTENT_TYPE;
+ }
+
+ @Override
+ public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+ if (!(mergeWith instanceof ExternalRootMapper)) {
+ mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this);
+ }
+ }
+
+ @Override
+ public Iterator<Mapper> iterator() {
+ return Collections.emptyIterator();
+ }
+
+ @Override
+ public void close() {
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ return builder.startObject(CONTENT_TYPE).endObject();
+ }
+
+ @Override
+ public void preParse(ParseContext context) throws IOException {
+ }
+
+ @Override
+ public void postParse(ParseContext context) throws IOException {
+ context.doc().add(new StringField(FIELD_NAME, FIELD_VALUE, Store.YES));
+ }
+
+ public static class Builder extends Mapper.Builder<Builder, ExternalRootMapper> {
+
+ protected Builder() {
+ super(CONTENT_TYPE);
+ }
+
+ @Override
+ public ExternalRootMapper build(BuilderContext context) {
+ return new ExternalRootMapper();
+ }
+
+ }
+
+ public static class TypeParser implements Mapper.TypeParser {
+
+ @Override
+ public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+ return new Builder();
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationTests.java
new file mode 100644
index 0000000000..7ee7a099cf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class ExternalValuesMapperIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", ExternalMapperPlugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testExternalValues() throws Exception {
+ prepareCreate("test-idx").addMapping("type",
+ XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject(ExternalRootMapper.CONTENT_TYPE)
+ .endObject()
+ .startObject("properties")
+ .startObject("field").field("type", RegisterExternalTypes.EXTERNAL).endObject()
+ .endObject()
+ .endObject().endObject()).execute().get();
+ ensureYellow("test-idx");
+
+ index("test-idx", "type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject());
+ refresh();
+
+ SearchResponse response;
+
+ response = client().prepareSearch("test-idx")
+ .setPostFilter(QueryBuilders.termQuery("field.bool", "T"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) 1));
+
+ response = client().prepareSearch("test-idx")
+ .setPostFilter(QueryBuilders.geoDistanceRangeQuery("field.point").point(42.0, 51.0).to("1km"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) 1));
+
+ response = client().prepareSearch("test-idx")
+ .setPostFilter(QueryBuilders.geoShapeQuery("field.shape", ShapeBuilder.newPoint(-100, 45), ShapeRelation.WITHIN))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) 1));
+
+ response = client().prepareSearch("test-idx")
+ .setPostFilter(QueryBuilders.termQuery("field.field", "foo"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) 1));
+ }
+
+ @Test
+ public void testExternalValuesWithMultifield() throws Exception {
+ prepareCreate("test-idx").addMapping("doc",
+ XContentFactory.jsonBuilder().startObject().startObject("doc").startObject("properties")
+ .startObject("f")
+ .field("type", RegisterExternalTypes.EXTERNAL_UPPER)
+ .startObject("fields")
+ .startObject("f")
+ .field("type", "string")
+ .field("store", "yes")
+ .startObject("fields")
+ .startObject("raw")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()).execute().get();
+ ensureYellow("test-idx");
+
+ index("test-idx", "doc", "1", "f", "This is my text");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test-idx")
+ .setQuery(QueryBuilders.termQuery("f.f.raw", "FOO BAR"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo((long) 1));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/RegisterExternalTypes.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/RegisterExternalTypes.java
new file mode 100755
index 0000000000..5cd8110a37
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/RegisterExternalTypes.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.settings.IndexSettings;
+
+public class RegisterExternalTypes extends AbstractIndexComponent {
+ public static final String EXTERNAL = "external";
+ public static final String EXTERNAL_BIS = "external_bis";
+ public static final String EXTERNAL_UPPER = "external_upper";
+
+ @Inject
+ public RegisterExternalTypes(Index index, @IndexSettings Settings indexSettings, MapperService mapperService) {
+ super(index, indexSettings);
+
+ mapperService.documentMapperParser().putRootTypeParser(ExternalRootMapper.CONTENT_TYPE, new ExternalRootMapper.TypeParser());
+ mapperService.documentMapperParser().putTypeParser(EXTERNAL, new ExternalMapper.TypeParser(EXTERNAL, "foo"));
+ mapperService.documentMapperParser().putTypeParser(EXTERNAL_BIS, new ExternalMapper.TypeParser(EXTERNAL_BIS, "bar"));
+ mapperService.documentMapperParser().putTypeParser(EXTERNAL_UPPER, new ExternalMapper.TypeParser(EXTERNAL_UPPER, "FOO BAR"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java
new file mode 100644
index 0000000000..b95fda8d7b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/SimpleExternalMappingTests.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.externalvalues;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+public class SimpleExternalMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testExternalValues() throws Exception {
+ MapperService mapperService = createIndex("test").mapperService();
+ mapperService.documentMapperParser().putRootTypeParser(ExternalRootMapper.CONTENT_TYPE,
+ new ExternalRootMapper.TypeParser());
+ mapperService.documentMapperParser().putTypeParser(RegisterExternalTypes.EXTERNAL,
+ new ExternalMapper.TypeParser(RegisterExternalTypes.EXTERNAL, "foo"));
+
+ DocumentMapper documentMapper = mapperService.documentMapperParser().parse(
+ XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject(ExternalRootMapper.CONTENT_TYPE)
+ .endObject()
+ .startObject("properties")
+ .startObject("field").field("type", "external").endObject()
+ .endObject()
+ .endObject().endObject().string()
+ );
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field.bool"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T"));
+
+ assertThat(doc.rootDoc().getField("field.point"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
+
+ assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
+
+ assertThat(doc.rootDoc().getField("field.field"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo"));
+
+ assertThat(doc.rootDoc().getField(ExternalRootMapper.FIELD_NAME).stringValue(), is(ExternalRootMapper.FIELD_VALUE));
+
+ }
+
+ @Test
+ public void testExternalValuesWithMultifield() throws Exception {
+ MapperService mapperService = createIndex("test").mapperService();
+ mapperService.documentMapperParser().putTypeParser(RegisterExternalTypes.EXTERNAL,
+ new ExternalMapper.TypeParser(RegisterExternalTypes.EXTERNAL, "foo"));
+
+ DocumentMapper documentMapper = mapperService.documentMapperParser().parse(
+ XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("field")
+ .field("type", RegisterExternalTypes.EXTERNAL)
+ .startObject("fields")
+ .startObject("field")
+ .field("type", "string")
+ .field("store", "yes")
+ .startObject("fields")
+ .startObject("raw")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()
+ .string());
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field.bool"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T"));
+
+ assertThat(doc.rootDoc().getField("field.point"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
+
+ assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
+
+ assertThat(doc.rootDoc().getField("field.field"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo"));
+
+ assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field.raw").stringValue(), is("foo"));
+ }
+
+ @Test
+ public void testExternalValuesWithMultifieldTwoLevels() throws Exception {
+ MapperService mapperService = createIndex("test").mapperService();
+
+ mapperService.documentMapperParser().putTypeParser(RegisterExternalTypes.EXTERNAL,
+ new ExternalMapper.TypeParser(RegisterExternalTypes.EXTERNAL, "foo"));
+ mapperService.documentMapperParser().putTypeParser(RegisterExternalTypes.EXTERNAL_BIS,
+ new ExternalMapper.TypeParser(RegisterExternalTypes.EXTERNAL_BIS, "bar"));
+
+ DocumentMapper documentMapper = mapperService.documentMapperParser().parse(
+ XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("field")
+ .field("type", RegisterExternalTypes.EXTERNAL)
+ .startObject("fields")
+ .startObject("field")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("generated")
+ .field("type", RegisterExternalTypes.EXTERNAL_BIS)
+ .endObject()
+ .startObject("raw")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("raw")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()
+ .string());
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field.bool"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T"));
+
+ assertThat(doc.rootDoc().getField("field.point"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.point").stringValue(), is("42.0,51.0"));
+
+ assertThat(doc.rootDoc().getField("field.shape"), notNullValue());
+
+ assertThat(doc.rootDoc().getField("field.field"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field").stringValue(), is("foo"));
+
+ assertThat(doc.rootDoc().getField("field.field.generated.generated"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field.generated.generated").stringValue(), is("bar"));
+
+ assertThat(doc.rootDoc().getField("field.field.raw"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.field.raw").stringValue(), is("foo"));
+
+ assertThat(doc.rootDoc().getField("field.raw"), notNullValue());
+ assertThat(doc.rootDoc().getField("field.raw").stringValue(), is("foo"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
new file mode 100644
index 0000000000..f52363e89a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoEncodingTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+
+public class GeoEncodingTests extends ElasticsearchTestCase {
+
+ public void test() {
+ for (int i = 0; i < 10000; ++i) {
+ final double lat = randomDouble() * 180 - 90;
+ final double lon = randomDouble() * 360 - 180;
+ final Distance precision = new Distance(1+(randomDouble() * 9), randomFrom(Arrays.asList(DistanceUnit.MILLIMETERS, DistanceUnit.METERS, DistanceUnit.KILOMETERS)));
+ final GeoPointFieldMapper.Encoding encoding = GeoPointFieldMapper.Encoding.of(precision);
+ assertThat(encoding.precision().convert(DistanceUnit.METERS).value, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ final GeoPoint geoPoint = encoding.decode(encoding.encodeCoordinate(lat), encoding.encodeCoordinate(lon), new GeoPoint());
+ final double error = GeoDistance.PLANE.calculate(lat, lon, geoPoint.lat(), geoPoint.lon(), DistanceUnit.METERS);
+ assertThat(error, lessThanOrEqualTo(precision.convert(DistanceUnit.METERS).value));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
new file mode 100644
index 0000000000..0c292d9779
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoMappingTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.DistanceUnit.Distance;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+public class GeoMappingTests extends ElasticsearchIntegrationTest {
+
+ public void testUpdatePrecision() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "2mm")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).get());
+ ensureYellow();
+ assertPrecision(new Distance(2, DistanceUnit.MILLIMETERS));
+
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .startObject("fielddata")
+ .field("format", "compressed")
+ .field("precision", "11m")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).get());
+
+ assertPrecision(new Distance(11, DistanceUnit.METERS));
+ }
+
+ private void assertPrecision(Distance expected) throws Exception {
+ ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = client().admin().indices().getMappings(new GetMappingsRequest().indices("test").types("type1")).actionGet().getMappings();
+ assertNotNull(mappings);
+ Map<String, ?> properties = (Map<String, ?>) mappings.get("test").get("type1").getSourceAsMap().get("properties");
+ Map<String, ?> pinProperties = (Map<String, ?>) properties.get("pin");
+ Map<String, ?> pinFieldData = (Map<String, ?>) pinProperties.get("fielddata");
+ Distance precision = Distance.parseDistance(pinFieldData.get("precision").toString());
+ assertEquals(expected, precision);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
new file mode 100644
index 0000000000..084c6b7d3c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
@@ -0,0 +1,504 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+public class GeoPointFieldMapperTests extends ElasticsearchSingleNodeTest {
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").fieldType().stored(), is(false));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonValuesWithGeohash() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testLatLonInOneValueWithGeohash() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testGeoHashIndexValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testNormalizeLatLonValuesDefault() throws Exception {
+ // default to normalize
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("89.0,1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-89.0,-1.0"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 181).field("lon", 361).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), equalTo("-1.0,-179.0"));
+ }
+
+ @Test
+ public void testValidateLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+
+ }
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ fail();
+ } catch (MapperParsingException e) {
+
+ }
+ }
+
+ @Test
+ public void testNoValidateLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("normalize", false).field("validate", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 90).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", -91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 91).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", -181).endObject()
+ .endObject()
+ .bytes());
+
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 181).endObject()
+ .endObject()
+ .bytes());
+ }
+
+ @Test
+ public void testLatLonValuesStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getField("point.geohash"), nullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testArrayLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .startObject().field("lat", 1.2).field("lon", 1.3).endObject()
+ .startObject().field("lat", 1.4).field("lon", 1.5).endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValueArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .value("1.2,1.3")
+ .value("1.4,1.5")
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testLonLatArray() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayDynamic() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startArray("dynamic_templates").startObject()
+ .startObject("point").field("match", "point*").startObject("mapping").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endArray()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point").value(1.3).value(1.2).endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("point.lat"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lat").numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getField("point.lon"), notNullValue());
+ assertThat(doc.rootDoc().getField("point.lon").numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLonLatArrayArrayStored() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("store", "yes").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("point")
+ .startArray().value(1.3).value(1.2).endArray()
+ .startArray().value(1.5).value(1.4).endArray()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getFields("point.lat").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lon").length, equalTo(2));
+ assertThat(doc.rootDoc().getFields("point.lat")[0].numericValue().doubleValue(), equalTo(1.2));
+ assertThat(doc.rootDoc().getFields("point.lon")[0].numericValue().doubleValue(), equalTo(1.3));
+ assertThat(doc.rootDoc().getFields("point")[0].stringValue(), equalTo("1.2,1.3"));
+ assertThat(doc.rootDoc().getFields("point.lat")[1].numericValue().doubleValue(), equalTo(1.4));
+ assertThat(doc.rootDoc().getFields("point.lon")[1].numericValue().doubleValue(), equalTo(1.5));
+ assertThat(doc.rootDoc().getFields("point")[1].stringValue(), equalTo("1.4,1.5"));
+ }
+
+ @Test
+ public void testGeoPointMapperMerge() throws Exception {
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+ .field("validate", true).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper stage1 = parser.parse(stage1Mapping);
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+ .field("validate", false).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper stage2 = parser.parse(stage2Mapping);
+
+ MergeResult mergeResult = stage1.merge(stage2.mapping(), false);
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.buildConflicts().length, equalTo(2));
+ // todo better way of checking conflict?
+ assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()))));
+
+ // correct mapping and ensure no failures
+ stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", true).field("geohash", true)
+ .field("validate", true).field("normalize", true).endObject().endObject()
+ .endObject().endObject().string();
+ stage2 = parser.parse(stage2Mapping);
+ mergeResult = stage1.merge(stage2.mapping(), false);
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
new file mode 100644
index 0000000000..d31e2a1b01
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java
@@ -0,0 +1,384 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.geo;
+
+import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.isIn;
+
+public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testDefaultConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(GeoShapeFieldMapper.Defaults.DISTANCE_ERROR_PCT));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoShapeFieldMapper.Defaults.GEOHASH_LEVELS));
+ assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(GeoShapeFieldMapper.Defaults.ORIENTATION));
+ }
+
+ /**
+ * Test that the "orientation" parameter parses to the expected ShapeBuilder.Orientation values
+ * @throws IOException if building the mapping JSON fails
+ */
+ public void testOrientationParsing() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("orientation", "left")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ ShapeBuilder.Orientation orientation = ((GeoShapeFieldMapper)fieldMapper).fieldType().orientation();
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CLOCKWISE));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.LEFT));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CW));
+
+ // explicit right orientation test
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("orientation", "right")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse(mapping);
+ fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ orientation = ((GeoShapeFieldMapper)fieldMapper).fieldType().orientation();
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.COUNTER_CLOCKWISE));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.RIGHT));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CCW));
+ }
+
+ @Test
+ public void testGeohashConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "4")
+ .field("distance_error_pct", "0.1")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.1));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(4));
+ }
+
+ @Test
+ public void testQuadtreeConfiguration() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(6));
+ }
+
+ @Test
+ public void testLevelPrecisionConfiguration() throws IOException {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ // 70m is more precise so it wins
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", "26")
+ .field("precision", "70m")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ // distance_error_pct was not specified, so the mapper takes the more precise of "precision" and
+ // "tree_levels", and sets distErrPct to 0 to guarantee the desired precision
+ assertThat(strategy.getDistErrPct(), equalTo(0.0));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ // 70m is less precise so it loses
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(26));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", "6")
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ // 70m is more precise so it wins
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("tree_levels", GeoUtils.geoHashLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(70d)+1));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("tree_levels", GeoUtils.quadTreeLevelsForPrecision(70d)+1)
+ .field("precision", "70m")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(70d)+1));
+ }
+ }
+
+ @Test
+ public void testLevelDefaults() throws IOException {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(QuadPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.quadTreeLevelsForPrecision(50d)));
+ }
+
+ {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .field("distance_error_pct", "0.5")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+ FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy.getDistErrPct(), equalTo(0.5));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ /* 50m is default */
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(50d)));
+ }
+ }
+
+ @Test
+ public void testGeoShapeMapperMerge() throws Exception {
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("shape").field("type", "geo_shape").field("tree", "geohash").field("strategy", "recursive")
+ .field("precision", "1m").field("tree_levels", 8).field("distance_error_pct", 0.01).field("orientation", "ccw")
+ .endObject().endObject().endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper stage1 = parser.parse(stage1Mapping);
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("shape").field("type", "geo_shape").field("tree", "quadtree")
+ .field("strategy", "term").field("precision", "1km").field("tree_levels", 26).field("distance_error_pct", 26)
+ .field("orientation", "cw").endObject().endObject().endObject().endObject().string();
+ DocumentMapper stage2 = parser.parse(stage2Mapping);
+
+ MergeResult mergeResult = stage1.merge(stage2.mapping(), false);
+ // check correct conflicts
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.buildConflicts().length, equalTo(3));
+ ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.buildConflicts()));
+ assertThat("mapper [shape] has different strategy", isIn(conflicts));
+ assertThat("mapper [shape] has different tree", isIn(conflicts));
+ assertThat("mapper [shape] has different tree_levels or precision", isIn(conflicts));
+
+ // verify nothing changed
+ FieldMapper fieldMapper = stage1.mappers().getMapper("shape");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ PrefixTreeStrategy strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy, instanceOf(RecursivePrefixTreeStrategy.class));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getDistErrPct(), equalTo(0.01));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(1d)));
+ assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CCW));
+
+ // correct mapping
+ stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m")
+ .field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject().string();
+ stage2 = parser.parse(stage2Mapping);
+ mergeResult = stage1.merge(stage2.mapping(), false);
+
+ // verify mapping changes, and ensure no failures
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+
+ fieldMapper = stage1.mappers().getMapper("shape");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ geoShapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
+ strategy = geoShapeFieldMapper.fieldType().defaultStrategy();
+
+ assertThat(strategy, instanceOf(RecursivePrefixTreeStrategy.class));
+ assertThat(strategy.getGrid(), instanceOf(GeohashPrefixTree.class));
+ assertThat(strategy.getDistErrPct(), equalTo(0.001));
+ assertThat(strategy.getGrid().getMaxLevels(), equalTo(GeoUtils.geoHashLevelsForPrecision(1d)));
+ assertThat(geoShapeFieldMapper.fieldType().orientation(), equalTo(ShapeBuilder.Orientation.CW));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
new file mode 100644
index 0000000000..12d5211f08
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeohashMappingGeoPointTests.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Tests for geo_point mappings that parse and store geohash values.
+ */
+public class GeohashMappingGeoPointTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testLatLonValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject()
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testLatLonInOneValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("lat_lon", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", "1.2,1.3")
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), equalTo("1.2,1.3"));
+ }
+
+ @Test
+ public void testGeoHashValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", GeoHashUtils.encode(1.2, 1.3))
+ .endObject()
+ .bytes());
+
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lat"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().getField("point.lon"), nullValue());
+ MatcherAssert.assertThat(doc.rootDoc().get("point.geohash"), equalTo(GeoHashUtils.encode(1.2, 1.3)));
+ MatcherAssert.assertThat(doc.rootDoc().get("point"), notNullValue());
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsInteger() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).field("geohash_precision", 10).endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
+ assertThat(geoPointFieldMapper.fieldType().geohashPrecision(), is(10));
+ }
+
+ @Test
+ public void testGeoHashPrecisionAsLength() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").field("geohash", true).field("geohash_precision", "5m").endObject().endObject()
+ .endObject().endObject().string();
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("point");
+ assertThat(mapper, instanceOf(GeoPointFieldMapper.class));
+ GeoPointFieldMapper geoPointFieldMapper = (GeoPointFieldMapper) mapper;
+ assertThat(geoPointFieldMapper.fieldType().geohashPrecision(), is(10));
+ }
+
+ @Test
+ public void testNullValue() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("point").field("type", "geo_point").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("point", (Object) null)
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("point"), nullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
new file mode 100644
index 0000000000..8d74dec869
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/id/IdMappingTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.id;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class IdMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testId() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue());
+
+ try {
+ docMapper.parse("type", null, XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+ fail("expect missing id");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("No id found"));
+ }
+ }
+
+ public void testIdIndexedBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("index", "not_analyzed").endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue());
+ assertThat(doc.rootDoc().get(IdFieldMapper.NAME), notNullValue());
+ }
+
+ public void testIdPathBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_id").field("path", "my_path").endObject()
+ .endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ // serialize the id mapping
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ builder = docMapper.idFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ String serialized_id_mapping = builder.string();
+
+ String expected_id_mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("_id").field("path", "my_path").endObject()
+ .endObject().string();
+
+ assertThat(serialized_id_mapping, equalTo(expected_id_mapping));
+ }
+
+ public void testIncludeInObjectBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_id", "1")
+ .endObject()
+ .bytes()).type("type"));
+
+ // _id is not indexed so we need to check _uid
+ assertEquals(Uid.createUid("type", "1"), doc.rootDoc().get(UidFieldMapper.NAME));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperIntegrationTests.java
new file mode 100644
index 0000000000..c4c4ee7ffc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperIntegrationTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.index;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the _index field mapping.
+ */
+public class IndexTypeMapperIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test // issue 5053
+ public void testThatUpdatingMappingShouldNotRemoveSizeMappingConfiguration() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_index").field("enabled", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertIndexMappingEnabled(index, type);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "string").endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+ // make sure the _index field mapping is still enabled after the update
+ assertIndexMappingEnabled(index, type);
+ }
+
+ private void assertIndexMappingEnabled(String index, String type) throws IOException {
+ String errMsg = String.format(Locale.ROOT, "Expected index field mapping to be enabled for %s/%s", index, type);
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get();
+ Map<String, Object> mappingSource = getMappingsResponse.getMappings().get(index).get(type).getSourceAsMap();
+ assertThat(errMsg, mappingSource, hasKey("_index"));
+ String ttlAsString = mappingSource.get("_index").toString();
+ assertThat(ttlAsString, is(notNullValue()));
+ assertThat(errMsg, ttlAsString, is("{enabled=true}"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
new file mode 100644
index 0000000000..612a986878
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.index;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.hamcrest.Matchers.*;
+
+public class IndexTypeMapperTests extends ElasticsearchSingleNodeTest {
+
+ public void testSimpleIndexMapper() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.indexMapper();
+ assertThat(indexMapper.enabled(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), equalTo("test"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ public void testExplicitDisabledIndexMapper() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ public void testDefaultDisabledIndexMapper() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), nullValue());
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ public void testThatMergingFieldMappingAllowsDisabling() throws Exception {
+ String mappingWithIndexEnabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper mapperEnabled = parser.parse(mappingWithIndexEnabled);
+
+
+ String mappingWithIndexDisabled = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper mapperDisabled = parser.parse(mappingWithIndexDisabled);
+
+ mapperEnabled.merge(mapperDisabled.mapping(), false);
+ assertThat(mapperEnabled.IndexFieldMapper().enabled(), is(false));
+ }
+
+ public void testThatDisablingWorksWhenMerging() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper enabledMapper = parser.parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = parser.parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper.mapping(), false);
+ assertThat(enabledMapper.indexMapper().enabled(), is(false));
+ }
+
+ public void testCustomSettingsBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index")
+ .field("enabled", true)
+ .field("store", "yes").endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ IndexFieldMapper indexMapper = docMapper.rootMapper(IndexFieldMapper.class);
+ assertThat(indexMapper.enabled(), equalTo(true));
+ assertThat(indexMapper.fieldType().stored(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("_index"), equalTo("test"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java
new file mode 100644
index 0000000000..a8ce3c145a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java
@@ -0,0 +1,186 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.internal;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.Arrays;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest {
+
+ private static SortedSet<String> extract(String path) {
+ SortedSet<String> set = new TreeSet<>();
+ for (String fieldName : FieldNamesFieldMapper.extractFieldNames(path)) {
+ set.add(fieldName);
+ }
+ return set;
+ }
+
+ private static <T> SortedSet<T> set(T... values) {
+ return new TreeSet<>(Arrays.asList(values));
+ }
+
+ void assertFieldNames(SortedSet<String> expected, ParsedDocument doc) {
+ String[] got = doc.rootDoc().getValues("_field_names");
+ assertEquals(expected, set(got));
+ }
+
+ public void testExtractFieldNames() {
+ assertEquals(set("abc"), extract("abc"));
+ assertEquals(set("a", "a.b"), extract("a.b"));
+ assertEquals(set("a", "a.b", "a.b.c"), extract("a.b.c"));
+ // and now corner cases
+ assertEquals(set("", ".a"), extract(".a"));
+ assertEquals(set("a", "a."), extract("a."));
+ assertEquals(set("", ".", ".."), extract(".."));
+ }
+
+ public void testFieldType() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertFalse(fieldNamesMapper.fieldType().hasDocValues());
+ assertEquals(IndexOptions.DOCS, fieldNamesMapper.fieldType().indexOptions());
+ assertFalse(fieldNamesMapper.fieldType().tokenized());
+ assertFalse(fieldNamesMapper.fieldType().stored());
+ assertTrue(fieldNamesMapper.fieldType().omitNorms());
+ }
+
+ public void testInjectIntoDocDuringParsing() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("a", "100")
+ .startObject("b")
+ .field("c", 42)
+ .endObject()
+ .endObject()
+ .bytes());
+
+ assertFieldNames(set("a", "b", "b.c", "_uid", "_type", "_version", "_source", "_all"), doc);
+ }
+
+ public void testExplicitEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertTrue(fieldNamesMapper.enabled());
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertFieldNames(set("field", "_uid", "_type", "_version", "_source", "_all"), doc);
+ }
+
+ public void testDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertFalse(fieldNamesMapper.enabled());
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertNull(doc.rootDoc().get("_field_names"));
+ }
+
+ public void testPre13Disabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_2_4.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertFalse(fieldNamesMapper.enabled());
+ }
+
+ public void testDisablingBackcompat() throws Exception {
+ // before 1.5, disabling happened by setting index:no
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("index", "no").endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertFalse(fieldNamesMapper.enabled());
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes());
+
+ assertNull(doc.rootDoc().get("_field_names"));
+ }
+
+ public void testFieldTypeSettingsBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("store", "yes").endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class);
+ assertTrue(fieldNamesMapper.fieldType().stored());
+ }
+
+ public void testMergingMappings() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_field_names").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper mapperEnabled = parser.parse(enabledMapping);
+ DocumentMapper mapperDisabled = parser.parse(disabledMapping);
+ mapperEnabled.merge(mapperDisabled.mapping(), false);
+ assertFalse(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled());
+
+ mapperEnabled = parser.parse(enabledMapping);
+ mapperDisabled.merge(mapperEnabled.mapping(), false);
+ assertTrue(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
new file mode 100644
index 0000000000..510c002caf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ip;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testSimpleMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("ip").field("type", "ip").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("ip", "127.0.0.1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("ip").numericValue().longValue(), is(2130706433L));
+ assertThat(doc.rootDoc().get("ip"), is("2130706433"));
+ }
+
+ @Test
+ public void testThatValidIpCanBeConvertedToLong() throws Exception {
+ assertThat(IpFieldMapper.ipToLong("127.0.0.1"), is(2130706433L));
+ }
+
+ @Test
+ public void testThatInvalidIpThrowsException() throws Exception {
+ try {
+ IpFieldMapper.ipToLong("127.0.011.1111111");
+ fail("Expected ip address parsing to fail but did not happen");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("not a valid ip address"));
+ }
+ }
+
+ @Test
+ public void testThatIpv6AddressThrowsException() throws Exception {
+ try {
+ IpFieldMapper.ipToLong("2001:db8:0:8d3:0:8a2e:70:7344");
+ fail("Expected ip address parsing to fail but did not happen");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("not a valid ipv4 address"));
+ }
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties").startObject("field1")
+ .field("type", "ip").field("ignore_malformed", true).endObject().startObject("field2").field("type", "ip")
+ .field("ignore_malformed", false).endObject().startObject("field3").field("type", "ip").endObject().endObject().endObject()
+ .endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1",
+ XContentFactory.jsonBuilder().startObject().field("field1", "").field("field2", "10.20.30.40").endObject().bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+
+ // Verify that the default is false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = createIndex("test2", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java b/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
new file mode 100644
index 0000000000..f88f174cfe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/lucene/DoubleIndexingDocTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperUtils;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class DoubleIndexingDocTest extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testDoubleIndexingSameDoc() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(random(), Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").endObject()
+ .endObject().endObject().string();
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
+ DocumentMapper mapper = index.mapperService().documentMapper("type");
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "value1")
+ .field("field2", 1)
+ .field("field3", 1.1)
+ .field("field4", "2010-01-01")
+ .startArray("field5").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+ assertNotNull(doc.dynamicMappingsUpdate());
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get();
+
+ writer.addDocument(doc.rootDoc());
+ writer.addDocument(doc.rootDoc());
+
+ IndexReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ TopDocs topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field1").termQuery("value1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field2").termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field3").termQuery("1.1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field4").termQuery("2010-01-01", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").termQuery("1", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").termQuery("2", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+
+ topDocs = searcher.search(mapper.mappers().smartNameFieldMapper("field5").termQuery("3", null), 10);
+ assertThat(topDocs.totalHits, equalTo(2));
+ writer.close();
+ reader.close();
+ dir.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java b/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
new file mode 100644
index 0000000000..67d260c036
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/lucene/StoredNumericValuesTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.lucene;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class StoredNumericValuesTest extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testBytesAndNumericRepresentation() throws Exception {
+ IndexWriter writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("store", "yes").endObject()
+ .startObject("field2").field("type", "float").field("store", "yes").endObject()
+ .startObject("field3").field("type", "long").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", 1)
+ .field("field2", 1.1)
+ .startArray("field3").value(1).value(2).value(3).endArray()
+ .endObject()
+ .bytes());
+
+ writer.addDocument(doc.rootDoc());
+
+ // Indexing a doc in the old way
+ FieldType fieldType = new FieldType();
+ fieldType.setStored(true);
+ fieldType.setNumericType(FieldType.NumericType.INT);
+ Document doc2 = new Document();
+ doc2.add(new StoredField("field1", new BytesRef(Numbers.intToBytes(1))));
+ doc2.add(new StoredField("field2", new BytesRef(Numbers.floatToBytes(1.1f))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(1l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(2l))));
+ doc2.add(new StoredField("field3", new BytesRef(Numbers.longToBytes(3l))));
+ writer.addDocument(doc2);
+
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ Set<String> fields = new HashSet<>(Arrays.asList("field1", "field2", "field3"));
+ CustomFieldsVisitor fieldsVisitor = new CustomFieldsVisitor(fields, false);
+ searcher.doc(0, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3l));
+
+ // Make sure the doc gets loaded as if it was stored in the new way
+ fieldsVisitor.reset();
+ searcher.doc(1, fieldsVisitor);
+ fieldsVisitor.postProcess(mapper);
+ assertThat(fieldsVisitor.fields().size(), equalTo(3));
+ assertThat(fieldsVisitor.fields().get("field1").size(), equalTo(1));
+ assertThat((Integer) fieldsVisitor.fields().get("field1").get(0), equalTo(1));
+ assertThat(fieldsVisitor.fields().get("field2").size(), equalTo(1));
+ assertThat((Float) fieldsVisitor.fields().get("field2").get(0), equalTo(1.1f));
+ assertThat(fieldsVisitor.fields().get("field3").size(), equalTo(3));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(0), equalTo(1l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(1), equalTo(2l));
+ assertThat((Long) fieldsVisitor.fields().get("field3").get(2), equalTo(3l));
+
+ reader.close();
+ writer.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
new file mode 100644
index 0000000000..a34bf2120f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.merge;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.analysis.FieldNameAnalyzer;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.mapper.DocumentFieldMappers;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.Mapping;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ * Tests for {@link DocumentMapper} mapping merges: simulate-only versus applied
+ * merges, dynamic-setting updates, object/nested conflicts, search-analyzer
+ * changes, and merging concurrently with document parsing.
+ */
+public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
+
+ /** A simulated merge must leave the target mapper untouched; an applied merge adds the new fields. */
+ @Test
+ public void test1Merge() throws Exception {
+
+ String stage1Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper stage1 = parser.parse(stage1Mapping);
+ String stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("age").field("type", "integer").endObject()
+ .startObject("obj1").startObject("properties").startObject("prop1").field("type", "integer").endObject().endObject().endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper stage2 = parser.parse(stage2Mapping);
+
+ MergeResult mergeResult = stage1.merge(stage2.mapping(), true);
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // since we are simulating, we should not have the age mapping
+ assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue());
+ assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue());
+ // now merge, don't simulate
+ mergeResult = stage1.merge(stage2.mapping(), false);
+ // the applied merge should report no conflicts either
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ // but we have the age in
+ assertThat(stage1.mappers().smartNameFieldMapper("age"), notNullValue());
+ assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue());
+ }
+
+ /** Merging a mapping with an explicit "dynamic" setting updates the root object's dynamic flag. */
+ @Test
+ public void testMergeObjectDynamic() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").endObject().endObject().string();
+ DocumentMapper mapper = parser.parse(objectMapping);
+ assertNull(mapper.root().dynamic());
+
+ String withDynamicMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").field("dynamic", "false").endObject().endObject().string();
+ DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping);
+ assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+
+ MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false);
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
+ }
+
+ /** Switching a field between object and nested (in either direction) must be reported as a merge conflict. */
+ @Test
+ public void testMergeObjectAndNested() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "object").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper objectMapper = parser.parse(objectMapping);
+ String nestedMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("obj").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+ DocumentMapper nestedMapper = parser.parse(nestedMapping);
+
+ MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true);
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.buildConflicts().length, equalTo(1));
+ assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
+
+ mergeResult = nestedMapper.merge(objectMapper.mapping(), true);
+ assertThat(mergeResult.buildConflicts().length, equalTo(1));
+ assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested"));
+ }
+
+ /** An applied merge replaces the field's search_analyzer with the incoming one. */
+ @Test
+ public void testMergeSearchAnalyzer() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "keyword").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = parser.parse(mapping1);
+ DocumentMapper changed = parser.parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace"));
+ MergeResult mergeResult = existing.merge(changed.mapping(), false);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword"));
+ }
+
+ /** Omitting search_analyzer in the incoming mapping resets it to the index analyzer ("standard"); other incoming settings (ignore_above) still apply. */
+ @Test
+ public void testChangeSearchAnalyzerToDefault() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("search_analyzer", "whitespace").endObject().endObject()
+ .endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("analyzer", "standard").field("ignore_above", 14).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper existing = parser.parse(mapping1);
+ DocumentMapper changed = parser.parse(mapping2);
+
+ assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace"));
+ MergeResult mergeResult = existing.merge(changed.mapping(), false);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard"));
+ assertThat(((StringFieldMapper) (existing.mappers().getMapper("field"))).getIgnoreAbove(), equalTo(14));
+ }
+
+ /**
+ * Merges dynamically introduced fields on one thread while another thread parses
+ * documents, verifying that once a parse no longer yields a dynamic mapping update
+ * the field's analyzer is visible through {@link DocumentFieldMappers}.
+ */
+ public void testConcurrentMergeTest() throws Throwable {
+ final MapperService mapperService = createIndex("test").mapperService();
+ mapperService.merge("test", new CompressedXContent("{\"test\":{}}"), true);
+ final DocumentMapper documentMapper = mapperService.documentMapper("test");
+
+ DocumentFieldMappers dfm = documentMapper.mappers();
+ try {
+ ((FieldNameAnalyzer) dfm.indexAnalyzer()).getWrappedAnalyzer("non_existing_field");
+ fail();
+ } catch (IllegalArgumentException e) {
+ // ok that's expected
+ }
+
+ final AtomicBoolean stopped = new AtomicBoolean(false);
+ final CyclicBarrier barrier = new CyclicBarrier(2);
+ final AtomicReference<String> lastIntroducedFieldName = new AtomicReference<>();
+ final AtomicReference<Throwable> error = new AtomicReference<>();
+ // updater thread: introduces fields "0".."199" via dynamic mapping updates and merges each one in
+ final Thread updater = new Thread() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ for (int i = 0; i < 200 && stopped.get() == false; i++) {
+ final String fieldName = Integer.toString(i);
+ ParsedDocument doc = documentMapper.parse("test", fieldName, new BytesArray("{ \"" + fieldName + "\" : \"test\" }"));
+ Mapping update = doc.dynamicMappingsUpdate();
+ assert update != null;
+ lastIntroducedFieldName.set(fieldName);
+ mapperService.merge("test", new CompressedXContent(update.toString()), false);
+ }
+ } catch (Throwable t) {
+ error.set(t);
+ } finally {
+ stopped.set(true);
+ }
+ }
+ };
+ updater.start();
+ try {
+ barrier.await();
+ // reader side: once a field is fully merged, its analyzer lookup must succeed
+ while(stopped.get() == false) {
+ final String fieldName = lastIntroducedFieldName.get();
+ final BytesReference source = new BytesArray("{ \"" + fieldName + "\" : \"test\" }");
+ ParsedDocument parsedDoc = documentMapper.parse("test", "random", source);
+ if (parsedDoc.dynamicMappingsUpdate() != null) {
+ // not in the mapping yet, try again
+ continue;
+ }
+ dfm = documentMapper.mappers();
+ ((FieldNameAnalyzer) dfm.indexAnalyzer()).getWrappedAnalyzer(fieldName);
+ }
+ } finally {
+ stopped.set(true);
+ updater.join();
+ }
+ // re-throw any failure captured on the updater thread
+ if (error.get() != null) {
+ throw error.get();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
new file mode 100644
index 0000000000..2d68f801d2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java
@@ -0,0 +1,513 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.mapper.MapperBuilders.doc;
+import static org.elasticsearch.index.mapper.MapperBuilders.rootObject;
+import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for multi-field mappings: parsing, programmatic build round-tripping,
+ * and conversion of legacy multi_field definitions for various field types.
+ */
+public class MultiFieldTests extends ElasticsearchSingleNodeTest {
+
+ /** The legacy {@code multi_field} type syntax must behave like the modern {@code fields} syntax. */
+ @Test
+ public void testMultiField_multiFieldType() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json");
+ testMultiField(mapping);
+ }
+
+ /** The modern {@code fields} syntax is checked with the same shared assertions as the legacy syntax. */
+ @Test
+ public void testMultiField_multiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-fields.json");
+ testMultiField(mapping);
+ }
+
+ // Shared assertions for both multi-field syntaxes: parses the given mapping,
+ // indexes the test document, and verifies each sub-field's mapper and indexed
+ // field (stored/indexed/tokenized flags, values, fielddata loading).
+ private void testMultiField(String mapping) throws Exception {
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertEquals(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("object1.multi1");
+ assertThat(f.name(), equalTo("object1.multi1"));
+
+ f = doc.getField("object1.multi1.string");
+ assertThat(f.name(), equalTo("object1.multi1.string"));
+ assertThat(f.stringValue(), equalTo("2010-01-01"));
+
+ assertThat(docMapper.mappers().getMapper("name"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name.indexed").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("name.indexed").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), instanceOf(StringFieldMapper.class));
+ assertEquals(IndexOptions.NONE, docMapper.mappers().getMapper("name.not_indexed").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name.not_indexed").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("name.test1"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.test1"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name.test1").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.test1").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name.test1").fieldType().tokenized(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name.test1").fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER));
+
+ assertThat(docMapper.mappers().getMapper("name.test2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.test2"), instanceOf(TokenCountFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name.test2").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.test2").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name.test2").fieldType().tokenized(), equalTo(false));
+ assertThat(((TokenCountFieldMapper) docMapper.mappers().getMapper("name.test2")).analyzer(), equalTo("simple"));
+
+ assertThat(docMapper.mappers().getMapper("object1.multi1"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("object1.multi1"), instanceOf(DateFieldMapper.class));
+ assertThat(docMapper.mappers().getMapper("object1.multi1.string"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("object1.multi1.string"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("object1.multi1.string").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("object1.multi1.string").fieldType().tokenized(), equalTo(false));
+ }
+
+ /** Builds a mapper programmatically, serializes and reparses it, and verifies the reparsed mapper indexes the document identically. */
+ @Test
+ public void testBuildThenParse() throws Exception {
+ IndexService indexService = createIndex("test");
+ Settings settings = indexService.settingsService().getSettings();
+ DocumentMapperParser mapperParser = indexService.mapperService().documentMapperParser();
+
+ DocumentMapper builderDocMapper = doc("test", settings, rootObject("person").add(
+ stringField("name").store(true)
+ .addMultiField(stringField("indexed").index(true).tokenized(true))
+ .addMultiField(stringField("not_indexed").index(false).store(true))
+ )).build(indexService.mapperService(), mapperParser);
+
+ String builtMapping = builderDocMapper.mappingSource().string();
+// System.out.println(builtMapping);
+ // reparse it
+ DocumentMapper docMapper = mapperParser.parse(builtMapping);
+
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ IndexableField f = doc.getField("name");
+ assertThat(f.name(), equalTo("name"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().tokenized(), equalTo(true));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertEquals(IndexOptions.NONE, f.fieldType().indexOptions());
+ }
+
+ /**
+ * Legacy multi_field without a sub-field named after the parent: the parent
+ * mapper exists but indexes nothing; only the explicit sub-fields produce
+ * indexed fields, for both string ("name") and numeric ("age") parents.
+ */
+ @Test
+ public void testConvertMultiFieldNoDefaultField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ assertNull(doc.getField("name"));
+ IndexableField f = doc.getField("name.indexed");
+ assertThat(f.name(), equalTo("name.indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("name.not_indexed");
+ assertThat(f.name(), equalTo("name.not_indexed"));
+ assertThat(f.stringValue(), equalTo("some name"));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertEquals(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ assertThat(docMapper.mappers().getMapper("name"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name"), instanceOf(StringFieldMapper.class));
+ assertEquals(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("name").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), instanceOf(StringFieldMapper.class));
+ // assertNotSame (not assertNotNull): indexOptions() is an enum accessor and is never null,
+ // so the meaningful check — used for every other sub-field here — is that it is not NONE
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name.indexed").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("name.indexed").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), instanceOf(StringFieldMapper.class));
+ assertEquals(IndexOptions.NONE, docMapper.mappers().getMapper("name.not_indexed").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("name.not_indexed").fieldType().tokenized(), equalTo(true));
+
+ assertNull(doc.getField("age"));
+ f = doc.getField("age.not_stored");
+ assertThat(f.name(), equalTo("age.not_stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("age.stored");
+ assertThat(f.name(), equalTo("age.stored"));
+ assertThat(f.numericValue(), equalTo((Number) 28L));
+ assertThat(f.fieldType().stored(), equalTo(true));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ assertThat(docMapper.mappers().getMapper("age"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("age"), instanceOf(LongFieldMapper.class));
+ assertEquals(IndexOptions.NONE, docMapper.mappers().getMapper("age").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("age").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("age").fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().getMapper("age.not_stored"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("age.not_stored"), instanceOf(LongFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("age.not_stored").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("age.not_stored").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("age.not_stored").fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().getMapper("age.stored"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("age.stored"), instanceOf(LongFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("age.stored").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("age.stored").fieldType().stored(), equalTo(true));
+ assertThat(docMapper.mappers().getMapper("age.stored").fieldType().tokenized(), equalTo(false));
+ }
+
+ /**
+ * Legacy multi_field conversion with geo_point: string-under-geo and
+ * geo-under-string nestings, for string, array, and multi-value inputs.
+ * Geo values are normalized to "lat,lon" with decimal points.
+ */
+ @Test
+ public void testConvertMultiFieldGeoPoint() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.mappers().getMapper("a"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("a"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("a").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("a").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("a").fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().getMapper("a.b"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("a.b"), instanceOf(GeoPointFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("a.b").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("a.b").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("a.b").fieldType().tokenized(), equalTo(false));
+
+ // string parent, geo_point sub-field: the raw string is kept on "a", normalized on "a.b"
+ BytesReference json = jsonBuilder().startObject()
+ .field("a", "-1,-1")
+ .endObject().bytes();
+ Document doc = docMapper.parse("type", "1", json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ assertThat(docMapper.mappers().getMapper("b"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("b"), instanceOf(GeoPointFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("b").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("b").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("b").fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().getMapper("b.a"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("b.a"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("b.a").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("b.a").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("b.a").fieldType().tokenized(), equalTo(false));
+
+ // geo_point parent, string sub-field: normalized on "b", raw string on "b.a"
+ json = jsonBuilder().startObject()
+ .field("b", "-1,-1")
+ .endObject().bytes();
+ doc = docMapper.parse("type", "1", json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("-1,-1"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ // array-of-arrays input: both points land on "b"
+ json = jsonBuilder().startObject()
+ .startArray("b").startArray().value(-1).value(-1).endArray().startArray().value(-2).value(-2).endArray().endArray()
+ .endObject().bytes();
+ doc = docMapper.parse("type", "1", json).rootDoc();
+
+ f = doc.getFields("b")[0];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-1.0,-1.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getFields("b")[1];
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("-2.0,-2.0"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ // NOTE: the value is "]" because the lat,lon pair was not specified as a string: by the time the
+ // multi field is parsed the coordinate values have already been consumed, so they cannot be
+ // recovered. This happens when coordinates are specified as an array or an object.
+ assertThat(f.stringValue(), equalTo("]"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ }
+
+ /**
+ * Legacy multi_field conversion with completion fields: string-under-completion
+ * and completion-under-string nestings both index the raw input on parent and
+ * sub-field alike.
+ */
+ @Test
+ public void testConvertMultiFieldCompletion() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.mappers().getMapper("a"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("a"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("a").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("a").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("a").fieldType().tokenized(), equalTo(false));
+
+ assertThat(docMapper.mappers().getMapper("a.b"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("a.b"), instanceOf(CompletionFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("a.b").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("a.b").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("a.b").fieldType().tokenized(), equalTo(true));
+
+ // string parent, completion sub-field
+ BytesReference json = jsonBuilder().startObject()
+ .field("a", "complete me")
+ .endObject().bytes();
+ Document doc = docMapper.parse("type", "1", json).rootDoc();
+
+ IndexableField f = doc.getField("a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("a.b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("a.b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ assertThat(docMapper.mappers().getMapper("b"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("b"), instanceOf(CompletionFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("b").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("b").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("b").fieldType().tokenized(), equalTo(true));
+
+ assertThat(docMapper.mappers().getMapper("b.a"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("b.a"), instanceOf(StringFieldMapper.class));
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("b.a").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("b.a").fieldType().stored(), equalTo(false));
+ assertThat(docMapper.mappers().getMapper("b.a").fieldType().tokenized(), equalTo(false));
+
+ // completion parent, string sub-field
+ json = jsonBuilder().startObject()
+ .field("b", "complete me")
+ .endObject().bytes();
+ doc = docMapper.parse("type", "1", json).rootDoc();
+
+ f = doc.getField("b");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+
+ f = doc.getField("b.a");
+ assertThat(f, notNullValue());
+ assertThat(f.name(), equalTo("b.a"));
+ assertThat(f.stringValue(), equalTo("complete me"));
+ assertThat(f.fieldType().stored(), equalTo(false));
+ assertNotSame(IndexOptions.NONE, f.fieldType().indexOptions());
+ }
+
+ @Test
+ // The underlying order of the fields in multi fields in the mapping source should always be consistent, if not this
+    // can lead to unnecessary re-syncing of the mappings between the local instance and cluster state
+ public void testMultiFieldsInConsistentOrder() throws Exception {
+ String[] multiFieldNames = new String[randomIntBetween(2, 10)];
+ for (int i = 0; i < multiFieldNames.length; i++) {
+ multiFieldNames[i] = randomAsciiOfLength(4);
+ }
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("my_field").field("type", "string").startObject("fields");
+ for (String multiFieldName : multiFieldNames) {
+ builder = builder.startObject(multiFieldName).field("type", "string").endObject();
+ }
+ builder = builder.endObject().endObject().endObject().endObject().endObject();
+ String mapping = builder.string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ Arrays.sort(multiFieldNames);
+
+ Map<String, Object> sourceAsMap = XContentHelper.convertToMap(docMapper.mappingSource().compressedReference(), true).v2();
+ @SuppressWarnings("unchecked")
+ Map<String, Object> multiFields = (Map<String, Object>) XContentMapValues.extractValue("type.properties.my_field.fields", sourceAsMap);
+ assertThat(multiFields.size(), equalTo(multiFieldNames.length));
+
+ int i = 0;
+        // underlying map is a LinkedHashMap, so this is ok:
+ for (String field : multiFields.keySet()) {
+ assertThat(field, equalTo(multiFieldNames[i++]));
+ }
+ }
+
+ @Test
+    // The fielddata settings need to be the same after deserializing/re-serializing, else unnecessary mapping syncs can be triggered
+ public void testMultiFieldsFieldDataSettingsInConsistentOrder() throws Exception {
+ final String MY_MULTI_FIELD = "multi_field";
+
+ // Possible fielddata settings
+ Map<String, Object> possibleSettings = new TreeMap<String, Object>();
+ possibleSettings.put("filter.frequency.min", 1);
+ possibleSettings.put("filter.frequency.max", 2);
+ possibleSettings.put("filter.regex.pattern", ".*");
+ possibleSettings.put("format", "fst");
+ possibleSettings.put("loading", "eager");
+ possibleSettings.put("foo", "bar");
+ possibleSettings.put("zetting", "zValue");
+ possibleSettings.put("aSetting", "aValue");
+
+        // Generate a mapping with a random subset of possible fielddata settings
+ XContentBuilder builder = jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD)
+ .field("type", "string").startObject("fielddata");
+ String[] keys = possibleSettings.keySet().toArray(new String[]{});
+ Collections.shuffle(Arrays.asList(keys));
+ for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i)
+ builder.field(keys[i], possibleSettings.get(keys[i]));
+ builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject();
+
+        // Check the mapping remains identical when deserialized/re-serialized
+ final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper docMapper = parser.parse(builder.string());
+ DocumentMapper docMapper2 = parser.parse(docMapper.mappingSource().string());
+ assertThat(docMapper.mappingSource(), equalTo(docMapper2.mappingSource()));
+ }
+
+ public void testObjectFieldNotAllowed() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field")
+ .field("type", "string").startObject("fields").startObject("multi").field("type", "object").endObject().endObject()
+ .endObject().endObject().endObject().endObject().string();
+ final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ try {
+ parser.parse(mapping);
+ fail("expected mapping parse failure");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("cannot be used in multi field"));
+ }
+ }
+
+ public void testNestedFieldNotAllowed() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field")
+ .field("type", "string").startObject("fields").startObject("multi").field("type", "nested").endObject().endObject()
+ .endObject().endObject().endObject().endObject().string();
+ final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ try {
+ parser.parse(mapping);
+ fail("expected mapping parse failure");
+ } catch (MapperParsingException e) {
+ assertTrue(e.getMessage().contains("cannot be used in multi field"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
new file mode 100644
index 0000000000..60609d82b1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldsIntegrationTests.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiFieldsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testMultiFields() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createTypeSource())
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(1));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title", "multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.not_analyzed", "Multi fields"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ assertAcked(
+ client().admin().indices().preparePutMapping("my-index").setType("my-type")
+ .setSource(createPutMappingSource())
+ );
+
+ getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ mappingSource = mappingMetaData.sourceAsMap();
+ assertThat(((Map) XContentMapValues.extractValue("properties.title", mappingSource)).size(), equalTo(2));
+ titleFields = ((Map) XContentMapValues.extractValue("properties.title.fields", mappingSource));
+ assertThat(titleFields.size(), equalTo(2));
+ assertThat(titleFields.get("not_analyzed"), notNullValue());
+ assertThat(((Map)titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
+ assertThat(titleFields.get("uncased"), notNullValue());
+ assertThat(((Map)titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace"));
+
+ client().prepareIndex("my-index", "my-type", "1")
+ .setSource("title", "Multi fields")
+ .setRefresh(true)
+ .get();
+
+ searchResponse = client().prepareSearch("my-index")
+ .setQuery(matchQuery("title.uncased", "Multi"))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testGeoPointMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("geo_point"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ logger.info("Keys: " + aField.keySet());
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("geo_point"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "51,19").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index")
+ .setQuery(constantScoreQuery(geoDistanceQuery("a").point(51, 19).distance(50, DistanceUnit.KILOMETERS)))
+ .get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "51,19")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testTokenCountMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", "token_count")
+ .field("analyzer", "simple")
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject())
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(3));
+ assertThat(aField.get("type").toString(), equalTo("token_count"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "my tokens").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "my tokens")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testCompletionMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("completion"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(7));
+ assertThat(aField.get("type").toString(), equalTo("completion"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "complete me").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "complete me")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testIpMultiField() throws Exception {
+ assertAcked(
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", createMappingSource("ip"))
+ );
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
+ MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
+ assertThat(mappingMetaData, not(nullValue()));
+ Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
+ Map aField = ((Map) XContentMapValues.extractValue("properties.a", mappingSource));
+ assertThat(aField.size(), equalTo(2));
+ assertThat(aField.get("type").toString(), equalTo("ip"));
+ assertThat(aField.get("fields"), notNullValue());
+
+ Map bField = ((Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource));
+ assertThat(bField.size(), equalTo(2));
+ assertThat(bField.get("type").toString(), equalTo("string"));
+ assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
+
+ client().prepareIndex("my-index", "my-type", "1").setSource("a", "127.0.0.1").setRefresh(true).get();
+ CountResponse countResponse = client().prepareCount("my-index").setQuery(matchQuery("a.b", "127.0.0.1")).get();
+ assertThat(countResponse.getCount(), equalTo(1l));
+ }
+
+ private XContentBuilder createMappingSource(String fieldType) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("a")
+ .field("type", fieldType)
+ .startObject("fields")
+ .startObject("b")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ private XContentBuilder createTypeSource() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("not_analyzed")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ private XContentBuilder createPutMappingSource() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("my-type")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("uncased")
+ .field("type", "string")
+ .field("analyzer", "whitespace")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
new file mode 100644
index 0000000000..1235b6f693
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.multifield.merge;
+
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testMergeMultiField() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue());
+
+ BytesReference json = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject().bytes();
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
+ MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2.mapping(), false);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
+
+ doc = docMapper.parse("person", "1", json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3.mapping(), false);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper4.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper4.mapping(), false);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), notNullValue());
+ }
+
+ @Test
+ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json");
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue());
+
+ BytesReference json = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject().bytes();
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ IndexableField f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json");
+ DocumentMapper docMapper2 = parser.parse(mapping);
+
+ MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper2.mapping(), false);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
+
+ doc = docMapper.parse("person", "1", json).rootDoc();
+ f = doc.getField("name");
+ assertThat(f, notNullValue());
+ f = doc.getField("name.indexed");
+ assertThat(f, notNullValue());
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json");
+ DocumentMapper docMapper3 = parser.parse(mapping);
+
+ mergeResult = docMapper.merge(docMapper3.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));
+
+ docMapper.merge(docMapper3.mapping(), false);
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue());
+
+
+ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json");
+ DocumentMapper docMapper4 = parser.parse(mapping);
+ mergeResult = docMapper.merge(docMapper4.mapping(), true);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true));
+ assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values"));
+
+ mergeResult = docMapper.merge(docMapper4.mapping(), false);
+ assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true));
+
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values"));
+ assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values"));
+
+ // There are conflicts, but the `name.not_indexed3` has been added, b/c that field has no conflicts
+ assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions());
+ assertThat(docMapper.mappers().getMapper("name.indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name.not_indexed3"), notNullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
new file mode 100644
index 0000000000..c539fcc885
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-data.json
@@ -0,0 +1,4 @@
+{
+ _id:1,
+ name:"some name"
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
new file mode 100644
index 0000000000..61f08af57e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping1.json
@@ -0,0 +1,11 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
new file mode 100644
index 0000000000..02ce8957a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json
@@ -0,0 +1,27 @@
+{
+ "person" :{
+ "properties" :{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes",
+ "fields":{
+ "name":{
+ "type" :"string",
+ "index" :"analyzed",
+ "store" :"yes"
+ },
+ "indexed":{
+ "type" :"string",
+ "index" :"analyzed"
+ },
+ "not_indexed":{
+ "type" :"string",
+ "index" :"no",
+ "store" :"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
new file mode 100644
index 0000000000..ea07675446
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json
@@ -0,0 +1,32 @@
+{
+ "person" : {
+ "properties" :{
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes",
+ "fields": {
+ "name" : {
+ "type" : "string",
+ "index" : "analyzed",
+ "store" : "yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
new file mode 100644
index 0000000000..384c2634cb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json
@@ -0,0 +1,18 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
new file mode 100644
index 0000000000..6206592afc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json
@@ -0,0 +1,25 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
new file mode 100644
index 0000000000..4a8fbf66ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json
@@ -0,0 +1,30 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "name":{
+ type:"string",
+ index:"analyzed",
+ store:"yes"
+ },
+ "indexed":{
+ type:"string",
+ index:"analyzed"
+ },
+ "not_indexed":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ },
+ "not_indexed2":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
new file mode 100644
index 0000000000..9b309789f5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json
@@ -0,0 +1,16 @@
+{
+ person:{
+ properties:{
+ "name":{
+ type:"multi_field",
+ "fields":{
+ "not_indexed3":{
+ type:"string",
+ index:"no",
+ store:"yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
new file mode 100644
index 0000000000..2e8ab256df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-data.json
@@ -0,0 +1,7 @@
+{
+ "age":28,
+ "name":"some name",
+ "object1":{
+ "multi1":"2010-01-01"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
new file mode 100644
index 0000000000..d36e9d2d84
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-completion.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"completion"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
new file mode 100644
index 0000000000..c7d11becc2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-geo_point.json
@@ -0,0 +1,30 @@
+{
+ "type":{
+ "properties":{
+ "a":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ },
+ "b":{
+ "type":"multi_field",
+ "fields":{
+ "a":{
+ "type":"string",
+ "index":"not_analyzed"
+ },
+ "b":{
+ "type":"geo_point"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
new file mode 100644
index 0000000000..99b74c0167
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type-no-default-field.json
@@ -0,0 +1,32 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "multi_field",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ }
+ }
+ },
+ "age": {
+ "type": "multi_field",
+ "fields": {
+ "not_stored": {
+ "type": "long"
+ },
+ "stored": {
+ "type": "long",
+ "store": "yes"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
new file mode 100644
index 0000000000..b099b9ab20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-field-type.json
@@ -0,0 +1,55 @@
+{
+ "person":{
+ "properties":{
+ "name":{
+ "type":"multi_field",
+ "fields":{
+ "name":{
+ "type":"string",
+ "index":"analyzed",
+ "store":"yes"
+ },
+ "indexed":{
+ "type":"string",
+ "index":"analyzed"
+ },
+ "not_indexed":{
+ "type":"string",
+ "index":"no",
+ "store":"yes"
+ },
+ "test1" : {
+ "type":"string",
+ "index":"analyzed",
+ "store" : "yes",
+ "fielddata" : {
+ "loading" : "eager"
+ }
+ },
+ "test2" : {
+ "type" : "token_count",
+ "store" : "yes",
+ "index" : "not_analyzed",
+ "analyzer" : "simple"
+ }
+ }
+ },
+ "object1":{
+ "properties":{
+ "multi1":{
+ "type":"multi_field",
+ "fields":{
+ "multi1":{
+ "type":"date"
+ },
+ "string":{
+ "type":"string",
+ "index":"not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
new file mode 100644
index 0000000000..b116665829
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/test-multi-fields.json
@@ -0,0 +1,50 @@
+{
+ "person": {
+ "properties": {
+ "name": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fields": {
+ "indexed": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "no"
+ },
+ "not_indexed": {
+ "type": "string",
+ "index": "no",
+ "store": "yes"
+ },
+ "test1": {
+ "type": "string",
+ "index": "analyzed",
+ "store": "yes",
+ "fielddata": {
+ "loading": "eager"
+ }
+ },
+ "test2": {
+ "type": "token_count",
+ "index": "not_analyzed",
+ "store": "yes",
+ "analyzer": "simple"
+ }
+ }
+ },
+ "object1": {
+ "properties": {
+ "multi1": {
+ "type": "date",
+ "fields": {
+ "string": {
+ "type": "string",
+ "index": "not_analyzed"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
new file mode 100644
index 0000000000..25e81269ef
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/nested/NestedMappingTests.java
@@ -0,0 +1,351 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.nested;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.mapper.object.ObjectMapper.Dynamic;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class NestedMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void emptyNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .nullField("nested1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested").endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(1));
+ }
+
+ @Test
+ public void singleNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startObject("nested1").field("field1", "1").field("field2", "2").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(2));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(1).get("field"), equalTo("value"));
+
+
+ doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").field("field2", "2").endObject()
+ .startObject().field("field1", "3").field("field2", "4").endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(3));
+ assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3"));
+ assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4"));
+ assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
+ assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2"));
+
+ assertThat(doc.docs().get(2).get("field"), equalTo("value"));
+ }
+
+ @Test
+ public void multiNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested")
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).get("nested1.nested2.field2"), nullValue());
+ }
+
+ @Test
+ public void multiObjectAndNested2() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").field("include_in_parent", true).startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_parent", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).getFields("nested1.field1").length, equalTo(2));
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+
+ @Test
+ public void multiRootAndNested1() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").field("include_in_root", true)
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested1Mapper.nested().isIncludeInRoot(), equalTo(false));
+ ObjectMapper nested2Mapper = docMapper.objectMappers().get("nested1.nested2");
+ assertThat(nested2Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false));
+ assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(true));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(7));
+ assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+ assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+ assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), nullValue());
+ assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+ assertThat(doc.docs().get(3).get("field"), nullValue());
+ assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+ assertThat(doc.docs().get(4).get("field"), nullValue());
+ assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
+ assertThat(doc.docs().get(5).get("field"), nullValue());
+ assertThat(doc.docs().get(6).get("field"), equalTo("value"));
+ assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
+ assertThat(doc.docs().get(6).getFields("nested1.nested2.field2").length, equalTo(4));
+ }
+
+ @Test
+ public void nestedArray_strict() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1").field("type", "nested").field("dynamic", "strict").startObject("properties")
+ .startObject("field1").field("type", "string")
+ .endObject().endObject()
+ .endObject().endObject().endObject().string();
+
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat(docMapper.hasNestedObjects(), equalTo(true));
+ ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
+ assertThat(nested1Mapper.nested().isNested(), equalTo(true));
+ assertThat(nested1Mapper.dynamic(), equalTo(Dynamic.STRICT));
+
+ ParsedDocument doc = docMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").endObject()
+ .startObject().field("field1", "4").endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.docs().size(), equalTo(3));
+ assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("4"));
+ assertThat(doc.docs().get(0).get("field"), nullValue());
+ assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
+ assertThat(doc.docs().get(1).get("field"), nullValue());
+ assertThat(doc.docs().get(2).get("field"), equalTo("value"));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/null_value/NullValueTests.java b/core/src/test/java/org/elasticsearch/index/mapper/null_value/NullValueTests.java
new file mode 100644
index 0000000000..ec8b3b077d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/null_value/NullValueTests.java
@@ -0,0 +1,65 @@
+package org.elasticsearch.index.mapper.null_value;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class NullValueTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testNullNull_Value() throws Exception {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
+ String[] typesToTest = {"integer", "long", "double", "float", "short", "date", "ip", "string", "boolean", "byte"};
+
+ for (String type : typesToTest) {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("numeric")
+ .field("type", type)
+ .field("null_value", (String) null)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+
+ try {
+ indexService.mapperService().documentMapperParser().parse(mapping);
+ fail("Test should have failed because [null_value] was null.");
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("Property [null_value] cannot be null."));
+ }
+
+ }
+
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
new file mode 100644
index 0000000000..28be8bd2c9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/numeric/SimpleNumericTests.java
@@ -0,0 +1,522 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.numeric;
+
+import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.string.SimpleStringMappingTests;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class SimpleNumericTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testNumericDetectionEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("numeric_detection", true)
+ .endObject().endObject().string();
+
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
+ DocumentMapper defaultMapper = index.mapperService().documentMapper("type");
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+ assertNotNull(doc.dynamicMappingsUpdate());
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get();
+
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(LongFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(DoubleFieldMapper.class));
+ }
+
+ @Test
+ public void testNumericDetectionDefault() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ IndexService index = createIndex("test");
+ client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
+ DocumentMapper defaultMapper = index.mapperService().documentMapper("type");
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("s_long", "100")
+ .field("s_double", "100.0")
+ .endObject()
+ .bytes());
+ assertNotNull(doc.dynamicMappingsUpdate());
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type").setSource(doc.dynamicMappingsUpdate().toString()).get());
+
+ FieldMapper mapper = defaultMapper.mappers().smartNameFieldMapper("s_long");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+
+ mapper = defaultMapper.mappers().smartNameFieldMapper("s_double");
+ assertThat(mapper, instanceOf(StringFieldMapper.class));
+ }
+
+ @Test
+ public void testIgnoreMalformedOption() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "integer").field("ignore_malformed", true).endObject()
+ .startObject("field2").field("type", "integer").field("ignore_malformed", false).endObject()
+ .startObject("field3").field("type", "integer").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "a")
+ .field("field2", "1")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field1"), nullValue());
+ assertThat(doc.rootDoc().getField("field2"), notNullValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Verify that the default is false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+
+ // Unless the global ignore_malformed option is set to true
+ Settings indexSettings = settingsBuilder().put("index.mapping.ignore_malformed", true).build();
+ defaultMapper = createIndex("test2", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field3", "a")
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("field3"), nullValue());
+
+ // This should still throw an exception, since field2 is specifically set to ignore_malformed=false
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field2", "a")
+ .endObject()
+ .bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(NumberFormatException.class));
+ }
+ }
+
+ @Test
+ public void testCoerceOption() throws Exception {
+ String [] nonFractionNumericFieldTypes={"integer","long","short"};
+ //Test co-ercion policies on all non-fraction numerics
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ for (String nonFractionNumericFieldType : nonFractionNumericFieldTypes) {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("noErrorNoCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", false).endObject()
+ .startObject("noErrorCoerceField").field("type", nonFractionNumericFieldType).field("ignore_malformed", true)
+ .field("coerce", true).endObject()
+ .startObject("errorDefaultCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false).endObject()
+ .startObject("errorNoCoerce").field("type", nonFractionNumericFieldType).field("ignore_malformed", false)
+ .field("coerce", false).endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ //Test numbers passed as strings
+ String invalidJsonNumberAsString="1";
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonNumberAsString)
+ .field("noErrorCoerceField", invalidJsonNumberAsString)
+ .field("errorDefaultCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertThat(doc.rootDoc().getField("noErrorCoerceField"), notNullValue());
+ //Default is ignore_malformed=true and coerce=true
+ assertThat(doc.rootDoc().getField("errorDefaultCoerce"), notNullValue());
+
+ //Test valid case of numbers passed as numbers
+ int validNumber=1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNumber)
+ .field("noErrorCoerceField", validNumber)
+ .field("errorDefaultCoerce", validNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNumber,doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNumber,doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNumber,doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ //Test valid case of negative numbers passed as numbers
+ int validNegativeNumber=-1;
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", validNegativeNumber)
+ .field("noErrorCoerceField", validNegativeNumber)
+ .field("errorDefaultCoerce", validNegativeNumber)
+ .endObject()
+ .bytes());
+ assertEquals(validNegativeNumber,doc.rootDoc().getField("noErrorNoCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber,doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ assertEquals(validNegativeNumber,doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonNumberAsString)
+ .endObject()
+ .bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+
+
+ //Test questionable case of floats passed to ints
+ float invalidJsonForInteger=1.9f;
+ int coercedFloatValue=1; //This is what the JSON parser will do to a float - truncate not round
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("noErrorNoCoerceField", invalidJsonForInteger)
+ .field("noErrorCoerceField", invalidJsonForInteger)
+ .field("errorDefaultCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ assertThat(doc.rootDoc().getField("noErrorNoCoerceField"), nullValue());
+ assertEquals(coercedFloatValue,doc.rootDoc().getField("noErrorCoerceField").numericValue().intValue());
+ //Default is ignore_malformed=true and coerce=true
+ assertEquals(coercedFloatValue,doc.rootDoc().getField("errorDefaultCoerce").numericValue().intValue());
+
+ try {
+ defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("errorNoCoerce", invalidJsonForInteger)
+ .endObject()
+ .bytes());
+ } catch (MapperParsingException e) {
+ assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+ }
+ }
+ }
+
+
+ public void testDocValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(DocValuesType.SORTED_NUMERIC, SimpleStringMappingTests.docValuesType(doc, "int"));
+ assertEquals(DocValuesType.SORTED_NUMERIC, SimpleStringMappingTests.docValuesType(doc, "double"));
+ }
+
+ public void testDocValuesOnNested() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("nested")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startArray("nested")
+ .startObject()
+ .field("int", "1234")
+ .field("double", "1234")
+ .endObject()
+ .startObject()
+ .field("int", "-1")
+ .field("double", "-2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .bytes());
+ for (Document doc : parsedDoc.docs()) {
+ if (doc == parsedDoc.rootDoc()) {
+ continue;
+ }
+ assertEquals(DocValuesType.SORTED_NUMERIC, SimpleStringMappingTests.docValuesType(doc, "nested.int"));
+ assertEquals(DocValuesType.SORTED_NUMERIC, SimpleStringMappingTests.docValuesType(doc, "nested.double"));
+ }
+ }
+
+ /** Test default precision step for autodetected numeric types */
+ @Test
+ public void testPrecisionStepDefaultsDetected() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .field("numeric_detection", true)
+ .field("date_detection", true)
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("long", "100")
+ .field("double", "100.0")
+ .field("date", "2010-01-01")
+ .endObject()
+ .bytes());
+
+ assertEquals(1, doc.docs().size());
+ Document luceneDoc = doc.docs().get(0);
+
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("long"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("double"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("date"));
+ }
+
+ /** Test default precision step for numeric types */
+ @Test
+ public void testPrecisionStepDefaultsMapped() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .endObject()
+ .startObject("float")
+ .field("type", "float")
+ .endObject()
+ .startObject("long")
+ .field("type", "long")
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .endObject()
+ .startObject("short")
+ .field("type", "short")
+ .endObject()
+ .startObject("byte")
+ .field("type", "byte")
+ .endObject()
+ .startObject("date")
+ .field("type", "date")
+ .endObject()
+ .startObject("ip")
+ .field("type", "ip")
+ .endObject()
+
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("int", "100")
+ .field("float", "100.0")
+ .field("long", "5000")
+ .field("double", "34.545")
+ .field("short", "1645")
+ .field("byte", "50")
+ .field("date", "2010-01-01")
+ .field("ip", "255.255.255.255")
+ .endObject()
+ .bytes());
+
+ assertEquals(1, doc.docs().size());
+ Document luceneDoc = doc.docs().get(0);
+
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("long"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("double"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("date"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, luceneDoc.getField("ip"));
+
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_32_BIT, luceneDoc.getField("int"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_32_BIT, luceneDoc.getField("float"));
+
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_16_BIT, luceneDoc.getField("short"));
+ assertPrecisionStepEquals(NumberFieldMapper.Defaults.PRECISION_STEP_8_BIT, luceneDoc.getField("byte"));
+ }
+
+ /** Test precision step set to silly explicit values */
+ @Test
+ public void testPrecisionStepExplicit() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int")
+ .field("type", "integer")
+ .field("precision_step", "1")
+ .endObject()
+ .startObject("float")
+ .field("type", "float")
+ .field("precision_step", "2")
+ .endObject()
+ .startObject("long")
+ .field("type", "long")
+ .field("precision_step", "1")
+ .endObject()
+ .startObject("double")
+ .field("type", "double")
+ .field("precision_step", "2")
+ .endObject()
+ .startObject("short")
+ .field("type", "short")
+ .field("precision_step", "1")
+ .endObject()
+ .startObject("byte")
+ .field("type", "byte")
+ .field("precision_step", "2")
+ .endObject()
+ .startObject("date")
+ .field("type", "date")
+ .field("precision_step", "1")
+ .endObject()
+ .startObject("ip")
+ .field("type", "ip")
+ .field("precision_step", "2")
+ .endObject()
+
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("int", "100")
+ .field("float", "100.0")
+ .field("long", "5000")
+ .field("double", "34.545")
+ .field("short", "1645")
+ .field("byte", "50")
+ .field("date", "2010-01-01")
+ .field("ip", "255.255.255.255")
+ .endObject()
+ .bytes());
+
+ assertEquals(1, doc.docs().size());
+ Document luceneDoc = doc.docs().get(0);
+
+ assertPrecisionStepEquals(1, luceneDoc.getField("int"));
+ assertPrecisionStepEquals(2, luceneDoc.getField("float"));
+ assertPrecisionStepEquals(1, luceneDoc.getField("long"));
+ assertPrecisionStepEquals(2, luceneDoc.getField("double"));
+ assertPrecisionStepEquals(1, luceneDoc.getField("short"));
+ assertPrecisionStepEquals(2, luceneDoc.getField("byte"));
+ assertPrecisionStepEquals(1, luceneDoc.getField("date"));
+ assertPrecisionStepEquals(2, luceneDoc.getField("ip"));
+
+ }
+
+ /** checks precisionstep on both the fieldtype and the tokenstream */
+ private static void assertPrecisionStepEquals(int expected, IndexableField field) throws IOException {
+ assertNotNull(field);
+ assertThat(field, instanceOf(Field.class));
+
+ // check fieldtype's precisionstep
+ assertEquals(expected, ((Field)field).fieldType().numericPrecisionStep());
+
+ // check the tokenstream actually used by the indexer
+ TokenStream ts = field.tokenStream(null, null);
+ assertThat(ts, instanceOf(NumericTokenStream.class));
+ assertEquals(expected, ((NumericTokenStream)ts).getPrecisionStep());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
new file mode 100644
index 0000000000..835ac5d380
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/object/NullValueObjectMappingTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class NullValueObjectMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testNullValueObject() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("obj1").field("type", "object").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .nullField("obj1")
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("obj1").field("field", "value").endObject()
+ .field("value1", "test1")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("obj1.field"), equalTo("value"));
+ assertThat(doc.rootDoc().get("value1"), equalTo("test1"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
new file mode 100644
index 0000000000..774aec4c0d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/object/SimpleObjectMappingTests.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.object;
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+/**
+ */
+public class SimpleObjectMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testDifferentInnerObjectTokenFailure() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ try {
+ defaultMapper.parse("type", "1", new BytesArray(" {\n" +
+ " \"object\": {\n" +
+ " \"array\":[\n" +
+ " {\n" +
+ " \"object\": { \"value\": \"value\" }\n" +
+ " },\n" +
+ " {\n" +
+ " \"object\":\"value\"\n" +
+ " }\n" +
+ " ]\n" +
+ " },\n" +
+ " \"value\":\"value\"\n" +
+ " }"));
+ fail();
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ }
+
+ @Test
+ public void testEmptyArrayProperties() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startArray("properties").endArray()
+ .endObject().endObject().string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+
+ @Test
+ public void emptyFieldsArrayMultiFieldsTest() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("tweet")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .field("index", "analyzed")
+ .startArray("fields")
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void fieldsArrayMultiFieldsShouldThrowExceptionTest() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("tweet")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .field("index", "analyzed")
+ .startArray("fields")
+ .startObject().field("test", "string").endObject()
+ .startObject().field("test2", "string").endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+
+ @Test
+ public void emptyFieldsArrayTest() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("tweet")
+ .startObject("properties")
+ .startArray("fields")
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void fieldsWithFilledArrayShouldThrowExceptionTest() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("tweet")
+ .startObject("properties")
+ .startArray("fields")
+ .startObject().field("test", "string").endObject()
+ .startObject().field("test2", "string").endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+
+ @Test
+ public void fieldPropertiesArrayTest() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("tweet")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .field("index", "analyzed")
+ .startObject("fields")
+ .startObject("raw")
+ .field("type", "string")
+ .field("index","not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+ createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
new file mode 100644
index 0000000000..e748510ee3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/parent/ParentMappingTests.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.parent;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.hamcrest.Matchers.nullValue;
+
+public class ParentMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testParentNotSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
+ // no _parent mapping, dynamically used as a string field
+ assertNull(doc.parent());
+ assertNotNull(doc.rootDoc().get("_parent"));
+ }
+
+ public void testParentSetInDocBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("_parent", "1122")
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1"));
+
+ assertEquals("1122", doc.parent());
+ assertEquals(Uid.createUid("p_type", "1122"), doc.rootDoc().get("_parent"));
+ }
+
+ public void testParentSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_parent").field("type", "p_type").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("x_field", "x_value")
+ .endObject()
+ .bytes()).type("type").id("1").parent("1122"));
+
+ assertEquals(Uid.createUid("p_type", "1122"), doc.rootDoc().get("_parent"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
new file mode 100644
index 0000000000..9a19a449c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/path/PathMapperTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.path;
+
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PathMapperTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testPathMapping() throws IOException {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/path/test-mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ // test full name
+ assertThat(docMapper.mappers().getMapper("first1"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name1.first1"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("last1"), nullValue());
+ assertThat(docMapper.mappers().getMapper("i_last_1"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name1.last1"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("name1.i_last_1"), nullValue());
+
+ assertThat(docMapper.mappers().getMapper("first2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name2.first2"), notNullValue());
+ assertThat(docMapper.mappers().getMapper("last2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("i_last_2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name2.i_last_2"), nullValue());
+ assertThat(docMapper.mappers().getMapper("name2.last2"), notNullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
new file mode 100644
index 0000000000..8af451a0d1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/path/test-mapping.json
@@ -0,0 +1,28 @@
+{
+ "person":{
+ "properties":{
+ "name1":{
+ "type":"object",
+ "properties":{
+ "first1":{
+ "type":"string"
+ },
+ "last1":{
+ "type":"string"
+ }
+ }
+ },
+ "name2":{
+ "type":"object",
+ "properties":{
+ "first2":{
+ "type":"string"
+ },
+ "last2":{
+ "type":"string"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
new file mode 100644
index 0000000000..d4acc74dbb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/routing/RoutingTypeMapperTests.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.routing;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class RoutingTypeMapperTests extends ElasticsearchSingleNodeTest {
+
+ public void testRoutingMapper() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes()).type("type").id("1").routing("routing_value"));
+
+ assertThat(doc.rootDoc().get("_routing"), equalTo("routing_value"));
+ assertThat(doc.rootDoc().get("field"), equalTo("value"));
+ }
+
+ public void testFieldTypeSettingsBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing")
+ .field("store", "no")
+ .field("index", "no")
+ .endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ assertThat(docMapper.routingFieldMapper().fieldType().stored(), equalTo(false));
+ assertEquals(IndexOptions.NONE, docMapper.routingFieldMapper().fieldType().indexOptions());
+ }
+
+ public void testFieldTypeSettingsSerializationBackcompat() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing").field("store", "no").field("index", "no").endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper enabledMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(enabledMapping);
+
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.routingFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_routing"));
+ assertThat(serializedMap.get("_routing"), instanceOf(Map.class));
+ Map<String, Object> routingConfiguration = (Map<String, Object>) serializedMap.get("_routing");
+ assertThat(routingConfiguration, hasKey("store"));
+ assertThat(routingConfiguration.get("store").toString(), is("false"));
+ assertThat(routingConfiguration, hasKey("index"));
+ assertThat(routingConfiguration.get("index").toString(), is("no"));
+ }
+
+ public void testPathBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_routing").field("path", "custom_routing").endObject()
+ .endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("custom_routing", "routing_value").endObject();
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ assertEquals(request.routing(), "routing_value");
+ }
+
+ public void testIncludeInObjectBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("_timestamp", 2000000).endObject();
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ // _routing in a document never worked, so backcompat is ignoring the field
+ assertNull(request.routing());
+ assertNull(docMapper.parse("type", "1", doc.bytes()).rootDoc().get("_routing"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
new file mode 100644
index 0000000000..c429a012f8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/SimpleMapperTests.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.simple;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.mapper.MapperBuilders.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleMapperTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testSimpleMapper() throws Exception {
+ IndexService indexService = createIndex("test");
+ Settings settings = indexService.settingsService().getSettings();
+ DocumentMapperParser mapperParser = indexService.mapperService().documentMapperParser();
+ DocumentMapper docMapper = doc("test", settings,
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(indexService.mapperService(), mapperParser);
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+
+ assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ doc = docMapper.parse("person", "1", json).rootDoc();
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testParseToJsonAndParse() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper docMapper = parser.parse(mapping);
+ String builtMapping = docMapper.mappingSource().string();
+// System.out.println(builtMapping);
+ // reparse it
+ DocumentMapper builtDocMapper = parser.parse(builtMapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = builtDocMapper.parse("person", "1", json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().fieldType().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParser() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().fieldType().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testSimpleParserNoTypeNoId() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json"));
+ Document doc = docMapper.parse("person", "1", json).rootDoc();
+ assertThat(doc.get(docMapper.uidMapper().fieldType().names().indexName()), equalTo(Uid.createUid("person", "1")));
+ assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().names().indexName()), equalTo("shay"));
+// System.out.println("Document: " + doc);
+// System.out.println("Json: " + docMapper.sourceMapper().value(doc));
+ }
+
+ @Test
+ public void testAttributes() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json");
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper docMapper = parser.parse(mapping);
+
+ assertThat((String) docMapper.meta().get("param1"), equalTo("value1"));
+
+ String builtMapping = docMapper.mappingSource().string();
+ DocumentMapper builtDocMapper = parser.parse(builtMapping);
+ assertThat((String) builtDocMapper.meta().get("param1"), equalTo("value1"));
+ }
+
+ @Test
+ public void testNoDocumentSent() throws Exception {
+ IndexService indexService = createIndex("test");
+ Settings settings = indexService.settingsService().getSettings();
+ DocumentMapperParser mapperParser = indexService.mapperService().documentMapperParser();
+ DocumentMapper docMapper = doc("test", settings,
+ rootObject("person")
+ .add(object("name").add(stringField("first").store(true).index(false)))
+ ).build(indexService.mapperService(), mapperParser);
+
+ BytesReference json = new BytesArray("".getBytes(Charsets.UTF_8));
+ try {
+ docMapper.parse("person", "1", json).rootDoc();
+ fail("this point is never reached");
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse, document is empty"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json b/core/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
new file mode 100644
index 0000000000..e001673758
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/test-mapping.json
@@ -0,0 +1,84 @@
+{
+ person:{
+ "_meta":{
+ "param1":"value1"
+ },
+ date_formats:["yyyy-MM-dd", "dd-MM-yyyy"],
+ dynamic:false,
+ enabled:true,
+ _source:{
+ },
+ properties:{
+ name:{
+ type:"object",
+ dynamic:false,
+ properties:{
+ first:{
+ type:"string",
+ store:"yes"
+ },
+ last:{
+ type:"string",
+ index:"not_analyzed"
+ }
+ }
+ },
+ address:{
+ type:"object",
+ properties:{
+ first:{
+ properties:{
+ location:{
+ type:"string",
+ store:"yes"
+ }
+ }
+ },
+ last:{
+ properties:{
+ location:{
+ type:"string"
+ }
+ }
+ }
+ }
+ },
+ age:{
+ type:"integer",
+ null_value:0
+ },
+ birthdate:{
+ type:"date",
+ format:"yyyy-MM-dd"
+ },
+ nerd:{
+ type:"boolean"
+ },
+ dogs:{
+ type:"string"
+ },
+ complex:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ },
+ complex2:{
+ type:"object",
+ properties:{
+ value1:{
+ type:"string"
+ },
+ value2:{
+ type:"string"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
new file mode 100644
index 0000000000..eb71b7a820
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype-noid.json
@@ -0,0 +1,39 @@
+{
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
new file mode 100644
index 0000000000..e91f2f543d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-notype.json
@@ -0,0 +1,40 @@
+{
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
new file mode 100644
index 0000000000..5711d5835b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1-withtype.json
@@ -0,0 +1,42 @@
+{
+ person:{
+ _id:"1",
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/simple/test1.json b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
new file mode 100644
index 0000000000..a4e64e9cc2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/simple/test1.json
@@ -0,0 +1,39 @@
+{
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIntegrationTests.java b/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIntegrationTests.java
new file mode 100644
index 0000000000..55e763065a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingIntegrationTests.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.mapper.size;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class SizeMappingIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test // issue 5053
+ public void testThatUpdatingMappingShouldNotRemoveSizeMappingConfiguration() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertSizeMappingEnabled(index, type, true);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "string").endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+ // make sure size field is still in mapping
+ assertSizeMappingEnabled(index, type, true);
+ }
+
+ @Test
+ public void testThatSizeCanBeSwitchedOnAndOff() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_size").field("enabled", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertSizeMappingEnabled(index, type, true);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("_size").field("enabled", false).endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+ // make sure size field is still in mapping
+ assertSizeMappingEnabled(index, type, false);
+ }
+
+ private void assertSizeMappingEnabled(String index, String type, boolean enabled) throws IOException {
+ String errMsg = String.format(Locale.ROOT, "Expected size field mapping to be " + (enabled ? "enabled" : "disabled") + " for %s/%s", index, type);
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get();
+ Map<String, Object> mappingSource = getMappingsResponse.getMappings().get(index).get(type).getSourceAsMap();
+ assertThat(errMsg, mappingSource, hasKey("_size"));
+ String sizeAsString = mappingSource.get("_size").toString();
+ assertThat(sizeAsString, is(notNullValue()));
+ assertThat(errMsg, sizeAsString, is("{enabled=" + (enabled) + "}"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
new file mode 100644
index 0000000000..80ce787e55
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.size;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import static org.hamcrest.Matchers.*;
+
+public class SizeMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testSizeEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue());
+ }
+
+ public void testSizeEnabledAndStoredBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size").fieldType().stored(), equalTo(true));
+ assertThat(doc.rootDoc().getField("_size").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue());
+ }
+
+ public void testSizeDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ public void testSizeNotSet() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1"));
+
+ assertThat(doc.rootDoc().getField("_size"), nullValue());
+ }
+
+ public void testThatDisablingWorksWhenMerging() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper enabledMapper = parser.parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_size").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = parser.parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper.mapping(), false);
+ assertThat(enabledMapper.SizeFieldMapper().enabled(), is(false));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
new file mode 100644
index 0000000000..e08562cfb7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class CompressSourceMappingTests extends ElasticsearchSingleNodeTest {
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+
+ @Test
+ public void testCompressDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));
+ }
+
+ @Test
+ public void testCompressEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
+ }
+
+ @Test
+ public void testCompressThreshold() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("compress_threshold", "200b").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .endObject().bytes());
+
+ BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));
+
+ doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
+ .endObject().bytes());
+
+ bytes = doc.rootDoc().getBinaryValue("_source");
+ assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
new file mode 100644
index 0000000000..10f33c9025
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.source;
+
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest {
+
+ public void testNoFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper documentMapper = parser.parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = parser.parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE));
+ }
+
+ public void testJsonFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper documentMapper = parser.parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+
+ documentMapper = parser.parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
+ }
+
+ public void testJsonFormatCompressedBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("format", "json").field("compress", true).endObject()
+ .endObject().endObject().string();
+
+ Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser();
+ DocumentMapper documentMapper = parser.parse(mapping);
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+
+ documentMapper = parser.parse(mapping);
+ doc = documentMapper.parse("type", "1", XContentFactory.smileBuilder().startObject()
+ .field("field", "value")
+ .endObject().bytes());
+
+ assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
+ uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
+ assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
+ }
+
+ public void testIncludes() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("includes", new String[]{"path1*"}).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("field1", "value1").endObject()
+ .startObject("path2").field("field2", "value2").endObject()
+ .endObject().bytes());
+
+ IndexableField sourceField = doc.rootDoc().getField("_source");
+ Map<String, Object> sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose();
+ assertThat(sourceAsMap.containsKey("path1"), equalTo(true));
+ assertThat(sourceAsMap.containsKey("path2"), equalTo(false));
+ }
+
+ public void testExcludes() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("excludes", new String[]{"path1*"}).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject()
+ .startObject("path1").field("field1", "value1").endObject()
+ .startObject("path2").field("field2", "value2").endObject()
+ .endObject().bytes());
+
+ IndexableField sourceField = doc.rootDoc().getField("_source");
+ Map<String, Object> sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose();
+ assertThat(sourceAsMap.containsKey("path1"), equalTo(false));
+ assertThat(sourceAsMap.containsKey("path2"), equalTo(true));
+ }
+
+ public void testDefaultMappingAndNoMapping() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper mapper = parser.parse("my_type", null, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ try {
+ mapper = parser.parse(null, null, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ fail();
+ } catch (MapperParsingException e) {
+ // all is well
+ }
+ try {
+ mapper = parser.parse(null, "{}", defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getMessage(), equalTo("malformed mapping no root object found"));
+ // all is well
+ }
+ }
+
+ public void testDefaultMappingAndWithMappingOverride() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = createIndex("test").mapperService().documentMapperParser().parse("my_type", mapping, defaultMapping);
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+
+ public void testDefaultMappingAndNoMappingWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = createIndex("test").mapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), true);
+
+ DocumentMapper mapper = mapperService.documentMapperWithAutoCreate("my_type").v1();
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(false));
+ }
+
+ public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+
+ MapperService mapperService = createIndex("test").mapperService();
+ mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent(defaultMapping), true);
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ mapperService.merge("my_type", new CompressedXContent(mapping), true);
+
+ DocumentMapper mapper = mapperService.documentMapper("my_type");
+ assertThat(mapper.type(), equalTo("my_type"));
+ assertThat(mapper.sourceMapper().enabled(), equalTo(true));
+ }
+
+ void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... conflicts) throws IOException {
+ DocumentMapper docMapper = parser.parse(mapping1);
+ docMapper = parser.parse(docMapper.mappingSource().string());
+ MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true);
+
+ List<String> expectedConflicts = new ArrayList<>(Arrays.asList(conflicts));
+ for (String conflict : mergeResult.buildConflicts()) {
+ assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict));
+ }
+ assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty());
+ }
+
+ public void testEnabledNotUpdateable() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ // using default of true
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ assertConflicts(mapping1, mapping2, parser, "Cannot update enabled setting for [_source]");
+
+ // not changing is ok
+ String mapping3 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ assertConflicts(mapping1, mapping3, parser);
+ }
+
+ public void testIncludesNotUpdateable() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("includes", "foo.*").endObject()
+ .endObject().endObject().string();
+ assertConflicts(defaultMapping, mapping1, parser, "Cannot update includes setting for [_source]");
+ assertConflicts(mapping1, defaultMapping, parser, "Cannot update includes setting for [_source]");
+
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("includes", "foo.*", "bar.*").endObject()
+ .endObject().endObject().string();
+ assertConflicts(mapping1, mapping2, parser, "Cannot update includes setting for [_source]");
+
+ // not changing is ok
+ assertConflicts(mapping1, mapping1, parser);
+ }
+
+ public void testExcludesNotUpdateable() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ String mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("excludes", "foo.*").endObject()
+ .endObject().endObject().string();
+ assertConflicts(defaultMapping, mapping1, parser, "Cannot update excludes setting for [_source]");
+ assertConflicts(mapping1, defaultMapping, parser, "Cannot update excludes setting for [_source]");
+
+ String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("excludes", "foo.*", "bar.*").endObject()
+ .endObject().endObject().string();
+ assertConflicts(mapping1, mapping2, parser, "Cannot update excludes setting for [_source]");
+
+ // not changing is ok
+ assertConflicts(mapping1, mapping1, parser);
+ }
+
+ public void testComplete() throws Exception {
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+ assertTrue(parser.parse(mapping).sourceMapper().isComplete());
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ assertFalse(parser.parse(mapping).sourceMapper().isComplete());
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("includes", "foo.*").endObject()
+ .endObject().endObject().string();
+ assertFalse(parser.parse(mapping).sourceMapper().isComplete());
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_source").array("excludes", "foo.*").endObject()
+ .endObject().endObject().string();
+ assertFalse(parser.parse(mapping).sourceMapper().isComplete());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
new file mode 100644
index 0000000000..0583e28999
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java
@@ -0,0 +1,522 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.string;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.IndexableFieldType;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.TermQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.ContentPath;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.Mapper.BuilderContext;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParseContext.Document;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.elasticsearch.index.mapper.core.StringFieldMapper.Builder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest {
+
+ private static Settings DOC_VALUES_SETTINGS = Settings.builder().put(FieldDataType.FORMAT_KEY, FieldDataType.DOC_VALUES_FORMAT_VALUE).build();
+
+ IndexService indexService;
+ DocumentMapperParser parser;
+
+ @Before
+ public void before() {
+ indexService = createIndex("test");
+ parser = indexService.mapperService().documentMapperParser();
+ }
+
+ @Test
+ public void testLimit() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("ignore_above", 5).endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "12345")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), notNullValue());
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "123456")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field"), nullValue());
+ }
+
+ private void assertDefaultAnalyzedFieldType(IndexableFieldType fieldType) {
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ }
+
+ private void assertEquals(IndexableFieldType ft1, IndexableFieldType ft2) {
+ assertEquals(ft1.tokenized(), ft2.tokenized());
+ assertEquals(ft1.omitNorms(), ft2.omitNorms());
+ assertEquals(ft1.indexOptions(), ft2.indexOptions());
+ assertEquals(ft1.storeTermVectors(), ft2.storeTermVectors());
+ assertEquals(ft1.docValuesType(), ft2.docValuesType());
+ }
+
+ private void assertParseIdemPotent(IndexableFieldType expected, DocumentMapper mapper) throws Exception {
+ String mapping = mapper.toXContent(XContentFactory.jsonBuilder().startObject(), new ToXContent.MapParams(ImmutableMap.<String, String>of())).endObject().string();
+ mapper = parser.parse(mapping);
+ ParsedDocument doc = mapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "2345")
+ .endObject()
+ .bytes());
+ assertEquals(expected, doc.rootDoc().getField("field").fieldType());
+ }
+
+ @Test
+ public void testDefaultsForAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertDefaultAnalyzedFieldType(fieldType);
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testDefaultsForNotAnalyzed() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(true));
+ assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // now test it explicitly set
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").startObject("norms").field("enabled", true).endObject().field("index_options", "freqs").endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = parser.parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS_AND_FREQS));
+ assertThat(fieldType.storeTermVectors(), equalTo(false));
+ assertThat(fieldType.storeTermVectorOffsets(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPositions(), equalTo(false));
+ assertThat(fieldType.storeTermVectorPayloads(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+
+ // also test the deprecated omit_norms
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").field("index", "not_analyzed").field("omit_norms", false).endObject().endObject()
+ .endObject().endObject().string();
+
+ defaultMapper = parser.parse(mapping);
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertThat(fieldType.omitNorms(), equalTo(false));
+ assertParseIdemPotent(fieldType, defaultMapper);
+ }
+
+ @Test
+ public void testSearchQuoteAnalyzerSerialization() throws Exception {
+ // Cases where search_quote_analyzer should not be added to the mapping.
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .field("analyzer", "standard")
+ .endObject()
+ .startObject("field3")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .field("analyzer", "standard")
+ .field("search_analyzer", "simple")
+ .endObject()
+ .startObject("field4")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .field("analyzer", "standard")
+ .field("search_analyzer", "simple")
+ .field("search_quote_analyzer", "simple")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper mapper = parser.parse(mapping);
+ for (String fieldName : Lists.newArrayList("field1", "field2", "field3", "field4")) {
+ Map<String, Object> serializedMap = getSerializedMap(fieldName, mapper);
+ assertFalse(serializedMap.containsKey("search_quote_analyzer"));
+ }
+
+ // Cases where search_quote_analyzer should be present.
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .field("search_quote_analyzer", "simple")
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("position_offset_gap", 1000)
+ .field("analyzer", "standard")
+ .field("search_analyzer", "standard")
+ .field("search_quote_analyzer", "simple")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ mapper = parser.parse(mapping);
+ for (String fieldName : Lists.newArrayList("field1", "field2")) {
+ Map<String, Object> serializedMap = getSerializedMap(fieldName, mapper);
+ assertEquals(serializedMap.get("search_quote_analyzer"), "simple");
+ }
+ }
+
+ private Map<String, Object> getSerializedMap(String fieldName, DocumentMapper mapper) throws Exception {
+ FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper(fieldName);
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ fieldMapper.toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+
+ Map<String, Object> fieldMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ @SuppressWarnings("unchecked")
+ Map<String, Object> result = (Map<String, Object>) fieldMap.get(fieldName);
+ return result;
+ }
+
+ @Test
+ public void testTermVectors() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .field("term_vector", "no")
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .field("term_vector", "yes")
+ .endObject()
+ .startObject("field3")
+ .field("type", "string")
+ .field("term_vector", "with_offsets")
+ .endObject()
+ .startObject("field4")
+ .field("type", "string")
+ .field("term_vector", "with_positions")
+ .endObject()
+ .startObject("field5")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("field6")
+ .field("type", "string")
+ .field("term_vector", "with_positions_offsets_payloads")
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field1", "1234")
+ .field("field2", "1234")
+ .field("field3", "1234")
+ .field("field4", "1234")
+ .field("field5", "1234")
+ .field("field6", "1234")
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field2").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPositions(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field3").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorOffsets(), equalTo(false));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field4").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field5").fieldType().storeTermVectorPayloads(), equalTo(false));
+
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectors(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorOffsets(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPositions(), equalTo(true));
+ assertThat(doc.rootDoc().getField("field6").fieldType().storeTermVectorPayloads(), equalTo(true));
+ }
+
+ public void testDocValuesFielddata() throws Exception {
+ IndexService indexService = createIndex("index");
+ DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
+ final BuilderContext ctx = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+
+ assertFalse(new Builder("anything").index(false).build(ctx).fieldType().hasDocValues());
+ assertTrue(new Builder("anything").index(false).fieldDataSettings(DOC_VALUES_SETTINGS).build(ctx).fieldType().hasDocValues());
+ assertTrue(new Builder("anything").index(false).docValues(true).build(ctx).fieldType().hasDocValues());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("str1")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field("format", "fst")
+ .endObject()
+ .endObject()
+ .startObject("str2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("str1", "1234")
+ .field("str2", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(DocValuesType.NONE, docValuesType(doc, "str1"));
+ assertEquals(DocValuesType.SORTED_SET, docValuesType(doc, "str2"));
+ }
+
+ public void testDocValues() throws Exception {
+ // doc values only work on non-analyzed content
+ final BuilderContext ctx = new BuilderContext(indexService.settingsService().getSettings(), new ContentPath(1));
+ try {
+ new StringFieldMapper.Builder("anything").docValues(true).build(ctx);
+ fail();
+ } catch (Exception e) { /* OK */ }
+
+ assertFalse(new Builder("anything").index(false).build(ctx).fieldType().hasDocValues());
+ assertTrue(new Builder("anything").index(true).tokenized(false).build(ctx).fieldType().hasDocValues());
+ assertFalse(new Builder("anything").index(true).tokenized(true).build(ctx).fieldType().hasDocValues());
+ assertFalse(new Builder("anything").index(false).tokenized(false).docValues(false).build(ctx).fieldType().hasDocValues());
+ assertTrue(new Builder("anything").index(false).docValues(true).build(ctx).fieldType().hasDocValues());
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("str1")
+ .field("type", "string")
+ .field("index", "no")
+ .endObject()
+ .startObject("str2")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("str3")
+ .field("type", "string")
+ .field("index", "analyzed")
+ .endObject()
+ .startObject("str4")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("str5")
+ .field("type", "string")
+ .field("index", "no")
+ .field("doc_values", true)
+ .endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument parsedDoc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("str1", "1234")
+ .field("str2", "1234")
+ .field("str3", "1234")
+ .field("str4", "1234")
+ .field("str5", "1234")
+ .endObject()
+ .bytes());
+ final Document doc = parsedDoc.rootDoc();
+ assertEquals(DocValuesType.NONE, docValuesType(doc, "str1"));
+ assertEquals(DocValuesType.SORTED_SET, docValuesType(doc, "str2"));
+ assertEquals(DocValuesType.NONE, docValuesType(doc, "str3"));
+ assertEquals(DocValuesType.NONE, docValuesType(doc, "str4"));
+ assertEquals(DocValuesType.SORTED_SET, docValuesType(doc, "str5"));
+
+ }
+
+ // TODO: this function shouldn't be necessary. parsing should just add a single field that is indexed and dv
+ public static DocValuesType docValuesType(Document document, String fieldName) {
+ for (IndexableField field : document.getFields(fieldName)) {
+ if (field.fieldType().docValuesType() != DocValuesType.NONE) {
+ return field.fieldType().docValuesType();
+ }
+ }
+ return DocValuesType.NONE;
+ }
+
+ @Test
+ public void testDisableNorms() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapper defaultMapper = parser.parse(mapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ IndexableFieldType fieldType = doc.rootDoc().getField("field").fieldType();
+ assertEquals(false, fieldType.omitNorms());
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject()
+ .endObject().endObject().endObject().endObject().string();
+ MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false);
+ assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts());
+
+ doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "1234")
+ .endObject()
+ .bytes());
+
+ fieldType = doc.rootDoc().getField("field").fieldType();
+ assertEquals(true, fieldType.omitNorms());
+
+ updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", true).endObject()
+ .endObject().endObject().endObject().endObject().string();
+ mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), true);
+ assertTrue(mergeResult.hasConflicts());
+ assertEquals(1, mergeResult.buildConflicts().length);
+ assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
new file mode 100644
index 0000000000..8c65418892
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java
@@ -0,0 +1,792 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.timestamp;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.TimestampParsingException;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.Version.V_1_5_0;
+import static org.elasticsearch.Version.V_2_0_0;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.isIn;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+public class TimestampMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testSimpleDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", "yes").field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").timestamp(1));
+
+ assertThat(doc.rootDoc().getField("_timestamp").fieldType().stored(), equalTo(true));
+ assertNotSame(IndexOptions.NONE, doc.rootDoc().getField("_timestamp").fieldType().indexOptions());
+ assertThat(doc.rootDoc().getField("_timestamp").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ for (Version version : Arrays.asList(V_1_5_0, V_2_0_0, randomVersion(random()))) {
+ for (String mapping : Arrays.asList(
+ XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string(),
+ XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_timestamp").endObject().endObject().string())) {
+ DocumentMapper docMapper = createIndex("test", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build()).mapperService().documentMapperParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(TimestampFieldMapper.Defaults.ENABLED.enabled));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(version.onOrAfter(Version.V_2_0_0) ? true : false));
+ assertThat(docMapper.timestampFieldMapper().fieldType().indexOptions(), equalTo(TimestampFieldMapper.Defaults.FIELD_TYPE.indexOptions()));
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo(TimestampFieldMapper.Defaults.PATH));
+ assertThat(docMapper.timestampFieldMapper().fieldType().dateTimeFormatter().format(), equalTo(TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT));
+ assertThat(docMapper.timestampFieldMapper().fieldType().hasDocValues(), equalTo(false));
+ assertAcked(client().admin().indices().prepareDelete("test").execute().get());
+ }
+ }
+ }
+
+
+ @Test
+ public void testSetValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes").field("store", "no").field("index", "no")
+ .field("path", "timestamp").field("format", "year")
+ .field("doc_values", true)
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.timestampFieldMapper().fieldType().stored(), equalTo(false));
+ assertEquals(IndexOptions.NONE, docMapper.timestampFieldMapper().fieldType().indexOptions());
+ assertThat(docMapper.timestampFieldMapper().path(), equalTo("timestamp"));
+ assertThat(docMapper.timestampFieldMapper().fieldType().dateTimeFormatter().format(), equalTo("year"));
+ assertThat(docMapper.timestampFieldMapper().fieldType().hasDocValues(), equalTo(true));
+ }
+
+ @Test
+ public void testThatDisablingDuringMergeIsWorking() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper enabledMapper = parser.parse(enabledMapping);
+
+ String disabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false).endObject()
+ .endObject().endObject().string();
+ DocumentMapper disabledMapper = parser.parse(disabledMapping);
+
+ enabledMapper.merge(disabledMapper.mapping(), false);
+
+ assertThat(enabledMapper.timestampFieldMapper().enabled(), is(false));
+ }
+
+ @Test // issue 3174
+ public void testThatSerializationWorksCorrectlyForIndexField() throws Exception {
+ String enabledMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").field("index", "no").endObject()
+ .endObject().endObject().string();
+ DocumentMapper enabledMapper = createIndex("test").mapperService().documentMapperParser().parse(enabledMapping);
+
+ XContentBuilder builder = JsonXContent.contentBuilder().startObject();
+ enabledMapper.timestampFieldMapper().toXContent(builder, ToXContent.EMPTY_PARAMS).endObject();
+ builder.close();
+ Map<String, Object> serializedMap = JsonXContent.jsonXContent.createParser(builder.bytes()).mapAndClose();
+ assertThat(serializedMap, hasKey("_timestamp"));
+ assertThat(serializedMap.get("_timestamp"), instanceOf(Map.class));
+ Map<String, Object> timestampConfiguration = (Map<String, Object>) serializedMap.get("_timestamp");
+ assertThat(timestampConfiguration, hasKey("index"));
+ assertThat(timestampConfiguration.get("index").toString(), is("no"));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testPathMissingDefaultValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("path", "timestamp")
+ .field("ignore_missing", false)
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ try {
+ request.process(metaData, mappingMetaData, true, "test");
+ fail();
+ } catch (TimestampParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("timestamp is required by mapping"));
+ }
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampDefaultValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+ assertThat(request.timestamp(), notNullValue());
+
+ // We should have less than one minute (probably some ms)
+ long delay = System.currentTimeMillis() - Long.parseLong(request.timestamp());
+ assertThat(delay, lessThanOrEqualTo(60000L));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testPathMissingDefaultToEpochValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("path", "timestamp")
+ .field("default", "1970-01-01")
+ .field("format", "YYYY-MM-dd")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+ assertThat(request.timestamp(), notNullValue());
+ assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd"))));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampMissingDefaultToEpochValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("default", "1970-01-01")
+ .field("format", "YYYY-MM-dd")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+ assertThat(request.timestamp(), notNullValue());
+ assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd"))));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testPathMissingNowDefaultValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("path", "timestamp")
+ .field("default", "now")
+ .field("format", "YYYY-MM-dd")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+ assertThat(request.timestamp(), notNullValue());
+
+ // We should have less than one minute (probably some ms)
+ long delay = System.currentTimeMillis() - Long.parseLong(request.timestamp());
+ assertThat(delay, lessThanOrEqualTo(60000L));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampMissingNowDefaultValue() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("default", "now")
+ .field("format", "YYYY-MM-dd")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+ assertThat(request.timestamp(), notNullValue());
+
+ // We should have less than one minute (probably some ms)
+ long delay = System.currentTimeMillis() - Long.parseLong(request.timestamp());
+ assertThat(delay, lessThanOrEqualTo(60000L));
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testPathMissingWithForcedNullDefaultShouldFail() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("path", "timestamp")
+ .field("default", (String) null)
+ .endObject()
+ .endObject().endObject();
+ try {
+ createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+ fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null");
+ } catch (TimestampParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
+ }
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testPathMissingShouldFail() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("path", "timestamp")
+ .field("ignore_missing", false)
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ try {
+ request.process(metaData, mappingMetaData, true, "test");
+ fail("we should reject the mapping with a TimestampParsingException: timestamp is required by mapping");
+ } catch (TimestampParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("timestamp is required by mapping"));
+ }
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampMissingWithForcedNullDefaultShouldFail() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("default", (String) null)
+ .endObject()
+ .endObject().endObject();
+
+ try {
+ createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+ fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set to null");
+ } catch (TimestampParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set to null"));
+ }
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampDefaultAndIgnore() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .field("default", "1971-12-26")
+ .field("ignore_missing", false)
+ .endObject()
+ .endObject().endObject();
+
+ try {
+ createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+ fail("we should reject the mapping with a TimestampParsingException: default timestamp can not be set with ignore_missing set to false");
+ } catch (TimestampParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("default timestamp can not be set with ignore_missing set to false"));
+ }
+ }
+
+ @Test // Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]
+ public void testTimestampMissingShouldNotFail() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", "yes")
+ .endObject()
+ .endObject().endObject();
+ XContentBuilder doc = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("foo", "bar")
+ .endObject();
+
+ MetaData metaData = MetaData.builder().build();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping.string());
+
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(metaData, mappingMetaData, true, "test");
+
+ assertThat(request.timestamp(), notNullValue());
+
+ // We should have less than one minute (probably some ms)
+ long delay = System.currentTimeMillis() - Long.parseLong(request.timestamp());
+ assertThat(delay, lessThanOrEqualTo(60000L));
+ }
+
+ @Test
+ public void testDefaultTimestampStream() throws IOException {
+ // Testing null value for default timestamp
+ {
+ MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
+ TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, null, null);
+ MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
+ new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ expected.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+
+ MappingMetaData metaData = MappingMetaData.PROTO.readFrom(StreamInput.wrap(bytes));
+
+ assertThat(metaData, is(expected));
+ }
+
+ // Testing "now" value for default timestamp
+ {
+ MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
+ TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, "now", null);
+ MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
+ new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ expected.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+
+ MappingMetaData metaData = MappingMetaData.PROTO.readFrom(StreamInput.wrap(bytes));
+
+ assertThat(metaData, is(expected));
+ }
+
+ // Testing "ignore_missing" value for default timestamp
+ {
+ MappingMetaData.Timestamp timestamp = new MappingMetaData.Timestamp(true, null,
+ TimestampFieldMapper.DEFAULT_DATE_TIME_FORMAT, "now", false);
+ MappingMetaData expected = new MappingMetaData("type", new CompressedXContent("{}".getBytes(StandardCharsets.UTF_8)),
+ new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false);
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ expected.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+
+ MappingMetaData metaData = MappingMetaData.PROTO.readFrom(StreamInput.wrap(bytes));
+
+ assertThat(metaData, is(expected));
+ }
+ }
+
+ @Test
+ public void testMergingFielddataLoadingWorks() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "lazy").field("format", "doc_values").endObject().field("store", "yes").endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapperParser parser = createIndex("test", indexSettings).mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.LAZY));
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("doc_values"));
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject()
+ .endObject().endObject().string();
+
+ MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false);
+ assertThat(mergeResult.buildConflicts().length, equalTo(0));
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER));
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array"));
+ }
+
+ @Test
+ public void testParsingNotDefaultTwiceDoesNotChangeMapping() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true)
+ .field("index", randomBoolean() ? "no" : "analyzed") // default is "not_analyzed" which will be omitted when building the source again
+ .field("doc_values", true)
+ .field("path", "foo")
+ .field("default", "1970-01-01")
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ docMapper = parser.parse(docMapper.mappingSource().string());
+ assertThat(docMapper.mappingSource().string(), equalTo(mapping));
+ }
+
+ @Test
+ public void testParsingTwiceDoesNotChangeTokenizeValue() throws Exception {
+ String[] index_options = {"no", "analyzed", "not_analyzed"};
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true)
+ .field("index", index_options[randomInt(2)])
+ .field("store", true)
+ .field("path", "foo")
+ .field("default", "1970-01-01")
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .endObject()
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ boolean tokenized = docMapper.timestampFieldMapper().fieldType().tokenized();
+ docMapper = parser.parse(docMapper.mappingSource().string());
+ assertThat(tokenized, equalTo(docMapper.timestampFieldMapper().fieldType().tokenized()));
+ }
+
+ @Test
+ public void testMergingConflicts() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true)
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .field("store", "yes")
+ .field("index", "analyzed")
+ .field("path", "foo")
+ .field("default", "1970-01-01")
+ .endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapperParser parser = createIndex("test", indexSettings).mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.LAZY));
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false)
+ .startObject("fielddata").field("format", "array").endObject()
+ .field("store", "no")
+ .field("index", "no")
+ .field("path", "bar")
+ .field("default", "1970-01-02")
+ .endObject()
+ .endObject().endObject().string();
+
+ MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true);
+ List<String> expectedConflicts = new ArrayList<>(Arrays.asList(
+ "mapper [_timestamp] has different index values",
+ "mapper [_timestamp] has different store values",
+ "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02",
+ "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar",
+ "mapper [_timestamp] has different tokenize values"));
+
+ for (String conflict : mergeResult.buildConflicts()) {
+ assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict));
+ }
+ assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty());
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.LAZY));
+ assertTrue(docMapper.timestampFieldMapper().enabled());
+ assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("doc_values"));
+ }
+
+ @Test
+ public void testMergingConflictsForIndexValues() throws Exception {
+ List<String> indexValues = new ArrayList<>();
+ indexValues.add("analyzed");
+ indexValues.add("no");
+ indexValues.add("not_analyzed");
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("index", indexValues.remove(randomInt(2)))
+ .endObject()
+ .endObject().endObject().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+
+ DocumentMapper docMapper = parser.parse(mapping);
+ mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("index", indexValues.remove(randomInt(1)))
+ .endObject()
+ .endObject().endObject().string();
+
+ MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true);
+ List<String> expectedConflicts = new ArrayList<>();
+ expectedConflicts.add("mapper [_timestamp] has different index values");
+ expectedConflicts.add("mapper [_timestamp] has different tokenize values");
+ if (indexValues.get(0).equals("not_analyzed") == false) {
+ // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the
+ // other two cases, it will change
+ expectedConflicts.add("mapper [_timestamp] has different doc_values values");
+ }
+
+ for (String conflict : mergeResult.buildConflicts()) {
+ assertThat(conflict, isIn(expectedConflicts));
+ }
+ }
+
+ /**
+ * Test for issue #9223
+ */
+ @Test
+ public void testInitMappers() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", true)
+ .field("default", (String) null)
+ .endObject()
+ .endObject().endObject().string();
+ // This was causing a NPE
+ new MappingMetaData(new CompressedXContent(mapping));
+ }
+
+ @Test
+ public void testMergePaths() throws Exception {
+ String[] possiblePathValues = {"some_path", "anotherPath", null};
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ XContentBuilder mapping1 = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp");
+ String path1 = possiblePathValues[randomInt(2)];
+ if (path1!=null) {
+ mapping1.field("path", path1);
+ }
+ mapping1.endObject()
+ .endObject().endObject();
+ XContentBuilder mapping2 = XContentFactory.jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("_timestamp");
+ String path2 = possiblePathValues[randomInt(2)];
+ if (path2!=null) {
+ mapping2.field("path", path2);
+ }
+ mapping2.endObject()
+ .endObject().endObject();
+
+ assertConflict(mapping1.string(), mapping2.string(), parser, (path1 == path2 ? null : "Cannot update path in _timestamp value"));
+ }
+
+ void assertConflict(String mapping1, String mapping2, DocumentMapperParser parser, String conflict) throws IOException {
+ DocumentMapper docMapper = parser.parse(mapping1);
+ docMapper = parser.parse(docMapper.mappingSource().string());
+ MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true);
+ assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 0 : 1));
+ if (conflict != null) {
+ assertThat(mergeResult.buildConflicts()[0], containsString(conflict));
+ }
+ }
+
+ public void testDocValuesSerialization() throws Exception {
+ // default
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+
+ // just format specified
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+
+ // explicitly enabled
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("doc_values", true)
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+
+ // explicitly disabled
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("doc_values", false)
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+
+ // explicitly enabled, with format
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("doc_values", true)
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+
+ // explicitly disabled, with format
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp")
+ .field("doc_values", false)
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .endObject().endObject().endObject().string();
+ assertDocValuesSerialization(mapping);
+ }
+
+ void assertDocValuesSerialization(String mapping) throws Exception {
+ DocumentMapperParser parser = createIndex("test_doc_values").mapperService().documentMapperParser();
+ DocumentMapper docMapper = parser.parse(mapping);
+ boolean docValues = docMapper.timestampFieldMapper().fieldType().hasDocValues();
+ docMapper = parser.parse(docMapper.mappingSource().string());
+ assertThat(docMapper.timestampFieldMapper().fieldType().hasDocValues(), equalTo(docValues));
+ assertAcked(client().admin().indices().prepareDelete("test_doc_values"));
+ }
+
+ public void testPath() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("path", "custom_timestamp").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("custom_timestamp", 1).endObject();
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ assertEquals(request.timestamp(), "1");
+ }
+
+ public void testIncludeInObjectBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("default", "1970").field("format", "YYYY").endObject()
+ .endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("_timestamp", 2000000).endObject();
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ // _timestamp in a document never worked, so backcompat is ignoring the field
+ assertEquals(MappingMetaData.Timestamp.parseStringTimestamp("1970", Joda.forPattern("YYYY")), request.timestamp());
+ assertNull(docMapper.parse("type", "1", doc.bytes()).rootDoc().get("_timestamp"));
+ }
+
+ public void testThatEpochCanBeIgnoredWithCustomFormat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("format", "yyyyMMddHH").field("path", "custom_timestamp").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("custom_timestamp", 2015060210).endObject();
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ assertThat(request.timestamp(), is("1433239200000"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
new file mode 100644
index 0000000000..32b75094a8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -0,0 +1,352 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.ttl;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class TTLMappingTests extends ElasticsearchSingleNodeTest {
+ @Test
+ public void testSimpleDisabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl"), equalTo(null));
+ }
+
+ @Test
+ public void testEnabled() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl").field("enabled", "yes").endObject()
+ .endObject().endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ BytesReference source = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .endObject()
+ .bytes();
+ ParsedDocument doc = docMapper.parse(SourceToParse.source(source).type("type").id("1").ttl(Long.MAX_VALUE));
+
+ assertThat(doc.rootDoc().getField("_ttl").fieldType().stored(), equalTo(true));
+ assertNotSame(IndexOptions.NONE, doc.rootDoc().getField("_ttl").fieldType().indexOptions());
+ assertThat(doc.rootDoc().getField("_ttl").tokenStream(docMapper.mappers().indexAnalyzer(), null), notNullValue());
+ }
+
+ @Test
+ public void testDefaultValues() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().string();
+ DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(TTLFieldMapper.Defaults.ENABLED_STATE.enabled));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.stored()));
+ assertThat(docMapper.TTLFieldMapper().fieldType().indexOptions(), equalTo(TTLFieldMapper.Defaults.TTL_FIELD_TYPE.indexOptions()));
+ }
+
+
+ @Test
+ public void testSetValuesBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes").field("store", "no")
+ .endObject()
+ .endObject().endObject().string();
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", indexSettings).mapperService().documentMapperParser().parse(mapping);
+ assertThat(docMapper.TTLFieldMapper().enabled(), equalTo(true));
+ assertThat(docMapper.TTLFieldMapper().fieldType().stored(), equalTo(true)); // store was never serialized, so it was always lost
+
+ }
+
+ @Test
+ public void testThatEnablingTTLFieldOnMergeWorks() throws Exception {
+ String mappingWithoutTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl);
+ DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl);
+
+ MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testThatChangingTTLKeepsMapperEnabled() throws Exception {
+ String mappingWithTtl = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", "yes")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("default", "1w")
+ .endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper initialMapper = parser.parse(mappingWithTtl);
+ DocumentMapper updatedMapper = parser.parse(updatedMapping);
+
+ MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(false));
+ assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testThatDisablingTTLReportsConflict() throws Exception {
+ String mappingWithTtl = getMappingWithTtlEnabled().string();
+ String mappingWithTtlDisabled = getMappingWithTtlDisabled().string();
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper initialMapper = parser.parse(mappingWithTtl);
+ DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled);
+
+ MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true);
+
+ assertThat(mergeResult.hasConflicts(), equalTo(true));
+ assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
+ }
+
+ @Test
+ public void testThatDisablingTTLReportsConflictOnCluster() throws Exception {
+ String mappingWithTtl = getMappingWithTtlEnabled().string();
+ String mappingWithTtlDisabled = getMappingWithTtlDisabled().string();
+ assertAcked(client().admin().indices().prepareCreate("testindex").addMapping("type", mappingWithTtl));
+ GetMappingsResponse mappingsBeforeUpdateResponse = client().admin().indices().prepareGetMappings("testindex").addTypes("type").get();
+ try {
+ client().admin().indices().preparePutMapping("testindex").setSource(mappingWithTtlDisabled).setType("type").get();
+ fail();
+ } catch (MergeMappingException mme) {
+ assertThat(mme.getDetailedMessage(), containsString("_ttl cannot be disabled once it was enabled."));
+ }
+ GetMappingsResponse mappingsAfterUpdateResponse = client().admin().indices().prepareGetMappings("testindex").addTypes("type").get();
+ assertThat(mappingsBeforeUpdateResponse.getMappings().get("testindex").get("type").source(), equalTo(mappingsAfterUpdateResponse.getMappings().get("testindex").get("type").source()));
+ }
+
+ @Test
+ public void testThatEnablingTTLAfterFirstDisablingWorks() throws Exception {
+ String mappingWithTtl = getMappingWithTtlEnabled().string();
+ String withTtlDisabled = getMappingWithTtlDisabled().string();
+ assertAcked(client().admin().indices().prepareCreate("testindex").addMapping("type", withTtlDisabled));
+ GetMappingsResponse mappingsAfterUpdateResponse = client().admin().indices().prepareGetMappings("testindex").addTypes("type").get();
+ assertThat(mappingsAfterUpdateResponse.getMappings().get("testindex").get("type").sourceAsMap().get("_ttl").toString(), equalTo("{enabled=false}"));
+ client().admin().indices().preparePutMapping("testindex").setSource(mappingWithTtl).setType("type").get();
+ mappingsAfterUpdateResponse = client().admin().indices().prepareGetMappings("testindex").addTypes("type").get();
+ assertThat(mappingsAfterUpdateResponse.getMappings().get("testindex").get("type").sourceAsMap().get("_ttl").toString(), equalTo("{enabled=true}"));
+ }
+
+ @Test
+ public void testNoConflictIfNothingSetAndDisabledLater() throws Exception {
+ IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
+ XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d");
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean());
+ assertFalse(mergeResult.hasConflicts());
+ }
+
+ @Test
+ public void testNoConflictIfNothingSetAndEnabledLater() throws Exception {
+ IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
+ XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean());
+ assertFalse(mergeResult.hasConflicts());
+ }
+
+ @Test
+ public void testMergeWithOnlyDefaultSet() throws Exception {
+ XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
+ IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled);
+ XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
+ assertFalse(mergeResult.hasConflicts());
+ CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
+ }
+
+ @Test
+ public void testMergeWithOnlyDefaultSetTtlDisabled() throws Exception {
+ XContentBuilder mappingWithTtlEnabled = getMappingWithTtlDisabled("7d");
+ IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled);
+ CompressedXContent mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
+ XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false);
+ assertFalse(mergeResult.hasConflicts());
+ CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
+ }
+
+ @Test
+ public void testThatSimulatedMergingLeavesStateUntouched() throws Exception {
+
+ //check if default ttl changed when simulate set to true
+ XContentBuilder mappingWithTtl = getMappingWithTtlEnabled("6d");
+ IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtl);
+ CompressedXContent mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d");
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true);
+ assertFalse(mergeResult.hasConflicts());
+ // make sure simulate flag actually worked - no mappings applied
+ CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
+
+ client().admin().indices().prepareDelete("testindex").get();
+ // check if enabled changed when simulate set to true
+ XContentBuilder mappingWithoutTtl = getMappingWithTtlDisabled();
+ indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
+ mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled();
+ mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true);
+ assertFalse(mergeResult.hasConflicts());
+ // make sure simulate flag actually worked - no mappings applied
+ mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
+
+ client().admin().indices().prepareDelete("testindex").get();
+ // check if enabled changed when simulate set to true
+ mappingWithoutTtl = getMappingWithTtlDisabled("6d");
+ indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
+ mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
+ mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true);
+ assertFalse(mergeResult.hasConflicts());
+ // make sure simulate flag actually worked - no mappings applied
+ mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
+
+ client().admin().indices().prepareDelete("testindex").get();
+ // check if switching simulate flag off works
+ mappingWithoutTtl = getMappingWithTtlDisabled("6d");
+ indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
+ mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
+ mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false);
+ assertFalse(mergeResult.hasConflicts());
+ // make sure simulate flag actually worked - mappings applied
+ mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
+
+ client().admin().indices().prepareDelete("testindex").get();
+ // check if switching simulate flag off works if nothing was applied in the beginning
+ indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
+ mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
+ mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false);
+ assertFalse(mergeResult.hasConflicts());
+ // make sure simulate flag actually worked - mappings applied
+ mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
+
+ }
+
+ public void testIncludeInObjectBackcompat() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject().endObject().string();
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ DocumentMapper docMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);
+
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().field("_ttl", "2d").endObject();
+ MappingMetaData mappingMetaData = new MappingMetaData(docMapper);
+ IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
+ request.process(MetaData.builder().build(), mappingMetaData, true, "test");
+
+ // _ttl in a document never worked, so backcompat is ignoring the field
+ assertEquals(-1, request.ttl());
+ assertNull(docMapper.parse("type", "1", doc.bytes()).rootDoc().get("_ttl"));
+ }
+
+ private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithTtlEnabled() throws IOException {
+ return getMappingWithTtlEnabled(null);
+ }
+
+ private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithTtlDisabled() throws IOException {
+ return getMappingWithTtlDisabled(null);
+ }
+
+ private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithTtlEnabled(String defaultValue) throws IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", true);
+ if (defaultValue != null) {
+ mapping.field("default", defaultValue);
+ }
+ return mapping.endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject();
+ }
+
+ private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithTtlDisabled(String defaultValue) throws IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl")
+ .field("enabled", false);
+ if (defaultValue != null) {
+ mapping.field("default", defaultValue);
+ }
+ return mapping.endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject();
+ }
+
+ private org.elasticsearch.common.xcontent.XContentBuilder getMappingWithOnlyTtlDefaultSet(String defaultValue) throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_ttl").field("default", defaultValue).endObject()
+ .startObject("properties").field("field").startObject().field("type", "string").endObject().endObject()
+ .endObject().endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java b/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
new file mode 100644
index 0000000000..db007e10c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseDocumentTypeLevelsTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests document parsing when the source is wrapped in the type name vs. not wrapped.
+ */
+public class ParseDocumentTypeLevelsTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testNoLevel() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevel() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValue() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("type", "value_type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+        // in this case, the leading "type" object is parsed as a regular sub-object (fields under "type."), while the other same-level fields are still indexed under their own names
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObject() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("test2", "value2")
+ .field("type", "value_type")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .field("type", "value_type")
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type"), equalTo("value_type"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject()
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject()
+ .bytes());
+
+        // when the "type" object is not the first field, it is parsed as a regular sub-object and its sibling fields keep their unprefixed names
+ assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("inner.inner_field"), equalTo("inner_value"));
+ }
+
+ @Test
+ public void testTypeLevelWithFieldTypeAsObjectNotFirst() throws Exception {
+ String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
+
+ DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(defaultMapping);
+
+ ParsedDocument doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder()
+ .startObject().startObject("type")
+ .field("test1", "value1")
+ .startObject("type").field("type_field", "type_value").endObject()
+ .field("test2", "value2")
+ .startObject("inner").field("inner_field", "inner_value").endObject()
+ .endObject().endObject()
+ .bytes());
+
+ assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value"));
+ assertThat(doc.rootDoc().get("type.test1"), equalTo("value1"));
+ assertThat(doc.rootDoc().get("type.test2"), equalTo("value2"));
+ assertThat(doc.rootDoc().get("type.inner.inner_field"), equalTo("inner_value"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java b/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
new file mode 100644
index 0000000000..072cc80271
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/typelevels/ParseMappingTypeLevelTests.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.typelevels;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+// TODO: move this test...it doesn't need to be by itself
+public class ParseMappingTypeLevelTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testTypeLevel() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_index").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
+ DocumentMapper mapper = parser.parse("type", mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.indexMapper().enabled(), equalTo(true));
+
+ mapper = parser.parse(mapping);
+ assertThat(mapper.type(), equalTo("type"));
+ assertThat(mapper.indexMapper().enabled(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterTests.java
new file mode 100644
index 0000000000..890db5e3fd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterTests.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.update;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+public class UpdateMappingOnClusterTests extends ElasticsearchIntegrationTest {
+
+ private static final String INDEX = "index";
+ private static final String TYPE = "type";
+
+
+ @Test
+ public void test_all_enabled() throws Exception {
+ XContentBuilder mapping = jsonBuilder().startObject().startObject("mappings").startObject(TYPE).startObject("_all").field("enabled", "false").endObject().endObject().endObject().endObject();
+ XContentBuilder mappingUpdate = jsonBuilder().startObject().startObject("_all").field("enabled", "true").endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ String errorMessage = "[_all] enabled is false now encountering true";
+ testConflict(mapping.string(), mappingUpdate.string(), errorMessage);
+ }
+
+ @Test
+ public void test_all_conflicts() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_create_index.json");
+ String mappingUpdate = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json");
+ String[] errorMessage = {"[_all] enabled is true now encountering false",
+ "[_all] cannot enable norms (`norms.enabled`)",
+ "[_all] has different store values",
+ "[_all] has different store_term_vector values",
+ "[_all] has different store_term_vector_offsets values",
+ "[_all] has different store_term_vector_positions values",
+ "[_all] has different store_term_vector_payloads values",
+ "[_all] has different analyzer",
+ "[_all] has different similarity"};
+ // fielddata and search_analyzer should not report conflict
+ testConflict(mapping, mappingUpdate, errorMessage);
+ }
+
+
+ @Test
+ public void test_all_with_default() throws Exception {
+ String defaultMapping = jsonBuilder().startObject().startObject("_default_")
+ .startObject("_all")
+ .field("enabled", false)
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("index").addMapping("_default_", defaultMapping).get();
+ String docMapping = jsonBuilder().startObject()
+ .startObject("doc")
+ .endObject()
+ .endObject().string();
+ PutMappingResponse response = client().admin().indices().preparePutMapping("index").setType("doc").setSource(docMapping).get();
+ assertTrue(response.isAcknowledged());
+ String docMappingUpdate = jsonBuilder().startObject().startObject("doc")
+ .startObject("properties")
+ .startObject("text")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+ response = client().admin().indices().preparePutMapping("index").setType("doc").setSource(docMappingUpdate).get();
+ assertTrue(response.isAcknowledged());
+ String docMappingAllExplicitEnabled = jsonBuilder().startObject()
+ .startObject("doc_all_enabled")
+ .startObject("_all")
+ .field("enabled", true)
+ .endObject()
+ .endObject()
+ .endObject().string();
+ response = client().admin().indices().preparePutMapping("index").setType("doc_all_enabled").setSource(docMappingAllExplicitEnabled).get();
+ assertTrue(response.isAcknowledged());
+
+ GetMappingsResponse mapping = client().admin().indices().prepareGetMappings("index").get();
+ HashMap props = (HashMap)mapping.getMappings().get("index").get("doc").getSourceAsMap().get("_all");
+ assertThat((Boolean)props.get("enabled"), equalTo(false));
+ props = (HashMap)mapping.getMappings().get("index").get("doc").getSourceAsMap().get("properties");
+ assertNotNull(props);
+ assertNotNull(props.get("text"));
+ props = (HashMap)mapping.getMappings().get("index").get("doc_all_enabled").getSourceAsMap().get("_all");
+ assertThat((Boolean)props.get("enabled"), equalTo(true));
+ props = (HashMap)mapping.getMappings().get("index").get("_default_").getSourceAsMap().get("_all");
+ assertThat((Boolean)props.get("enabled"), equalTo(false));
+
+ }
+
+ @Test
+ public void test_doc_valuesInvalidMapping() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("mappings").startObject(TYPE).startObject("_all").startObject("fielddata").field("format", "doc_values").endObject().endObject().endObject().endObject().endObject().string();
+ try {
+ prepareCreate(INDEX).setSource(mapping).get();
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values"));
+ }
+ }
+
+ @Test
+ public void test_doc_valuesInvalidMappingOnUpdate() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject(TYPE).startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject().string();
+ prepareCreate(INDEX).addMapping(TYPE, mapping).get();
+ String mappingUpdate = jsonBuilder().startObject().startObject(TYPE).startObject("_all").startObject("fielddata").field("format", "doc_values").endObject().endObject().endObject().endObject().string();
+ GetMappingsResponse mappingsBeforeUpdateResponse = client().admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).get();
+ try {
+ client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mappingUpdate).get();
+ fail();
+ } catch (MapperParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("[_all] is always tokenized and cannot have doc values"));
+ }
+ // make sure all nodes have same cluster state
+ compareMappingOnNodes(mappingsBeforeUpdateResponse);
+ }
+
+ // checks if the setting for timestamp and size are kept even if disabled
+ @Test
+ public void testDisabledSizeTimestampIndexDoNotLooseMappings() throws Exception {
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json");
+ prepareCreate(INDEX).addMapping(TYPE, mapping).get();
+ GetMappingsResponse mappingsBeforeGreen = client().admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).get();
+ ensureGreen(INDEX);
+ // make sure all nodes have same cluster state
+ compareMappingOnNodes(mappingsBeforeGreen);
+ }
+
+ protected void testConflict(String mapping, String mappingUpdate, String... errorMessages) throws InterruptedException {
+ assertAcked(prepareCreate(INDEX).setSource(mapping).get());
+ ensureGreen(INDEX);
+ GetMappingsResponse mappingsBeforeUpdateResponse = client().admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).get();
+ try {
+ client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mappingUpdate).get();
+ fail();
+ } catch (MergeMappingException e) {
+ for (String errorMessage : errorMessages) {
+ assertThat(e.getDetailedMessage(), containsString(errorMessage));
+ }
+ }
+ compareMappingOnNodes(mappingsBeforeUpdateResponse);
+
+ }
+
+ private void compareMappingOnNodes(GetMappingsResponse previousMapping) {
+ // make sure all nodes have same cluster state
+ for (Client client : cluster()) {
+ GetMappingsResponse currentMapping = client.admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).setLocal(true).get();
+ assertThat(previousMapping.getMappings().get(INDEX).get(TYPE).source(), equalTo(currentMapping.getMappings().get(INDEX).get(TYPE).source()));
+ }
+ }
+
+ @Test
+ public void testUpdateTimestamp() throws IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "lazy").field("format", "doc_values").endObject().field("store", "no").endObject()
+ .endObject().endObject();
+ client().admin().indices().prepareCreate("test").addMapping("type", mapping).get();
+ GetMappingsResponse appliedMappings = client().admin().indices().prepareGetMappings("test").get();
+ LinkedHashMap timestampMapping = (LinkedHashMap) appliedMappings.getMappings().get("test").get("type").getSourceAsMap().get("_timestamp");
+ assertThat((Boolean) timestampMapping.get("store"), equalTo(false));
+ assertThat((String)((LinkedHashMap) timestampMapping.get("fielddata")).get("loading"), equalTo("lazy"));
+ assertThat((String)((LinkedHashMap) timestampMapping.get("fielddata")).get("format"), equalTo("doc_values"));
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "no").endObject()
+ .endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping).get();
+ appliedMappings = client().admin().indices().prepareGetMappings("test").get();
+ timestampMapping = (LinkedHashMap) appliedMappings.getMappings().get("test").get("type").getSourceAsMap().get("_timestamp");
+ assertThat((Boolean) timestampMapping.get("store"), equalTo(false));
+ assertThat((String)((LinkedHashMap) timestampMapping.get("fielddata")).get("loading"), equalTo("eager"));
+ assertThat((String)((LinkedHashMap) timestampMapping.get("fielddata")).get("format"), equalTo("array"));
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10297")
+ public void testTimestampMergingConflicts() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject(TYPE)
+ .startObject("_timestamp").field("enabled", true)
+ .startObject("fielddata").field("format", "doc_values").endObject()
+ .field("store", "yes")
+ .field("index", "analyzed")
+ .field("path", "foo")
+ .field("default", "1970-01-01")
+ .endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).get();
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", false)
+ .startObject("fielddata").field("format", "array").endObject()
+ .field("store", "no")
+ .field("index", "no")
+ .field("path", "bar")
+ .field("default", "1970-01-02")
+ .endObject()
+ .endObject().endObject().string();
+ GetMappingsResponse mappingsBeforeUpdateResponse = client().admin().indices().prepareGetMappings(INDEX).addTypes(TYPE).get();
+ try {
+ client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mapping).get();
+ fail("This should result in conflicts when merging the mapping");
+ } catch (MergeMappingException e) {
+ String[] expectedConflicts = {"mapper [_timestamp] has different index values", "mapper [_timestamp] has different store values", "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02", "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar"};
+ for (String conflict : expectedConflicts) {
+ assertThat(e.getDetailedMessage(), containsString(conflict));
+ }
+ }
+ compareMappingOnNodes(mappingsBeforeUpdateResponse);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
new file mode 100644
index 0000000000..80b7cadce7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.update;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.MergeResult;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.LinkedHashMap;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+
+public class UpdateMappingTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void test_all_enabled_after_disabled() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", false).endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", true).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ testConflictWhileMergingAndMappingUnchanged(mapping, mappingUpdate);
+ }
+
+ @Test
+ public void test_all_disabled_after_enabled() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", true).endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", false).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ testConflictWhileMergingAndMappingUnchanged(mapping, mappingUpdate);
+ }
+
+ @Test
+ public void test_all_disabled_after_default_enabled() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("properties").startObject("some_text").field("type", "string").endObject().endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", false).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ testConflictWhileMergingAndMappingUnchanged(mapping, mappingUpdate);
+ }
+
+ @Test
+ public void test_all_enabled_after_enabled() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", true).endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", true).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ XContentBuilder expectedMapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_all").field("enabled", true).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject().endObject();
+ testNoConflictWhileMergingAndMappingChanged(mapping, mappingUpdate, expectedMapping);
+ }
+
+ @Test
+ public void test_all_disabled_after_disabled() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", false).endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_all").field("enabled", false).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject();
+ XContentBuilder expectedMapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("_all").field("enabled", false).endObject().startObject("properties").startObject("text").field("type", "string").endObject().endObject().endObject().endObject();
+ testNoConflictWhileMergingAndMappingChanged(mapping, mappingUpdate, expectedMapping);
+ }
+
+ private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping);
+ // simulate like in MetaDataMappingService#putMapping
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false);
+ // assure we have no conflicts
+ assertThat(mergeResult.buildConflicts().length, equalTo(0));
+ // make sure mappings applied
+ CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string()));
+ }
+
+ public void testConflictFieldsMapping(String fieldName) throws Exception {
+ //test store, ... all the parameters that are not to be changed just like in other fields
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject(fieldName).field("enabled", true).field("store", "no").endObject()
+ .endObject().endObject();
+ XContentBuilder mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject(fieldName).field("enabled", true).field("store", "yes").endObject()
+ .startObject("properties").startObject("text").field("type", "string").endObject().endObject()
+ .endObject().endObject();
+ testConflictWhileMergingAndMappingUnchanged(mapping, mappingUpdate);
+ }
+
+ protected void testConflictWhileMergingAndMappingUnchanged(XContentBuilder mapping, XContentBuilder mappingUpdate) throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping);
+ CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource();
+ // simulate like in MetaDataMappingService#putMapping
+ MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true);
+ // assure we have conflicts
+ assertThat(mergeResult.buildConflicts().length, equalTo(1));
+ // make sure simulate flag actually worked - no mappings applied
+ CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
+ assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate));
+ }
+
+ @Test
+ public void testIndexFieldParsing() throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
+ XContentBuilder indexMapping = XContentFactory.jsonBuilder();
+ boolean enabled = randomBoolean();
+ indexMapping.startObject()
+ .startObject("type")
+ .startObject("_index")
+ .field("enabled", enabled)
+ .endObject()
+ .endObject()
+ .endObject();
+ DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
+ assertThat(documentMapper.indexMapper().enabled(), equalTo(enabled));
+ documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
+ assertThat(documentMapper.indexMapper().enabled(), equalTo(enabled));
+ }
+
+ @Test
+ public void testTimestampParsing() throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
+ XContentBuilder indexMapping = XContentFactory.jsonBuilder();
+ boolean enabled = randomBoolean();
+ indexMapping.startObject()
+ .startObject("type")
+ .startObject("_timestamp")
+ .field("enabled", enabled)
+ .field("store", true)
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
+ assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
+ assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());
+ assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
+ documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
+ assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
+ assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
+ assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());
+ }
+
+ @Test
+ public void testSizeParsing() throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
+ XContentBuilder indexMapping = XContentFactory.jsonBuilder();
+ boolean enabled = randomBoolean();
+ indexMapping.startObject()
+ .startObject("type")
+ .startObject("_size")
+ .field("enabled", enabled)
+ .endObject()
+ .endObject()
+ .endObject();
+ DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(indexMapping.string()), true);
+ assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
+ assertTrue(documentMapper.sizeFieldMapper().fieldType().stored());
+ documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
+ assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
+ }
+
+ @Test
+ public void testSizeTimestampIndexParsing() throws IOException {
+ IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json");
+ DocumentMapper documentMapper = indexService.mapperService().parse("type", new CompressedXContent(mapping), true);
+ assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
+ documentMapper = indexService.mapperService().parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
+ assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
+ }
+
+ @Test
+ public void testDefaultApplied() throws IOException {
+ createIndex("test1", Settings.settingsBuilder().build());
+ createIndex("test2", Settings.settingsBuilder().build());
+ XContentBuilder defaultMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject(MapperService.DEFAULT_MAPPING).startObject("_size").field("enabled", true).endObject().endObject()
+ .endObject();
+ client().admin().indices().preparePutMapping().setType(MapperService.DEFAULT_MAPPING).setSource(defaultMapping).get();
+ XContentBuilder typeMapping = XContentFactory.jsonBuilder().startObject()
+ .startObject("type").startObject("_all").field("enabled", false).endObject().endObject()
+ .endObject();
+ client().admin().indices().preparePutMapping("test1").setType("type").setSource(typeMapping).get();
+ client().admin().indices().preparePutMapping("test1", "test2").setType("type").setSource(typeMapping).get();
+
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings("test2").get();
+ assertNotNull(response.getMappings().get("test2").get("type").getSourceAsMap().get("_all"));
+ assertFalse((Boolean) ((LinkedHashMap) response.getMappings().get("test2").get("type").getSourceAsMap().get("_all")).get("enabled"));
+ assertNotNull(response.getMappings().get("test2").get("type").getSourceAsMap().get("_size"));
+ assertTrue((Boolean)((LinkedHashMap)response.getMappings().get("test2").get("type").getSourceAsMap().get("_size")).get("enabled"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_create_index.json b/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_create_index.json
new file mode 100644
index 0000000000..2b9c42d50b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_create_index.json
@@ -0,0 +1,31 @@
+{
+ "mappings": {
+ "type": {
+ "_all": {
+ "store": true,
+ "store_term_vectors": true,
+ "store_term_vector_offsets": true,
+ "store_term_vector_positions": true,
+ "store_term_vector_payloads": true,
+ "omit_norms": true,
+ "analyzer": "standard",
+ "search_analyzer": "whitespace",
+ "similarity": "my_similarity",
+ "fielddata": {
+ "format": "fst"
+ }
+ }
+ }
+ },
+ "settings": {
+ "similarity": {
+ "my_similarity": {
+ "type": "DFR",
+ "basic_model": "g",
+ "after_effect": "l",
+ "normalization": "h2",
+ "normalization.h2.c": "3.0"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json b/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json
new file mode 100644
index 0000000000..252aafefb0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json
@@ -0,0 +1,19 @@
+{
+ "type": {
+ "_all": {
+ "store": false,
+ "enabled": false,
+ "store_term_vectors": false,
+ "store_term_vector_offsets": false,
+ "store_term_vector_positions": false,
+ "store_term_vector_payloads": false,
+ "omit_norms": false,
+ "analyzer": "whitespace",
+ "search_analyzer": "standard",
+ "similarity": "bm25",
+ "fielddata": {
+ "format": "paged_bytes"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json b/core/src/test/java/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json
new file mode 100644
index 0000000000..139f7bc344
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json
@@ -0,0 +1 @@
+{"type":{"_index":{"enabled":false},"_size":{"enabled":false},"_timestamp":{"enabled":false}}} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/core/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
new file mode 100644
index 0000000000..584a07df2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.index.LogByteSizeMergePolicy;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.hamcrest.Matchers.equalTo;
+
+public class MergePolicySettingsTest extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ @Test
+ public void testCompoundFileSettings() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+
+ assertThat(new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service).getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+ assertThat(new TieredMergePolicyProvider(createStore(build(true)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.5)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new TieredMergePolicyProvider(createStore(build(1.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("true")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("True")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("False")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build("false")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(false)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new TieredMergePolicyProvider(createStore(build(0.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service).getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(true)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.5)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(1.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("true")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("True")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("False")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build("false")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(false)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogByteSizeMergePolicyProvider(createStore(build(0.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ assertThat(new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service).getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(true)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.5)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.5));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(1.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("true")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("True")), service).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("False")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build("false")), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(false)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ assertThat(new LogDocMergePolicyProvider(createStore(build(0.0)), service).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+
+ }
+
+ @Test
+ public void testInvalidValue() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ try {
+ new LogDocMergePolicyProvider(createStore(build(-0.1)), service).getMergePolicy().getNoCFSRatio();
+ fail("exception expected");
+ } catch (IllegalArgumentException ex) {
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build(1.1)), service).getMergePolicy().getNoCFSRatio();
+ fail("exception expected");
+ } catch (IllegalArgumentException ex) {
+
+ }
+ try {
+ new LogDocMergePolicyProvider(createStore(build("Falsch")), service).getMergePolicy().getNoCFSRatio();
+ fail("exception expected");
+ } catch (IllegalArgumentException ex) {
+
+ }
+
+ }
+
+ @Test
+ public void testUpdateSettings() throws IOException {
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ TieredMergePolicyProvider mp = new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogByteSizeMergePolicyProvider mp = new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+
+ {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogDocMergePolicyProvider mp = new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(1.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(1.0));
+
+ service.refreshSettings(build(0.1));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+ service.refreshSettings(build(0.0));
+ assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.0));
+ }
+ }
+
+ public void testLogDocSizeMergePolicySettingsUpdate() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogDocMergePolicyProvider mp = new LogDocMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS);
+ service.refreshSettings(Settings.builder().put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2).build());
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2);
+
+ assertEquals(mp.getMergePolicy().getMinMergeDocs(), LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS);
+ service.refreshSettings(Settings.builder().put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS / 2).build());
+ assertEquals(mp.getMergePolicy().getMinMergeDocs(), LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS / 2);
+
+ assertTrue(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ service.refreshSettings(Settings.builder().put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_CALIBRATE_SIZE_BY_DELETES, false).build());
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogDocMergePolicy.DEFAULT_MERGE_FACTOR);
+ service.refreshSettings(Settings.builder().put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, LogDocMergePolicy.DEFAULT_MERGE_FACTOR * 2).build());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogDocMergePolicy.DEFAULT_MERGE_FACTOR * 2);
+
+ service.refreshSettings(EMPTY_SETTINGS); // update without the settings and see if we stick to the values
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2);
+ assertEquals(mp.getMergePolicy().getMinMergeDocs(), LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS / 2);
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR * 2);
+
+
+ service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ mp = new LogDocMergePolicyProvider(createStore(Settings.builder()
+ .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2)
+ .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR / 2)
+ .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_CALIBRATE_SIZE_BY_DELETES, false)
+ .put(LogDocMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_DOCS, LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS - 1)
+ .build()), service);
+
+
+ assertEquals(mp.getMergePolicy().getMinMergeDocs(), LogDocMergePolicy.DEFAULT_MIN_MERGE_DOCS - 1);
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR / 2);
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2);
+ }
+
+ public void testLogByteSizeMergePolicySettingsUpdate() throws IOException {
+ IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ LogByteSizeMergePolicyProvider mp = new LogByteSizeMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+
+ assertEquals(mp.getMergePolicy().getMaxMergeMB(), LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mbFrac(), 0.0d);
+ service.refreshSettings(Settings.builder().put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mb() / 2, ByteSizeUnit.MB)).build());
+ assertEquals(mp.getMergePolicy().getMaxMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mb() / 2, ByteSizeUnit.MB).mbFrac(), 0.0d);
+
+ assertEquals(mp.getMergePolicy().getMinMergeMB(), LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mbFrac(), 0.0d);
+ service.refreshSettings(Settings.builder().put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_SIZE, new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mb() + 1, ByteSizeUnit.MB)).build());
+ assertEquals(mp.getMergePolicy().getMinMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.0d);
+
+ assertTrue(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ service.refreshSettings(Settings.builder().put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_CALIBRATE_SIZE_BY_DELETES, false).build());
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR);
+ service.refreshSettings(Settings.builder().put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR / 2).build());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR / 2);
+
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS);
+ service.refreshSettings(Settings.builder().put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2).build());
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2);
+
+ service.refreshSettings(EMPTY_SETTINGS); // update without the settings and see if we stick to the values
+ assertEquals(mp.getMergePolicy().getMaxMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mb() / 2, ByteSizeUnit.MB).mbFrac(), 0.0d);
+ assertEquals(mp.getMergePolicy().getMinMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.0d);
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR / 2);
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS / 2);
+
+
+ service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+ mp = new LogByteSizeMergePolicyProvider(createStore(Settings.builder()
+ .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_DOCS, LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS * 2)
+ .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MERGE_FACTOR, LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR * 2)
+ .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_SIZE, new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mb() / 2, ByteSizeUnit.MB))
+ .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_CALIBRATE_SIZE_BY_DELETES, false)
+ .put(LogByteSizeMergePolicyProvider.INDEX_MERGE_POLICY_MIN_MERGE_SIZE, new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mb() + 1, ByteSizeUnit.MB))
+ .build()), service);
+
+
+ assertEquals(mp.getMergePolicy().getMaxMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SIZE.mb() / 2, ByteSizeUnit.MB).mbFrac(), 0.0d);
+ assertEquals(mp.getMergePolicy().getMinMergeMB(), new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE_SIZE.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.0d);
+ assertFalse(mp.getMergePolicy().getCalibrateSizeByDeletes());
+ assertEquals(mp.getMergePolicy().getMergeFactor(), LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR * 2);
+ assertEquals(mp.getMergePolicy().getMaxMergeDocs(), LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS * 2);
+ }
+
+    /**
+     * Verifies that a {@link TieredMergePolicyProvider} built from empty settings starts
+     * with the documented defaults, that each tiered merge-policy setting pushed through
+     * {@code IndexSettingsService.refreshSettings} is applied to the live merge policy,
+     * and that a subsequent refresh with empty settings does NOT reset the values
+     * (previously applied values stick).
+     */
+    public void testTieredMergePolicySettingsUpdate() throws IOException {
+        IndexSettingsService service = new IndexSettingsService(new Index("test"), EMPTY_SETTINGS);
+        TieredMergePolicyProvider mp = new TieredMergePolicyProvider(createStore(EMPTY_SETTINGS), service);
+        assertThat(mp.getMergePolicy().getNoCFSRatio(), equalTo(0.1));
+
+        // For each setting: assert the default, push an updated value, assert it took effect.
+        assertEquals(mp.getMergePolicy().getForceMergeDeletesPctAllowed(), TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build());
+        assertEquals(mp.getMergePolicy().getForceMergeDeletesPctAllowed(), TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
+
+        assertEquals(mp.getMergePolicy().getFloorSegmentMB(), TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.mbFrac(), 0);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT, new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.mb() + 1, ByteSizeUnit.MB)).build());
+        assertEquals(mp.getMergePolicy().getFloorSegmentMB(), new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.001);
+
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnce(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE -1 ).build());
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnce(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE-1);
+
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnceExplicit(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT -1 ).build());
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnceExplicit(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
+
+        assertEquals(mp.getMergePolicy().getMaxMergedSegmentMB(), TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.mbFrac(), 0.0001);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1)).build());
+        assertEquals(mp.getMergePolicy().getMaxMergedSegmentMB(), new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1).mbFrac(), 0.0001);
+
+        assertEquals(mp.getMergePolicy().getReclaimDeletesWeight(), TieredMergePolicyProvider.DEFAULT_RECLAIM_DELETES_WEIGHT, 0);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, TieredMergePolicyProvider.DEFAULT_RECLAIM_DELETES_WEIGHT + 1 ).build());
+        assertEquals(mp.getMergePolicy().getReclaimDeletesWeight(), TieredMergePolicyProvider.DEFAULT_RECLAIM_DELETES_WEIGHT + 1, 0);
+
+        assertEquals(mp.getMergePolicy().getSegmentsPerTier(), TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0);
+        service.refreshSettings(Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ).build());
+        assertEquals(mp.getMergePolicy().getSegmentsPerTier(), TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
+
+        service.refreshSettings(EMPTY_SETTINGS); // update without the settings and see if we stick to the values
+
+        // All previously pushed values must survive an empty refresh.
+        assertEquals(mp.getMergePolicy().getForceMergeDeletesPctAllowed(), TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
+        assertEquals(mp.getMergePolicy().getFloorSegmentMB(), new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.mb() + 1, ByteSizeUnit.MB).mbFrac(), 0.001);
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnce(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE-1);
+        assertEquals(mp.getMergePolicy().getMaxMergeAtOnceExplicit(), TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
+        assertEquals(mp.getMergePolicy().getMaxMergedSegmentMB(), new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.bytes() + 1).mbFrac(), 0.0001);
+        assertEquals(mp.getMergePolicy().getReclaimDeletesWeight(), TieredMergePolicyProvider.DEFAULT_RECLAIM_DELETES_WEIGHT + 1, 0);
+        assertEquals(mp.getMergePolicy().getSegmentsPerTier(), TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
+    }
+
+    /** Builds settings with the index compound-format key set to the given string value. */
+    public Settings build(String value) {
+        return Settings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+    }
+
+    /** Builds settings with the index compound-format key set to the given double value. */
+    public Settings build(double value) {
+        return Settings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+    }
+
+    /** Builds settings with the index compound-format key set to the given int value. */
+    public Settings build(int value) {
+        return Settings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+    }
+
+    /** Builds settings with the index compound-format key set to the given boolean value. */
+    public Settings build(boolean value) {
+        return Settings.builder().put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT, value).build();
+    }
+
+    /**
+     * Creates a {@link Store} for the test shard backed by an in-memory
+     * {@link RAMDirectory} with no merge throttling, using the given settings.
+     */
+    protected Store createStore(Settings settings) throws IOException {
+        final DirectoryService directoryService = new DirectoryService(shardId, EMPTY_SETTINGS) {
+            @Override
+            public Directory newDirectory() throws IOException {
+                return new RAMDirectory() ;
+            }
+
+            @Override
+            public long throttleTimeInNanos() {
+                // no throttling in tests
+                return 0;
+            }
+        };
+        return new Store(shardId, settings, directoryService, new DummyShardLock(shardId));
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/merge/policy/VersionFieldUpgraderTest.java b/core/src/test/java/org/elasticsearch/index/merge/policy/VersionFieldUpgraderTest.java
new file mode 100644
index 0000000000..3c66cbc3aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/merge/policy/VersionFieldUpgraderTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.merge.policy;
+
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.CodecReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.Numbers;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+/**
+ * Tests upgrading old document versions from _uid payloads to _version docvalues.
+ * Each test indexes documents whose _uid token carries the version as a payload,
+ * then checks how {@code VersionFieldUpgrader.wrap} exposes that version as
+ * numeric doc values.
+ */
+public class VersionFieldUpgraderTest extends ElasticsearchTestCase {
+
+    /** Simple test: one doc in the old format, check that it looks correct */
+    public void testUpgradeOneDocument() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
+
+        // add a document with a _uid having a payload of 3
+        Document doc = new Document();
+        Token token = new Token("1", 0, 1);
+        token.setPayload(new BytesRef(Numbers.longToBytes(3)));
+        doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token)));
+        iw.addDocument(doc);
+        iw.commit();
+
+        CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, true));
+        CodecReader upgraded = VersionFieldUpgrader.wrap(reader);
+        // we need to be upgraded, should be a different instance
+        assertNotSame(reader, upgraded);
+
+        // make sure we can see our numericdocvalues in fieldinfos
+        FieldInfo versionField = upgraded.getFieldInfos().fieldInfo(VersionFieldMapper.NAME);
+        assertNotNull(versionField);
+        assertEquals(DocValuesType.NUMERIC, versionField.getDocValuesType());
+        // should have a value of 3, and be visible in docsWithField
+        assertEquals(3, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(0));
+        assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(0));
+
+        // verify filterreader with checkindex
+        TestUtil.checkReader(upgraded);
+
+        reader.close();
+        iw.close();
+        dir.close();
+    }
+
+    /** test that we are a non-op if the segment already has the version field */
+    public void testAlreadyUpgraded() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
+
+        // add a document with a _uid having a payload of 3
+        Document doc = new Document();
+        Token token = new Token("1", 0, 1);
+        token.setPayload(new BytesRef(Numbers.longToBytes(3)));
+        doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token)));
+        // already carries the _version doc values field -> nothing to upgrade
+        doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 3));
+        iw.addDocument(doc);
+        iw.commit();
+
+        CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, true));
+        CodecReader upgraded = VersionFieldUpgrader.wrap(reader);
+        // we already upgraded: should be same instance
+        assertSame(reader, upgraded);
+
+        reader.close();
+        iw.close();
+        dir.close();
+    }
+
+    /** Test upgrading two documents */
+    public void testUpgradeTwoDocuments() throws Exception {
+        Directory dir = newDirectory();
+        IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(null));
+
+        // add a document with a _uid having a payload of 3
+        Document doc = new Document();
+        Token token = new Token("1", 0, 1);
+        token.setPayload(new BytesRef(Numbers.longToBytes(3)));
+        doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token)));
+        iw.addDocument(doc);
+
+        // second document with a _uid payload of 4
+        doc = new Document();
+        token = new Token("2", 0, 1);
+        token.setPayload(new BytesRef(Numbers.longToBytes(4)));
+        doc.add(new TextField(UidFieldMapper.NAME, new CannedTokenStream(token)));
+        iw.addDocument(doc);
+
+        iw.commit();
+
+        CodecReader reader = getOnlySegmentReader(DirectoryReader.open(iw, true));
+        CodecReader upgraded = VersionFieldUpgrader.wrap(reader);
+        // we need to be upgraded, should be a different instance
+        assertNotSame(reader, upgraded);
+
+        // make sure we can see our numericdocvalues in fieldinfos
+        FieldInfo versionField = upgraded.getFieldInfos().fieldInfo(VersionFieldMapper.NAME);
+        assertNotNull(versionField);
+        assertEquals(DocValuesType.NUMERIC, versionField.getDocValuesType());
+        // should have a values of 3 and 4, and be visible in docsWithField
+        assertEquals(3, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(0));
+        assertEquals(4, upgraded.getNumericDocValues(VersionFieldMapper.NAME).get(1));
+        assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(0));
+        assertTrue(upgraded.getDocsWithField(VersionFieldMapper.NAME).get(1));
+
+        // verify filterreader with checkindex
+        TestUtil.checkReader(upgraded);
+
+        reader.close();
+        iw.close();
+        dir.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
new file mode 100644
index 0000000000..b7acdf8c17
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+/** Regression tests for geo_shape query XContent serialization. */
+public class GeoShapeQueryBuilderTests extends ElasticsearchTestCase {
+
+    @Test // see #3878
+    public void testThatXContentSerializationInsideOfArrayWorks() throws Exception {
+        // Serializing a geo_shape query as an array element must not throw (issue #3878);
+        // the test passes if no exception escapes.
+        EnvelopeBuilder envelopeBuilder = ShapeBuilder.newEnvelope().topLeft(0, 0).bottomRight(10, 10);
+        GeoShapeQueryBuilder geoQuery = QueryBuilders.geoShapeQuery("searchGeometry", envelopeBuilder);
+        JsonXContent.contentBuilder().startArray().value(geoQuery).endArray();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeFormatTests.java b/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeFormatTests.java
new file mode 100644
index 0000000000..5111b36969
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeFormatTests.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.TestSearchContext;
+import org.joda.time.DateTime;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests that the {@code format} option on date range filters/queries is honored
+ * when parsing the range boundaries, and that a date that does not match the
+ * requested format raises a {@link QueryParsingException}.
+ */
+public class IndexQueryParserFilterDateRangeFormatTests extends ElasticsearchSingleNodeTest {
+
+    private Injector injector;
+    private IndexQueryParserService queryParser;
+
+    /**
+     * Creates the "test" index, registers the "person" mapping plus the dynamic
+     * mapping update derived from the sample document, and resolves the parser.
+     */
+    @Before
+    public void setup() throws IOException {
+        IndexService indexService = createIndex("test");
+        injector = indexService.injector();
+
+        MapperService mapperService = indexService.mapperService();
+        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+        mapperService.merge("person", new CompressedXContent(mapping), true);
+        ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+        assertNotNull(doc.dynamicMappingsUpdate());
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();
+        queryParser = injector.getInstance(IndexQueryParserService.class);
+    }
+
+    private IndexQueryParserService queryParser() throws IOException {
+        return this.queryParser;
+    }
+
+    @Test
+    public void testDateRangeFilterFormat() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_filter_format.json");
+        queryParser.parse(query).query();
+        // Sadly from NoCacheFilter, we can not access to the delegate filter so we can not check
+        // it's the one we are expecting
+
+        // Test Invalid format
+        query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_filter_format_invalid.json");
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            queryParser.parse(query).query();
+            fail("A Range Filter with a specific format but with an unexpected date should raise a QueryParsingException");
+        } catch (QueryParsingException e) {
+            // We expect it
+        } finally {
+            SearchContext.removeCurrent();
+        }
+    }
+
+    @Test
+    public void testDateRangeQueryFormat() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        // We test 01/01/2012 from gte and 2030 for lt
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_format.json");
+        Query parsedQuery;
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            parsedQuery = queryParser.parse(query).query();
+        } finally {
+            SearchContext.removeCurrent();; // NOTE(review): stray extra ';' — harmless empty statement
+        }
+        assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+
+        // Min value was 01/01/2012 (dd/MM/yyyy)
+        DateTime min = DateTime.parse("2012-01-01T00:00:00.000+00");
+        assertThat(((NumericRangeQuery) parsedQuery).getMin().longValue(), is(min.getMillis()));
+
+        // Max value was 2030 (yyyy)
+        DateTime max = DateTime.parse("2030-01-01T00:00:00.000+00");
+        assertThat(((NumericRangeQuery) parsedQuery).getMax().longValue(), is(max.getMillis()));
+
+        // Test Invalid format
+        query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_format_invalid.json");
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            queryParser.parse(query).query();
+            fail("A Range Query with a specific format but with an unexpected date should raise a QueryParsingException");
+        } catch (QueryParsingException e) {
+            // We expect it
+        } finally {
+            SearchContext.removeCurrent();
+        }
+    }
+
+    @Test
+    public void testDateRangeBoundaries() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        // Inclusive boundaries (gte/lte): both endpoints must be included.
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_boundaries_inclusive.json");
+        Query parsedQuery;
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            parsedQuery = queryParser.parse(query).query();
+        } finally {
+            SearchContext.removeCurrent();
+        }
+        assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+        NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+
+        DateTime min = DateTime.parse("2014-11-01T00:00:00.000+00");
+        assertThat(rangeQuery.getMin().longValue(), is(min.getMillis()));
+        assertTrue(rangeQuery.includesMin());
+
+        DateTime max = DateTime.parse("2014-12-08T23:59:59.999+00");
+        assertThat(rangeQuery.getMax().longValue(), is(max.getMillis()));
+        assertTrue(rangeQuery.includesMax());
+
+        // Exclusive boundaries (gt/lt): both endpoints must be excluded.
+        query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_boundaries_exclusive.json");
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            parsedQuery = queryParser.parse(query).query();
+        } finally {
+            SearchContext.removeCurrent();
+        }
+        assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+        rangeQuery = (NumericRangeQuery) parsedQuery;
+
+        min = DateTime.parse("2014-11-30T23:59:59.999+00");
+        assertThat(rangeQuery.getMin().longValue(), is(min.getMillis()));
+        assertFalse(rangeQuery.includesMin());
+
+        max = DateTime.parse("2014-12-08T00:00:00.000+00");
+        assertThat(rangeQuery.getMax().longValue(), is(max.getMillis()));
+        assertFalse(rangeQuery.includesMax());
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeTimezoneTests.java b/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeTimezoneTests.java
new file mode 100644
index 0000000000..fde771c545
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/IndexQueryParserFilterDateRangeTimezoneTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.TestSearchContext;
+import org.joda.time.DateTime;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Tests that the {@code time_zone} option on date range filters/queries shifts
+ * the parsed boundaries accordingly, and that specifying a time zone on a
+ * numeric (non-date) field raises a {@link QueryParsingException}.
+ */
+public class IndexQueryParserFilterDateRangeTimezoneTests extends ElasticsearchSingleNodeTest {
+
+    private Injector injector;
+    private IndexQueryParserService queryParser;
+
+    /**
+     * Creates the "test" index, registers the "person" mapping plus the dynamic
+     * mapping update derived from the sample document, and resolves the parser.
+     */
+    @Before
+    public void setup() throws IOException {
+        IndexService indexService = createIndex("test");
+        injector = indexService.injector();
+
+        MapperService mapperService = indexService.mapperService();
+        String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+        mapperService.merge("person", new CompressedXContent(mapping), true);
+        ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+        assertNotNull(doc.dynamicMappingsUpdate());
+        client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();
+        queryParser = injector.getInstance(IndexQueryParserService.class);
+    }
+
+    private IndexQueryParserService queryParser() throws IOException {
+        return this.queryParser;
+    }
+
+    @Test
+    public void testDateRangeFilterTimezone() throws IOException {
+        IndexQueryParserService queryParser = queryParser();
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_filter_timezone.json");
+        queryParser.parse(query).query();
+        // Sadly from NoCacheFilter, we can not access to the delegate filter so we can not check
+        // it's the one we are expecting
+
+        // A time_zone on a numeric field makes no sense and must be rejected.
+        query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_filter_timezone_numeric_field.json");
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            queryParser.parse(query).query();
+            fail("A Range Filter on a numeric field with a TimeZone should raise a QueryParsingException");
+        } catch (QueryParsingException e) {
+            // We expect it
+        } finally {
+            SearchContext.removeCurrent();
+        }
+    }
+
+    @Test
+    public void testDateRangeQueryTimezone() throws IOException {
+        // Captured before parsing: the query's upper bound resolves to "now".
+        long startDate = System.currentTimeMillis();
+
+        IndexQueryParserService queryParser = queryParser();
+        String query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_timezone.json");
+        Query parsedQuery;
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            parsedQuery = queryParser.parse(query).query();
+        } finally {
+            SearchContext.removeCurrent();
+        }
+        assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+
+        // Min value was 2012-01-01 in the query's +01:00 time zone, i.e. one hour
+        // before 2012-01-01T00:00:00 UTC.
+        DateTime min = DateTime.parse("2012-01-01T00:00:00.000+01:00");
+        // Max value is when we started the test. So it should be some ms from now
+        DateTime max = new DateTime(startDate);
+
+        assertThat(((NumericRangeQuery) parsedQuery).getMin().longValue(), is(min.getMillis()));
+
+        // We should not have a big difference here (should be some ms)
+        assertThat(((NumericRangeQuery) parsedQuery).getMax().longValue() - max.getMillis(), lessThanOrEqualTo(60000L));
+
+        // A time_zone on a numeric field makes no sense and must be rejected.
+        query = copyToStringFromClasspath("/org/elasticsearch/index/query/date_range_query_timezone_numeric_field.json");
+        try {
+            SearchContext.setCurrent(new TestSearchContext());
+            queryParser.parse(query).query();
+            fail("A Range Query on a numeric field with a TimeZone should raise a QueryParsingException");
+        } catch (QueryParsingException e) {
+            // We expect it
+        } finally {
+            SearchContext.removeCurrent();
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
new file mode 100644
index 0000000000..ad55c65938
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java
@@ -0,0 +1,2535 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.index.*;
+import org.apache.lucene.index.memory.MemoryIndex;
+import org.apache.lucene.queries.BoostingQuery;
+import org.apache.lucene.queries.ExtendedCommonTermsQuery;
+import org.apache.lucene.queries.TermsQuery;
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.search.spans.*;
+import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.CharsRefBuilder;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
+import org.elasticsearch.action.termvectors.*;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.MoreLikeThisQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.lucene.search.function.BoostScoreFunction;
+import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
+import org.elasticsearch.common.lucene.search.function.WeightFactorFunction;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.core.NumberFieldMapper;
+import org.elasticsearch.index.search.geo.GeoDistanceRangeQuery;
+import org.elasticsearch.index.search.geo.GeoPolygonQuery;
+import org.elasticsearch.index.search.geo.InMemoryGeoBoundingBoxQuery;
+import org.elasticsearch.index.search.morelikethis.MoreLikeThisFetchService;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.hamcrest.Matchers;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
+import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest {
+
+ private IndexQueryParserService queryParser;
+
+ private static class DummyQuery extends Query {
+
+ public boolean isFilter;
+
+ @Override
+ public String toString(String field) {
+ return getClass().getSimpleName();
+ }
+
+ }
+
+ public static class DummyQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ @Inject
+ public DummyQueryParser(Index index, Settings indexSettings) {
+ super(index, indexSettings);
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"dummy"};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ assertEquals(XContentParser.Token.END_OBJECT, parseContext.parser().nextToken());
+ DummyQuery query = new DummyQuery();
+ query.isFilter = parseContext.isFilter();
+ return query;
+ }
+
+ }
+
+ private static class DummyQueryBuilder extends QueryBuilder {
+ @Override
+ protected void doXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("dummy").endObject();
+ }
+ }
+
+ private static DummyQueryBuilder dummyQuery() {
+ return new DummyQueryBuilder();
+ }
+
+ @Before
+ public void setup() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put("index.queryparser.query.dummy.type", DummyQueryParser.class)
+ .put("index.cache.filter.type", "none")
+ .put("name", "SimpleIndexQueryParserTests")
+ .build();
+ IndexService indexService = createIndex("test", settings);
+ MapperService mapperService = indexService.mapperService();
+
+ String mapping = copyToStringFromClasspath("/org/elasticsearch/index/query/mapping.json");
+ mapperService.merge("person", new CompressedXContent(mapping), true);
+ ParsedDocument doc = mapperService.documentMapper("person").parse("person", "1", new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/query/data.json")));
+ assertNotNull(doc.dynamicMappingsUpdate());
+ client().admin().indices().preparePutMapping("test").setType("person").setSource(doc.dynamicMappingsUpdate().toString()).get();
+
+ queryParser = indexService.queryParserService();
+ }
+
+ private IndexQueryParserService queryParser() throws IOException {
+ return this.queryParser;
+ }
+
+ private BytesRef longToPrefixCoded(long val, int shift) {
+ BytesRefBuilder bytesRef = new BytesRefBuilder();
+ NumericUtils.longToPrefixCoded(val, shift, bytesRef);
+ return bytesRef.get();
+ }
+
+ @Test
+ public void testQueryStringBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryStringQuery("test").defaultField("content").phraseSlop(1)).query();
+
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ TermQuery termQuery = (TermQuery) parsedQuery;
+ assertThat(termQuery.getTerm(), equalTo(new Term("content", "test")));
+ }
+
+ @Test
+ public void testQueryStringBoostsBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ QueryStringQueryBuilder builder = queryStringQuery("field:boosted^2");
+ Query parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) parsedQuery).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(parsedQuery.getBoost(), equalTo(2.0f));
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(4.0f));
+
+ builder = queryStringQuery("((field:boosted^2) AND (field:foo^1.5))^3");
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("field", "boosted")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getBoost(), equalTo(2.0f));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("field", "foo")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getBoost(), equalTo(1.5f));
+ assertThat(parsedQuery.getBoost(), equalTo(3.0f));
+ builder.boost(2.0f);
+ parsedQuery = queryParser.parse(builder).query();
+ assertThat(parsedQuery.getBoost(), equalTo(6.0f));
+ }
+
+ @Test
+ public void testQueryStringFields1Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryStringQuery("test").field("content").field("name").useDisMax(false)).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFieldsMatch() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields-match.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery bQuery = (BooleanQuery) parsedQuery;
+ assertThat(bQuery.clauses().size(), equalTo(2));
+ assertEquals(Sets.newHashSet(new Term("name.first", "test"), new Term("name.last", "test")),
+ Sets.newHashSet(assertBooleanSubQuery(parsedQuery, TermQuery.class, 0).getTerm(),
+ assertBooleanSubQuery(parsedQuery, TermQuery.class, 1).getTerm()));
+ }
+
+ @Test
+ public void testQueryStringFields2Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryStringQuery("test").field("content").field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ }
+
+ @Test
+ public void testQueryStringFields3Builder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(queryStringQuery("test").field("content", 2.2f).field("name").useDisMax(true)).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testQueryStringFields3() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-fields3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ List<Query> disjuncts = disMaxQuery.getDisjuncts();
+ assertThat(((TermQuery) disjuncts.get(0)).getTerm(), equalTo(new Term("content", "test")));
+ assertThat((double) disjuncts.get(0).getBoost(), closeTo(2.2, 0.01));
+ assertThat(((TermQuery) disjuncts.get(1)).getTerm(), equalTo(new Term("name", "test")));
+ assertThat((double) disjuncts.get(1).getBoost(), closeTo(1, 0.01));
+ }
+
+ @Test
+ public void testQueryStringTimezone() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-timezone.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(TermRangeQuery.class));
+
+ try {
+ queryParser.parse(copyToStringFromClasspath("/org/elasticsearch/index/query/query-timezone-incorrect.json"));
+            fail("we expect a QueryParsingException as we are providing an unknown time_zone");
+ } catch (QueryParsingException e) {
+ // We expect this one
+ }
+ }
+
+ @Test
+ public void testQueryStringRegexp() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-regexp-max-determinized-states.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertTrue(regexpQuery.toString().contains("/foo*bar/"));
+ }
+
+ @Test
+ public void testQueryStringRegexpTooManyDeterminizedStates() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query-regexp-too-many-determinized-states.json");
+ try {
+ queryParser.parse(query).query();
+ fail("did not hit exception");
+ } catch (QueryParsingException qpe) {
+ // expected
+ assertTrue(qpe.getCause() instanceof TooComplexToDeterminizeException);
+ }
+ }
+
+ @Test
+ public void testMatchAllBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(matchAllQuery().boost(1.2f)).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAll() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/matchAll.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ MatchAllDocsQuery matchAllDocsQuery = (MatchAllDocsQuery) parsedQuery;
+ assertThat((double) matchAllDocsQuery.getBoost(), closeTo(1.2, 0.01));
+ }
+
+ @Test
+ public void testMatchAllEmpty1() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+ }
+
+ @Test
+ public void testMatchAllEmpty2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match_all_empty2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, equalTo(Queries.newMatchAllQuery()));
+ assertThat(parsedQuery, not(sameInstance(Queries.newMatchAllQuery())));
+
+ }
+
+ @Test
+ public void testStarColonStar() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/starColonStar.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ }
+
+ @Test
+ public void testDisMaxBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(disMaxQuery().boost(1.2f).tieBreaker(0.7f).add(termQuery("name.first", "first")).add(termQuery("name.last", "last"))).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+ Query secondsQ = disjuncts.get(1);
+ assertThat(secondsQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) secondsQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+ assertThat((double) disjunctionMaxQuery.getBoost(), closeTo(1.2, 0.01));
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(2));
+
+ Query firstQ = disjuncts.get(0);
+ assertThat(firstQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) firstQ).getTerm(), equalTo(new Term("name.first", "first")));
+
+ Query secondsQ = disjuncts.get(1);
+ assertThat(secondsQ, instanceOf(TermQuery.class));
+ assertThat(((TermQuery) secondsQ).getTerm(), equalTo(new Term("name.last", "last")));
+ }
+
+ @Test
+ public void testDisMax2() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/disMax2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ DisjunctionMaxQuery disjunctionMaxQuery = (DisjunctionMaxQuery) parsedQuery;
+
+ List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
+ assertThat(disjuncts.size(), equalTo(1));
+
+ PrefixQuery firstQ = (PrefixQuery) disjuncts.get(0);
+        // the single dis_max disjunct is a prefix query on the string field name.first
+ assertThat(firstQ.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) firstQ.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).buildAsBytes()).query();
+ TermQuery fieldQuery = unwrapTermQuery(parsedQuery);
+ assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l)));
+ }
+
+ @Test
+ public void testTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term.json");
+ TermQuery fieldQuery = unwrapTermQuery(queryParser.parse(query).query());
+ assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l)));
+ }
+
+ @Test(expected = QueryParsingException.class)
+ public void testTermQueryArrayInvalid() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-array-invalid.json");
+ unwrapTermQuery(queryParser.parse(query).query());
+ }
+
+ private static TermQuery unwrapTermQuery(Query q) {
+ assertThat(q, instanceOf(TermQuery.class));
+ return (TermQuery) q;
+ }
+
+ @Test
+ public void testFuzzyQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFieldsBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(fuzzyQuery("name.first", "sh").fuzziness(Fuzziness.fromSimilarity(0.1f)).prefixLength(1).boost(2.0f).buildAsBytes()).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
+ assertThat(fuzzyQuery.getTerm(), equalTo(new Term("name.first", "sh")));
+ assertThat(fuzzyQuery.getMaxEdits(), equalTo(FuzzyQuery.floatToEdits(0.1f, "sh".length())));
+ assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
+ assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testFuzzyQueryWithFields2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fuzzy-with-fields2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fuzzyQuery.getMin().longValue(), equalTo(7l));
+ assertThat(fuzzyQuery.getMax().longValue(), equalTo(17l));
+ }
+
+ @Test
+ public void testTermWithBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+
+ Query parsedQuery = queryParser.parse(termQuery("age", 34).boost(2.0f)).query();
+ TermQuery fieldQuery = unwrapTermQuery(parsedQuery);
+ assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l)));
+ assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ private BytesRef indexedValueForSearch(long value) {
+ BytesRefBuilder bytesRef = new BytesRefBuilder();
+ NumericUtils.longToPrefixCoded(value, 0, bytesRef); // 0 because of
+ // exact
+ // match
+ return bytesRef.get();
+ }
+
+ @Test
+ public void testTermWithBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ TermQuery fieldQuery = unwrapTermQuery(parsedQuery);
+ assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l)));
+ assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a string field, so the prefix term is used verbatim
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a string field, so the prefix term is used verbatim
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ }
+
+ @Test
+ public void testPrefixBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+        // name.first is a string field, so the prefix term is used verbatim
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testPrefiFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), prefixQuery("name.first", "sh"))).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new PrefixQuery(new Term("name.first", "sh")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testPrefiFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new PrefixQuery(new Term("name.first", "sh")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testPrefixNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new PrefixQuery(new Term("name.first", "sh")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testPrefixQueryBoostQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("name.first", "sh").boost(2.0f)).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/prefix-with-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("name.first", "sh")));
+ assertThat((double) prefixQuery.getBoost(), closeTo(2.0, 0.01));
+ }
+
+ @Test
+ public void testPrefixQueryWithUnknownField() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(prefixQuery("unknown", "sh")).query();
+ assertThat(parsedQuery, instanceOf(PrefixQuery.class));
+ PrefixQuery prefixQuery = (PrefixQuery) parsedQuery;
+ assertThat(prefixQuery.getPrefix(), equalTo(new Term("unknown", "sh")));
+ assertThat(prefixQuery.getRewriteMethod(), notNullValue());
+ }
+
+ @Test
+ public void testRegexpQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(regexpQuery("name.first", "s.*y")).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpQueryWithMaxDeterminizedStates() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-max-determinized-states.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ }
+
+ @Test
+ public void testRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new RegexpQuery(new Term("name.first", "s.*y")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testRegexpFilteredQueryWithMaxDeterminizedStates() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-max-determinized-states.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new RegexpQuery(new Term("name.first", "s.*y")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testNamedRegexpFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new RegexpQuery(new Term("name.first", "s.*y")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new RegexpQuery(new Term("name.first", "s.*y")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testNamedAndCachedRegexpWithFlagsFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new RegexpQuery(new Term("name.first", "s.*y")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testRegexpBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/regexp-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(RegexpQuery.class));
+ RegexpQuery regexpQuery = (RegexpQuery) parsedQuery;
+ assertThat(regexpQuery.getField(), equalTo("name.first"));
+ assertThat(regexpQuery.getBoost(), equalTo(1.2f));
+ }
+
+ @Test
+ public void testWildcardQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(wildcardQuery("name.first", "sh*")).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ }
+
+ @Test
+ public void testWildcardBoostQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/wildcard-boost.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(WildcardQuery.class));
+ WildcardQuery wildcardQuery = (WildcardQuery) parsedQuery;
+ assertThat(wildcardQuery.getTerm(), equalTo(new Term("name.first", "sh*")));
+ assertThat((double) wildcardQuery.getBoost(), closeTo(1.2, 0.00001));
+ }
+
+ @Test
+ public void testRangeQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(rangeQuery("age").from(23).to(54).includeLower(true).includeUpper(false)).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRange2Query() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery rangeQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(rangeQuery.getField(), equalTo("age"));
+ assertThat(rangeQuery.getMin().intValue(), equalTo(23));
+ assertThat(rangeQuery.getMax().intValue(), equalTo(54));
+ assertThat(rangeQuery.includesMin(), equalTo(true));
+ assertThat(rangeQuery.includesMax(), equalTo(false));
+ }
+
+ @Test
+ public void testRangeFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeQuery("age").from(23).to(54).includeLower(true).includeUpper(false))).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ NumericRangeQuery.newLongRange("age", 23L, 54L, true, false));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testRangeFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ NumericRangeQuery.newLongRange("age", 23L, 54L, true, false));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testRangeNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/range-filter-named.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ NumericRangeQuery.newLongRange("age", 23L, 54L, true, false));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testBoolFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolQuery().must(termQuery("name.first", "shay1")).must(termQuery("name.first", "shay4")).mustNot(termQuery("name.first", "shay2")).should(termQuery("name.first", "shay3")))).query();
+
+ BooleanQuery filter = new BooleanQuery();
+ filter.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ filter.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ filter.add(new TermQuery(new Term("name.first", "shay2")), Occur.MUST_NOT);
+ filter.add(new TermQuery(new Term("name.first", "shay3")), Occur.SHOULD);
+ filter.setMinimumNumberShouldMatch(1);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ filter);
+ assertEquals(expected, parsedQuery);
+ }
+
+
+ @Test
+ public void testBoolFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery filter = new BooleanQuery();
+ filter.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ filter.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ filter.add(new TermQuery(new Term("name.first", "shay2")), Occur.MUST_NOT);
+ filter.add(new TermQuery(new Term("name.first", "shay3")), Occur.SHOULD);
+ filter.setMinimumNumberShouldMatch(1);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ filter);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testAndFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andQuery(termQuery("name.first", "shay1"), termQuery("name.first", "shay4")))).query();
+ BooleanQuery and = new BooleanQuery();
+ and.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ and.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ ConstantScoreQuery expected = new ConstantScoreQuery(and);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testAndFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery and = new BooleanQuery();
+ and.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ and.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ and);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testAndNamedFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery and = new BooleanQuery();
+ and.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ and.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ and);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testAndFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery and = new BooleanQuery();
+ and.add(new TermQuery(new Term("name.first", "shay1")), Occur.MUST);
+ and.add(new TermQuery(new Term("name.first", "shay4")), Occur.MUST);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ and);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testOrFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orQuery(termQuery("name.first", "shay1"), termQuery("name.first", "shay4")))).query();
+ BooleanQuery or = new BooleanQuery();
+ or.add(new TermQuery(new Term("name.first", "shay1")), Occur.SHOULD);
+ or.add(new TermQuery(new Term("name.first", "shay4")), Occur.SHOULD);
+ ConstantScoreQuery expected = new ConstantScoreQuery(or);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testOrFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery or = new BooleanQuery();
+ or.add(new TermQuery(new Term("name.first", "shay1")), Occur.SHOULD);
+ or.add(new TermQuery(new Term("name.first", "shay4")), Occur.SHOULD);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ or);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testOrFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ BooleanQuery or = new BooleanQuery();
+ or.add(new TermQuery(new Term("name.first", "shay1")), Occur.SHOULD);
+ or.add(new TermQuery(new Term("name.first", "shay4")), Occur.SHOULD);
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ or);
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testNotFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notQuery(termQuery("name.first", "shay1")))).query();
+ ConstantScoreQuery expected = new ConstantScoreQuery(Queries.not(new TermQuery(new Term("name.first", "shay1"))));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testNotFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ Queries.not(new TermQuery(new Term("name.first", "shay1"))));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testNotFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ Queries.not(new TermQuery(new Term("name.first", "shay1"))));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testNotFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/not-filter3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ Queries.not(new TermQuery(new Term("name.first", "shay1"))));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testBoostingQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boostingQuery().positive(termQuery("field1", "value1")).negative(termQuery("field1", "value2")).negativeBoost(0.2f)).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testBoostingQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/boosting-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BoostingQuery.class));
+ }
+
+ @Test
+ public void testQueryStringFuzzyNumeric() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(NumericRangeQuery.class));
+ NumericRangeQuery fuzzyQuery = (NumericRangeQuery) parsedQuery;
+ assertThat(fuzzyQuery.getMin().longValue(), equalTo(12l));
+ assertThat(fuzzyQuery.getMax().longValue(), equalTo(12l));
+ }
+
+ @Test
+ public void testBoolQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(boolQuery().must(termQuery("content", "test1")).must(termQuery("content", "test4")).mustNot(termQuery("content", "test2")).should(termQuery("content", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+
+ @Test
+ public void testBoolQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(4));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("content", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("content", "test4")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.MUST));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("content", "test2")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.MUST_NOT));
+
+ assertThat(((TermQuery) clauses[3].getQuery()).getTerm(), equalTo(new Term("content", "test3")));
+ assertThat(clauses[3].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("shay", "test"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(2));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "shay")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testTermsQueryWithMultipleFields() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = XContentFactory.jsonBuilder().startObject()
+ .startObject("terms").array("foo", 123).array("bar", 456).endObject()
+ .endObject().string();
+ try {
+ queryParser.parse(query).query();
+ fail();
+ } catch (QueryParsingException ex) {
+ assertThat(ex.getMessage(), equalTo("[terms] query does not support multiple fields"));
+ }
+ }
+
+ @Test
+ public void testTermsFilterWithMultipleFields() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = XContentFactory.jsonBuilder().startObject()
+ .startObject("filtered")
+ .startObject("query").startObject("match_all").endObject().endObject()
+ .startObject("filter").startObject("terms").array("foo", 123).array("bar", 456).endObject().endObject()
+ .endObject().string();
+ try {
+ queryParser.parse(query).query();
+ fail();
+ } catch (QueryParsingException ex) {
+ assertThat(ex.getMessage(), equalTo("[terms] query does not support multiple fields"));
+ }
+ }
+
+
+
+ @Test
+ public void testInQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(termsQuery("name.first", Lists.newArrayList("test1", "test2", "test3"))).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ BooleanClause[] clauses = booleanQuery.getClauses();
+
+ assertThat(clauses.length, equalTo(3));
+
+ assertThat(((TermQuery) clauses[0].getQuery()).getTerm(), equalTo(new Term("name.first", "test1")));
+ assertThat(clauses[0].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[1].getQuery()).getTerm(), equalTo(new Term("name.first", "test2")));
+ assertThat(clauses[1].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+
+ assertThat(((TermQuery) clauses[2].getQuery()).getTerm(), equalTo(new Term("name.first", "test3")));
+ assertThat(clauses[2].getOccur(), equalTo(BooleanClause.Occur.SHOULD));
+ }
+
+ @Test
+ public void testFilteredQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termQuery("name.last", "banon"))).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testFilteredQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testFilteredQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testFilteredQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ NumericRangeQuery.newLongRange("age", 23L, 54L, true, false));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testFilteredQuery4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/filtered-query4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expectedQuery = new WildcardQuery(new Term("name.first", "sh*"));
+ expectedQuery.setBoost(1.1f);
+ Query expected = Queries.filtered(
+ expectedQuery,
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testTermFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testTermNamedFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermQuery(new Term("name.last", "banon")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testTermsFilterQueryBuilder() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), termsQuery("name.last", "banon", "kimchy"))).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermsQuery("name.last", new BytesRef("banon"), new BytesRef("kimchy")));
+ assertEquals(expected, parsedQuery);
+ }
+
+
+ @Test
+ public void testTermsFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermsQuery("name.last", new BytesRef("banon"), new BytesRef("kimchy")));
+ assertEquals(expected, parsedQuery);
+ }
+
+ @Test
+ public void testTermsWithNameFilterQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/terms-filter-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ Query expected = Queries.filtered(
+ new TermQuery(new Term("name.first", "shay")),
+ new TermsQuery("name.last", new BytesRef("banon"), new BytesRef("kimchy")));
+ assertEquals(expected, parsedQuery.query());
+ }
+
+ @Test
+ public void testConstantScoreQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(constantScoreQuery(termQuery("name.last", "banon"))).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ assertThat(getTerm(constantScoreQuery.getQuery()), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testConstantScoreQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/constantScore-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ assertThat(getTerm(constantScoreQuery.getQuery()), equalTo(new Term("name.last", "banon")));
+ }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScore() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(termQuery("name.last", "banon"), factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(((TermQuery) functionScoreQuery.getSubQuery()).getTerm(), equalTo(new Term("name.last", "banon")));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testCustomBoostFactorQueryBuilder_withFunctionScoreWithoutQueryGiven() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(functionScoreQuery(factorFunction(1.3f))).query();
+ assertThat(parsedQuery, instanceOf(FunctionScoreQuery.class));
+ FunctionScoreQuery functionScoreQuery = (FunctionScoreQuery) parsedQuery;
+ assertThat(functionScoreQuery.getSubQuery() instanceof MatchAllDocsQuery, equalTo(true));
+ assertThat((double) ((BoostScoreFunction) functionScoreQuery.getFunction()).getBoost(), closeTo(1.3, 0.001));
+ }
+
+ @Test
+ public void testSpanTermQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanTermQuery("age", 34)).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanTermQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanTerm.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanTermQuery.class));
+ SpanTermQuery termQuery = (SpanTermQuery) parsedQuery;
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(termQuery.getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ }
+
+ @Test
+ public void testSpanNotQueryBuilder() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ Query parsedQuery = queryParser.parse(spanNotQuery().include(spanTermQuery("age", 34)).exclude(spanTermQuery("age", 35))).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanNotQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNot.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNotQuery.class));
+ SpanNotQuery spanNotQuery = (SpanNotQuery) parsedQuery;
+ // since age is automatically registered in data, we encode it as numeric
+ assertThat(((SpanTermQuery) spanNotQuery.getInclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ }
+
+ @Test
+ public void testSpanWithinQueryBuilder() throws IOException {
+ // Build span_within via the builder API and compare against the expected Lucene query.
+ IndexQueryParserService parserService = queryParser();
+ SpanTermQuery big = new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0)));
+ SpanTermQuery little = new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)));
+ Query expectedQuery = new SpanWithinQuery(big, little);
+ Query actualQuery = parserService.parse(spanWithinQuery()
+ .big(spanTermQuery("age", 34))
+ .little(spanTermQuery("age", 35)))
+ .query();
+ assertEquals(expectedQuery, actualQuery);
+ }
+
+ @Test
+ public void testSpanWithinQueryParser() throws IOException {
+ // Parse the span_within JSON fixture and compare against the expected Lucene query.
+ IndexQueryParserService parserService = queryParser();
+ SpanTermQuery big = new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0)));
+ SpanTermQuery little = new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)));
+ Query expectedQuery = new SpanWithinQuery(big, little);
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanWithin.json");
+ assertEquals(expectedQuery, parserService.parse(json).query());
+ }
+
+ @Test
+ public void testSpanContainingQueryBuilder() throws IOException {
+ // Build span_containing via the builder API and compare against the expected Lucene query.
+ IndexQueryParserService parserService = queryParser();
+ SpanTermQuery big = new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0)));
+ SpanTermQuery little = new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)));
+ Query expectedQuery = new SpanContainingQuery(big, little);
+ Query actualQuery = parserService.parse(spanContainingQuery()
+ .big(spanTermQuery("age", 34))
+ .little(spanTermQuery("age", 35)))
+ .query();
+ assertEquals(expectedQuery, actualQuery);
+ }
+
+ @Test
+ public void testSpanContainingQueryParser() throws IOException {
+ // Parse the span_containing JSON fixture and compare against the expected Lucene query.
+ IndexQueryParserService parserService = queryParser();
+ SpanTermQuery big = new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0)));
+ SpanTermQuery little = new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)));
+ Query expectedQuery = new SpanContainingQuery(big, little);
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanContaining.json");
+ assertEquals(expectedQuery, parserService.parse(json).query());
+ }
+
+ @Test
+ public void testSpanFirstQueryBuilder() throws IOException {
+ // Build span_first via the builder API and verify match term and end position.
+ IndexQueryParserService parserService = queryParser();
+ Query result = parserService.parse(spanFirstQuery(spanTermQuery("age", 34), 12)).query();
+ assertThat(result, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery firstQuery = (SpanFirstQuery) result;
+ // numeric "age" field: term is the prefix-coded long
+ assertThat(((SpanTermQuery) firstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(firstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanFirstQuery() throws IOException {
+ // Parse the span_first JSON fixture and verify match term and end position.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFirst.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanFirstQuery.class));
+ SpanFirstQuery firstQuery = (SpanFirstQuery) result;
+ // numeric "age" field: term is the prefix-coded long
+ assertThat(((SpanTermQuery) firstQuery.getMatch()).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(firstQuery.getEnd(), equalTo(12));
+ }
+
+ @Test
+ public void testSpanNearQueryBuilder() throws IOException {
+ // Build a three-clause span_near via the builder API and verify clauses plus ordering flag.
+ IndexQueryParserService parserService = queryParser();
+ Query result = parserService.parse(spanNearQuery()
+ .clause(spanTermQuery("age", 34))
+ .clause(spanTermQuery("age", 35))
+ .clause(spanTermQuery("age", 36))
+ .slop(12).inOrder(false).collectPayloads(false)).query();
+ assertThat(result, instanceOf(SpanNearQuery.class));
+ SpanNearQuery nearQuery = (SpanNearQuery) result;
+ assertThat(nearQuery.getClauses().length, equalTo(3));
+ // clauses carry the prefix-coded values 34, 35, 36 in order
+ for (int i = 0; i < 3; i++) {
+ assertThat(((SpanTermQuery) nearQuery.getClauses()[i]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34 + i, 0))));
+ }
+ assertThat(nearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ public void testSpanNearQuery() throws IOException {
+ // Parse the span_near JSON fixture and verify clauses plus ordering flag.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanNear.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanNearQuery.class));
+ SpanNearQuery nearQuery = (SpanNearQuery) result;
+ assertThat(nearQuery.getClauses().length, equalTo(3));
+ // clauses carry the prefix-coded values 34, 35, 36 in order
+ for (int i = 0; i < 3; i++) {
+ assertThat(((SpanTermQuery) nearQuery.getClauses()[i]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34 + i, 0))));
+ }
+ assertThat(nearQuery.isInOrder(), equalTo(false));
+ }
+
+ @Test
+ // Verifies parsing of a span_near whose third clause is a field_masking_span:
+ // the masked clause pretends to be on "age" but actually wraps a term on "age_1".
+ public void testFieldMaskingSpanQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/spanFieldMaskingTerm.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanNearQuery.class));
+ SpanNearQuery spanNearQuery = (SpanNearQuery) parsedQuery;
+ assertThat(spanNearQuery.getClauses().length, equalTo(3));
+ // first two clauses are plain numeric span_term queries on "age"
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[0]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34, 0))));
+ assertThat(((SpanTermQuery) spanNearQuery.getClauses()[1]).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0))));
+ // the masked clause keeps its raw string term "36" on "age_1" — presumably because
+ // "age_1" is unmapped, so no numeric encoding is applied; confirm against the fixture/mapping
+ assertThat(((SpanTermQuery) ((FieldMaskingSpanQuery) spanNearQuery.getClauses()[2]).getMaskedQuery()).getTerm(), equalTo(new Term("age_1", "36")));
+ assertThat(spanNearQuery.isInOrder(), equalTo(false));
+ }
+
+
+ @Test
+ public void testSpanOrQueryBuilder() throws IOException {
+ // Build a three-clause span_or via the builder API and verify each clause term.
+ IndexQueryParserService parserService = queryParser();
+ Query result = parserService.parse(spanOrQuery()
+ .clause(spanTermQuery("age", 34))
+ .clause(spanTermQuery("age", 35))
+ .clause(spanTermQuery("age", 36))).query();
+ assertThat(result, instanceOf(SpanOrQuery.class));
+ SpanOrQuery orQuery = (SpanOrQuery) result;
+ assertThat(orQuery.getClauses().length, equalTo(3));
+ for (int i = 0; i < 3; i++) {
+ assertThat(((SpanTermQuery) orQuery.getClauses()[i]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34 + i, 0))));
+ }
+ }
+
+ @Test
+ public void testSpanOrQuery() throws IOException {
+ // Parse the span_or JSON fixture and verify each clause term.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanOrQuery.class));
+ SpanOrQuery orQuery = (SpanOrQuery) result;
+ assertThat(orQuery.getClauses().length, equalTo(3));
+ for (int i = 0; i < 3; i++) {
+ assertThat(((SpanTermQuery) orQuery.getClauses()[i]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34 + i, 0))));
+ }
+ }
+
+ @Test
+ public void testSpanOrQuery2() throws IOException {
+ // Alternate span_or JSON syntax (spanOr2.json) must parse to the same query shape.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/spanOr2.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanOrQuery.class));
+ SpanOrQuery orQuery = (SpanOrQuery) result;
+ assertThat(orQuery.getClauses().length, equalTo(3));
+ for (int i = 0; i < 3; i++) {
+ assertThat(((SpanTermQuery) orQuery.getClauses()[i]).getTerm(), equalTo(new Term("age", longToPrefixCoded(34 + i, 0))));
+ }
+ }
+
+ @Test
+ // Verifies that a span_multi wrapping a wildcard query parses into a
+ // SpanMultiTermQueryWrapper around the expected WildcardQuery (boost included).
+ public void testSpanMultiTermWildcardQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-wildcard.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ // boost 1.08 mirrors the value in the JSON fixture; equality below includes the boost
+ WildcardQuery expectedWrapped = new WildcardQuery(new Term("user", "ki*y"));
+ expectedWrapped.setBoost(1.08f);
+ // unchecked cast: parsedQuery's generic parameter is unknown at runtime
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermPrefixQuery() throws IOException {
+ // span_multi wrapping a prefix query must parse into the equivalent wrapper.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-prefix.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanMultiTermQueryWrapper.class));
+ // expected inner query, boost taken from the fixture
+ PrefixQuery inner = new PrefixQuery(new Term("user", "ki"));
+ inner.setBoost(1.08f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> actual = (SpanMultiTermQueryWrapper<MultiTermQuery>) result;
+ assertThat(actual, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(inner)));
+ }
+
+ @Test
+ public void testSpanMultiTermFuzzyTermQuery() throws IOException {
+ // span_multi wrapping a fuzzy term query: only the wrapped field is asserted here.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanMultiTermQueryWrapper.class));
+ SpanMultiTermQueryWrapper<MultiTermQuery> actual = (SpanMultiTermQueryWrapper<MultiTermQuery>) result;
+ assertThat(actual.getField(), equalTo("user"));
+ }
+
+ @Test
+ // span_multi wrapping a fuzzy numeric range: the parsed query must equal a wrapper
+ // around a NumericRangeQuery with bounds 7..17 (both inclusive) and boost 2.0.
+ public void testSpanMultiTermFuzzyRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ // use the uppercase 'L' literal suffix — lowercase 'l' is easily misread as the digit 1
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, 7L, 17L, true, true);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ // span_multi wrapping a plain numeric range: bounds 10 (inclusive) .. 20 (exclusive),
+ // boost 2.0, compared via wrapper equality.
+ public void testSpanMultiTermNumericRangeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-numeric.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(SpanMultiTermQueryWrapper.class));
+ // use the uppercase 'L' literal suffix — lowercase 'l' is easily misread as the digit 1
+ NumericRangeQuery<Long> expectedWrapped = NumericRangeQuery.newLongRange("age", NumberFieldMapper.Defaults.PRECISION_STEP_64_BIT, 10L, 20L, true, false);
+ expectedWrapped.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> wrapper = (SpanMultiTermQueryWrapper<MultiTermQuery>) parsedQuery;
+ assertThat(wrapper, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(expectedWrapped)));
+ }
+
+ @Test
+ public void testSpanMultiTermTermRangeQuery() throws IOException {
+ // span_multi wrapping a string term range: "alice" (inclusive) to "bob" (exclusive).
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/span-multi-term-range-term.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(SpanMultiTermQueryWrapper.class));
+ TermRangeQuery inner = TermRangeQuery.newStringRange("user", "alice", "bob", true, false);
+ inner.setBoost(2.0f);
+ SpanMultiTermQueryWrapper<MultiTermQuery> actual = (SpanMultiTermQueryWrapper<MultiTermQuery>) result;
+ assertThat(actual, equalTo(new SpanMultiTermQueryWrapper<MultiTermQuery>(inner)));
+ }
+
+ @Test
+ public void testQueryQueryBuilder() throws Exception {
+ // A filtered query built through the Java API must equal the hand-built Lucene form.
+ IndexQueryParserService parserService = queryParser();
+ Query actual = parserService.parse(filteredQuery(termQuery("name.first", "shay"), termQuery("name.last", "banon"))).query();
+ TermQuery mainQuery = new TermQuery(new Term("name.first", "shay"));
+ TermQuery filterQuery = new TermQuery(new Term("name.last", "banon"));
+ assertEquals(Queries.filtered(mainQuery, filterQuery), actual);
+ }
+
+ @Test
+ public void testQueryFilter() throws Exception {
+ // query-as-filter JSON: the filter side is wrapped in a ConstantScoreQuery.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/query-filter.json");
+ Query actual = parserService.parse(json).query();
+ TermQuery mainQuery = new TermQuery(new Term("name.first", "shay"));
+ ConstantScoreQuery filterQuery = new ConstantScoreQuery(new TermQuery(new Term("name.last", "banon")));
+ assertEquals(Queries.filtered(mainQuery, filterQuery), actual);
+ }
+
+ @Test
+ public void testFQueryFilter() throws Exception {
+ // fquery filter with a _name: the name must be registered and the query shape preserved.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/fquery-filter.json");
+ ParsedQuery parsed = parserService.parse(json);
+ assertThat(parsed.namedFilters().containsKey("test"), equalTo(true));
+ TermQuery mainQuery = new TermQuery(new Term("name.first", "shay"));
+ ConstantScoreQuery filterQuery = new ConstantScoreQuery(new TermQuery(new Term("name.last", "banon")));
+ assertEquals(Queries.filtered(mainQuery, filterQuery), parsed.query());
+ }
+
+ @Test
+ public void testMoreLikeThisBuilder() throws Exception {
+ // Build a more_like_this query over two fields and verify the translated settings.
+ IndexQueryParserService parserService = queryParser();
+ Query result = parserService.parse(moreLikeThisQuery("name.first", "name.last")
+ .likeText("something").minTermFreq(1).maxQueryTerms(12)).query();
+ assertThat(result, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mlt = (MoreLikeThisQuery) result;
+ assertThat(mlt.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mlt.getLikeText(), equalTo("something"));
+ assertThat(mlt.getMinTermFrequency(), equalTo(1));
+ assertThat(mlt.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ public void testMoreLikeThis() throws Exception {
+ // Parse the more_like_this JSON fixture and verify fields, like text and limits.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/mlt.json");
+ Query result = parserService.parse(json).query();
+ assertThat(result, instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mlt = (MoreLikeThisQuery) result;
+ assertThat(mlt.getMoreLikeFields()[0], equalTo("name.first"));
+ assertThat(mlt.getMoreLikeFields()[1], equalTo("name.last"));
+ assertThat(mlt.getLikeText(), equalTo("something"));
+ assertThat(mlt.getMinTermFrequency(), equalTo(1));
+ assertThat(mlt.getMaxQueryTerms(), equalTo(12));
+ }
+
+ @Test
+ // Verifies that a more_like_this query with item ids fetches term vectors (via the
+ // mocked fetch service) and produces one SHOULD clause holding the MLT query whose
+ // per-item Fields reflect each item's id.
+ public void testMoreLikeThisIds() throws Exception {
+ // NOTE(review): this 'queryParser' resolves to the enclosing test's field, not the
+ // local declared below — the local only shadows it from its declaration point on.
+ // Confirm a field of that name exists; renaming the local would avoid the confusion.
+ MoreLikeThisQueryParser parser = (MoreLikeThisQueryParser) queryParser.queryParser("more_like_this");
+ parser.setFetchService(new MockMoreLikeThisFetchService());
+
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mlt-items.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ BooleanQuery booleanQuery = (BooleanQuery) parsedQuery;
+ assertThat(booleanQuery.getClauses().length, is(1));
+
+ // the single clause is the MLT query built from the fetched items
+ BooleanClause itemClause = booleanQuery.getClauses()[0];
+ assertThat(itemClause.getOccur(), is(BooleanClause.Occur.SHOULD));
+ assertThat(itemClause.getQuery(), instanceOf(MoreLikeThisQuery.class));
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) itemClause.getQuery();
+
+ // check each Fields is for each item
+ // (the mock fetch service generates each item's field text from its id — see generateFields)
+ for (int id = 1; id <= 4; id++) {
+ Fields fields = mltQuery.getLikeFields()[id - 1];
+ assertThat(termsToString(fields.terms("name.first")), is(String.valueOf(id)));
+ assertThat(termsToString(fields.terms("name.last")), is(String.valueOf(id)));
+ }
+ }
+
+ @Test
+ // Verifies that minimum_should_match percentages are resolved per clause when the
+ // MLT query is rewritten against a real (in-memory) index: 100% of 4 item terms -> 4,
+ // and 100% of the 2 like_text terms -> 2.
+ public void testMLTMinimumShouldMatch() throws Exception {
+ // setup for mocking fetching items
+ // NOTE(review): 'queryParser' here is the enclosing test's field (the local below
+ // shadows it only from its declaration point) — confirm the field exists.
+ MoreLikeThisQueryParser parser = (MoreLikeThisQueryParser) queryParser.queryParser("more_like_this");
+ parser.setFetchService(new MockMoreLikeThisFetchService());
+
+ // parsing the ES query
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/mlt-items.json");
+ BooleanQuery parsedQuery = (BooleanQuery) queryParser.parse(query).query();
+
+ // get MLT query, other clause is for include/exclude items
+ MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery.getClauses()[0].getQuery();
+
+ // all terms must match
+ mltQuery.setMinimumShouldMatch("100%");
+ // disable the word-length / doc-frequency cutoffs so every term survives selection
+ mltQuery.setMinWordLen(0);
+ mltQuery.setMinDocFreq(0);
+
+ // one document has all values
+ MemoryIndex index = new MemoryIndex();
+ index.addField("name.first", "apache lucene", new WhitespaceAnalyzer());
+ index.addField("name.last", "1 2 3 4", new WhitespaceAnalyzer());
+
+ // two clauses, one for items and one for like_text if set
+ BooleanQuery luceneQuery = (BooleanQuery) mltQuery.rewrite(index.createSearcher().getIndexReader());
+ BooleanClause[] clauses = luceneQuery.getClauses();
+
+ // check for items
+ int minNumberShouldMatch = ((BooleanQuery) (clauses[0].getQuery())).getMinimumNumberShouldMatch();
+ assertThat(minNumberShouldMatch, is(4));
+
+ // and for like_text
+ minNumberShouldMatch = ((BooleanQuery) (clauses[1].getQuery())).getMinimumNumberShouldMatch();
+ assertThat(minNumberShouldMatch, is(2));
+ }
+
+ /**
+  * Test double for {@code MoreLikeThisFetchService}: instead of hitting the cluster,
+  * it answers every multi-term-vectors request with synthetic term vectors whose
+  * field text is the requested item's id (see {@code generateFields}).
+  */
+ private static class MockMoreLikeThisFetchService extends MoreLikeThisFetchService {
+
+ public MockMoreLikeThisFetchService() {
+ // no client needed — fetchResponse is fully synthetic
+ super(null, Settings.Builder.EMPTY_SETTINGS);
+ }
+
+ @Override
+ public MultiTermVectorsResponse fetchResponse(MultiTermVectorsRequest items) throws IOException {
+ MultiTermVectorsItemResponse[] responses = new MultiTermVectorsItemResponse[items.size()];
+ int i = 0;
+ for (TermVectorsRequest item : items) {
+ TermVectorsResponse response = new TermVectorsResponse(item.index(), item.type(), item.id());
+ response.setExists(true);
+ // each selected field's "content" is simply the item id
+ Fields generatedFields = generateFields(item.selectedFields().toArray(Strings.EMPTY_ARRAY), item.id());
+ EnumSet<TermVectorsRequest.Flag> flags = EnumSet.of(TermVectorsRequest.Flag.Positions, TermVectorsRequest.Flag.Offsets);
+ response.setFields(generatedFields, item.selectedFields(), flags, generatedFields);
+ // null failure — every item succeeds
+ responses[i++] = new MultiTermVectorsItemResponse(response, null);
+ }
+ return new MultiTermVectorsResponse(responses);
+ }
+ }
+
+ /**
+  * Builds a {@link Fields} view where every given field is indexed with the same
+  * whitespace-analyzed text, backed by a throwaway in-memory index.
+  */
+ private static Fields generateFields(String[] fieldNames, String text) throws IOException {
+ MemoryIndex scratch = new MemoryIndex();
+ for (String name : fieldNames) {
+ scratch.addField(name, text, new WhitespaceAnalyzer());
+ }
+ return MultiFields.getFields(scratch.createSearcher().getIndexReader());
+ }
+
+ /**
+  * Concatenates every term of the given {@link Terms} (in enumeration order)
+  * into a single string.
+  */
+ private static String termsToString(Terms terms) throws IOException {
+ // StringBuilder avoids the O(n^2) cost of += String concatenation in a loop
+ StringBuilder strings = new StringBuilder();
+ TermsEnum termsEnum = terms.iterator();
+ CharsRefBuilder spare = new CharsRefBuilder();
+ BytesRef text;
+ while ((text = termsEnum.next()) != null) {
+ spare.copyUTF8Bytes(text);
+ strings.append(spare.toString());
+ }
+ return strings.toString();
+ }
+
+ @Test
+ public void testGeoDistanceRangeQueryNamed() throws IOException {
+ // Named geo_distance filter: the _name must be registered and the parsed filter checked in full.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance-named.json");
+ ParsedQuery parsed = parserService.parse(json);
+ assertThat(parsed.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsed.query(), instanceOf(ConstantScoreQuery.class));
+ GeoDistanceRangeQuery geoFilter = (GeoDistanceRangeQuery) ((ConstantScoreQuery) parsed.query()).getQuery();
+ assertThat(geoFilter.fieldName(), equalTo("location"));
+ assertThat(geoFilter.lat(), closeTo(40, 0.00001));
+ assertThat(geoFilter.lon(), closeTo(-70, 0.00001));
+ // lower bound is open; upper bound is 12 miles in the default unit
+ assertThat(geoFilter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(geoFilter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery7() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance7.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(0.012, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery8() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance8.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.KILOMETERS.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery9() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance9.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery10() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance10.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery11() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance11.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoDistanceRangeQuery12() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_distance12.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoDistanceRangeQuery filter = (GeoDistanceRangeQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.lat(), closeTo(40, 0.00001));
+ assertThat(filter.lon(), closeTo(-70, 0.00001));
+ assertThat(filter.minInclusiveDistance(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(filter.maxInclusiveDistance(), closeTo(DistanceUnit.DEFAULT.convert(12, DistanceUnit.MILES), 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilterNamed() throws IOException {
+ // Named geo_bounding_box filter: the _name must be registered and the box corners verified.
+ IndexQueryParserService parserService = queryParser();
+ String json = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox-named.json");
+ ParsedQuery parsed = parserService.parse(json);
+ assertThat(parsed.query(), instanceOf(ConstantScoreQuery.class));
+ assertThat(parsed.namedFilters().containsKey("test"), equalTo(true));
+ InMemoryGeoBoundingBoxQuery boxFilter = (InMemoryGeoBoundingBoxQuery) ((ConstantScoreQuery) parsed.query()).getQuery();
+ assertThat(boxFilter.fieldName(), equalTo("location"));
+ assertThat(boxFilter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(boxFilter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(boxFilter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(boxFilter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+
+ @Test
+ public void testGeoBoundingBoxFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter5() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox5.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+ @Test
+ public void testGeoBoundingBoxFilter6() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_boundingbox6.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ InMemoryGeoBoundingBoxQuery filter = (InMemoryGeoBoundingBoxQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.topLeft().lat(), closeTo(40, 0.00001));
+ assertThat(filter.topLeft().lon(), closeTo(-70, 0.00001));
+ assertThat(filter.bottomRight().lat(), closeTo(30, 0.00001));
+ assertThat(filter.bottomRight().lon(), closeTo(-80, 0.00001));
+ }
+
+
+ // The geo_polygon fixtures below all describe the same polygon on field
+ // "location": a ring of 4 points of which the first three are asserted —
+ // (40, -70), (30, -80), (20, -90). Each numbered fixture exercises a
+ // different JSON spelling; the exception fixtures must fail to parse.
+ @Test
+ public void testGeoPolygonNamedFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon-named.json");
+ ParsedQuery parsedQuery = queryParser.parse(query);
+ // the named variant must register the filter under the name "test"
+ assertThat(parsedQuery.namedFilters().containsKey("test"), equalTo(true));
+ assertThat(parsedQuery.query(), instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery.query();
+ GeoPolygonQuery filter = (GeoPolygonQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+
+ // Each broken fixture must raise a QueryParsingException; anything else
+ // (including silent success) fails the test with the offending file name.
+ @Test
+ public void testGeoPolygonFilterParsingExceptions() throws IOException {
+ String[] brokenFiles = new String[]{
+ "/org/elasticsearch/index/query/geo_polygon_exception_1.json",
+ "/org/elasticsearch/index/query/geo_polygon_exception_2.json",
+ "/org/elasticsearch/index/query/geo_polygon_exception_3.json",
+ "/org/elasticsearch/index/query/geo_polygon_exception_4.json",
+ "/org/elasticsearch/index/query/geo_polygon_exception_5.json"
+ };
+ for (String brokenFile : brokenFiles) {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath(brokenFile);
+ try {
+ queryParser.parse(query).query();
+ fail("parsing a broken geo_polygon filter didn't fail as expected while parsing: " + brokenFile);
+ } catch (QueryParsingException e) {
+ // success!
+ }
+ }
+ }
+
+
+ // fixture variant 1
+ @Test
+ public void testGeoPolygonFilter1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoPolygonQuery filter = (GeoPolygonQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ // fixture variant 2
+ @Test
+ public void testGeoPolygonFilter2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoPolygonQuery filter = (GeoPolygonQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ // fixture variant 3
+ @Test
+ public void testGeoPolygonFilter3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoPolygonQuery filter = (GeoPolygonQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ // fixture variant 4
+ @Test
+ public void testGeoPolygonFilter4() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geo_polygon4.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
+ GeoPolygonQuery filter = (GeoPolygonQuery) constantScoreQuery.getQuery();
+ assertThat(filter.fieldName(), equalTo("location"));
+ assertThat(filter.points().length, equalTo(4));
+ assertThat(filter.points()[0].lat(), closeTo(40, 0.00001));
+ assertThat(filter.points()[0].lon(), closeTo(-70, 0.00001));
+ assertThat(filter.points()[1].lat(), closeTo(30, 0.00001));
+ assertThat(filter.points()[1].lon(), closeTo(-80, 0.00001));
+ assertThat(filter.points()[2].lat(), closeTo(20, 0.00001));
+ assertThat(filter.points()[2].lon(), closeTo(-90, 0.00001));
+ }
+
+ // geo_shape as a filter: unwrap any number of nested ConstantScoreQuery
+ // layers and expect an IntersectsPrefixTreeFilter at the core.
+ @Test
+ public void testGeoShapeFilter() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-filter.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ while (parsedQuery instanceof ConstantScoreQuery) {
+ parsedQuery = ((ConstantScoreQuery) parsedQuery).getQuery();
+ }
+ assertThat(parsedQuery, instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ // geo_shape as a query: exactly one ConstantScoreQuery wrapper is expected.
+ @Test
+ public void testGeoShapeQuery() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/geoShape-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ ConstantScoreQuery csq = (ConstantScoreQuery) parsedQuery;
+ assertThat(csq.getQuery(), instanceOf(IntersectsPrefixTreeFilter.class));
+ }
+
+ // common terms query fixtures: each must parse to an ExtendedCommonTermsQuery
+ // carrying the minimum_should_match specs declared in the JSON.
+ @Test
+ public void testCommonTermsQuery1() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query1.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ // fixture 1: only the low-frequency spec ("2") is set
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+ @Test
+ public void testCommonTermsQuery2() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query2.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ // fixture 2: both high- ("50%") and low-frequency ("5<20%") specs are set
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), equalTo("50%"));
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("5<20%"));
+ }
+
+ @Test
+ public void testCommonTermsQuery3() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/commonTerms-query3.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ExtendedCommonTermsQuery.class));
+ ExtendedCommonTermsQuery ectQuery = (ExtendedCommonTermsQuery) parsedQuery;
+ // fixture 3: only the low-frequency spec ("2") is set
+ assertThat(ectQuery.getHighFreqMinimumNumberShouldMatchSpec(), nullValue());
+ assertThat(ectQuery.getLowFreqMinimumNumberShouldMatchSpec(), equalTo("2"));
+ }
+
+ // A faulty function_score query must be rejected with QueryParsingException
+ // (asserted via the JUnit 'expected' attribute).
+ @Test(expected = QueryParsingException.class)
+ public void assureMalformedThrowsException() throws IOException {
+ IndexQueryParserService queryParser;
+ queryParser = queryParser();
+ String query;
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/faulty-function-score-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ }
+
+ // A function_score query wrapping a filter must keep the boost (3.0)
+ // declared in the fixture.
+ @Test
+ public void testFilterParsing() throws IOException {
+ IndexQueryParserService queryParser;
+ queryParser = queryParser();
+ String query;
+ query = copyToStringFromClasspath("/org/elasticsearch/index/query/function-filter-score-query.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat((double) (parsedQuery.getBoost()), Matchers.closeTo(3.0, 1.e-7));
+ }
+
+ // A match query with a bad "type" value must fail with QueryParsingException.
+ @Test
+ public void testBadTypeMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ // A simple multi_match query parses to a DisjunctionMaxQuery.
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-simple.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(DisjunctionMaxQuery.class));
+ }
+
+ // A multi_match query with a bad "type" value must fail with
+ // QueryParsingException.
+ @Test
+ public void testBadTypeMultiMatchQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-bad-type.json");
+ QueryParsingException expectedException = null;
+ try {
+ queryParser.parse(query).query();
+ } catch (QueryParsingException qpe) {
+ expectedException = qpe;
+ }
+ assertThat(expectedException, notNullValue());
+ }
+
+ // multi_match with "fields" given as a single string parses to a BooleanQuery.
+ @Test
+ public void testMultiMatchQueryWithFieldsAsString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+
+ // simple_query_string parses to a BooleanQuery.
+ @Test
+ public void testSimpleQueryString() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/simple-query-string.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(BooleanQuery.class));
+ }
+
+ // fuzzy_transpositions=true in the fixture must carry through to the
+ // resulting FuzzyQuery.
+ @Test
+ public void testMatchWithFuzzyTranspositions() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match-with-fuzzy-transpositions.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ assertThat( ((FuzzyQuery) parsedQuery).getTranspositions(), equalTo(true));
+ }
+
+ // fuzzy_transpositions=false in the fixture must carry through as well.
+ @Test
+ public void testMatchWithoutFuzzyTranspositions() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/match-without-fuzzy-transpositions.json");
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
+ assertThat( ((FuzzyQuery) parsedQuery).getTranspositions(), equalTo(false));
+ }
+
+ // https://github.com/elasticsearch/elasticsearch/issues/7240
+ // An empty {"bool": {}} query must parse to match_all rather than fail.
+ @Test
+ public void testEmptyBooleanQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = jsonBuilder().startObject().startObject("bool").endObject().endObject().string();
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(MatchAllDocsQuery.class));
+ }
+
+ // https://github.com/elasticsearch/elasticsearch/issues/7240
+ // Same empty-bool tolerance when parsed as an inner filter (fquery wrapper).
+ @Test
+ public void testEmptyBooleanQueryInsideFQuery() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/fquery-with-empty-bool-query.json");
+ XContentParser parser = XContentHelper.createParser(new BytesArray(query));
+ ParsedQuery parsedQuery = queryParser.parseInnerFilter(parser);
+ assertEquals(new ConstantScoreQuery(Queries.filtered(new TermQuery(new Term("text", "apache")), new TermQuery(new Term("text", "apache")))), parsedQuery.query());
+ }
+
+ // Two score functions defined directly in the query body (instead of inside
+ // "functions": [...]) must fail with a message pointing to the array form.
+ @Test
+ public void testProperErrorMessageWhenTwoFunctionsDefinedInQueryBody() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/function-score-query-causing-NPE.json");
+ try {
+ queryParser.parse(query).query();
+ fail("FunctionScoreQueryParser should throw an exception here because two functions in body are not allowed.");
+ } catch (QueryParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("Use functions[{...},...] if you want to define several functions."));
+ }
+ }
+
+ // A "weight": 1.0 on a function must still wrap it in a WeightFactorFunction
+ // (i.e. the neutral weight is not optimized away). Needs an index with a
+ // float "popularity" field and an active SearchContext.
+ @Test
+ public void testWeight1fStillProducesWeighFunction() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String queryString = jsonBuilder().startObject()
+ .startObject("function_score")
+ .startArray("functions")
+ .startObject()
+ .startObject("field_value_factor")
+ .field("field", "popularity")
+ .endObject()
+ .field("weight", 1.0)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject().string();
+ IndexService indexService = createIndex("testidx", client().admin().indices().prepareCreate("testidx")
+ .addMapping("doc",jsonBuilder().startObject()
+ .startObject("properties")
+ .startObject("popularity").field("type", "float").endObject()
+ .endObject()
+ .endObject()));
+ SearchContext.setCurrent(createSearchContext(indexService));
+ Query query = queryParser.parse(queryString).query();
+ assertThat(query, instanceOf(FunctionScoreQuery.class));
+ assertThat(((FunctionScoreQuery) query).getFunction(), instanceOf(WeightFactorFunction.class));
+ SearchContext.removeCurrent();
+ }
+
+ // Invalid weight placements must produce targeted error messages:
+ // 1) boost_factor combined with weight (both JSON and builder API),
+ // 2) "functions": [...] followed by a top-level "weight",
+ // 3) top-level "weight" followed by "functions": [...].
+ @Test
+ public void testProperErrorMessagesForMisplacedWeightsAndFunctions() throws IOException {
+ IndexQueryParserService queryParser = queryParser();
+ String query = jsonBuilder().startObject().startObject("function_score")
+ .startArray("functions")
+ .startObject().field("weight", 2).field("boost_factor",2).endObject()
+ .endArray()
+ .endObject().endObject().string();
+ try {
+ queryParser.parse(query).query();
+ fail("Expect exception here because boost_factor must not have a weight");
+ } catch (QueryParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE));
+ }
+ try {
+ functionScoreQuery().add(factorFunction(2.0f).setWeight(2.0f));
+ fail("Expect exception here because boost_factor must not have a weight");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE));
+ }
+ query = jsonBuilder().startObject().startObject("function_score")
+ .startArray("functions")
+ .startObject().field("boost_factor",2).endObject()
+ .endArray()
+ .field("weight", 2)
+ .endObject().endObject().string();
+ try {
+ queryParser.parse(query).query();
+ fail("Expect exception here because array of functions and one weight in body is not allowed.");
+ } catch (QueryParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("You can either define \"functions\":[...] or a single function, not both. Found \"functions\": [...] already, now encountering \"weight\"."));
+ }
+ query = jsonBuilder().startObject().startObject("function_score")
+ .field("weight", 2)
+ .startArray("functions")
+ .startObject().field("boost_factor",2).endObject()
+ .endArray()
+ .endObject().endObject().string();
+ try {
+ queryParser.parse(query).query();
+ fail("Expect exception here because array of functions and one weight in body is not allowed.");
+ } catch (QueryParsingException e) {
+ assertThat(e.getDetailedMessage(), containsString("You can either define \"functions\":[...] or a single function, not both. Found \"weight\" already, now encountering \"functions\": [...]."));
+ }
+ }
+
+ // https://github.com/elasticsearch/elasticsearch/issues/6722
+ // Empty sub-clauses inside a bool query must degrade to match_all; here the
+ // surviving nested clause leaves a ToParentBlockJoinQuery with a *:* child.
+ public void testEmptyBoolSubClausesIsMatchAll() throws IOException {
+ String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json");
+ IndexService indexService = createIndex("testidx", client().admin().indices().prepareCreate("testidx")
+ .addMapping("foo", "nested", "type=nested"));
+ SearchContext.setCurrent(createSearchContext(indexService));
+ IndexQueryParserService queryParser = indexService.queryParserService();
+ Query parsedQuery = queryParser.parse(query).query();
+ assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(ToParentBlockJoinQuery.class));
+ assertThat(((ConstantScoreQuery) parsedQuery).getQuery().toString(), equalTo("ToParentBlockJoinQuery (+*:* #random_access(QueryWrapperFilter(_type:__nested)))"));
+ SearchContext.removeCurrent();
+ }
+
+ /**
+ * helper to extract term from TermQuery. */
+ private Term getTerm(Query query) {
+ // unwrap any QueryWrapperFilter layers before casting to TermQuery
+ while (query instanceof QueryWrapperFilter) {
+ query = ((QueryWrapperFilter) query).getQuery();
+ }
+ TermQuery wrapped = (TermQuery) query;
+ return wrapped.getTerm();
+ }
+
+ // minimum_should_match defaults: plain queries keep 0; a bool used in filter
+ // context with only "should" clauses gets 1.
+ public void testDefaultBooleanQueryMinShouldMatch() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+
+ // Queries have a minShouldMatch of 0
+ BooleanQuery bq = (BooleanQuery) queryParser.parse(boolQuery().must(termQuery("foo", "bar"))).query();
+ assertEquals(0, bq.getMinimumNumberShouldMatch());
+
+ bq = (BooleanQuery) queryParser.parse(boolQuery().should(termQuery("foo", "bar"))).query();
+ assertEquals(0, bq.getMinimumNumberShouldMatch());
+
+ // Filters have a minShouldMatch of 0/1
+ ConstantScoreQuery csq = (ConstantScoreQuery) queryParser.parse(constantScoreQuery(boolQuery().must(termQuery("foo", "bar")))).query();
+ bq = (BooleanQuery) csq.getQuery();
+ assertEquals(0, bq.getMinimumNumberShouldMatch());
+
+ csq = (ConstantScoreQuery) queryParser.parse(constantScoreQuery(boolQuery().should(termQuery("foo", "bar")))).query();
+ bq = (BooleanQuery) csq.getQuery();
+ assertEquals(1, bq.getMinimumNumberShouldMatch());
+ }
+
+ // A terms query yields a BooleanQuery in query context but a TermsQuery when
+ // parsed inside a constant_score (filter) context.
+ public void testTermsQueryFilter() throws Exception {
+ // TermsQuery is tricky in that it parses differently as a query or a filter
+ IndexQueryParserService queryParser = queryParser();
+ Query q = queryParser.parse(termsQuery("foo", Arrays.asList("bar"))).query();
+ assertThat(q, instanceOf(BooleanQuery.class));
+
+ ConstantScoreQuery csq = (ConstantScoreQuery) queryParser.parse(constantScoreQuery(termsQuery("foo", Arrays.asList("bar")))).query();
+ q = csq.getQuery();
+ assertThat(q, instanceOf(TermsQuery.class));
+ }
+
+ // constant_score must parse its inner clause in filter context
+ // (DummyQuery records which context it was parsed in via isFilter).
+ public void testConstantScoreParsesFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ Query q = queryParser.parse(constantScoreQuery(dummyQuery())).query();
+ Query inner = ((ConstantScoreQuery) q).getQuery();
+ assertThat(inner, instanceOf(DummyQuery.class));
+ assertEquals(true, ((DummyQuery) inner).isFilter);
+ }
+
+ // bool clauses: filter/must_not parse in filter context, must/should in
+ // query context — checked for both single-clause (inner object) and
+ // multi-clause (inner array) JSON serializations.
+ public void testBooleanParsesFilter() throws Exception {
+ IndexQueryParserService queryParser = queryParser();
+ // single clause, serialized as inner object
+ Query q = queryParser.parse(boolQuery()
+ .should(dummyQuery())
+ .must(dummyQuery())
+ .filter(dummyQuery())
+ .mustNot(dummyQuery())).query();
+ assertThat(q, instanceOf(BooleanQuery.class));
+ BooleanQuery bq = (BooleanQuery) q;
+ assertEquals(4, bq.clauses().size());
+ for (BooleanClause clause : bq.clauses()) {
+ DummyQuery dummy = (DummyQuery) clause.getQuery();
+ switch (clause.getOccur()) {
+ case FILTER:
+ case MUST_NOT:
+ assertEquals(true, dummy.isFilter);
+ break;
+ case MUST:
+ case SHOULD:
+ assertEquals(false, dummy.isFilter);
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ // multiple clauses, serialized as inner arrays
+ q = queryParser.parse(boolQuery()
+ .should(dummyQuery()).should(dummyQuery())
+ .must(dummyQuery()).must(dummyQuery())
+ .filter(dummyQuery()).filter(dummyQuery())
+ .mustNot(dummyQuery()).mustNot(dummyQuery())).query();
+ assertThat(q, instanceOf(BooleanQuery.class));
+ bq = (BooleanQuery) q;
+ assertEquals(8, bq.clauses().size());
+ for (BooleanClause clause : bq.clauses()) {
+ DummyQuery dummy = (DummyQuery) clause.getQuery();
+ switch (clause.getOccur()) {
+ case FILTER:
+ case MUST_NOT:
+ assertEquals(true, dummy.isFilter);
+ break;
+ case MUST:
+ case SHOULD:
+ assertEquals(false, dummy.isFilter);
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/TemplateQueryBuilderTest.java b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryBuilderTest.java
new file mode 100644
index 0000000000..426185cc06
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryBuilderTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.Template;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test building and serialising a template search request.
+ * */
+public class TemplateQueryBuilderTest extends ElasticsearchTestCase {
+
+ // Builds a TemplateQueryBuilder from a Template object (new script API) and
+ // checks the exact JSON it serializes to.
+ @Test
+ public void testJSONGeneration() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "filled");
+ TemplateQueryBuilder builder = new TemplateQueryBuilder(
+ new Template("I am a $template string", ScriptType.INLINE, null, null, vars));
+ XContentBuilder content = XContentFactory.jsonBuilder();
+ content.startObject();
+ builder.doXContent(content, null);
+ content.endObject();
+ content.close();
+ assertEquals("{\"template\":{\"inline\":\"I am a $template string\",\"params\":{\"template\":\"filled\"}}}", content.string());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Same expected JSON via the deprecated (String, Map) constructor, kept for
+ // backward compatibility until the old script API is removed.
+ @Test
+ public void testJSONGenerationOldScriptAPI() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "filled");
+ TemplateQueryBuilder builder = new TemplateQueryBuilder("I am a $template string", vars);
+ XContentBuilder content = XContentFactory.jsonBuilder();
+ content.startObject();
+ builder.doXContent(content, null);
+ content.endObject();
+ content.close();
+ assertEquals("{\"template\":{\"inline\":\"I am a $template string\",\"params\":{\"template\":\"filled\"}}}", content.string());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java
new file mode 100644
index 0000000000..ad737fd797
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryParserTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Test parsing and executing a template request.
+ */
+// NOTE: this can't be migrated to ElasticsearchSingleNodeTest because of the custom path.conf
+public class TemplateQueryParserTest extends ElasticsearchTestCase {
+
+ private Injector injector;
+ private QueryParseContext context;
+
+ @Before
+ public void setup() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("path.conf", this.getDataPath("config"))
+ .put("name", getClass().getName())
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .build();
+
+ Index index = new Index("test");
+ injector = new ModulesBuilder().add(
+ new EnvironmentModule(new Environment(settings)),
+ new SettingsModule(settings),
+ new ThreadPoolModule(new ThreadPool(settings)),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new SimilarityModule(settings),
+ new IndexNameModule(index),
+ new IndexQueryParserModule(settings),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService queryParserService = injector.getInstance(IndexQueryParserService.class);
+ context = new QueryParseContext(index, queryParserService);
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ terminate(injector.getInstance(ThreadPool.class));
+ }
+
+ @Test
+ public void testParser() throws IOException {
+ String templateString = "{" + "\"query\":{\"match_{{template}}\": {}}," + "\"params\":{\"template\":\"all\"}" + "}";
+
+ XContentParser templateSourceParser = XContentFactory.xContent(templateString).createParser(templateString);
+ context.reset(templateSourceParser);
+ templateSourceParser.nextToken();
+
+ TemplateQueryParser parser = injector.getInstance(TemplateQueryParser.class);
+ Query query = parser.parse(context);
+ assertTrue("Parsing template query failed.", query instanceof MatchAllDocsQuery);
+ }
+
+ @Test
+ public void testParserCanExtractTemplateNames() throws Exception {
+ String templateString = "{ \"file\": \"storedTemplate\" ,\"params\":{\"template\":\"all\" } } ";
+
+ XContentParser templateSourceParser = XContentFactory.xContent(templateString).createParser(templateString);
+ context.reset(templateSourceParser);
+ templateSourceParser.nextToken();
+
+ TemplateQueryParser parser = injector.getInstance(TemplateQueryParser.class);
+ Query query = parser.parse(context);
+ assertTrue("Parsing template query failed.", query instanceof MatchAllDocsQuery);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java
new file mode 100644
index 0000000000..4ba9b010eb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/TemplateQueryTest.java
@@ -0,0 +1,766 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.query;
+
+import com.google.common.collect.Maps;
+
+import org.elasticsearch.action.index.IndexRequest.OpType;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptResponse;
+import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequestBuilder;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.Template;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Full integration test of the template query plugin.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class TemplateQueryTest extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void setup() throws IOException {
+ createIndex("test");
+ ensureGreen("test");
+
+ index("test", "testtype", "1", jsonBuilder().startObject().field("text", "value1").endObject());
+ index("test", "testtype", "2", jsonBuilder().startObject().field("text", "value2").endObject());
+ refresh();
+ }
+
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ .put("path.conf", this.getDataPath("config")).build();
+ }
+
+ @Test
+ public void testTemplateInBody() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "all");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder(new Template("{\"match_{{template}}\": {}}\"", ScriptType.INLINE, null,
+ null, vars));
+ SearchResponse sr = client().prepareSearch().setQuery(builder)
+ .execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testTemplateInBodyWithSize() throws IOException {
+ String request = "{\n" +
+ " \"size\":0," +
+ " \"query\": {\n" +
+ " \"template\": {\n" +
+ " \"query\": {\"match_{{template}}\": {}},\n" +
+ " \"params\" : {\n" +
+ " \"template\" : \"all\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}";
+ SearchResponse sr = client().prepareSearch().setSource(request)
+ .execute().actionGet();
+ assertNoFailures(sr);
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ request = "{\n" +
+ " \"query\": {\n" +
+ " \"template\": {\n" +
+ " \"query\": {\"match_{{template}}\": {}},\n" +
+ " \"params\" : {\n" +
+ " \"template\" : \"all\"\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ " \"size\":0" +
+ "}";
+
+ sr = client().prepareSearch().setSource(request)
+ .execute().actionGet();
+ assertNoFailures(sr);
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ }
+
+ @Test
+ public void testTemplateWOReplacementInBody() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder(new Template(
+ "{\"match_all\": {}}\"", ScriptType.INLINE, null, null, vars));
+ SearchResponse sr = client().prepareSearch().setQuery(builder)
+ .execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testTemplateInFile() {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "all");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder(new Template(
+ "storedTemplate", ScriptService.ScriptType.FILE, null, null, vars));
+ SearchResponse sr = client().prepareSearch().setQuery(builder)
+ .execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testRawEscapedTemplate() throws IOException {
+ String query = "{\"template\": {\"query\": \"{\\\"match_{{template}}\\\": {}}\\\"\",\"params\" : {\"template\" : \"all\"}}}";
+
+ SearchResponse sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testRawTemplate() throws IOException {
+ String query = "{\"template\": {\"query\": {\"match_{{template}}\": {}},\"params\" : {\"template\" : \"all\"}}}";
+ SearchResponse sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testRawFSTemplate() throws IOException {
+ String query = "{\"template\": {\"file\": \"storedTemplate\",\"params\" : {\"template\" : \"all\"}}}";
+
+ SearchResponse sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 2);
+ }
+
+ @Test
+ public void testSearchRequestTemplateSource() throws Exception {
+ SearchRequest searchRequest = new SearchRequest();
+ searchRequest.indices("_all");
+
+ String query = "{ \"template\" : { \"query\": {\"match_{{template}}\": {} } }, \"params\" : { \"template\":\"all\" } }";
+ BytesReference bytesRef = new BytesArray(query);
+ searchRequest.templateSource(bytesRef);
+
+ SearchResponse searchResponse = client().search(searchRequest).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test
+ // Releates to #6318
+ public void testSearchRequestFail() throws Exception {
+ SearchRequest searchRequest = new SearchRequest();
+ searchRequest.indices("_all");
+ try {
+ String query = "{ \"template\" : { \"query\": {\"match_all\": {}}, \"size\" : \"{{my_size}}\" } }";
+ BytesReference bytesRef = new BytesArray(query);
+ searchRequest.templateSource(bytesRef);
+ client().search(searchRequest).get();
+ fail("expected exception");
+ } catch (Exception ex) {
+ // expected - no params
+ }
+ String query = "{ \"template\" : { \"query\": {\"match_all\": {}}, \"size\" : \"{{my_size}}\" }, \"params\" : { \"my_size\": 1 } }";
+ BytesReference bytesRef = new BytesArray(query);
+ searchRequest.templateSource(bytesRef);
+
+ SearchResponse searchResponse = client().search(searchRequest).get();
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ }
+
+ @Test
+ public void testThatParametersCanBeSet() throws Exception {
+ index("test", "type", "1", jsonBuilder().startObject().field("theField", "foo").endObject());
+ index("test", "type", "2", jsonBuilder().startObject().field("theField", "foo 2").endObject());
+ index("test", "type", "3", jsonBuilder().startObject().field("theField", "foo 3").endObject());
+ index("test", "type", "4", jsonBuilder().startObject().field("theField", "foo 4").endObject());
+ index("test", "type", "5", jsonBuilder().startObject().field("otherField", "foo").endObject());
+ refresh();
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("mySize", "2");
+ templateParams.put("myField", "theField");
+ templateParams.put("myValue", "foo");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type")
+ .setTemplate(new Template("full-query-template", ScriptType.FILE, MustacheScriptEngineService.NAME, null, templateParams))
+ .get();
+ assertHitCount(searchResponse, 4);
+ // size kicks in here...
+ assertThat(searchResponse.getHits().getHits().length, is(2));
+
+ templateParams.put("myField", "otherField");
+ searchResponse = client().prepareSearch("test").setTypes("type")
+ .setTemplate(new Template("full-query-template", ScriptType.FILE, MustacheScriptEngineService.NAME, null, templateParams))
+ .get();
+ assertHitCount(searchResponse, 1);
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testIndexedTemplateClient() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+
+ PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "testTemplate", "{" +
+ "\"template\":{" +
+ " \"query\":{" +
+ " \"match\":{" +
+ " \"theField\" : \"{{fieldParam}}\"}" +
+ " }" +
+ "}" +
+ "}").get();
+
+ assertTrue(scriptResponse.isCreated());
+
+ scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "testTemplate", "{" +
+ "\"template\":{" +
+ " \"query\":{" +
+ " \"match\":{" +
+ " \"theField\" : \"{{fieldParam}}\"}" +
+ " }" +
+ "}" +
+ "}").get();
+
+ assertEquals(scriptResponse.getVersion(), 2);
+
+ GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
+ assertTrue(getResponse.isExists());
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("fieldParam", "foo");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type")
+ .setTemplate(new Template("testTemplate", ScriptType.INDEXED, MustacheScriptEngineService.NAME, null, templateParams))
+ .get();
+ assertHitCount(searchResponse, 4);
+
+ DeleteIndexedScriptResponse deleteResponse = client().prepareDeleteIndexedScript(MustacheScriptEngineService.NAME, "testTemplate")
+ .get();
+ assertTrue(deleteResponse.isFound());
+
+ getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
+ assertFalse(getResponse.isExists());
+
+ client().prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/template_index/mustache/1000", ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ templateParams)).get();
+ }
+
+ @Test
+ public void testIndexedTemplate() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "1a").setSource("{" +
+ "\"template\":{"+
+ " \"query\":{" +
+ " \"match\":{" +
+ " \"theField\" : \"{{fieldParam}}\"}" +
+ " }" +
+ "}" +
+ "}"));
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "2").setSource("{" +
+ "\"template\":{"+
+ " \"query\":{" +
+ " \"match\":{" +
+ " \"theField\" : \"{{fieldParam}}\"}" +
+ " }" +
+ "}" +
+ "}"));
+
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "3").setSource("{" +
+ "\"template\":{"+
+ " \"match\":{" +
+ " \"theField\" : \"{{fieldParam}}\"}" +
+ " }" +
+ "}"));
+
+ indexRandom(true, builders);
+
+ builders.clear();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("fieldParam", "foo");
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/mustache/1a", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ templateParams)).get();
+ assertHitCount(searchResponse, 4);
+
+ try {
+ client().prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/template_index/mustache/1000", ScriptService.ScriptType.INDEXED,
+ MustacheScriptEngineService.NAME, null, templateParams)).get();
+ fail("shouldn't get here");
+ } catch (SearchPhaseExecutionException spee) {
+ //all good
+ }
+
+ try {
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/myindex/mustache/1", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ templateParams)).get();
+ assertFailures(searchResponse);
+ } catch (SearchPhaseExecutionException spee) {
+ //all good
+ }
+
+ searchResponse = client().prepareSearch("test").setTypes("type")
+ .setTemplate(new Template("1a", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null, templateParams))
+ .get();
+ assertHitCount(searchResponse, 4);
+
+ templateParams.put("fieldParam", "bar");
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/mustache/2", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ templateParams)).get();
+ assertHitCount(searchResponse, 1);
+
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("fieldParam", "bar");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder(new Template(
+ "3", ScriptService.ScriptType.INDEXED, null, null, vars));
+ SearchResponse sr = client().prepareSearch().setQuery(builder)
+ .execute().actionGet();
+ assertHitCount(sr, 1);
+
+ String query = "{\"template\": {\"id\": \"3\",\"params\" : {\"fieldParam\" : \"foo\"}}}";
+ sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 4);
+
+ query = "{\"template\": {\"id\": \"/mustache/3\",\"params\" : {\"fieldParam\" : \"foo\"}}}";
+ sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 4);
+ }
+
+ // Relates to #10397
+ @Test
+ public void testIndexedTemplateOverwrite() throws Exception {
+ createIndex("testindex");
+ ensureGreen("testindex");
+
+ index("testindex", "test", "1", jsonBuilder().startObject().field("searchtext", "dev1").endObject());
+ refresh();
+
+ int iterations = randomIntBetween(2, 11);
+ for (int i = 1; i < iterations; i++) {
+ PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "git01",
+ "{\"query\": {\"match\": {\"searchtext\": {\"query\": \"{{P_Keyword1}}\",\"type\": \"ooophrase_prefix\"}}}}").get();
+ assertEquals(i * 2 - 1, scriptResponse.getVersion());
+
+ GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "git01").get();
+ assertTrue(getResponse.isExists());
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("P_Keyword1", "dev");
+
+ try {
+ client().prepareSearch("testindex")
+ .setTypes("test")
+ .setTemplate(
+ new Template("git01", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ templateParams)).get();
+ fail("Broken test template is parsing w/o error.");
+ } catch (SearchPhaseExecutionException e) {
+ // the above is expected to fail
+ }
+
+ PutIndexedScriptRequestBuilder builder = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "git01",
+ "{\"query\": {\"match\": {\"searchtext\": {\"query\": \"{{P_Keyword1}}\",\"type\": \"phrase_prefix\"}}}}").setOpType(
+ OpType.INDEX);
+ scriptResponse = builder.get();
+ assertEquals(i * 2, scriptResponse.getVersion());
+ SearchResponse searchResponse = client()
+ .prepareSearch("testindex")
+ .setTypes("test")
+ .setTemplate(
+ new Template("git01", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null, templateParams))
+ .get();
+ assertHitCount(searchResponse, 1);
+ }
+ }
+
+
+ @Test
+ public void testIndexedTemplateWithArray() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ String multiQuery = "{\"query\":{\"terms\":{\"theField\":[\"{{#fieldParam}}\",\"{{.}}\",\"{{/fieldParam}}\"]}}}";
+
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "4").setSource(jsonBuilder().startObject().field("template", multiQuery).endObject()));
+
+ indexRandom(true,builders);
+
+ builders.clear();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true,builders);
+
+ Map<String, Object> arrayTemplateParams = new HashMap<>();
+ String[] fieldParams = {"foo","bar"};
+ arrayTemplateParams.put("fieldParam", fieldParams);
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type")
+ .setTemplate(
+ new Template("/mustache/4", ScriptService.ScriptType.INDEXED, MustacheScriptEngineService.NAME, null,
+ arrayTemplateParams)).get();
+ assertHitCount(searchResponse, 5);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testTemplateInBodyOldScriptAPI() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "all");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder("{\"match_{{template}}\": {}}\"", vars);
+ SearchResponse sr = client().prepareSearch().setQuery(builder).execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testTemplateWOReplacementInBodyOldScriptAPI() throws IOException {
+ Map<String, Object> vars = new HashMap<>();
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder("{\"match_all\": {}}\"", vars);
+ SearchResponse sr = client().prepareSearch().setQuery(builder).execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testTemplateInFileOldScriptAPI() {
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("template", "all");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder("storedTemplate", ScriptService.ScriptType.FILE, vars);
+ SearchResponse sr = client().prepareSearch().setQuery(builder).execute().actionGet();
+ assertHitCount(sr, 2);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testIndexedTemplateOldScriptAPI() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "1a").setSource(
+ "{" + "\"template\":{" + " \"query\":{" + " \"match\":{"
+ + " \"theField\" : \"{{fieldParam}}\"}" + " }" + "}" + "}"));
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "2").setSource(
+ "{" + "\"template\":{" + " \"query\":{" + " \"match\":{"
+ + " \"theField\" : \"{{fieldParam}}\"}" + " }" + "}" + "}"));
+
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "3").setSource(
+ "{" + "\"template\":{" + " \"match\":{" + " \"theField\" : \"{{fieldParam}}\"}" + " }"
+ + "}"));
+
+ indexRandom(true, builders);
+
+ builders.clear();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("fieldParam", "foo");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("/mustache/1a")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertHitCount(searchResponse, 4);
+
+ try {
+ client().prepareSearch("test").setTypes("type").setTemplateName("/template_index/mustache/1000")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ fail("shouldn't get here");
+ } catch (SearchPhaseExecutionException spee) {
+ //all good
+ }
+
+ try {
+ searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("/myindex/mustache/1")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertFailures(searchResponse);
+ } catch (SearchPhaseExecutionException spee) {
+ //all good
+ }
+
+ searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("1a")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertHitCount(searchResponse, 4);
+
+ templateParams.put("fieldParam", "bar");
+ searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("/mustache/2")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertHitCount(searchResponse, 1);
+
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("fieldParam", "bar");
+
+ TemplateQueryBuilder builder = new TemplateQueryBuilder("3", ScriptService.ScriptType.INDEXED, vars);
+ SearchResponse sr = client().prepareSearch().setQuery(builder).execute().actionGet();
+ assertHitCount(sr, 1);
+
+ String query = "{\"template\": {\"id\": \"3\",\"params\" : {\"fieldParam\" : \"foo\"}}}";
+ sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 4);
+
+ query = "{\"template\": {\"id\": \"/mustache/3\",\"params\" : {\"fieldParam\" : \"foo\"}}}";
+ sr = client().prepareSearch().setQuery(query).get();
+ assertHitCount(sr, 4);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testThatParametersCanBeSetOldScriptAPI() throws Exception {
+ index("test", "type", "1", jsonBuilder().startObject().field("theField", "foo").endObject());
+ index("test", "type", "2", jsonBuilder().startObject().field("theField", "foo 2").endObject());
+ index("test", "type", "3", jsonBuilder().startObject().field("theField", "foo 3").endObject());
+ index("test", "type", "4", jsonBuilder().startObject().field("theField", "foo 4").endObject());
+ index("test", "type", "5", jsonBuilder().startObject().field("otherField", "foo").endObject());
+ refresh();
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("mySize", "2");
+ templateParams.put("myField", "theField");
+ templateParams.put("myValue", "foo");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("full-query-template")
+ .setTemplateParams(templateParams).setTemplateType(ScriptService.ScriptType.FILE).get();
+ assertHitCount(searchResponse, 4);
+ // size kicks in here...
+ assertThat(searchResponse.getHits().getHits().length, is(2));
+
+ templateParams.put("myField", "otherField");
+ searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("full-query-template")
+ .setTemplateParams(templateParams).setTemplateType(ScriptService.ScriptType.FILE).get();
+ assertHitCount(searchResponse, 1);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testIndexedTemplateClientOldScriptAPI() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+
+ PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript(
+ MustacheScriptEngineService.NAME,
+ "testTemplate",
+ "{" + "\"template\":{" + " \"query\":{" + " \"match\":{"
+ + " \"theField\" : \"{{fieldParam}}\"}" + " }" + "}" + "}").get();
+
+ assertTrue(scriptResponse.isCreated());
+
+ scriptResponse = client().preparePutIndexedScript(
+ MustacheScriptEngineService.NAME,
+ "testTemplate",
+ "{" + "\"template\":{" + " \"query\":{" + " \"match\":{"
+ + " \"theField\" : \"{{fieldParam}}\"}" + " }" + "}" + "}").get();
+
+ assertEquals(scriptResponse.getVersion(), 2);
+
+ GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
+ assertTrue(getResponse.isExists());
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("fieldParam", "foo");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("testTemplate")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertHitCount(searchResponse, 4);
+
+ DeleteIndexedScriptResponse deleteResponse = client().prepareDeleteIndexedScript(MustacheScriptEngineService.NAME, "testTemplate")
+ .get();
+ assertTrue(deleteResponse.isFound());
+
+ getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "testTemplate").get();
+ assertFalse(getResponse.isExists());
+
+ client().prepareSearch("test").setTypes("type").setTemplateName("/template_index/mustache/1000")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Relates to #10397
+ @Test
+ public void testIndexedTemplateOverwriteOldScriptAPI() throws Exception {
+ createIndex("testindex");
+ ensureGreen("testindex");
+
+ index("testindex", "test", "1", jsonBuilder().startObject().field("searchtext", "dev1").endObject());
+ refresh();
+
+ int iterations = randomIntBetween(2, 11);
+ for (int i = 1; i < iterations; i++) {
+ PutIndexedScriptResponse scriptResponse = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "git01",
+ "{\"query\": {\"match\": {\"searchtext\": {\"query\": \"{{P_Keyword1}}\",\"type\": \"ooophrase_prefix\"}}}}").get();
+ assertEquals(i * 2 - 1, scriptResponse.getVersion());
+
+ GetIndexedScriptResponse getResponse = client().prepareGetIndexedScript(MustacheScriptEngineService.NAME, "git01").get();
+ assertTrue(getResponse.isExists());
+
+ Map<String, Object> templateParams = Maps.newHashMap();
+ templateParams.put("P_Keyword1", "dev");
+
+ try {
+ client().prepareSearch("testindex").setTypes("test").setTemplateName("git01")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ fail("Broken test template is parsing w/o error.");
+ } catch (SearchPhaseExecutionException e) {
+ // the above is expected to fail
+ }
+
+ PutIndexedScriptRequestBuilder builder = client().preparePutIndexedScript(MustacheScriptEngineService.NAME, "git01",
+ "{\"query\": {\"match\": {\"searchtext\": {\"query\": \"{{P_Keyword1}}\",\"type\": \"phrase_prefix\"}}}}").setOpType(
+ OpType.INDEX);
+ scriptResponse = builder.get();
+ assertEquals(i * 2, scriptResponse.getVersion());
+ SearchResponse searchResponse = client().prepareSearch("testindex").setTypes("test").setTemplateName("git01")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(templateParams).get();
+ assertHitCount(searchResponse, 1);
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testIndexedTemplateWithArrayOldScriptAPI() throws Exception {
+ createIndex(ScriptService.SCRIPT_INDEX);
+ ensureGreen(ScriptService.SCRIPT_INDEX);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ String multiQuery = "{\"query\":{\"terms\":{\"theField\":[\"{{#fieldParam}}\",\"{{.}}\",\"{{/fieldParam}}\"]}}}";
+
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, MustacheScriptEngineService.NAME, "4").setSource(
+ jsonBuilder().startObject().field("template", multiQuery).endObject()));
+
+ indexRandom(true, builders);
+
+ builders.clear();
+
+ builders.add(client().prepareIndex("test", "type", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "type", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "type", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "type", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "type", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ Map<String, Object> arrayTemplateParams = new HashMap<>();
+ String[] fieldParams = { "foo", "bar" };
+ arrayTemplateParams.put("fieldParam", fieldParams);
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setTemplateName("/mustache/4")
+ .setTemplateType(ScriptService.ScriptType.INDEXED).setTemplateParams(arrayTemplateParams).get();
+ assertHitCount(searchResponse, 5);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java b/core/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java
new file mode 100644
index 0000000000..951b31e59a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.elasticsearch.index.Index;
+
+/**
+ * Class used to avoid dragging QueryContext into unit testing framework for
+ * basic exception handling
+ */
+public class TestQueryParsingException extends QueryParsingException {
+
+ public TestQueryParsingException(Index index, int line, int col, String msg, Throwable cause) {
+ super(index, line, col, msg, cause);
+ }
+
+ public TestQueryParsingException(Index index, String msg, Throwable cause) {
+ super(index, UNKNOWN_POSITION, UNKNOWN_POSITION, msg, cause);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/and-filter-cache.json b/core/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
new file mode 100644
index 0000000000..41cc482e0f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/and-filter-cache.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/and-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/and-filter-named.json
new file mode 100644
index 0000000000..605a1936e8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/and-filter-named.json
@@ -0,0 +1,26 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/and-filter.json b/core/src/test/java/org/elasticsearch/index/query/and-filter.json
new file mode 100644
index 0000000000..752add1a28
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/and-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/and-filter2.json b/core/src/test/java/org/elasticsearch/index/query/and-filter2.json
new file mode 100644
index 0000000000..580b8e95b9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/and-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "and":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/bool-filter.json b/core/src/test/java/org/elasticsearch/index/query/bool-filter.json
new file mode 100644
index 0000000000..484e517fd4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/bool-filter.json
@@ -0,0 +1,35 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ bool:{
+ must:[
+ {
+ term:{
+ "name.first":"shay1"
+ }
+ },
+ {
+ term:{
+ "name.first":"shay4"
+ }
+ }
+ ],
+ must_not:{
+ term:{
+ "name.first":"shay2"
+ }
+ },
+ should:{
+ term:{
+ "name.first":"shay3"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json b/core/src/test/java/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json
new file mode 100644
index 0000000000..5864359648
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json
@@ -0,0 +1,17 @@
+{
+ "filtered": {
+ "filter": {
+ "nested": {
+ "path": "nested",
+ "query": {
+ "bool": {
+ "must": [],
+ "must_not": [],
+ "should": []
+ }
+ }
+ },
+ "query": []
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/bool.json b/core/src/test/java/org/elasticsearch/index/query/bool.json
new file mode 100644
index 0000000000..1619fcf48c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/bool.json
@@ -0,0 +1,30 @@
+{
+ bool:{
+ must:[
+ {
+ query_string:{
+ default_field:"content",
+ query:"test1"
+ }
+ },
+ {
+ query_string:{
+ default_field:"content",
+ query:"test4"
+ }
+ }
+ ],
+ must_not:{
+ query_string:{
+ default_field:"content",
+ query:"test2"
+ }
+ },
+ should:{
+ query_string:{
+ default_field:"content",
+ query:"test3"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/boosting-query.json b/core/src/test/java/org/elasticsearch/index/query/boosting-query.json
new file mode 100644
index 0000000000..87b6e6d158
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/boosting-query.json
@@ -0,0 +1,15 @@
+{
+ "boosting":{
+ "positive":{
+ "term":{
+ "field1":"value1"
+ }
+ },
+ "negative":{
+ "term":{
+ "field2":"value2"
+ }
+ },
+ "negative_boost":0.2
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/child-mapping.json b/core/src/test/java/org/elasticsearch/index/query/child-mapping.json
new file mode 100644
index 0000000000..6f3b6e5819
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/child-mapping.json
@@ -0,0 +1,12 @@
+{
+ "child":{
+ "properties":{
+ "field":{
+ "type":"string"
+ }
+ },
+ "_parent" : {
+ "type" : "person"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
new file mode 100644
index 0000000000..b2728dac09
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query1.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : {
+ "low_freq" : 2
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
new file mode 100644
index 0000000000..aeb281bb75
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query2.json
@@ -0,0 +1,11 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "minimum_should_match" : {
+ "high_freq" : "50%",
+ "low_freq" : "5<20%"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
new file mode 100644
index 0000000000..f276209ffc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/commonTerms-query3.json
@@ -0,0 +1,9 @@
+{
+ "common" : {
+ "dogs" : {
+ "query" : "buck mia tom",
+ "cutoff_frequency" : 1,
+ "minimum_should_match" : 2
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/constantScore-query.json b/core/src/test/java/org/elasticsearch/index/query/constantScore-query.json
new file mode 100644
index 0000000000..bf59bc5b47
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/constantScore-query.json
@@ -0,0 +1,9 @@
+{
+ constant_score:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/data.json b/core/src/test/java/org/elasticsearch/index/query/data.json
new file mode 100644
index 0000000000..79f139f84b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/data.json
@@ -0,0 +1,43 @@
+{
+ name:{
+ first:"shay",
+ last:"banon"
+ },
+ address:{
+ first:{
+ location:"first location"
+ },
+ last:{
+ location:"last location"
+ }
+ },
+ age:32,
+ birthDate:"1977-11-15",
+ nerd:true,
+ dogs:["buck", "mia"],
+ complex:[
+ {
+ value1:"value1"
+ },
+ {
+ value2:"value2"
+ }
+ ],
+ complex2:[
+ [
+ {
+ value1:"value1"
+ }
+ ],
+ [
+ {
+ value2:"value2"
+ }
+ ]
+ ],
+ nullValue:null,
+ "location":{
+ "lat":1.1,
+ "lon":1.2
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format.json b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format.json
new file mode 100644
index 0000000000..94596788a2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format.json
@@ -0,0 +1,13 @@
+{
+ "constant_score": {
+ "filter": {
+ "range" : {
+ "born" : {
+ "gte": "01/01/2012",
+ "lt": "2030",
+ "format": "dd/MM/yyyy||yyyy"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format_invalid.json b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format_invalid.json
new file mode 100644
index 0000000000..7b5c272442
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_format_invalid.json
@@ -0,0 +1,13 @@
+{
+ "constant_score": {
+ "filter": {
+ "range" : {
+ "born" : {
+ "gte": "01/01/2012",
+ "lt": "2030",
+ "format": "yyyy"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone.json b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone.json
new file mode 100644
index 0000000000..158550afbe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone.json
@@ -0,0 +1,13 @@
+{
+ "constant_score": {
+ "filter": {
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now",
+ "time_zone": "+01:00"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone_numeric_field.json b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone_numeric_field.json
new file mode 100644
index 0000000000..6e0719475b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_filter_timezone_numeric_field.json
@@ -0,0 +1,13 @@
+{
+ "constant_score": {
+ "filter": {
+ "range" : {
+ "age" : {
+ "gte": "0",
+ "lte": "100",
+ "time_zone": "-01:00"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_exclusive.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_exclusive.json
new file mode 100644
index 0000000000..30fe50a129
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_exclusive.json
@@ -0,0 +1,8 @@
+{
+ "range" : {
+ "born" : {
+ "gt": "2014-11-05||/M",
+ "lt": "2014-12-08||/d"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_inclusive.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_inclusive.json
new file mode 100644
index 0000000000..3f3aab0f6c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_boundaries_inclusive.json
@@ -0,0 +1,8 @@
+{
+ "range" : {
+ "born" : {
+ "gte": "2014-11-05||/M",
+ "lte": "2014-12-08||/d"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_format.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_format.json
new file mode 100644
index 0000000000..f679dc9696
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_format.json
@@ -0,0 +1,9 @@
+{
+ "range" : {
+ "born" : {
+ "gte": "01/01/2012",
+ "lt": "2030",
+ "format": "dd/MM/yyyy||yyyy"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_format_invalid.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_format_invalid.json
new file mode 100644
index 0000000000..307e9775e5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_format_invalid.json
@@ -0,0 +1,9 @@
+{
+ "range" : {
+ "born" : {
+ "gte": "01/01/2012",
+ "lt": "2030",
+ "format": "yyyy"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone.json
new file mode 100644
index 0000000000..0cabb1511a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone.json
@@ -0,0 +1,9 @@
+{
+ "range" : {
+ "born" : {
+ "gte": "2012-01-01",
+ "lte": "now",
+ "time_zone": "+01:00"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone_numeric_field.json b/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone_numeric_field.json
new file mode 100644
index 0000000000..b7526a2c29
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/date_range_query_timezone_numeric_field.json
@@ -0,0 +1,9 @@
+{
+ "range" : {
+ "age" : {
+ "gte": "0",
+ "lte": "100",
+ "time_zone": "-01:00"
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/disMax.json b/core/src/test/java/org/elasticsearch/index/query/disMax.json
new file mode 100644
index 0000000000..99da2df025
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/disMax.json
@@ -0,0 +1,18 @@
+{
+ dis_max:{
+ tie_breaker:0.7,
+ boost:1.2,
+ queries:[
+ {
+ term:{
+ "name.first":"first"
+ }
+ },
+ {
+ term:{
+ "name.last":"last"
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/disMax2.json b/core/src/test/java/org/elasticsearch/index/query/disMax2.json
new file mode 100644
index 0000000000..ea92d6498f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/disMax2.json
@@ -0,0 +1,14 @@
+{
+ "dis_max":{
+ "queries":[
+ {
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json b/core/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
new file mode 100644
index 0000000000..07f906c87a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/faulty-function-score-query.json
@@ -0,0 +1,15 @@
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": {
+ {
+ "boost_factor" : 3
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/field3.json b/core/src/test/java/org/elasticsearch/index/query/field3.json
new file mode 100644
index 0000000000..61e349f7b7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/field3.json
@@ -0,0 +1,9 @@
+{
+ field:{
+ age:{
+ query:34,
+ boost:2.0,
+ enable_position_increments:false
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/filtered-query.json b/core/src/test/java/org/elasticsearch/index/query/filtered-query.json
new file mode 100644
index 0000000000..8eea99a135
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/filtered-query.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/filtered-query2.json b/core/src/test/java/org/elasticsearch/index/query/filtered-query2.json
new file mode 100644
index 0000000000..b23faf4e74
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/filtered-query2.json
@@ -0,0 +1,14 @@
+{
+ filtered:{
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/filtered-query3.json b/core/src/test/java/org/elasticsearch/index/query/filtered-query3.json
new file mode 100644
index 0000000000..4a9db4909a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/filtered-query3.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ },
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/filtered-query4.json b/core/src/test/java/org/elasticsearch/index/query/filtered-query4.json
new file mode 100644
index 0000000000..8c10013f1e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/filtered-query4.json
@@ -0,0 +1,17 @@
+{
+ filtered:{
+ query:{
+ wildcard:{
+ "name.first":{
+ wildcard:"sh*",
+ boost:1.1
+ }
+ }
+ },
+ filter:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/fquery-filter.json b/core/src/test/java/org/elasticsearch/index/query/fquery-filter.json
new file mode 100644
index 0000000000..6015334774
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/fquery-filter.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "fquery":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/fquery-with-empty-bool-query.json b/core/src/test/java/org/elasticsearch/index/query/fquery-with-empty-bool-query.json
new file mode 100644
index 0000000000..6a6a48c9ed
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/fquery-with-empty-bool-query.json
@@ -0,0 +1,18 @@
+{
+ "fquery": {
+ "query": {
+ "filtered": {
+ "query": {
+ "term": {
+ "text": "apache"
+ }
+ },
+ "filter": {
+ "term": {
+ "text": "apache"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json b/core/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
new file mode 100644
index 0000000000..e78c54973a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/function-filter-score-query.json
@@ -0,0 +1,30 @@
+
+
+{
+ "function_score":{
+ "query":{
+ "term":{
+ "name.last":"banon"
+ }
+ },
+ "functions": [
+ {
+ "boost_factor": 3,
+ "filter": {
+ term:{
+ "name.last":"banon"
+ }
+ }
+ },
+ {
+ "boost_factor": 3
+ },
+ {
+ "boost_factor": 3
+ }
+ ],
+ "boost" : 3,
+ "score_mode" : "avg",
+ "max_boost" : 10
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/function-score-query-causing-NPE.json b/core/src/test/java/org/elasticsearch/index/query/function-score-query-causing-NPE.json
new file mode 100644
index 0000000000..283682bd90
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/function-score-query-causing-NPE.json
@@ -0,0 +1,9 @@
+{
+ "function_score": {
+ "script_score": {
+ "script": "_index['text']['foo'].tf()"
+ },
+ "weight": 2
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
new file mode 100644
index 0000000000..3e3d30ffdc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields.json
@@ -0,0 +1,10 @@
+{
+ "fuzzy":{
+ "name.first":{
+ "value":"sh",
+ "fuzziness":0.1,
+ "prefix_length":1,
+ "boost":2.0
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
new file mode 100644
index 0000000000..095ecc6341
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/fuzzy-with-fields2.json
@@ -0,0 +1,9 @@
+{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/fuzzy.json b/core/src/test/java/org/elasticsearch/index/query/fuzzy.json
new file mode 100644
index 0000000000..27d8deebe5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/fuzzy.json
@@ -0,0 +1,5 @@
+{
+ "fuzzy":{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/geoShape-filter.json b/core/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
new file mode 100644
index 0000000000..a4392ae346
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geoShape-filter.json
@@ -0,0 +1,21 @@
+{
+ "filtered" : {
+ "query" : {
+ "match_all" : {}
+ },
+ "filter" : {
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/geoShape-query.json b/core/src/test/java/org/elasticsearch/index/query/geoShape-query.json
new file mode 100644
index 0000000000..e0af8278a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geoShape-query.json
@@ -0,0 +1,14 @@
+{
+ "geo_shape" : {
+ "country" : {
+ "shape" : {
+ "type" : "Envelope",
+ "coordinates" : [
+ [-45, 45],
+ [45, -45]
+ ]
+ },
+ "relation" : "intersects"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
new file mode 100644
index 0000000000..6db6d5aed0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox-named.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
new file mode 100644
index 0000000000..8d04915a8a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox1.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":[-70, 40],
+ "bottom_right":[-80, 30]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
new file mode 100644
index 0000000000..6321654442
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox2.json
@@ -0,0 +1,21 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":{
+ "lat":40,
+ "lon":-70
+ },
+ "bottom_right":{
+ "lat":30,
+ "lon":-80
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
new file mode 100644
index 0000000000..08999604c7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox3.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"40, -70",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
new file mode 100644
index 0000000000..170a02d355
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox4.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_left":"drn5x1g8cu2y",
+ "bottom_right":"30, -80"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
new file mode 100644
index 0000000000..347a463f0a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox5.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "top_right":"40, -80",
+ "bottom_left":"30, -70"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
new file mode 100644
index 0000000000..96ccbd0268
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_boundingbox6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_bounding_box":{
+ "location":{
+ "right": -80,
+ "top": 40,
+ "left": -70,
+ "bottom": 30
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance-named.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
new file mode 100644
index 0000000000..a3e0be9549
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance-named.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance1.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance1.json
new file mode 100644
index 0000000000..cf3b0ab398
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance1.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance10.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance10.json
new file mode 100644
index 0000000000..067b39e341
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance10.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance11.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance11.json
new file mode 100644
index 0000000000..008d5b5b08
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance11.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance12.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance12.json
new file mode 100644
index 0000000000..8769223d90
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance12.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance2.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance2.json
new file mode 100644
index 0000000000..32838672ca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance2.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":[-70, 40]
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance3.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance3.json
new file mode 100644
index 0000000000..193f234fd0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance3.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"40, -70"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance4.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance4.json
new file mode 100644
index 0000000000..56a74095ff
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance4.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12mi",
+ "location":"drn5x1g8cu2y"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance5.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance5.json
new file mode 100644
index 0000000000..bea9a3df8a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance5.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":12,
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance6.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance6.json
new file mode 100644
index 0000000000..4afa128b02
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance6.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"12",
+ "unit":"mi",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance7.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance7.json
new file mode 100644
index 0000000000..7fcf8bd074
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance7.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance8.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance8.json
new file mode 100644
index 0000000000..3bafd163ad
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance8.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":19.312128,
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_distance9.json b/core/src/test/java/org/elasticsearch/index/query/geo_distance9.json
new file mode 100644
index 0000000000..e6c8f12060
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_distance9.json
@@ -0,0 +1,17 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_distance":{
+ "distance":"19.312128",
+ "unit":"km",
+ "location":{
+ "lat":40,
+ "lon":-70
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
new file mode 100644
index 0000000000..91256c14d0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon-named.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ },
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon1.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
new file mode 100644
index 0000000000..99ac329b9e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon1.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon2.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
new file mode 100644
index 0000000000..588b22f882
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon2.json
@@ -0,0 +1,27 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ {
+ "lat":40,
+ "lon":-70
+ },
+ {
+ "lat":30,
+ "lon":-80
+ },
+ {
+ "lat":20,
+ "lon":-90
+ }
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon3.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
new file mode 100644
index 0000000000..d6d905b865
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon3.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "40, -70",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon4.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
new file mode 100644
index 0000000000..ae9608d21c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon4.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "match_all":{}
+ },
+ "filter":{
+ "geo_polygon":{
+ "location":{
+ "points":[
+ "drn5x1g8cu2y",
+ "30, -80",
+ "20, -90"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_1.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_1.json
new file mode 100644
index 0000000000..e079d64eb8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_1.json
@@ -0,0 +1,20 @@
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_polygon": {
+ "location": {
+ "points": {
+ "points": [
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_2.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_2.json
new file mode 100644
index 0000000000..0955c26072
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_2.json
@@ -0,0 +1,22 @@
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_polygon": {
+ "location": {
+ "points": [
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ],
+ "something_else": {
+
+ }
+
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_3.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_3.json
new file mode 100644
index 0000000000..0ac2a7bbb3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_3.json
@@ -0,0 +1,12 @@
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_polygon": {
+ "location": ["WRONG"]
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_4.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_4.json
new file mode 100644
index 0000000000..51f6ad0037
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_4.json
@@ -0,0 +1,19 @@
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_polygon": {
+ "location": {
+ "points": [
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ },
+ "bla": true
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_5.json b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_5.json
new file mode 100644
index 0000000000..6f058f551c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/geo_polygon_exception_5.json
@@ -0,0 +1,19 @@
+{
+ "filtered": {
+ "query": {
+ "match_all": {}
+ },
+ "filter": {
+ "geo_polygon": {
+ "location": {
+ "points": [
+ [-70, 40],
+ [-80, 30],
+ [-90, 20]
+ ]
+ },
+ "bla": ["array"]
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java b/core/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
new file mode 100644
index 0000000000..a26e233caf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/guice/IndexQueryParserModuleTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class IndexQueryParserModuleTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testCustomInjection() {
+ Settings settings = settingsBuilder()
+ .put("index.queryparser.query.my.type", MyJsonQueryParser.class)
+ .put("index.queryparser.query.my.param1", "value1")
+ .put("index.cache.filter.type", "none")
+ .put("name", "IndexQueryParserModuleTests")
+ .build();
+
+ IndexQueryParserService indexQueryParserService = createIndex("test", settings).queryParserService();
+
+ MyJsonQueryParser myJsonQueryParser = (MyJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+ assertThat(myJsonQueryParser.settings().get("param1"), equalTo("value1"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java b/core/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
new file mode 100644
index 0000000000..582ef1313e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/guice/MyJsonQueryParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.guice;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class MyJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public MyJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json b/core/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
new file mode 100644
index 0000000000..4b055cb246
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/has-child-in-and-filter-cached.json
@@ -0,0 +1,19 @@
+{
+ "filtered":{
+ "filter":{
+ "and" : {
+ "filters" : [
+ {
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ }
+ }
+ }
+ ],
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/has-child.json b/core/src/test/java/org/elasticsearch/index/query/has-child.json
new file mode 100644
index 0000000000..c87ac17362
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/has-child.json
@@ -0,0 +1,13 @@
+{
+ "filtered":{
+ "filter":{
+ "has_child" : {
+ "type" : "child",
+ "query" : {
+ "match_all" : {}
+ },
+ "_cache" : true
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/mapping.json b/core/src/test/java/org/elasticsearch/index/query/mapping.json
new file mode 100644
index 0000000000..3939249207
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/mapping.json
@@ -0,0 +1,15 @@
+{
+ "person":{
+ "properties":{
+ "location":{
+ "type":"geo_point"
+ },
+ "country" : {
+ "type" : "geo_shape"
+ },
+ "born":{
+ "type":"date"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json b/core/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
new file mode 100644
index 0000000000..47d122715a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/match-query-bad-type.json
@@ -0,0 +1,8 @@
+{
+ "match" : {
+ "message" : {
+ "query" : "this is a test",
+ "type" : "doesNotExist"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/match-with-fuzzy-transpositions.json b/core/src/test/java/org/elasticsearch/index/query/match-with-fuzzy-transpositions.json
new file mode 100644
index 0000000000..5f4fe8bcac
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/match-with-fuzzy-transpositions.json
@@ -0,0 +1 @@
+{ "match": { "body": { "query": "fuzzy", "fuzziness": 1, "fuzzy_transpositions": true }} }
diff --git a/core/src/test/java/org/elasticsearch/index/query/match-without-fuzzy-transpositions.json b/core/src/test/java/org/elasticsearch/index/query/match-without-fuzzy-transpositions.json
new file mode 100644
index 0000000000..06c77aafb3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/match-without-fuzzy-transpositions.json
@@ -0,0 +1 @@
+{ "match": { "body": { "query": "fuzzy", "fuzziness": 1, "fuzzy_transpositions": false }} }
diff --git a/core/src/test/java/org/elasticsearch/index/query/matchAll.json b/core/src/test/java/org/elasticsearch/index/query/matchAll.json
new file mode 100644
index 0000000000..3325646950
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/matchAll.json
@@ -0,0 +1,5 @@
+{
+ match_all:{
+ boost:1.2
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/match_all_empty1.json b/core/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
new file mode 100644
index 0000000000..6dd141fe86
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/match_all_empty1.json
@@ -0,0 +1,3 @@
+{
+ "match_all": {}
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/match_all_empty2.json b/core/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
new file mode 100644
index 0000000000..a0549df713
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/match_all_empty2.json
@@ -0,0 +1,3 @@
+{
+ "match_all": []
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/mlt-items.json b/core/src/test/java/org/elasticsearch/index/query/mlt-items.json
new file mode 100644
index 0000000000..d7839ac707
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/mlt-items.json
@@ -0,0 +1,22 @@
+{
+ "more_like_this" : {
+ "fields" : ["name.first", "name.last"],
+ "like_text": "Apache Lucene",
+ "like" : [
+ {
+ "_index" : "test",
+ "_type" : "person",
+ "_id" : "1"
+ },
+ {
+ "_index" : "test",
+ "_type" : "person",
+ "_id" : "2"
+ }
+ ],
+ "ids" : ["3", "4"],
+ "include" : true,
+ "min_term_freq" : 1,
+ "max_query_terms" : 12
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/mlt.json b/core/src/test/java/org/elasticsearch/index/query/mlt.json
new file mode 100644
index 0000000000..d3d98bee5a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/mlt.json
@@ -0,0 +1,8 @@
+{
+ "more_like_this" : {
+ "fields" : ["name.first", "name.last"],
+ "like_text" : "something",
+ "min_term_freq" : 1,
+ "max_query_terms" : 12
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
new file mode 100644
index 0000000000..9c3b751082
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-bad-type.json
@@ -0,0 +1,7 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ],
+ "type":"doesNotExist"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
new file mode 100644
index 0000000000..d29211d69d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-fields-as-string.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": "myField"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
new file mode 100644
index 0000000000..904ba0e6cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/multiMatch-query-simple.json
@@ -0,0 +1,6 @@
+{
+ "multi_match": {
+ "query": "foo bar",
+ "fields": [ "myField", "otherField" ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/not-filter.json b/core/src/test/java/org/elasticsearch/index/query/not-filter.json
new file mode 100644
index 0000000000..42c48d806e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/not-filter.json
@@ -0,0 +1,18 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "filter":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/not-filter2.json b/core/src/test/java/org/elasticsearch/index/query/not-filter2.json
new file mode 100644
index 0000000000..6defaff7cd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/not-filter2.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/not-filter3.json b/core/src/test/java/org/elasticsearch/index/query/not-filter3.json
new file mode 100644
index 0000000000..ab613357cb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/not-filter3.json
@@ -0,0 +1,16 @@
+{
+ "filtered":{
+ "filter":{
+ "not":{
+ "term":{
+ "name.first":"shay1"
+ }
+ }
+ },
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/or-filter.json b/core/src/test/java/org/elasticsearch/index/query/or-filter.json
new file mode 100644
index 0000000000..b1e73face7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/or-filter.json
@@ -0,0 +1,25 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":{
+ "filters":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/or-filter2.json b/core/src/test/java/org/elasticsearch/index/query/or-filter2.json
new file mode 100644
index 0000000000..2c15e9a082
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/or-filter2.json
@@ -0,0 +1,23 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "or":[
+ {
+ "term":{
+ "name.first":"shay1"
+ }
+ },
+ {
+ "term":{
+ "name.first":"shay4"
+ }
+ }
+ ]
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java b/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
new file mode 100644
index 0000000000..4e4420ca4a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPlugin2Tests.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class IndexQueryParserPlugin2Tests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() throws InterruptedException {
+ Settings settings = Settings.builder()
+ .put("name", "testCustomInjection")
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put("path.home", createTempDir()).build();
+
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addQueryParser("my", PluginJsonQueryParser.class);
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new EnvironmentModule(new Environment(settings)),
+ new SettingsModule(settings),
+ new ThreadPoolModule(new ThreadPool(settings)),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ terminate(injector.getInstance(ThreadPool.class));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java b/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
new file mode 100644
index 0000000000..57a0d99741
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/plugin/IndexQueryParserPluginTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.index.cache.IndexCacheModule;
+import org.elasticsearch.index.query.IndexQueryParserModule;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.index.settings.IndexSettingsModule;
+import org.elasticsearch.index.similarity.SimilarityModule;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.indices.query.IndicesQueriesModule;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class IndexQueryParserPluginTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomInjection() throws InterruptedException {
+ Settings settings = Settings.builder()
+ .put("name", "testCustomInjection")
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put("path.home", createTempDir()).build();
+
+ IndexQueryParserModule queryParserModule = new IndexQueryParserModule(settings);
+ queryParserModule.addProcessor(new IndexQueryParserModule.QueryParsersProcessor() {
+ @Override
+ public void processXContentQueryParsers(XContentQueryParsersBindings bindings) {
+ bindings.processXContentQueryParser("my", PluginJsonQueryParser.class);
+ }
+ });
+
+ Index index = new Index("test");
+ Injector injector = new ModulesBuilder().add(
+ new EnvironmentModule(new Environment(settings)),
+ new SettingsModule(settings),
+ new ThreadPoolModule(new ThreadPool(settings)),
+ new IndicesQueriesModule(),
+ new ScriptModule(settings),
+ new IndexSettingsModule(index, settings),
+ new IndexCacheModule(settings),
+ new AnalysisModule(settings),
+ new SimilarityModule(settings),
+ queryParserModule,
+ new IndexNameModule(index),
+ new FunctionScoreModule(),
+ new AbstractModule() {
+ @Override
+ protected void configure() {
+ bind(ClusterService.class).toProvider(Providers.of((ClusterService) null));
+ bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
+ }
+ }
+ ).createInjector();
+
+ IndexQueryParserService indexQueryParserService = injector.getInstance(IndexQueryParserService.class);
+
+ PluginJsonQueryParser myJsonQueryParser = (PluginJsonQueryParser) indexQueryParserService.queryParser("my");
+
+ assertThat(myJsonQueryParser.names()[0], equalTo("my"));
+
+ terminate(injector.getInstance(ThreadPool.class));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java b/core/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
new file mode 100644
index 0000000000..d475cdfefa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/plugin/PluginJsonQueryParser.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query.plugin;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.assistedinject.Assisted;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.AbstractIndexComponent;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.settings.IndexSettings;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class PluginJsonQueryParser extends AbstractIndexComponent implements QueryParser {
+
+ private final String name;
+
+ private final Settings settings;
+
+ @Inject
+ public PluginJsonQueryParser(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
+ super(index, indexSettings);
+ this.name = name;
+ this.settings = settings;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[]{this.name};
+ }
+
+ @Override
+ public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
+ return null;
+ }
+
+ public Settings settings() {
+ return settings;
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/prefix-boost.json b/core/src/test/java/org/elasticsearch/index/query/prefix-boost.json
new file mode 100644
index 0000000000..4da623ac49
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/prefix-boost.json
@@ -0,0 +1,8 @@
+{
+ "prefix":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
new file mode 100644
index 0000000000..de0170118e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/prefix-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh",
+ "_name":"test"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/prefix-filter.json b/core/src/test/java/org/elasticsearch/index/query/prefix-filter.json
new file mode 100644
index 0000000000..1f2e42e8e9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/prefix-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "prefix":{
+ "name.first":"sh"
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json b/core/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
new file mode 100644
index 0000000000..83e56cb4d5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/prefix-with-boost.json
@@ -0,0 +1,8 @@
+{
+ prefix:{
+ "name.first":{
+ prefix:"sh",
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/prefix.json b/core/src/test/java/org/elasticsearch/index/query/prefix.json
new file mode 100644
index 0000000000..49f5261d1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/prefix.json
@@ -0,0 +1,5 @@
+{
+ prefix:{
+ "name.first":"sh"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-fields-match.json b/core/src/test/java/org/elasticsearch/index/query/query-fields-match.json
new file mode 100644
index 0000000000..c15cdf3c6d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-fields-match.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["name.*"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-fields1.json b/core/src/test/java/org/elasticsearch/index/query/query-fields1.json
new file mode 100644
index 0000000000..84abcaafc0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-fields1.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:false,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-fields2.json b/core/src/test/java/org/elasticsearch/index/query/query-fields2.json
new file mode 100644
index 0000000000..ab39c8773d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-fields2.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-fields3.json b/core/src/test/java/org/elasticsearch/index/query/query-fields3.json
new file mode 100644
index 0000000000..8114c1b3b8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-fields3.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ fields:["content^2.2", "name"],
+ use_dis_max:true,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-filter.json b/core/src/test/java/org/elasticsearch/index/query/query-filter.json
new file mode 100644
index 0000000000..dee136d24c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-filter.json
@@ -0,0 +1,16 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ query:{
+ term:{
+ "name.last":"banon"
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-regexp-max-determinized-states.json b/core/src/test/java/org/elasticsearch/index/query/query-regexp-max-determinized-states.json
new file mode 100644
index 0000000000..023b90ec6f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-regexp-max-determinized-states.json
@@ -0,0 +1,7 @@
+{
+ query_string: {
+ default_field: "content",
+ query:"/foo*bar/",
+ max_determinized_states: 5000
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-regexp-too-many-determinized-states.json b/core/src/test/java/org/elasticsearch/index/query/query-regexp-too-many-determinized-states.json
new file mode 100644
index 0000000000..0d2d41a7e3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-regexp-too-many-determinized-states.json
@@ -0,0 +1,6 @@
+{
+ query_string: {
+ default_field: "content",
+ query: "/[ac]*a[ac]{50,200}/"
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-timezone-incorrect.json b/core/src/test/java/org/elasticsearch/index/query/query-timezone-incorrect.json
new file mode 100644
index 0000000000..3bffb0f4a4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-timezone-incorrect.json
@@ -0,0 +1,6 @@
+{
+ "query_string":{
+ "time_zone":"This timezone does not exist",
+ "query":"date:[2012 TO 2014]"
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/query-timezone.json b/core/src/test/java/org/elasticsearch/index/query/query-timezone.json
new file mode 100644
index 0000000000..e2fcc0e226
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query-timezone.json
@@ -0,0 +1,6 @@
+{
+ "query_string":{
+ "time_zone":"Europe/Paris",
+ "query":"date:[2012 TO 2014]"
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/query.json b/core/src/test/java/org/elasticsearch/index/query/query.json
new file mode 100644
index 0000000000..f07a0d8a59
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query.json
@@ -0,0 +1,7 @@
+{
+ query_string:{
+ default_field:"content",
+ phrase_slop:1,
+ query:"test"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/query2.json b/core/src/test/java/org/elasticsearch/index/query/query2.json
new file mode 100644
index 0000000000..410e05cd80
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/query2.json
@@ -0,0 +1,6 @@
+{
+ query_string:{
+ default_field:"age",
+ query:"12~0.2"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/range-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/range-filter-named.json
new file mode 100644
index 0000000000..1b50177a52
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/range-filter-named.json
@@ -0,0 +1,20 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "range":{
+ "age":{
+ "from":"23",
+ "to":"54",
+ "include_lower":true,
+ "include_upper":false
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/range-filter.json b/core/src/test/java/org/elasticsearch/index/query/range-filter.json
new file mode 100644
index 0000000000..3842e0be34
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/range-filter.json
@@ -0,0 +1,19 @@
+{
+ filtered:{
+ query:{
+ term:{
+ "name.first":"shay"
+ }
+ },
+ filter:{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/range.json b/core/src/test/java/org/elasticsearch/index/query/range.json
new file mode 100644
index 0000000000..cc2363fc22
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/range.json
@@ -0,0 +1,10 @@
+{
+ range:{
+ age:{
+ from:"23",
+ to:"54",
+ include_lower:true,
+ include_upper:false
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/range2.json b/core/src/test/java/org/elasticsearch/index/query/range2.json
new file mode 100644
index 0000000000..c116b3c0a3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/range2.json
@@ -0,0 +1,8 @@
+{
+ range:{
+ age:{
+ gte:"23",
+ lt:"54"
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-boost.json b/core/src/test/java/org/elasticsearch/index/query/regexp-boost.json
new file mode 100644
index 0000000000..ed8699b39c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-boost.json
@@ -0,0 +1,8 @@
+{
+ "regexp":{
+ "name.first":{
+ "value":"sh",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
new file mode 100644
index 0000000000..112f8fb3ab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags-named-cached.json
@@ -0,0 +1,20 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test",
+ "_cache" : true,
+ "_cache_key" : "key"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
new file mode 100644
index 0000000000..a5d7307e56
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-flags.json
@@ -0,0 +1,18 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : {
+ "value" : "s.*y",
+ "flags" : "INTERSECTION|COMPLEMENT|EMPTY"
+ },
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-filter-max-determinized-states.json b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-max-determinized-states.json
new file mode 100644
index 0000000000..2672ac65ca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-max-determinized-states.json
@@ -0,0 +1,17 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp": {
+ "name.first": {
+ "value": "s.*y",
+ "max_determinized_states": 6000
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
new file mode 100644
index 0000000000..ac96b3ee01
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y",
+ "_name" : "test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-filter.json b/core/src/test/java/org/elasticsearch/index/query/regexp-filter.json
new file mode 100644
index 0000000000..d7c7bfdb39
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered": {
+ "query": {
+ "term": {
+ "name.first": "shay"
+ }
+ },
+ "filter": {
+ "regexp":{
+ "name.first" : "s.*y"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp-max-determinized-states.json b/core/src/test/java/org/elasticsearch/index/query/regexp-max-determinized-states.json
new file mode 100644
index 0000000000..df2f5cc603
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp-max-determinized-states.json
@@ -0,0 +1,8 @@
+{
+ "regexp": {
+ "name.first": {
+ "value": "s.*y",
+ "max_determinized_states": 5000
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/regexp.json b/core/src/test/java/org/elasticsearch/index/query/regexp.json
new file mode 100644
index 0000000000..6c3d69469c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/regexp.json
@@ -0,0 +1,5 @@
+{
+ "regexp":{
+ "name.first": "s.*y"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/simple-query-string.json b/core/src/test/java/org/elasticsearch/index/query/simple-query-string.json
new file mode 100644
index 0000000000..9208e8876f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/simple-query-string.json
@@ -0,0 +1,8 @@
+{
+ "simple_query_string": {
+ "query": "foo bar",
+ "analyzer": "keyword",
+ "fields": ["body^5","_all"],
+ "default_operator": "and"
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
new file mode 100644
index 0000000000..d9ca05b3f3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-range.json
@@ -0,0 +1,13 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy":{
+ "age":{
+ "value":12,
+ "fuzziness":5,
+ "boost":2.0
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
new file mode 100644
index 0000000000..edb58e35ec
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-fuzzy-term.json
@@ -0,0 +1,12 @@
+{
+ "span_multi":{
+ "match":{
+ "fuzzy" : {
+ "user" : {
+ "value" : "ki",
+ "boost" : 1.08
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
new file mode 100644
index 0000000000..62918d6ad7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-prefix.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "prefix" : { "user" : { "value" : "ki", "boost" : 1.08 } }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
new file mode 100644
index 0000000000..d9db8a445c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-numeric.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "age" : {
+ "from" : 10,
+ "to" : 20,
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
new file mode 100644
index 0000000000..8c4da31ebf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-range-term.json
@@ -0,0 +1,16 @@
+{
+ "span_multi":{
+ "match":{
+ "range" : {
+ "user" : {
+ "from" : "alice",
+ "to" : "bob",
+ "include_lower" : true,
+ "include_upper": false,
+ "boost" : 2.0
+ }
+ }
+ }
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
new file mode 100644
index 0000000000..a2eaeb7209
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/span-multi-term-wildcard.json
@@ -0,0 +1,7 @@
+{
+ "span_multi":{
+ "match":{
+ "wildcard" : { "user" : {"value": "ki*y" , "boost" : 1.08}}
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanContaining.json b/core/src/test/java/org/elasticsearch/index/query/spanContaining.json
new file mode 100644
index 0000000000..13f91d88b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanContaining.json
@@ -0,0 +1,14 @@
+{
+ span_containing:{
+ big:{
+ span_term:{
+ age:34
+ }
+ },
+ little:{
+ span_term:{
+ age:35
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json b/core/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
new file mode 100644
index 0000000000..9849c105e9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanFieldMaskingTerm.json
@@ -0,0 +1,29 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ field_masking_span:{
+ query:{
+ span_term:{
+ age_1 : 36
+ }
+ },
+ field:"age"
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanFirst.json b/core/src/test/java/org/elasticsearch/index/query/spanFirst.json
new file mode 100644
index 0000000000..9972c76913
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanFirst.json
@@ -0,0 +1,10 @@
+{
+ span_first:{
+ match:{
+ span_term:{
+ age:34
+ }
+ },
+ end:12
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanNear.json b/core/src/test/java/org/elasticsearch/index/query/spanNear.json
new file mode 100644
index 0000000000..ce17063978
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanNear.json
@@ -0,0 +1,24 @@
+{
+ span_near:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ],
+ slop:12,
+ in_order:false,
+ collect_payloads:false
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanNot.json b/core/src/test/java/org/elasticsearch/index/query/spanNot.json
new file mode 100644
index 0000000000..c90de330df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanNot.json
@@ -0,0 +1,14 @@
+{
+ span_not:{
+ include:{
+ span_term:{
+ age:34
+ }
+ },
+ exclude:{
+ span_term:{
+ age:35
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanOr.json b/core/src/test/java/org/elasticsearch/index/query/spanOr.json
new file mode 100644
index 0000000000..06c52628e5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanOr.json
@@ -0,0 +1,21 @@
+{
+ span_or:{
+ clauses:[
+ {
+ span_term:{
+ age:34
+ }
+ },
+ {
+ span_term:{
+ age:35
+ }
+ },
+ {
+ span_term:{
+ age:36
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanOr2.json b/core/src/test/java/org/elasticsearch/index/query/spanOr2.json
new file mode 100644
index 0000000000..b64ce1cae3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanOr2.json
@@ -0,0 +1,30 @@
+{
+ "span_or":{
+ "clauses":[
+ {
+ "span_term":{
+ "age":{
+ "value":34,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":35,
+ "boost":1.0
+ }
+ }
+ },
+ {
+ "span_term":{
+ "age":{
+ "value":36,
+ "boost":1.0
+ }
+ }
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanTerm.json b/core/src/test/java/org/elasticsearch/index/query/spanTerm.json
new file mode 100644
index 0000000000..0186593ff5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanTerm.json
@@ -0,0 +1,5 @@
+{
+ span_term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/spanWithin.json b/core/src/test/java/org/elasticsearch/index/query/spanWithin.json
new file mode 100644
index 0000000000..7cf767cdf1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/spanWithin.json
@@ -0,0 +1,14 @@
+{
+ span_within:{
+ big:{
+ span_term:{
+ age:34
+ }
+ },
+ little:{
+ span_term:{
+ age:35
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/starColonStar.json b/core/src/test/java/org/elasticsearch/index/query/starColonStar.json
new file mode 100644
index 0000000000..c769ca0938
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/starColonStar.json
@@ -0,0 +1,5 @@
+{
+ "query_string": {
+ "query": "*:*"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/term-array-invalid.json b/core/src/test/java/org/elasticsearch/index/query/term-array-invalid.json
new file mode 100644
index 0000000000..a198bc2daf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/term-array-invalid.json
@@ -0,0 +1,5 @@
+{
+ "term": {
+ "age": [34, 35]
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/query/term-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/term-filter-named.json
new file mode 100644
index 0000000000..c23b7b382e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/term-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon",
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/term-filter.json b/core/src/test/java/org/elasticsearch/index/query/term-filter.json
new file mode 100644
index 0000000000..11d2bfdd8d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/term-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "term":{
+ "name.last":"banon"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/term-with-boost.json b/core/src/test/java/org/elasticsearch/index/query/term-with-boost.json
new file mode 100644
index 0000000000..5f33cd55ea
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/term-with-boost.json
@@ -0,0 +1,8 @@
+{
+ term:{
+ age:{
+ value:34,
+ boost:2.0
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/term.json b/core/src/test/java/org/elasticsearch/index/query/term.json
new file mode 100644
index 0000000000..378cf42f04
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/term.json
@@ -0,0 +1,5 @@
+{
+ term:{
+ age:34
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/terms-filter-named.json b/core/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
new file mode 100644
index 0000000000..2cb8c7aab1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/terms-filter-named.json
@@ -0,0 +1,15 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"],
+ "_name":"test"
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/terms-filter.json b/core/src/test/java/org/elasticsearch/index/query/terms-filter.json
new file mode 100644
index 0000000000..04a8d26f41
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/terms-filter.json
@@ -0,0 +1,14 @@
+{
+ "filtered":{
+ "query":{
+ "term":{
+ "name.first":"shay"
+ }
+ },
+ "filter":{
+ "terms":{
+ "name.last":["banon", "kimchy"]
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/terms-query.json b/core/src/test/java/org/elasticsearch/index/query/terms-query.json
new file mode 100644
index 0000000000..a3e0d084ad
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/terms-query.json
@@ -0,0 +1,5 @@
+{
+ "terms":{
+ "name.first":["shay", "test"]
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/wildcard-boost.json b/core/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
new file mode 100644
index 0000000000..53c8d82a8d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/wildcard-boost.json
@@ -0,0 +1,8 @@
+{
+ "wildcard":{
+ "name.first":{
+ "value":"sh*",
+ "boost":1.2
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/query/wildcard.json b/core/src/test/java/org/elasticsearch/index/query/wildcard.json
new file mode 100644
index 0000000000..c8ed85262b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/query/wildcard.json
@@ -0,0 +1,5 @@
+{
+ wildcard:{
+ "name.first":"sh*"
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java b/core/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java
new file mode 100644
index 0000000000..e4f8e6c3d1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.search.*;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.BitSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.hamcrest.Description;
+import org.hamcrest.StringDescription;
+import org.junit.Ignore;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+@Ignore
+public abstract class AbstractChildTests extends ElasticsearchSingleNodeTest {
+
+ /**
+ * The name of the field within the child type that stores a score to use in test queries.
+ * <p>
+ * Its type is {@code double}.
+ */
+ protected static String CHILD_SCORE_NAME = "childScore";
+
+ static SearchContext createSearchContext(String indexName, String parentType, String childType) throws IOException {
+ Settings settings = Settings.builder()
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_6_0)
+ .build();
+ IndexService indexService = createIndex(indexName, settings);
+ MapperService mapperService = indexService.mapperService();
+ // Parent/child parsers require that the parent and child type to be presented in mapping
+ // Sometimes we want a nested object field in the parent type that triggers nonNestedDocsFilter to be used
+ mapperService.merge(parentType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(parentType, "nested_field", random().nextBoolean() ? "type=nested" : "type=object").string()), true);
+ mapperService.merge(childType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(childType, "_parent", "type=" + parentType, CHILD_SCORE_NAME, "type=double,doc_values=false").string()), true);
+ return createSearchContext(indexService);
+ }
+
+ static void assertBitSet(BitSet actual, BitSet expected, IndexSearcher searcher) throws IOException {
+ assertBitSet(new BitDocIdSet(actual), new BitDocIdSet(expected), searcher);
+ }
+
+ static void assertBitSet(BitDocIdSet actual, BitDocIdSet expected, IndexSearcher searcher) throws IOException {
+ if (!equals(expected, actual)) {
+ Description description = new StringDescription();
+ description.appendText(reason(actual, expected, searcher));
+ description.appendText("\nExpected: ");
+ description.appendValue(expected);
+ description.appendText("\n got: ");
+ description.appendValue(actual);
+ description.appendText("\n");
+ throw new java.lang.AssertionError(description.toString());
+ }
+ }
+
+ static boolean equals(BitDocIdSet expected, BitDocIdSet actual) {
+ if (actual == null && expected == null) {
+ return true;
+ } else if (actual == null || expected == null) {
+ return false;
+ }
+ BitSet actualBits = actual.bits();
+ BitSet expectedBits = expected.bits();
+ if (actualBits.length() != expectedBits.length()) {
+ return false;
+ }
+ for (int i = 0; i < expectedBits.length(); i++) {
+ if (expectedBits.get(i) != actualBits.get(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static String reason(BitDocIdSet actual, BitDocIdSet expected, IndexSearcher indexSearcher) throws IOException {
+ StringBuilder builder = new StringBuilder();
+ builder.append("expected cardinality:").append(expected.bits().cardinality()).append('\n');
+ DocIdSetIterator iterator = expected.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Expected doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ builder.append("actual cardinality: ").append(actual.bits().cardinality()).append('\n');
+ iterator = actual.iterator();
+ for (int doc = iterator.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = iterator.nextDoc()) {
+ builder.append("Actual doc[").append(doc).append("] with id value ").append(indexSearcher.doc(doc).get(UidFieldMapper.NAME)).append('\n');
+ }
+ return builder.toString();
+ }
+
+ static void assertTopDocs(TopDocs actual, TopDocs expected) {
+ assertThat("actual.totalHits != expected.totalHits", actual.totalHits, equalTo(expected.totalHits));
+ assertThat("actual.getMaxScore() != expected.getMaxScore()", actual.getMaxScore(), equalTo(expected.getMaxScore()));
+ assertThat("actual.scoreDocs.length != expected.scoreDocs.length", actual.scoreDocs.length, equalTo(expected.scoreDocs.length));
+ for (int i = 0; i < actual.scoreDocs.length; i++) {
+ ScoreDoc actualHit = actual.scoreDocs[i];
+ ScoreDoc expectedHit = expected.scoreDocs[i];
+ assertThat("actualHit.doc != expectedHit.doc", actualHit.doc, equalTo(expectedHit.doc));
+ assertThat("actualHit.score != expectedHit.score", actualHit.score, equalTo(expectedHit.score));
+ }
+ }
+
+ static BitDocIdSetFilter wrapWithBitSetFilter(Filter filter) {
+ return SearchContext.current().bitsetFilterCache().getBitDocIdSetFilter(filter);
+ }
+
+ static Query parseQuery(QueryBuilder queryBuilder) throws IOException {
+ QueryParseContext context = new QueryParseContext(new Index("test"), SearchContext.current().queryParserService());
+ XContentParser parser = XContentHelper.createParser(queryBuilder.buildAsBytes());
+ context.reset(parser);
+ return context.parseInnerQuery();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java b/core/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
new file mode 100644
index 0000000000..afdc08e232
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/BitSetCollector.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.common.lucene.search.NoopCollector;
+
+import java.io.IOException;
+
+/**
+ * Collector that records every collected document in a {@link FixedBitSet}
+ * sized to the top-level reader, so tests can compare the exact set of
+ * matching documents against an expected bit set.
+ */
+class BitSetCollector extends NoopCollector {
+
+    final FixedBitSet result;
+    int base;
+
+    BitSetCollector(int topLevelMaxDoc) {
+        this.result = new FixedBitSet(topLevelMaxDoc);
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+        // translate the segment-relative doc id into a top-level doc id
+        result.set(base + doc);
+    }
+
+    @Override
+    protected void doSetNextReader(LeafReaderContext context) throws IOException {
+        base = context.docBase;
+    }
+
+    FixedBitSet getResult() {
+        return result;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
new file mode 100644
index 0000000000..53bfa8cb51
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.Random;
+import java.util.TreeSet;
+
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.notQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ChildrenConstantScoreQueryTests extends AbstractChildTests {
+
+    @BeforeClass
+    public static void before() throws IOException {
+        // Install a thread-local test search context with a parent/child mapping
+        // so the queries under test can resolve mappers and field data.
+        SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+    }
+
+    @AfterClass
+    public static void after() throws IOException {
+        // Detach the thread-local context first, then release its resources.
+        SearchContext current = SearchContext.current();
+        SearchContext.removeCurrent();
+        Releasables.close(current);
+    }
+
+    @Test
+    public void testBasicQuerySanities() {
+        // Run Lucene's generic query invariant checks (equals/hashCode/rewrite
+        // contracts) against a representative ChildrenConstantScoreQuery.
+        Query childQuery = new TermQuery(new Term("field", "value"));
+        ParentFieldMapper parentMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
+        ParentChildIndexFieldData fieldData = SearchContext.current().fieldData().getForField(parentMapper);
+        BitDocIdSetFilter parentsFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent"))));
+        Query query = new ChildrenConstantScoreQuery(fieldData, childQuery, "parent", "child", parentsFilter, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter()));
+        QueryUtils.check(query);
+    }
+
+    @Test
+    public void testSimple() throws Exception {
+        Directory directory = newDirectory();
+        RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
+
+        // Index 5 parents, each immediately followed by 3 children whose
+        // "field1" values are value1..value3.
+        for (int parent = 1; parent <= 5; parent++) {
+            Document document = new Document();
+            document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+            document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+            indexWriter.addDocument(document);
+
+            for (int child = 1; child <= 3; child++) {
+                document = new Document();
+                document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(parent * 3 + child)), Field.Store.NO));
+                document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+                document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", Integer.toString(parent)), Field.Store.NO));
+                document.add(new StringField("field1", "value" + child, Field.Store.NO));
+                indexWriter.addDocument(document);
+            }
+        }
+
+        IndexReader indexReader = DirectoryReader.open(indexWriter.w, false);
+        IndexSearcher searcher = new IndexSearcher(indexReader);
+        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(
+                SearchContext.current(), new Engine.Searcher(ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher)
+        ));
+
+        // Every parent has exactly one child matching any given value1..value3,
+        // so the query must hit all 5 parents regardless of the random value
+        // or short-circuit cutoff chosen below.
+        TermQuery childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3))));
+        BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent"))));
+        int shortCircuitParentDocSet = random().nextInt(5);
+        ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
+        ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
+        ChildrenConstantScoreQuery query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, shortCircuitParentDocSet, null);
+
+        BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+        searcher.search(query, collector);
+        FixedBitSet actualResult = collector.getResult();
+
+        assertThat(actualResult.cardinality(), equalTo(5));
+
+        indexWriter.close();
+        indexReader.close();
+        directory.close();
+    }
+
+    @Test
+    public void testRandom() throws Exception {
+        // Builds a random parent/child index while tracking, per child value,
+        // the set of live parent ids in childValueToParentIds; then repeatedly
+        // runs has_child queries and verifies the matched parents against that
+        // bookkeeping, interleaving random parent "updates" (delete + re-add).
+        Directory directory = newDirectory();
+        final Random r = random();
+        final IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r))
+                .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+                .setRAMBufferSizeMB(scaledRandomIntBetween(16, 64)); // we might index a lot - don't go crazy here
+        RandomIndexWriter indexWriter = new RandomIndexWriter(r, directory, iwc);
+        int numUniqueChildValues = scaledRandomIntBetween(100, 2000);
+        String[] childValues = new String[numUniqueChildValues];
+        for (int i = 0; i < numUniqueChildValues; i++) {
+            childValues[i] = Integer.toString(i);
+        }
+
+        IntHashSet filteredOrDeletedDocs = new IntHashSet();
+        int childDocId = 0;
+        int numParentDocs = scaledRandomIntBetween(1, numUniqueChildValues);
+        ObjectObjectHashMap<String, NavigableSet<String>> childValueToParentIds = new ObjectObjectHashMap<>();
+        for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+            // parents are rarely marked deleted or tagged for the post-filter;
+            // either way they must not appear in the expected result
+            boolean markParentAsDeleted = rarely();
+            boolean filterMe = rarely();
+            String parent = Integer.toString(parentDocId);
+            Document document = new Document();
+            document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+            document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+            if (markParentAsDeleted) {
+                filteredOrDeletedDocs.add(parentDocId);
+                document.add(new StringField("delete", "me", Field.Store.NO));
+            }
+            if (filterMe) {
+                filteredOrDeletedDocs.add(parentDocId);
+                document.add(new StringField("filter", "me", Field.Store.NO));
+            }
+            indexWriter.addDocument(document);
+
+            final int numChildDocs = scaledRandomIntBetween(0, 100);
+            for (int i = 0; i < numChildDocs; i++) {
+                boolean markChildAsDeleted = rarely();
+                String childValue = childValues[random().nextInt(childValues.length)];
+
+                document = new Document();
+                document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId++)), Field.Store.NO));
+                document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+                document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+                document.add(new StringField("field1", childValue, Field.Store.NO));
+                if (markChildAsDeleted) {
+                    document.add(new StringField("delete", "me", Field.Store.NO));
+                }
+                indexWriter.addDocument(document);
+
+                // only live children of live, unfiltered parents contribute
+                // to the expected parent id set for this child value
+                if (!markChildAsDeleted) {
+                    NavigableSet<String> parentIds;
+                    if (childValueToParentIds.containsKey(childValue)) {
+                        parentIds = childValueToParentIds.get(childValue);
+                    } else {
+                        childValueToParentIds.put(childValue, parentIds = new TreeSet<>());
+                    }
+                    if (!markParentAsDeleted && !filterMe) {
+                        parentIds.add(parent);
+                    }
+                }
+            }
+        }
+
+        // Delete docs that are marked to be deleted.
+        indexWriter.deleteDocuments(new Term("delete", "me"));
+
+        indexWriter.commit();
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher searcher = new IndexSearcher(indexReader);
+        Engine.Searcher engineSearcher = new Engine.Searcher(
+                ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+        );
+        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+        int max = numUniqueChildValues / 4;
+        for (int i = 0; i < max; i++) {
+            // Simulate a parent update
+            if (random().nextBoolean()) {
+                final int numberOfUpdatableParents = numParentDocs - filteredOrDeletedDocs.size();
+                int numberOfUpdates = scaledRandomIntBetween(0, numberOfUpdatableParents);
+                for (int j = 0; j < numberOfUpdates; j++) {
+                    int parentId;
+                    do {
+                        parentId = random().nextInt(numParentDocs);
+                    } while (filteredOrDeletedDocs.contains(parentId));
+
+                    String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+                    indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+                    Document document = new Document();
+                    document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+                    document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+                    indexWriter.addDocument(document);
+                }
+
+                // reopen reader/searcher and re-wire the context so the query
+                // sees the updated index
+                indexReader.close();
+                indexReader = DirectoryReader.open(indexWriter.w, true);
+                searcher = new IndexSearcher(indexReader);
+                engineSearcher = new Engine.Searcher(
+                        ChildrenConstantScoreQueryTests.class.getSimpleName(), searcher
+                );
+                ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+            }
+
+            String childValue = childValues[random().nextInt(numUniqueChildValues)];
+            int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+            QueryBuilder queryBuilder;
+            if (random().nextBoolean()) {
+                queryBuilder = hasChildQuery("child", termQuery("field1", childValue))
+                        .setShortCircuitCutoff(shortCircuitParentDocSet);
+            } else {
+                queryBuilder = constantScoreQuery(
+                        hasChildQuery("child", termQuery("field1", childValue))
+                                .setShortCircuitCutoff(shortCircuitParentDocSet)
+                );
+            }
+            // Using a FQ, will invoke / test the Scorer#advance(..) and also let the Weight#scorer not get live docs as acceptedDocs
+            queryBuilder = filteredQuery(queryBuilder, notQuery(termQuery("filter", "me")));
+            Query query = parseQuery(queryBuilder);
+
+            BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+            searcher.search(query, collector);
+            FixedBitSet actualResult = collector.getResult();
+
+            // Compute the expected bit set by resolving each tracked parent id
+            // through the uid terms dictionary (live docs skip deleted parents).
+            FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+            if (childValueToParentIds.containsKey(childValue)) {
+                LeafReader slowLeafReader = SlowCompositeReaderWrapper.wrap(indexReader);
+                Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
+                if (terms != null) {
+                    NavigableSet<String> parentIds = childValueToParentIds.get(childValue);
+                    TermsEnum termsEnum = terms.iterator();
+                    PostingsEnum docsEnum = null;
+                    for (String id : parentIds) {
+                        TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id));
+                        if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+                            docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
+                            expectedResult.set(docsEnum.nextDoc());
+                        } else if (seekStatus == TermsEnum.SeekStatus.END) {
+                            break;
+                        }
+                    }
+                }
+            }
+
+            assertBitSet(actualResult, expectedResult, searcher);
+        }
+
+        indexWriter.close();
+        indexReader.close();
+        directory.close();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
new file mode 100644
index 0000000000..1dd9d5de2c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java
@@ -0,0 +1,422 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionBuilder;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Random;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.notQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class ChildrenQueryTests extends AbstractChildTests {
+
+    @BeforeClass
+    public static void before() throws IOException {
+        // Install a thread-local test search context with a parent/child mapping
+        // so the queries under test can resolve mappers and field data.
+        SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+    }
+
+    @AfterClass
+    public static void after() throws IOException {
+        // Detach the thread-local context first, then release its resources.
+        SearchContext current = SearchContext.current();
+        SearchContext.removeCurrent();
+        Releasables.close(current);
+    }
+
+    @Test
+    public void testBasicQuerySanities() {
+        // Run Lucene's generic query invariant checks against a ChildrenQuery
+        // built with a random score type and random min/max children bounds.
+        Query childQuery = new TermQuery(new Term("field", "value"));
+        ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
+        ParentFieldMapper parentMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
+        ParentChildIndexFieldData fieldData = SearchContext.current().fieldData().getForField(parentMapper);
+        BitDocIdSetFilter parentsFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent"))));
+        int minChildren = random().nextInt(10);
+        int maxChildren = scaledRandomIntBetween(minChildren, 10);
+        Query query = new ChildrenQuery(fieldData, "parent", "child", parentsFilter, childQuery, scoreType, minChildren,
+                maxChildren, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter()));
+        QueryUtils.check(query);
+    }
+
+    @Test
+    public void testRandom() throws Exception {
+        // Builds a random parent/child index while tracking, per child value,
+        // the expected child scores per live parent id; then repeatedly runs
+        // has_child queries with random score types and min/max bounds, and
+        // verifies both the matched parents and their scores (via MockScorer)
+        // against that bookkeeping, interleaving random parent updates.
+        Directory directory = newDirectory();
+        final Random r = random();
+        final IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r))
+                .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+                .setRAMBufferSizeMB(scaledRandomIntBetween(16, 64)); // we might index a lot - don't go crazy here
+        RandomIndexWriter indexWriter = new RandomIndexWriter(r, directory, iwc);
+        int numUniqueChildValues = scaledRandomIntBetween(100, 2000);
+        String[] childValues = new String[numUniqueChildValues];
+        for (int i = 0; i < numUniqueChildValues; i++) {
+            childValues[i] = Integer.toString(i);
+        }
+
+        IntHashSet filteredOrDeletedDocs = new IntHashSet();
+
+        int childDocId = 0;
+        int numParentDocs = scaledRandomIntBetween(1, numUniqueChildValues);
+        ObjectObjectHashMap<String, NavigableMap<String, FloatArrayList>> childValueToParentIds = new ObjectObjectHashMap<>();
+        for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+            // parents are rarely marked deleted or tagged for the post-filter;
+            // either way they must not appear in the expected result
+            boolean markParentAsDeleted = rarely();
+            boolean filterMe = rarely();
+            String parent = Integer.toString(parentDocId);
+            Document document = new Document();
+            document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+            document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+            if (markParentAsDeleted) {
+                filteredOrDeletedDocs.add(parentDocId);
+                document.add(new StringField("delete", "me", Field.Store.NO));
+            }
+            if (filterMe) {
+                filteredOrDeletedDocs.add(parentDocId);
+                document.add(new StringField("filter", "me", Field.Store.NO));
+            }
+            indexWriter.addDocument(document);
+
+            int numChildDocs = scaledRandomIntBetween(0, 100);
+            for (int i = 0; i < numChildDocs; i++) {
+                boolean markChildAsDeleted = rarely();
+                String childValue = childValues[random().nextInt(childValues.length)];
+
+                document = new Document();
+                document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId++)), Field.Store.NO));
+                document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+                document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+                document.add(new StringField("field1", childValue, Field.Store.NO));
+                if (markChildAsDeleted) {
+                    document.add(new StringField("delete", "me", Field.Store.NO));
+                }
+                indexWriter.addDocument(document);
+
+                // record a score of 1f per live child of a live, unfiltered
+                // parent; the per-parent list drives the expected aggregate
+                if (!markChildAsDeleted) {
+                    NavigableMap<String, FloatArrayList> parentIdToChildScores;
+                    if (childValueToParentIds.containsKey(childValue)) {
+                        parentIdToChildScores = childValueToParentIds.get(childValue);
+                    } else {
+                        childValueToParentIds.put(childValue, parentIdToChildScores = new TreeMap<>());
+                    }
+                    if (!markParentAsDeleted && !filterMe) {
+                        FloatArrayList childScores = parentIdToChildScores.get(parent);
+                        if (childScores == null) {
+                            parentIdToChildScores.put(parent, childScores = new FloatArrayList());
+                        }
+                        childScores.add(1f);
+                    }
+                }
+            }
+        }
+
+        // Delete docs that are marked to be deleted.
+        indexWriter.deleteDocuments(new Term("delete", "me"));
+        indexWriter.commit();
+
+        IndexReader indexReader = DirectoryReader.open(directory);
+        IndexSearcher searcher = new IndexSearcher(indexReader);
+        Engine.Searcher engineSearcher = new Engine.Searcher(
+                ChildrenQueryTests.class.getSimpleName(), searcher
+        );
+        ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+        int max = numUniqueChildValues / 4;
+        for (int i = 0; i < max; i++) {
+            // Simulate a parent update
+            if (random().nextBoolean()) {
+                final int numberOfUpdatableParents = numParentDocs - filteredOrDeletedDocs.size();
+                int numberOfUpdates = RandomInts.randomIntBetween(random(), 0, Math.min(numberOfUpdatableParents, TEST_NIGHTLY ? 25 : 5));
+                for (int j = 0; j < numberOfUpdates; j++) {
+                    int parentId;
+                    do {
+                        parentId = random().nextInt(numParentDocs);
+                    } while (filteredOrDeletedDocs.contains(parentId));
+
+                    String parentUid = Uid.createUid("parent", Integer.toString(parentId));
+                    indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, parentUid));
+
+                    Document document = new Document();
+                    document.add(new StringField(UidFieldMapper.NAME, parentUid, Field.Store.YES));
+                    document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+                    indexWriter.addDocument(document);
+                }
+
+                indexReader.close();
+                indexReader = DirectoryReader.open(indexWriter.w, true);
+                searcher = new IndexSearcher(indexReader);
+                // fixed: previously labeled the searcher with the sibling test
+                // class (ChildrenConstantScoreQueryTests) — a copy/paste slip;
+                // use this class, consistent with the initial searcher above
+                engineSearcher = new Engine.Searcher(
+                        ChildrenQueryTests.class.getSimpleName(), searcher
+                );
+                ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+            }
+
+            String childValue = childValues[random().nextInt(numUniqueChildValues)];
+            int shortCircuitParentDocSet = random().nextInt(numParentDocs);
+            ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)];
+            // leave min/max set to 0 half the time
+            int minChildren = random().nextInt(2) * scaledRandomIntBetween(0, 110);
+            int maxChildren = random().nextInt(2) * scaledRandomIntBetween(minChildren, 110);
+
+            QueryBuilder queryBuilder = hasChildQuery("child", constantScoreQuery(termQuery("field1", childValue)))
+                    .scoreType(scoreType.name().toLowerCase(Locale.ENGLISH))
+                    .minChildren(minChildren)
+                    .maxChildren(maxChildren)
+                    .setShortCircuitCutoff(shortCircuitParentDocSet);
+            // Using a FQ, will invoke / test the Scorer#advance(..) and also let the Weight#scorer not get live docs as acceptedDocs
+            queryBuilder = filteredQuery(queryBuilder, notQuery(termQuery("filter", "me")));
+            Query query = parseQuery(queryBuilder);
+            BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+            int numHits = 1 + random().nextInt(25);
+            TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits);
+            searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+            FixedBitSet actualResult = collector.getResult();
+
+            // Compute expected matches and scores: resolve each tracked parent
+            // id through the uid terms dictionary (live docs skip deleted
+            // parents), honoring the min/max children bounds (0 = unbounded max).
+            FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+            TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits);
+            if (childValueToParentIds.containsKey(childValue)) {
+                LeafReader slowLeafReader = SlowCompositeReaderWrapper.wrap(indexReader);
+                final FloatArrayList[] scores = new FloatArrayList[slowLeafReader.maxDoc()];
+                Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
+                if (terms != null) {
+                    NavigableMap<String, FloatArrayList> parentIdToChildScores = childValueToParentIds.get(childValue);
+                    TermsEnum termsEnum = terms.iterator();
+                    PostingsEnum docsEnum = null;
+                    for (Map.Entry<String, FloatArrayList> entry : parentIdToChildScores.entrySet()) {
+                        int count = entry.getValue().elementsCount;
+                        if (count >= minChildren && (maxChildren == 0 || count <= maxChildren)) {
+                            TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey()));
+                            if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+                                docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
+                                expectedResult.set(docsEnum.nextDoc());
+                                scores[docsEnum.docID()] = new FloatArrayList(entry.getValue());
+                            } else if (seekStatus == TermsEnum.SeekStatus.END) {
+                                break;
+                            }
+                        }
+                    }
+                }
+                // replay the expected hits through a MockScorer so the expected
+                // top docs carry the aggregate score for the chosen score type
+                MockScorer mockScorer = new MockScorer(scoreType);
+                final LeafCollector leafCollector = expectedTopDocsCollector.getLeafCollector(slowLeafReader.getContext());
+                leafCollector.setScorer(mockScorer);
+                for (int doc = expectedResult.nextSetBit(0); doc < slowLeafReader.maxDoc(); doc = doc + 1 >= expectedResult.length() ? DocIdSetIterator.NO_MORE_DOCS : expectedResult.nextSetBit(doc + 1)) {
+                    mockScorer.scores = scores[doc];
+                    leafCollector.collect(doc);
+                }
+            }
+
+            assertBitSet(actualResult, expectedResult, searcher);
+            assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+        }
+
+        indexWriter.close();
+        indexReader.close();
+        directory.close();
+    }
+
+    @Test
+    public void testMinScoreMode() throws IOException {
+        // parent score = minimum of its children's scores
+        assertScoreType(ScoreType.MIN);
+    }
+
+    @Test
+    public void testMaxScoreMode() throws IOException {
+        // parent score = maximum of its children's scores
+        assertScoreType(ScoreType.MAX);
+    }
+
+    @Test
+    public void testAvgScoreMode() throws IOException {
+        // parent score = average of its children's scores
+        assertScoreType(ScoreType.AVG);
+    }
+
+    @Test
+    public void testSumScoreMode() throws IOException {
+        // parent score = sum of its children's scores
+        assertScoreType(ScoreType.SUM);
+    }
+
+    /**
+     * Assert that the {@code scoreType} operates as expected and parents are found in the expected order.
+     * <p>
+     * This will use the test index's parent/child types to create parents with multiple children. Each child will have
+     * a randomly generated scored stored in {@link #CHILD_SCORE_NAME}, which is used to score based on the
+     * {@code scoreType} by using a {@link MockScorer} to determine the expected scores.
+     * @param scoreType The score type to use within the query to score parents relative to their children.
+     * @throws IOException if any unexpected error occurs
+     */
+    private void assertScoreType(ScoreType scoreType) throws IOException {
+        SearchContext context = SearchContext.current();
+        Directory directory = newDirectory();
+        IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(new MockAnalyzer(random())));
+
+        // calculates the expected score per parent
+        MockScorer scorer = new MockScorer(scoreType);
+        scorer.scores = new FloatArrayList(10);
+
+        // number of parents to generate
+        int parentDocs = scaledRandomIntBetween(2, 10);
+        // unique child ID
+        int childDocId = 0;
+
+        // Parent ID to expected score
+        Map<String, Float> parentScores = new TreeMap<>();
+
+        // Add a few random parents to ensure that the children's score is appropriately taken into account
+        for (int parentDocId = 0; parentDocId < parentDocs; ++parentDocId) {
+            String parent = Integer.toString(parentDocId);
+
+            // Create the parent
+            Document parentDocument = new Document();
+
+            parentDocument.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.YES));
+            parentDocument.add(new StringField(IdFieldMapper.NAME, parent, Field.Store.YES));
+            parentDocument.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+
+            // add the parent to the index
+            writer.addDocument(parentDocument);
+
+            int numChildDocs = scaledRandomIntBetween(1, 10);
+
+            // forget any parent's previous scores
+            scorer.scores.clear();
+
+            // associate children with the parent
+            for (int i = 0; i < numChildDocs; ++i) {
+                int childScore = random().nextInt(128);
+
+                Document childDocument = new Document();
+
+                childDocument.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", Integer.toString(childDocId++)), Field.Store.NO));
+                childDocument.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+                // parent association:
+                childDocument.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+                childDocument.add(new DoubleField(CHILD_SCORE_NAME, childScore, Field.Store.NO));
+
+                // remember the score to be calculated
+                scorer.scores.add(childScore);
+
+                // add the associated child to the index
+                writer.addDocument(childDocument);
+            }
+
+            // this score that should be returned for this parent
+            parentScores.put(parent, scorer.score());
+        }
+
+        writer.commit();
+
+        IndexReader reader = DirectoryReader.open(writer, true);
+        IndexSearcher searcher = new IndexSearcher(reader);
+
+        // setup to read the parent/child map
+        Engine.Searcher engineSearcher = new Engine.Searcher(ChildrenQueryTests.class.getSimpleName(), searcher);
+        ((TestSearchContext)context).setSearcher(new ContextIndexSearcher(context, engineSearcher));
+
+        // child query that returns the score as the value of "childScore" for each child document, with the parent's score determined by the score type
+        QueryBuilder childQueryBuilder = functionScoreQuery(typeQuery("child")).add(new FieldValueFactorFunctionBuilder(CHILD_SCORE_NAME));
+        QueryBuilder queryBuilder = hasChildQuery("child", childQueryBuilder)
+                .scoreType(scoreType.name().toLowerCase(Locale.ENGLISH))
+                .setShortCircuitCutoff(parentDocs);
+
+        // Perform the search for the documents using the selected score type
+        Query query = parseQuery(queryBuilder);
+        TopDocs docs = searcher.search(query, parentDocs);
+        assertThat("Expected all parents", docs.totalHits, is(parentDocs));
+
+        // score should be descending (just a sanity check)
+        float topScore = docs.scoreDocs[0].score;
+
+        // ensure each score is returned as expected
+        for (int i = 0; i < parentDocs; ++i) {
+            ScoreDoc scoreDoc = docs.scoreDocs[i];
+            // get the ID from the document to get its expected score; remove it so we cannot double-count it
+            float score = parentScores.remove(reader.document(scoreDoc.doc).get(IdFieldMapper.NAME));
+
+            // expect exact match — child scores are small integers, so the
+            // aggregates are exactly representable as floats (no tolerance needed)
+            assertThat("Unexpected score", scoreDoc.score, is(score));
+            assertThat("Not descending", score, lessThanOrEqualTo(topScore));
+
+            // it had better keep descending
+            topScore = score;
+        }
+
+        reader.close();
+        writer.close();
+        directory.close();
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/MockScorer.java b/core/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
new file mode 100644
index 0000000000..2eecbceaa5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/MockScorer.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+class MockScorer extends Scorer {
+
+ final ScoreType scoreType;
+ FloatArrayList scores;
+
+ MockScorer(ScoreType scoreType) {
+ super(null);
+ this.scoreType = scoreType;
+ }
+
+ @Override
+ public float score() throws IOException {
+ if (scoreType == ScoreType.NONE) {
+ return 1.0f;
+ }
+ float aggregateScore = 0;
+
+ // in the case of a min value, it can't start at 0 (the lowest score); in all cases, it doesn't hurt to use the
+ // first score, so we can safely use the first value by skipping it in the loop
+ if (scores.elementsCount != 0) {
+ aggregateScore = scores.buffer[0];
+
+ for (int i = 1; i < scores.elementsCount; i++) {
+ float score = scores.buffer[i];
+ switch (scoreType) {
+ case MIN:
+ if (aggregateScore > score) {
+ aggregateScore = score;
+ }
+ break;
+ case MAX:
+ if (aggregateScore < score) {
+ aggregateScore = score;
+ }
+ break;
+ case SUM:
+ case AVG:
+ aggregateScore += score;
+ break;
+ }
+ }
+
+ if (scoreType == ScoreType.AVG) {
+ aggregateScore /= scores.elementsCount;
+ }
+ }
+
+ return aggregateScore;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int docID() {
+ return 0;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ return 0;
+ }
+
+ @Override
+ public long cost() {
+ return 0;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
new file mode 100644
index 0000000000..a42e940569
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.IntIntHashMap;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.NavigableSet;
+import java.util.Random;
+import java.util.TreeSet;
+
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
+import static org.elasticsearch.index.query.QueryBuilders.notQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+/**
+ */
+public class ParentConstantScoreQueryTests extends AbstractChildTests {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext current = SearchContext.current();
+ SearchContext.removeCurrent();
+ Releasables.close(current);
+ }
+
+ @Test
+ public void testBasicQuerySanities() {
+ Query parentQuery = new TermQuery(new Term("field", "value"));
+ ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
+ ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
+ BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child"))));
+ Query query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter);
+ QueryUtils.check(query);
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ final Random r = random();
+ final IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r))
+ .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .setRAMBufferSizeMB(scaledRandomIntBetween(16, 64)); // we might index a lot - don't go crazy here
+ RandomIndexWriter indexWriter = new RandomIndexWriter(r, directory, iwc);
+ int numUniqueParentValues = scaledRandomIntBetween(100, 2000);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = scaledRandomIntBetween(1, numUniqueParentValues);
+ ObjectObjectHashMap<String, NavigableSet<String>> parentValueToChildDocIds = new ObjectObjectHashMap<>();
+ IntIntHashMap childIdToParentId = new IntIntHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs = scaledRandomIntBetween(0, 100);
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ String child = Integer.toString(childDocId++);
+
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableSet<String> childIds;
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ childIds = parentValueToChildDocIds.get(parentValue);
+ } else {
+ parentValueToChildDocIds.put(parentValue, childIds = new TreeSet<>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+ childIdToParentId.put(Integer.valueOf(child), parentDocId);
+ childIds.add(child);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.Searcher(
+ ParentConstantScoreQuery.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = childIdToParentId.isEmpty() ? 0 : scaledRandomIntBetween(1, 25);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.Searcher(
+ ParentConstantScoreQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ QueryBuilder queryBuilder;
+ if (random().nextBoolean()) {
+ queryBuilder = hasParentQuery("parent", termQuery("field1", parentValue));
+ } else {
+ queryBuilder = constantScoreQuery(hasParentQuery("parent", termQuery("field1", parentValue)));
+ }
+ // Using a FQ, will invoke / test the Scorer#advance(..) and also let the Weight#scorer not get live docs as acceptedDocs
+ queryBuilder = filteredQuery(queryBuilder, notQuery(termQuery("filter", "me")));
+ Query query = parseQuery(queryBuilder);
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ searcher.search(query, collector);
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ if (parentValueToChildDocIds.containsKey(parentValue)) {
+ LeafReader slowLeafReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableSet<String> childIds = parentValueToChildDocIds.get(parentValue);
+ TermsEnum termsEnum = terms.iterator();
+ PostingsEnum docsEnum = null;
+ for (String id : childIds) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
new file mode 100644
index 0000000000..a0e6e54210
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import com.carrotsearch.hppc.FloatArrayList;
+import com.carrotsearch.hppc.IntIntHashMap;
+import com.carrotsearch.hppc.ObjectObjectHashMap;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Random;
+import java.util.TreeMap;
+
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
+import static org.elasticsearch.index.query.QueryBuilders.notQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+public class ParentQueryTests extends AbstractChildTests {
+
+ @BeforeClass
+ public static void before() throws IOException {
+ SearchContext.setCurrent(createSearchContext("test", "parent", "child"));
+ }
+
+ @AfterClass
+ public static void after() throws IOException {
+ SearchContext current = SearchContext.current();
+ SearchContext.removeCurrent();
+ Releasables.close(current);
+ }
+
+ @Test
+ public void testBasicQuerySanities() {
+ Query parentQuery = new TermQuery(new Term("field", "value"));
+ ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper();
+ ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper);
+ BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child"))));
+ Query query = new ParentQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter);
+ QueryUtils.check(query);
+ }
+
+ @Test
+ public void testRandom() throws Exception {
+ Directory directory = newDirectory();
+ final Random r = random();
+ final IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(r, new MockAnalyzer(r))
+ .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
+ .setRAMBufferSizeMB(scaledRandomIntBetween(16, 64)); // we might index a lot - don't go crazy here
+ RandomIndexWriter indexWriter = new RandomIndexWriter(r, directory, iwc);
+ int numUniqueParentValues = scaledRandomIntBetween(100, 2000);
+ String[] parentValues = new String[numUniqueParentValues];
+ for (int i = 0; i < numUniqueParentValues; i++) {
+ parentValues[i] = Integer.toString(i);
+ }
+
+ int childDocId = 0;
+ int numParentDocs = scaledRandomIntBetween(1, numUniqueParentValues);
+ ObjectObjectHashMap<String, NavigableMap<String, Float>> parentValueToChildIds = new ObjectObjectHashMap<>();
+ IntIntHashMap childIdToParentId = new IntIntHashMap();
+ for (int parentDocId = 0; parentDocId < numParentDocs; parentDocId++) {
+ boolean markParentAsDeleted = rarely();
+ String parentValue = parentValues[random().nextInt(parentValues.length)];
+ String parent = Integer.toString(parentDocId);
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ document.add(new StringField(TypeFieldMapper.NAME, "parent", Field.Store.NO));
+ document.add(new StringField("field1", parentValue, Field.Store.NO));
+ if (markParentAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ int numChildDocs = scaledRandomIntBetween(0, 100);
+ if (parentDocId == numParentDocs - 1 && childIdToParentId.isEmpty()) {
+ // ensure there is at least one child in the index
+ numChildDocs = Math.max(1, numChildDocs);
+ }
+ for (int i = 0; i < numChildDocs; i++) {
+ String child = Integer.toString(childDocId++);
+ boolean markChildAsDeleted = rarely();
+ boolean filterMe = rarely();
+ document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, Uid.createUid("child", child), Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ document.add(new StringField(ParentFieldMapper.NAME, Uid.createUid("parent", parent), Field.Store.NO));
+ if (markChildAsDeleted) {
+ document.add(new StringField("delete", "me", Field.Store.NO));
+ }
+ if (filterMe) {
+ document.add(new StringField("filter", "me", Field.Store.NO));
+ }
+ indexWriter.addDocument(document);
+
+ if (!markParentAsDeleted) {
+ NavigableMap<String, Float> childIdToScore = parentValueToChildIds.getOrDefault(parentValue, null);
+ if (childIdToScore == null) {
+ parentValueToChildIds.put(parentValue, childIdToScore = new TreeMap<>());
+ }
+ if (!markChildAsDeleted && !filterMe) {
+ assertFalse("child ["+ child + "] already has a score", childIdToScore.containsKey(child));
+ childIdToScore.put(child, 1f);
+ childIdToParentId.put(Integer.valueOf(child), parentDocId);
+ }
+ }
+ }
+ }
+
+ // Delete docs that are marked to be deleted.
+ indexWriter.deleteDocuments(new Term("delete", "me"));
+ indexWriter.commit();
+
+ IndexReader indexReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(indexReader);
+ Engine.Searcher engineSearcher = new Engine.Searcher(
+ ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+
+ int max = numUniqueParentValues / 4;
+ for (int i = 0; i < max; i++) {
+ // Simulate a child update
+ if (random().nextBoolean()) {
+ int numberOfUpdates = childIdToParentId.isEmpty() ? 0 : scaledRandomIntBetween(1, 5);
+ int[] childIds = childIdToParentId.keys().toArray();
+ for (int j = 0; j < numberOfUpdates; j++) {
+ int childId = childIds[random().nextInt(childIds.length)];
+ String childUid = Uid.createUid("child", Integer.toString(childId));
+ indexWriter.deleteDocuments(new Term(UidFieldMapper.NAME, childUid));
+
+ Document document = new Document();
+ document.add(new StringField(UidFieldMapper.NAME, childUid, Field.Store.YES));
+ document.add(new StringField(TypeFieldMapper.NAME, "child", Field.Store.NO));
+ String parentUid = Uid.createUid("parent", Integer.toString(childIdToParentId.get(childId)));
+ document.add(new StringField(ParentFieldMapper.NAME, parentUid, Field.Store.NO));
+ indexWriter.addDocument(document);
+ }
+
+ indexReader.close();
+ indexReader = DirectoryReader.open(indexWriter.w, true);
+ searcher = new IndexSearcher(indexReader);
+ engineSearcher = new Engine.Searcher(
+                        ParentQueryTests.class.getSimpleName(), searcher
+ );
+ ((TestSearchContext) SearchContext.current()).setSearcher(new ContextIndexSearcher(SearchContext.current(), engineSearcher));
+ }
+
+ String parentValue = parentValues[random().nextInt(numUniqueParentValues)];
+ QueryBuilder queryBuilder = hasParentQuery("parent", constantScoreQuery(termQuery("field1", parentValue)));
+ // Using a FQ, will invoke / test the Scorer#advance(..) and also let the Weight#scorer not get live docs as acceptedDocs
+ queryBuilder = filteredQuery(queryBuilder, notQuery(termQuery("filter", "me")));
+ Query query = parseQuery(queryBuilder);
+
+ BitSetCollector collector = new BitSetCollector(indexReader.maxDoc());
+ int numHits = 1 + random().nextInt(25);
+ TopScoreDocCollector actualTopDocsCollector = TopScoreDocCollector.create(numHits);
+ searcher.search(query, MultiCollector.wrap(collector, actualTopDocsCollector));
+ FixedBitSet actualResult = collector.getResult();
+
+ FixedBitSet expectedResult = new FixedBitSet(indexReader.maxDoc());
+ TopScoreDocCollector expectedTopDocsCollector = TopScoreDocCollector.create(numHits);
+ if (parentValueToChildIds.containsKey(parentValue)) {
+ LeafReader slowLeafReader = SlowCompositeReaderWrapper.wrap(indexReader);
+ final FloatArrayList[] scores = new FloatArrayList[slowLeafReader.maxDoc()];
+ Terms terms = slowLeafReader.terms(UidFieldMapper.NAME);
+ if (terms != null) {
+ NavigableMap<String, Float> childIdsAndScore = parentValueToChildIds.get(parentValue);
+ TermsEnum termsEnum = terms.iterator();
+ PostingsEnum docsEnum = null;
+ for (Map.Entry<String, Float> entry : childIdsAndScore.entrySet()) {
+ TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey()));
+ if (seekStatus == TermsEnum.SeekStatus.FOUND) {
+ docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE);
+ expectedResult.set(docsEnum.nextDoc());
+ FloatArrayList s = scores[docsEnum.docID()];
+ if (s == null) {
+ scores[docsEnum.docID()] = s = new FloatArrayList(2);
+ }
+ s.add(entry.getValue());
+ } else if (seekStatus == TermsEnum.SeekStatus.END) {
+ break;
+ }
+ }
+ }
+ MockScorer mockScorer = new MockScorer(ScoreType.MAX);
+ mockScorer.scores = new FloatArrayList();
+ final LeafCollector leafCollector = expectedTopDocsCollector.getLeafCollector(slowLeafReader.getContext());
+ leafCollector.setScorer(mockScorer);
+ for (int doc = expectedResult.nextSetBit(0); doc < slowLeafReader.maxDoc(); doc = doc + 1 >= expectedResult.length() ? DocIdSetIterator.NO_MORE_DOCS : expectedResult.nextSetBit(doc + 1)) {
+ mockScorer.scores.clear();
+ mockScorer.scores.addAll(scores[doc]);
+ leafCollector.collect(doc);
+ }
+ }
+
+ assertBitSet(actualResult, expectedResult, searcher);
+ assertTopDocs(actualTopDocsCollector.topDocs(), expectedTopDocsCollector.topDocs());
+ }
+
+ indexWriter.close();
+ indexReader.close();
+ directory.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java
new file mode 100644
index 0000000000..27d8641789
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.child;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests {@link ScoreType} to ensure backward compatibility of any changes.
+ */
+public class ScoreTypeTests extends ElasticsearchTestCase {
+ @Test
+ public void minFromString() {
+ assertThat("fromString(min) != MIN", ScoreType.MIN, equalTo(ScoreType.fromString("min")));
+ }
+
+ @Test
+ public void maxFromString() {
+ assertThat("fromString(max) != MAX", ScoreType.MAX, equalTo(ScoreType.fromString("max")));
+ }
+
+ @Test
+ public void avgFromString() {
+ assertThat("fromString(avg) != AVG", ScoreType.AVG, equalTo(ScoreType.fromString("avg")));
+ }
+
+ @Test
+ public void sumFromString() {
+ assertThat("fromString(sum) != SUM", ScoreType.SUM, equalTo(ScoreType.fromString("sum")));
+ // allowed for consistency with ScoreMode.Total:
+ assertThat("fromString(total) != SUM", ScoreType.SUM, equalTo(ScoreType.fromString("total")));
+ }
+
+ @Test
+ public void noneFromString() {
+ assertThat("fromString(none) != NONE", ScoreType.NONE, equalTo(ScoreType.fromString("none")));
+ }
+
+ /**
+ * Should throw {@link IllegalArgumentException} instead of NPE.
+ */
+ @Test(expected = IllegalArgumentException.class)
+ public void nullFromString_throwsException() {
+ ScoreType.fromString(null);
+ }
+
+ /**
+ * Failure should not change (and the value should never match anything...).
+ */
+ @Test(expected = IllegalArgumentException.class)
+ public void unrecognizedFromString_throwsException() {
+ ScoreType.fromString("unrecognized value");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000000..a20ee88173
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoDistanceTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class GeoDistanceTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDistanceCheck() {
+ // Note, is within is an approximation, so, even though 0.52 is outside 50mi, we still get "true"
+ GeoDistance.DistanceBoundingCheck check = GeoDistance.distanceBoundingCheck(0, 0, 50, DistanceUnit.MILES);
+ assertThat(check.isWithin(0.5, 0.5), equalTo(true));
+ assertThat(check.isWithin(0.52, 0.52), equalTo(true));
+ assertThat(check.isWithin(1, 1), equalTo(false));
+
+ check = GeoDistance.distanceBoundingCheck(0, 179, 200, DistanceUnit.MILES);
+ assertThat(check.isWithin(0, -179), equalTo(true));
+ assertThat(check.isWithin(0, -178), equalTo(false));
+ }
+
+ @Test
+ public void testArcDistanceVsPlaneInEllipsis() {
+ GeoPoint centre = new GeoPoint(48.8534100, 2.3488000);
+ GeoPoint northernPoint = new GeoPoint(48.8801108681, 2.35152032666);
+ GeoPoint westernPoint = new GeoPoint(48.85265, 2.308896);
+
+ // With GeoDistance.ARC both the northern and western points are within the 4km range
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.ARC.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+
+ // With GeoDistance.PLANE, only the northern point is within the 4km range,
+ // the western point is outside of the range due to the simple math it employs,
+ // meaning results will appear elliptical
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), northernPoint.lat(),
+ northernPoint.lon(), DistanceUnit.KILOMETERS), lessThan(4D));
+ assertThat(GeoDistance.PLANE.calculate(centre.lat(), centre.lon(), westernPoint.lat(),
+ westernPoint.lon(), DistanceUnit.KILOMETERS), greaterThan(4D));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
new file mode 100644
index 0000000000..49e1b4d0dc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoHashUtilsTests.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ *
+ */
+public class GeoHashUtilsTests extends ElasticsearchTestCase {
+
+ /**
+ * Pass condition: lat=42.6, lng=-5.6 should be encoded as "ezs42e44yx96",
+ * lat=57.64911 lng=10.40744 should be encoded as "u4pruydqqvj8"
+ */
+ @Test
+ public void testEncode() {
+ String hash = GeoHashUtils.encode(42.6, -5.6);
+ assertEquals("ezs42e44yx96", hash);
+
+ hash = GeoHashUtils.encode(57.64911, 10.40744);
+ assertEquals("u4pruydqqvj8", hash);
+ }
+
+ /**
+ * Pass condition: lat=52.3738007, lng=4.8909347 should be encoded and then
+ * decoded within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodePreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(52.3738007, 4.8909347);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(52.3738007, point.lat(), 0.00001D);
+ assertEquals(4.8909347, point.lon(), 0.00001D);
+ }
+
+ /**
+ * Pass condition: lat=84.6, lng=10.5 should be encoded and then decoded
+ * within 0.00001 of the original value
+ */
+ @Test
+ public void testDecodeImpreciseLongitudeLatitude() {
+ String hash = GeoHashUtils.encode(84.6, 10.5);
+
+ GeoPoint point = GeoHashUtils.decode(hash);
+
+ assertEquals(84.6, point.lat(), 0.00001D);
+ assertEquals(10.5, point.lon(), 0.00001D);
+ }
+
+ /*
+ * see https://issues.apache.org/jira/browse/LUCENE-1815 for details
+ */
+
+ @Test
+ public void testDecodeEncode() {
+ String geoHash = "u173zq37x014";
+ assertEquals(geoHash, GeoHashUtils.encode(52.3738007, 4.8909347));
+ GeoPoint decode = GeoHashUtils.decode(geoHash);
+ assertEquals(52.37380061d, decode.lat(), 0.000001d);
+ assertEquals(4.8909343d, decode.lon(), 0.000001d);
+
+ assertEquals(geoHash, GeoHashUtils.encode(decode.lat(), decode.lon()));
+ }
+
+ @Test
+ public void testNeighbours() {
+ String geohash = "gcpv";
+ List<String> expectedNeighbors = new ArrayList<>();
+ expectedNeighbors.add("gcpw");
+ expectedNeighbors.add("gcpy");
+ expectedNeighbors.add("u10n");
+ expectedNeighbors.add("gcpt");
+ expectedNeighbors.add("u10j");
+ expectedNeighbors.add("gcps");
+ expectedNeighbors.add("gcpu");
+ expectedNeighbors.add("u10h");
+ Collection<? super String> neighbors = new ArrayList<>();
+ GeoHashUtils.addNeighbors(geohash, neighbors );
+ assertEquals(expectedNeighbors, neighbors);
+
+ // Border odd geohash
+ geohash = "u09x";
+ expectedNeighbors = new ArrayList<>();
+ expectedNeighbors.add("u0c2");
+ expectedNeighbors.add("u0c8");
+ expectedNeighbors.add("u0cb");
+ expectedNeighbors.add("u09r");
+ expectedNeighbors.add("u09z");
+ expectedNeighbors.add("u09q");
+ expectedNeighbors.add("u09w");
+ expectedNeighbors.add("u09y");
+ neighbors = new ArrayList<>();
+ GeoHashUtils.addNeighbors(geohash, neighbors );
+ assertEquals(expectedNeighbors, neighbors);
+
+ // Border even geohash
+ geohash = "u09tv";
+ expectedNeighbors = new ArrayList<>();
+ expectedNeighbors.add("u09wh");
+ expectedNeighbors.add("u09wj");
+ expectedNeighbors.add("u09wn");
+ expectedNeighbors.add("u09tu");
+ expectedNeighbors.add("u09ty");
+ expectedNeighbors.add("u09ts");
+ expectedNeighbors.add("u09tt");
+ expectedNeighbors.add("u09tw");
+ neighbors = new ArrayList<>();
+ GeoHashUtils.addNeighbors(geohash, neighbors );
+ assertEquals(expectedNeighbors, neighbors);
+
+ // Border even and odd geohash
+ geohash = "ezzzz";
+ expectedNeighbors = new ArrayList<>();
+ expectedNeighbors.add("gbpbn");
+ expectedNeighbors.add("gbpbp");
+ expectedNeighbors.add("u0000");
+ expectedNeighbors.add("ezzzy");
+ expectedNeighbors.add("spbpb");
+ expectedNeighbors.add("ezzzw");
+ expectedNeighbors.add("ezzzx");
+ expectedNeighbors.add("spbp8");
+ neighbors = new ArrayList<>();
+ GeoHashUtils.addNeighbors(geohash, neighbors );
+ assertEquals(expectedNeighbors, neighbors);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java
new file mode 100644
index 0000000000..3614cf6e2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoPointParsingTests.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.closeTo;
+
+
+public class GeoPointParsingTests extends ElasticsearchTestCase {
+
+ // mind geohash precision and error
+ private static final double ERROR = 0.00001d;
+
+ @Test
+ public void testGeoPointReset() throws IOException {
+ double lat = 1 + randomDouble() * 89;
+ double lon = 1 + randomDouble() * 179;
+
+ GeoPoint point = new GeoPoint(0, 0);
+ assertCloseTo(point, 0, 0);
+
+ assertCloseTo(point.reset(lat, lon), lat, lon);
+ assertCloseTo(point.reset(0, 0), 0, 0);
+ assertCloseTo(point.resetLat(lat), lat, 0);
+ assertCloseTo(point.resetLat(0), 0, 0);
+ assertCloseTo(point.resetLon(lon), 0, lon);
+ assertCloseTo(point.resetLon(0), 0, 0);
+ assertCloseTo(point.resetFromGeoHash(GeoHashUtils.encode(lat, lon)), lat, lon);
+ assertCloseTo(point.reset(0, 0), 0, 0);
+ assertCloseTo(point.resetFromString(Double.toString(lat) + ", " + Double.toString(lon)), lat, lon);
+ assertCloseTo(point.reset(0, 0), 0, 0);
+ }
+
+ @Test
+ public void testGeoPointParsing() throws IOException {
+ double lat = randomDouble() * 180 - 90;
+ double lon = randomDouble() * 360 - 180;
+
+ GeoPoint point = GeoUtils.parseGeoPoint(objectLatLon(lat, lon));
+ assertCloseTo(point, lat, lon);
+
+ GeoUtils.parseGeoPoint(arrayLatLon(lat, lon), point);
+ assertCloseTo(point, lat, lon);
+
+ GeoUtils.parseGeoPoint(geohash(lat, lon), point);
+ assertCloseTo(point, lat, lon);
+
+ GeoUtils.parseGeoPoint(stringLatLon(lat, lon), point);
+ assertCloseTo(point, lat, lon);
+ }
+
+ // Based on issue5390
+ @Test(expected = ElasticsearchParseException.class)
+ public void testInvalidPointEmbeddedObject() throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startObject();
+ content.startObject("location");
+ content.field("lat", 0).field("lon", 0);
+ content.endObject();
+ content.endObject();
+
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testInvalidPointLatHashMix() throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startObject();
+ content.field("lat", 0).field("geohash", GeoHashUtils.encode(0, 0));
+ content.endObject();
+
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testInvalidPointLonHashMix() throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startObject();
+ content.field("lon", 0).field("geohash", GeoHashUtils.encode(0, 0));
+ content.endObject();
+
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ @Test(expected = ElasticsearchParseException.class)
+ public void testInvalidField() throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startObject();
+ content.field("lon", 0).field("lat", 0).field("test", 0);
+ content.endObject();
+
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ private static XContentParser objectLatLon(double lat, double lon) throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startObject();
+ content.field("lat", lat).field("lon", lon);
+ content.endObject();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+ return parser;
+ }
+
+ private static XContentParser arrayLatLon(double lat, double lon) throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.startArray().value(lon).value(lat).endArray();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+ return parser;
+ }
+
+ private static XContentParser stringLatLon(double lat, double lon) throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.value(Double.toString(lat) + ", " + Double.toString(lon));
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+ return parser;
+ }
+
+ private static XContentParser geohash(double lat, double lon) throws IOException {
+ XContentBuilder content = JsonXContent.contentBuilder();
+ content.value(GeoHashUtils.encode(lat, lon));
+ XContentParser parser = JsonXContent.jsonXContent.createParser(content.bytes());
+ parser.nextToken();
+ return parser;
+ }
+
+ public static void assertCloseTo(GeoPoint point, double lat, double lon) {
+ assertThat(point.lat(), closeTo(lat, ERROR));
+ assertThat(point.lon(), closeTo(lon, ERROR));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
new file mode 100644
index 0000000000..34c30be922
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java
@@ -0,0 +1,627 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import org.apache.lucene.spatial.prefix.tree.Cell;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.*;
+
+public class GeoUtilsTests extends ElasticsearchTestCase {
+
+ private static final char[] BASE_32 = {'0', '1', '2', '3', '4', '5', '6',
+ '7', '8', '9', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'm', 'n',
+ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
+ private static final double MAX_ACCEPTABLE_ERROR = 0.000000001;
+
+ @Test
+ public void testGeohashCellWidth() {
+ double equatorialDistance = 2 * Math.PI * 6378137.0;
+ assertThat(GeoUtils.geoHashCellWidth(0), equalTo(equatorialDistance));
+ assertThat(GeoUtils.geoHashCellWidth(1), equalTo(equatorialDistance / 8));
+ assertThat(GeoUtils.geoHashCellWidth(2), equalTo(equatorialDistance / 32));
+ assertThat(GeoUtils.geoHashCellWidth(3), equalTo(equatorialDistance / 256));
+ assertThat(GeoUtils.geoHashCellWidth(4), equalTo(equatorialDistance / 1024));
+ assertThat(GeoUtils.geoHashCellWidth(5), equalTo(equatorialDistance / 8192));
+ assertThat(GeoUtils.geoHashCellWidth(6), equalTo(equatorialDistance / 32768));
+ assertThat(GeoUtils.geoHashCellWidth(7), equalTo(equatorialDistance / 262144));
+ assertThat(GeoUtils.geoHashCellWidth(8), equalTo(equatorialDistance / 1048576));
+ assertThat(GeoUtils.geoHashCellWidth(9), equalTo(equatorialDistance / 8388608));
+ assertThat(GeoUtils.geoHashCellWidth(10), equalTo(equatorialDistance / 33554432));
+ assertThat(GeoUtils.geoHashCellWidth(11), equalTo(equatorialDistance / 268435456));
+ assertThat(GeoUtils.geoHashCellWidth(12), equalTo(equatorialDistance / 1073741824));
+ }
+
+ @Test
+ public void testGeohashCellHeight() {
+ double polarDistance = Math.PI * 6356752.314245;
+ assertThat(GeoUtils.geoHashCellHeight(0), equalTo(polarDistance));
+ assertThat(GeoUtils.geoHashCellHeight(1), equalTo(polarDistance / 4));
+ assertThat(GeoUtils.geoHashCellHeight(2), equalTo(polarDistance / 32));
+ assertThat(GeoUtils.geoHashCellHeight(3), equalTo(polarDistance / 128));
+ assertThat(GeoUtils.geoHashCellHeight(4), equalTo(polarDistance / 1024));
+ assertThat(GeoUtils.geoHashCellHeight(5), equalTo(polarDistance / 4096));
+ assertThat(GeoUtils.geoHashCellHeight(6), equalTo(polarDistance / 32768));
+ assertThat(GeoUtils.geoHashCellHeight(7), equalTo(polarDistance / 131072));
+ assertThat(GeoUtils.geoHashCellHeight(8), equalTo(polarDistance / 1048576));
+ assertThat(GeoUtils.geoHashCellHeight(9), equalTo(polarDistance / 4194304));
+ assertThat(GeoUtils.geoHashCellHeight(10), equalTo(polarDistance / 33554432));
+ assertThat(GeoUtils.geoHashCellHeight(11), equalTo(polarDistance / 134217728));
+ assertThat(GeoUtils.geoHashCellHeight(12), equalTo(polarDistance / 1073741824));
+ }
+
+ @Test
+ public void testGeohashCellSize() {
+ double equatorialDistance = 2 * Math.PI * 6378137.0;
+ double polarDistance = Math.PI * 6356752.314245;
+ assertThat(GeoUtils.geoHashCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
+ assertThat(GeoUtils.geoHashCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2))));
+ assertThat(GeoUtils.geoHashCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
+ assertThat(GeoUtils.geoHashCellSize(3),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 256, 2))));
+ assertThat(GeoUtils.geoHashCellSize(4),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 1024, 2) + Math.pow(equatorialDistance / 1024, 2))));
+ assertThat(GeoUtils.geoHashCellSize(5),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 4096, 2) + Math.pow(equatorialDistance / 8192, 2))));
+ assertThat(GeoUtils.geoHashCellSize(6),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 32768, 2) + Math.pow(equatorialDistance / 32768, 2))));
+ assertThat(GeoUtils.geoHashCellSize(7),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 131072, 2) + Math.pow(equatorialDistance / 262144, 2))));
+ assertThat(GeoUtils.geoHashCellSize(8),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 1048576, 2) + Math.pow(equatorialDistance / 1048576, 2))));
+ assertThat(GeoUtils.geoHashCellSize(9),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 4194304, 2) + Math.pow(equatorialDistance / 8388608, 2))));
+ assertThat(GeoUtils.geoHashCellSize(10),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 33554432, 2) + Math.pow(equatorialDistance / 33554432, 2))));
+ assertThat(GeoUtils.geoHashCellSize(11),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 134217728, 2) + Math.pow(equatorialDistance / 268435456, 2))));
+ assertThat(GeoUtils.geoHashCellSize(12),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 1073741824, 2) + Math.pow(equatorialDistance / 1073741824, 2))));
+ }
+
+ @Test
+ public void testGeoHashLevelsForPrecision() {
+ for (int i = 0; i < 100; i++) {
+ double precision = randomDouble() * 100;
+ int level = GeoUtils.geoHashLevelsForPrecision(precision);
+ assertThat(GeoUtils.geoHashCellSize(level), lessThanOrEqualTo(precision));
+ }
+ }
+
+ @Test
+ public void testGeoHashLevelsForPrecision_String() {
+ for (int i = 0; i < 100; i++) {
+ double precision = randomDouble() * 100;
+ String precisionString = precision + "m";
+ int level = GeoUtils.geoHashLevelsForPrecision(precisionString);
+ assertThat(GeoUtils.geoHashCellSize(level), lessThanOrEqualTo(precision));
+ }
+ }
+
+ @Test
+ public void testQuadTreeCellWidth() {
+ double equatorialDistance = 2 * Math.PI * 6378137.0;
+ assertThat(GeoUtils.quadTreeCellWidth(0), equalTo(equatorialDistance));
+ assertThat(GeoUtils.quadTreeCellWidth(1), equalTo(equatorialDistance / 2));
+ assertThat(GeoUtils.quadTreeCellWidth(2), equalTo(equatorialDistance / 4));
+ assertThat(GeoUtils.quadTreeCellWidth(3), equalTo(equatorialDistance / 8));
+ assertThat(GeoUtils.quadTreeCellWidth(4), equalTo(equatorialDistance / 16));
+ assertThat(GeoUtils.quadTreeCellWidth(5), equalTo(equatorialDistance / 32));
+ assertThat(GeoUtils.quadTreeCellWidth(6), equalTo(equatorialDistance / 64));
+ assertThat(GeoUtils.quadTreeCellWidth(7), equalTo(equatorialDistance / 128));
+ assertThat(GeoUtils.quadTreeCellWidth(8), equalTo(equatorialDistance / 256));
+ assertThat(GeoUtils.quadTreeCellWidth(9), equalTo(equatorialDistance / 512));
+ assertThat(GeoUtils.quadTreeCellWidth(10), equalTo(equatorialDistance / 1024));
+ assertThat(GeoUtils.quadTreeCellWidth(11), equalTo(equatorialDistance / 2048));
+ assertThat(GeoUtils.quadTreeCellWidth(12), equalTo(equatorialDistance / 4096));
+ }
+
+ @Test
+ public void testQuadTreeCellHeight() {
+ double polarDistance = Math.PI * 6356752.314245;
+ assertThat(GeoUtils.quadTreeCellHeight(0), equalTo(polarDistance));
+ assertThat(GeoUtils.quadTreeCellHeight(1), equalTo(polarDistance / 2));
+ assertThat(GeoUtils.quadTreeCellHeight(2), equalTo(polarDistance / 4));
+ assertThat(GeoUtils.quadTreeCellHeight(3), equalTo(polarDistance / 8));
+ assertThat(GeoUtils.quadTreeCellHeight(4), equalTo(polarDistance / 16));
+ assertThat(GeoUtils.quadTreeCellHeight(5), equalTo(polarDistance / 32));
+ assertThat(GeoUtils.quadTreeCellHeight(6), equalTo(polarDistance / 64));
+ assertThat(GeoUtils.quadTreeCellHeight(7), equalTo(polarDistance / 128));
+ assertThat(GeoUtils.quadTreeCellHeight(8), equalTo(polarDistance / 256));
+ assertThat(GeoUtils.quadTreeCellHeight(9), equalTo(polarDistance / 512));
+ assertThat(GeoUtils.quadTreeCellHeight(10), equalTo(polarDistance / 1024));
+ assertThat(GeoUtils.quadTreeCellHeight(11), equalTo(polarDistance / 2048));
+ assertThat(GeoUtils.quadTreeCellHeight(12), equalTo(polarDistance / 4096));
+ }
+
+ @Test
+ public void testQuadTreeCellSize() {
+ double equatorialDistance = 2 * Math.PI * 6378137.0;
+ double polarDistance = Math.PI * 6356752.314245;
+ assertThat(GeoUtils.quadTreeCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(3), equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(4), equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(5), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(6), equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(7),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 128, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(8),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 256, 2) + Math.pow(equatorialDistance / 256, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(9),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 512, 2) + Math.pow(equatorialDistance / 512, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(10),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 1024, 2) + Math.pow(equatorialDistance / 1024, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(11),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 2048, 2) + Math.pow(equatorialDistance / 2048, 2))));
+ assertThat(GeoUtils.quadTreeCellSize(12),
+ equalTo(Math.sqrt(Math.pow(polarDistance / 4096, 2) + Math.pow(equatorialDistance / 4096, 2))));
+ }
+
+ @Test
+ public void testQuadTreeLevelsForPrecision() {
+ for (int i = 0; i < 100; i++) {
+ double precision = randomDouble() * 100;
+ int level = GeoUtils.quadTreeLevelsForPrecision(precision);
+ assertThat(GeoUtils.quadTreeCellSize(level), lessThanOrEqualTo(precision));
+ }
+ }
+
+ @Test
+ public void testQuadTreeLevelsForPrecision_String() {
+ for (int i = 0; i < 100; i++) {
+ double precision = randomDouble() * 100;
+ String precisionString = precision + "m";
+ int level = GeoUtils.quadTreeLevelsForPrecision(precisionString);
+ assertThat(GeoUtils.quadTreeCellSize(level), lessThanOrEqualTo(precision));
+ }
+ }
+
+ @Test
+ public void testNormalizeLat_inNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double testValue = (randomDouble() * 180.0) - 90.0;
+ assertThat(GeoUtils.normalizeLat(testValue), closeTo(testValue, MAX_ACCEPTABLE_ERROR));
+ }
+ }
+
+ @Test
+ public void testNormalizeLat_outsideNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double normalisedValue = (randomDouble() * 180.0) - 90.0;
+ int shift = (randomBoolean() ? 1 : -1) * randomIntBetween(1, 10000);
+ double testValue = normalisedValue + (180.0 * shift);
+ double expectedValue = normalisedValue * (shift % 2 == 0 ? 1 : -1);
+ assertThat(GeoUtils.normalizeLat(testValue), closeTo(expectedValue, MAX_ACCEPTABLE_ERROR));
+ }
+ }
+
+ @Test
+ public void testNormalizeLat_Huge() {
+ assertThat(GeoUtils.normalizeLat(-18000000000091.0), equalTo(GeoUtils.normalizeLat(-091.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000090.0), equalTo(GeoUtils.normalizeLat(-090.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000089.0), equalTo(GeoUtils.normalizeLat(-089.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000088.0), equalTo(GeoUtils.normalizeLat(-088.0)));
+ assertThat(GeoUtils.normalizeLat(-18000000000001.0), equalTo(GeoUtils.normalizeLat(-001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000000.0), equalTo(GeoUtils.normalizeLat(+000.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000001.0), equalTo(GeoUtils.normalizeLat(+001.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000002.0), equalTo(GeoUtils.normalizeLat(+002.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000088.0), equalTo(GeoUtils.normalizeLat(+088.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000089.0), equalTo(GeoUtils.normalizeLat(+089.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000090.0), equalTo(GeoUtils.normalizeLat(+090.0)));
+ assertThat(GeoUtils.normalizeLat(+18000000000091.0), equalTo(GeoUtils.normalizeLat(+091.0)));
+ }
+
+ @Test
+ public void testNormalizeLat_edgeCases() {
+ assertThat(GeoUtils.normalizeLat(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLat(Double.NaN), equalTo(Double.NaN));
+ assertThat(0.0, not(equalTo(-0.0)));
+ assertThat(GeoUtils.normalizeLat(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-180.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(180.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLat(-90.0), equalTo(-90.0));
+ assertThat(GeoUtils.normalizeLat(90.0), equalTo(90.0));
+ }
+
+ @Test
+ public void testNormalizeLon_inNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double testValue = (randomDouble() * 360.0) - 180.0;
+ assertThat(GeoUtils.normalizeLon(testValue), closeTo(testValue, MAX_ACCEPTABLE_ERROR));
+ }
+ }
+
+ @Test
+ public void testNormalizeLon_outsideNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double normalisedValue = (randomDouble() * 360.0) - 180.0;
+ double testValue = normalisedValue + ((randomBoolean() ? 1 : -1) * 360.0 * randomIntBetween(1, 10000));
+ assertThat(GeoUtils.normalizeLon(testValue), closeTo(normalisedValue, MAX_ACCEPTABLE_ERROR));
+ }
+ }
+
+ @Test
+ public void testNormalizeLon_Huge() {
+ assertThat(GeoUtils.normalizeLon(-36000000000181.0), equalTo(GeoUtils.normalizeLon(-181.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000180.0), equalTo(GeoUtils.normalizeLon(-180.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000179.0), equalTo(GeoUtils.normalizeLon(-179.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000178.0), equalTo(GeoUtils.normalizeLon(-178.0)));
+ assertThat(GeoUtils.normalizeLon(-36000000000001.0), equalTo(GeoUtils.normalizeLon(-001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000000.0), equalTo(GeoUtils.normalizeLon(+000.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000001.0), equalTo(GeoUtils.normalizeLon(+001.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000002.0), equalTo(GeoUtils.normalizeLon(+002.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000178.0), equalTo(GeoUtils.normalizeLon(+178.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000179.0), equalTo(GeoUtils.normalizeLon(+179.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000180.0), equalTo(GeoUtils.normalizeLon(+180.0)));
+ assertThat(GeoUtils.normalizeLon(+36000000000181.0), equalTo(GeoUtils.normalizeLon(+181.0)));
+ }
+
+ @Test
+ public void testNormalizeLon_edgeCases() {
+ assertThat(GeoUtils.normalizeLon(Double.POSITIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NEGATIVE_INFINITY), equalTo(Double.NaN));
+ assertThat(GeoUtils.normalizeLon(Double.NaN), equalTo(Double.NaN));
+ assertThat(0.0, not(equalTo(-0.0)));
+ assertThat(GeoUtils.normalizeLon(-0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(0.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(-360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(360.0), equalTo(0.0));
+ assertThat(GeoUtils.normalizeLon(-180.0), equalTo(180.0));
+ assertThat(GeoUtils.normalizeLon(180.0), equalTo(180.0));
+ }
+
+ @Test
+ public void testNormalizePoint_inNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double testLat = (randomDouble() * 180.0) - 90.0;
+ double testLon = (randomDouble() * 360.0) - 180.0;
+ GeoPoint testPoint = new GeoPoint(testLat, testLon);
+ assertNormalizedPoint(testPoint, testPoint);
+ }
+ }
+
+ @Test
+ public void testNormalizePoint_outsideNormalRange() {
+ for (int i = 0; i < 100; i++) {
+ double normalisedLat = (randomDouble() * 180.0) - 90.0;
+ double normalisedLon = (randomDouble() * 360.0) - 180.0;
+ int shiftLat = (randomBoolean() ? 1 : -1) * randomIntBetween(1, 10000);
+ int shiftLon = (randomBoolean() ? 1 : -1) * randomIntBetween(1, 10000);
+ double testLat = normalisedLat + (180.0 * shiftLat);
+ double testLon = normalisedLon + (360.0 * shiftLon);
+ double expectedLat = normalisedLat * (shiftLat % 2 == 0 ? 1 : -1);
+ double expectedLon = normalisedLon + (shiftLat % 2 == 0 ? 0 : 180);
+ if (expectedLon > 180.0) {
+ expectedLon -= 360;
+ }
+ GeoPoint testPoint = new GeoPoint(testLat, testLon);
+ GeoPoint expectedPoint = new GeoPoint(expectedLat, expectedLon);
+ assertNormalizedPoint(testPoint, expectedPoint);
+ }
+ }
+
+ @Test
+ public void testNormalizePoint_outsideNormalRange_withOptions() {
+ for (int i = 0; i < 100; i++) {
+ boolean normLat = randomBoolean();
+ boolean normLon = randomBoolean();
+ double normalisedLat = (randomDouble() * 180.0) - 90.0;
+ double normalisedLon = (randomDouble() * 360.0) - 180.0;
+ int shiftLat = randomIntBetween(1, 10000);
+ int shiftLon = randomIntBetween(1, 10000);
+ double testLat = normalisedLat + (180.0 * shiftLat);
+ double testLon = normalisedLon + (360.0 * shiftLon);
+
+ double expectedLat;
+ double expectedLon;
+ if (normLat) {
+ expectedLat = normalisedLat * (shiftLat % 2 == 0 ? 1 : -1);
+ } else {
+ expectedLat = testLat;
+ }
+ if (normLon) {
+ expectedLon = normalisedLon + ((normLat && shiftLat % 2 == 1) ? 180 : 0);
+ if (expectedLon > 180.0) {
+ expectedLon -= 360;
+ }
+ } else {
+ double shiftValue = normalisedLon > 0 ? -180 : 180;
+ expectedLon = testLon + ((normLat && shiftLat % 2 == 1) ? shiftValue : 0);
+ }
+ GeoPoint testPoint = new GeoPoint(testLat, testLon);
+ GeoPoint expectedPoint = new GeoPoint(expectedLat, expectedLon);
+ GeoUtils.normalizePoint(testPoint, normLat, normLon);
+ assertThat("Unexpected Latitude", testPoint.lat(), closeTo(expectedPoint.lat(), MAX_ACCEPTABLE_ERROR));
+ assertThat("Unexpected Longitude", testPoint.lon(), closeTo(expectedPoint.lon(), MAX_ACCEPTABLE_ERROR));
+ }
+ }
+
+ @Test
+ public void testNormalizePoint_Huge() {
+ assertNormalizedPoint(new GeoPoint(-18000000000091.0, -36000000000181.0), new GeoPoint(-089.0, -001.0));
+ assertNormalizedPoint(new GeoPoint(-18000000000090.0, -36000000000180.0), new GeoPoint(-090.0, +180.0));
+ assertNormalizedPoint(new GeoPoint(-18000000000089.0, -36000000000179.0), new GeoPoint(-089.0, -179.0));
+ assertNormalizedPoint(new GeoPoint(-18000000000088.0, -36000000000178.0), new GeoPoint(-088.0, -178.0));
+ assertNormalizedPoint(new GeoPoint(-18000000000001.0, -36000000000001.0), new GeoPoint(-001.0, -001.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000000.0, +36000000000000.0), new GeoPoint(+000.0, +000.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000001.0, +36000000000001.0), new GeoPoint(+001.0, +001.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000002.0, +36000000000002.0), new GeoPoint(+002.0, +002.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000088.0, +36000000000178.0), new GeoPoint(+088.0, +178.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000089.0, +36000000000179.0), new GeoPoint(+089.0, +179.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000090.0, +36000000000180.0), new GeoPoint(+090.0, +180.0));
+ assertNormalizedPoint(new GeoPoint(+18000000000091.0, +36000000000181.0), new GeoPoint(+089.0, +001.0));
+
+ }
+
+ // Edge cases of normalizePoint: non-finite inputs become NaN, signed zero is
+ // canonicalized to +0.0, and the poles / antimeridian map to fixed values.
+ @Test
+ public void testNormalizePoint_edgeCases() {
+ assertNormalizedPoint(new GeoPoint(Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY), new GeoPoint(Double.NaN, Double.NaN));
+ assertNormalizedPoint(new GeoPoint(Double.NEGATIVE_INFINITY, Double.NEGATIVE_INFINITY), new GeoPoint(Double.NaN, Double.NaN));
+ assertNormalizedPoint(new GeoPoint(Double.NaN, Double.NaN), new GeoPoint(Double.NaN, Double.NaN));
+ assertThat(0.0, not(equalTo(-0.0))); // sanity check: the matcher distinguishes signed zeros
+ assertNormalizedPoint(new GeoPoint(-0.0, -0.0), new GeoPoint(0.0, 0.0));
+ assertNormalizedPoint(new GeoPoint(0.0, 0.0), new GeoPoint(0.0, 0.0));
+ assertNormalizedPoint(new GeoPoint(-180.0, -360.0), new GeoPoint(0.0, 180.0));
+ assertNormalizedPoint(new GeoPoint(180.0, 360.0), new GeoPoint(0.0, 180.0));
+ assertNormalizedPoint(new GeoPoint(-90.0, -180.0), new GeoPoint(-90.0, 180.0));
+ assertNormalizedPoint(new GeoPoint(90.0, 180.0), new GeoPoint(90.0, 180.0));
+ }
+
+ // parseGeoPoint must accept four equivalent representations of the same point:
+ // an object with numeric lat/lon, an object with string lat/lon, an array
+ // value, and a "lat,lon" string value.
+ @Test
+ public void testParseGeoPoint() throws IOException {
+ for (int i = 0; i < 100; i++) {
+ double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180;
+ double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoPoint point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point, equalTo(new GeoPoint(lat, lon)));
+ // Same coordinates, but encoded as JSON strings rather than numbers.
+ jsonBytes = jsonBuilder().startObject().field("lat", String.valueOf(lat)).field("lon", String.valueOf(lon)).endObject().bytes();
+ parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point, equalTo(new GeoPoint(lat, lon)));
+ // Array form; note the [lon, lat] ordering in the array.
+ jsonBytes = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).endArray().endObject().bytes();
+ parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.START_ARRAY) {
+ parser.nextToken();
+ }
+ point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point, equalTo(new GeoPoint(lat, lon)));
+ // String form "lat,lon".
+ jsonBytes = jsonBuilder().startObject().field("foo", lat + "," + lon).endObject().bytes();
+ parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.VALUE_STRING) {
+ parser.nextToken();
+ }
+ point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point, equalTo(new GeoPoint(lat, lon)));
+ }
+ }
+
+ // Random geohashes (length 1-20, built from the BASE_32 alphabet) must parse
+ // into points that lie within valid lat/lon bounds, both when the parser is
+ // positioned on the enclosing object and when positioned on the string token.
+ @Test
+ public void testParseGeoPoint_geohash() throws IOException {
+ for (int i = 0; i < 100; i++) {
+ int geoHashLength = randomIntBetween(1, 20);
+ StringBuilder geohashBuilder = new StringBuilder(geoHashLength);
+ for (int j = 0; j < geoHashLength; j++) {
+ geohashBuilder.append(BASE_32[randomInt(BASE_32.length - 1)]);
+ }
+ BytesReference jsonBytes = jsonBuilder().startObject().field("geohash", geohashBuilder.toString()).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoPoint point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point.lat(), allOf(lessThanOrEqualTo(90.0), greaterThanOrEqualTo(-90.0)));
+ assertThat(point.lon(), allOf(lessThanOrEqualTo(180.0), greaterThan(-180.0)));
+ // Same geohash, but start parsing from the VALUE_STRING token itself.
+ jsonBytes = jsonBuilder().startObject().field("geohash", geohashBuilder.toString()).endObject().bytes();
+ parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.VALUE_STRING) {
+ parser.nextToken();
+ }
+ point = GeoUtils.parseGeoPoint(parser);
+ assertThat(point.lat(), allOf(lessThanOrEqualTo(90.0), greaterThanOrEqualTo(-90.0)));
+ assertThat(point.lon(), allOf(lessThanOrEqualTo(180.0), greaterThan(-180.0)));
+ }
+ }
+
+ // A "geohash" field holding a number (not a string) must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_geohashWrongType() throws IOException {
+ BytesReference jsonBytes = jsonBuilder().startObject().field("geohash", 1.0).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // An object providing "lat" without a matching "lon" must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_LatNoLon() throws IOException {
+ double lat = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // An object providing "lon" without a matching "lat" must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_LonNoLat() throws IOException {
+ double lon = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lon", lon).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // A non-numeric "lon" value (boolean here) must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_LonWrongType() throws IOException {
+ double lat = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", false).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // A non-numeric "lat" value (boolean here) must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_LatWrongType() throws IOException {
+ double lon = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", false).field("lon", lon).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // An unknown extra field ("foo") alongside valid lat/lon must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_ExtraField() throws IOException {
+ double lat = 0.0;
+ double lon = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // Mixing lat/lon fields with a "geohash" field in one object must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_LonLatGeoHash() throws IOException {
+ double lat = 0.0;
+ double lon = 0.0;
+ String geohash = "abcd";
+ BytesReference jsonBytes = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("geohash", geohash).endObject()
+ .bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ parser.nextToken();
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // An array with more than two values (e.g. an elevation) must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_ArrayTooManyValues() throws IOException {
+ double lat = 0.0;
+ double lon = 0.0;
+ double elev = 0.0;
+ BytesReference jsonBytes = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).value(elev).endArray().endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.START_ARRAY) {
+ parser.nextToken();
+ }
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // An array containing a non-numeric element (boolean lon) must be rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_ArrayWrongType() throws IOException {
+ double lat = 0.0;
+ boolean lon = false;
+ BytesReference jsonBytes = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).endArray().endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.START_ARRAY) {
+ parser.nextToken();
+ }
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // A bare numeric token is not a valid geo point representation and must be
+ // rejected.
+ @Test(expected=ElasticsearchParseException.class)
+ public void testParseGeoPoint_InvalidType() throws IOException {
+ BytesReference jsonBytes = jsonBuilder().startObject().field("foo", 5).endObject().bytes();
+ XContentParser parser = XContentHelper.createParser(jsonBytes);
+ while (parser.currentToken() != Token.VALUE_NUMBER) {
+ parser.nextToken();
+ }
+ GeoUtils.parseGeoPoint(parser);
+ }
+
+ // Cross-checks GeoUtils' closed-form cell width/height/size formulas against
+ // the actual bounding boxes produced by Spatial4j's GeohashPrefixTree and
+ // QuadPrefixTree, level by level.
+ @Test
+ public void testPrefixTreeCellSizes() {
+ assertThat(GeoUtils.EARTH_SEMI_MAJOR_AXIS, equalTo(DistanceUtils.EARTH_EQUATORIAL_RADIUS_KM * 1000));
+ assertThat(GeoUtils.quadTreeCellWidth(0), lessThanOrEqualTo(GeoUtils.EARTH_EQUATOR));
+
+ SpatialContext spatialContext = new SpatialContext(true);
+
+ GeohashPrefixTree geohashPrefixTree = new GeohashPrefixTree(spatialContext, GeohashPrefixTree.getMaxLevelsPossible() / 2);
+ Cell gNode = geohashPrefixTree.getWorldCell();
+
+ // Walk down the geohash tree, comparing GeoUtils' per-level cell metrics
+ // with the tree's own cell shapes.
+ for (int i = 0; i < geohashPrefixTree.getMaxLevels(); i++) {
+ double width = GeoUtils.geoHashCellWidth(i);
+ double height = GeoUtils.geoHashCellHeight(i);
+ double size = GeoUtils.geoHashCellSize(i);
+ double degrees = 360.0 * width / GeoUtils.EARTH_EQUATOR;
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level " + i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level " + i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
+ / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ gNode = gNode.getNextLevelCells(null).next();
+ }
+
+ // Same walk for the quad tree; each level halves the cell extent (1L << i).
+ QuadPrefixTree quadPrefixTree = new QuadPrefixTree(spatialContext);
+ Cell qNode = quadPrefixTree.getWorldCell();
+ for (int i = 0; i < quadPrefixTree.getMaxLevels(); i++) {
+
+ double degrees = 360.0 / (1L << i);
+ double width = GeoUtils.quadTreeCellWidth(i);
+ double height = GeoUtils.quadTreeCellHeight(i);
+ double size = GeoUtils.quadTreeCellSize(i);
+ int level = GeoUtils.quadTreeLevelsForPrecision(size);
+
+ assertThat(GeoUtils.quadTreeCellWidth(level), lessThanOrEqualTo(width));
+ assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
+ assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees)));
+
+ assertThat("width at level " + i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
+ assertThat("height at level " + i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
+ / GeoUtils.EARTH_POLAR_DISTANCE));
+
+ qNode = qNode.getNextLevelCells(null).next();
+ }
+ }
+
+ // Normalizes 'input' in place via GeoUtils.normalizePoint and compares it to
+ // 'expected'. NaN expectations are compared with equalTo because closeTo
+ // never matches NaN.
+ private static void assertNormalizedPoint(GeoPoint input, GeoPoint expected) {
+ GeoUtils.normalizePoint(input);
+ if (Double.isNaN(expected.lat())) {
+ assertThat("Unexpected Latitude", input.lat(), equalTo(expected.lat()));
+ } else {
+ assertThat("Unexpected Latitude", input.lat(), closeTo(expected.lat(), MAX_ACCEPTABLE_ERROR));
+ }
+ if (Double.isNaN(expected.lon())) {
+ assertThat("Unexpected Longitude", input.lon(), equalTo(expected.lon()));
+ } else {
+ assertThat("Unexpected Longitude", input.lon(), closeTo(expected.lon(), MAX_ACCEPTABLE_ERROR));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
new file mode 100644
index 0000000000..940e10e77d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java
@@ -0,0 +1,355 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopFieldDocs;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Base test for sorting parent documents by a numeric field ("field2") of
+ * their nested (child) documents, joined via ToParentBlockJoinQuery.
+ * Subclasses supply the concrete numeric field type through
+ * {@link #createField} and the matching comparator source through
+ * {@link #createFieldComparator}. The expected doc IDs and sort values in the
+ * assertions below depend on the exact order documents are indexed in.
+ */
+public abstract class AbstractNumberNestedSortingTests extends AbstractFieldDataTests {
+
+ @Test
+ public void testNestedSorting() throws Exception {
+ // Each indexed block is three child docs (with "field2" and a "filter_1"
+ // flag) followed by their parent doc ("__type" == "parent", "field1").
+ List<Document> docs = new ArrayList<>();
+ Document document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 1, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 2, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 2, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 1, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 3, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 4, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 4, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 5, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 5, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 6, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 6, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // This doc will not be included, because it doesn't have nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 7, Field.Store.NO));
+ writer.addDocument(document);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 3, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(createField("field2", 7, Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(createField("field1", 8, Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check if the NestedFieldComparator can deal with this.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ // Join child docs to parents: children are "not parent", parents are
+ // identified by __type == parent.
+ MultiValueMode sortMode = MultiValueMode.SUM;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent")));
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter));
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+
+ // Ascending sort of parents by SUM of child "field2" values.
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(11));
+
+ // Same sort, descending (reverse = true).
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(13));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(11));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(10));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
+ // Restrict children to those with filter_1 == "T" and re-sort.
+ childFilter = new QueryWrapperFilter(new TermQuery(new Term("filter_1", "T")));
+ nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter));
+ query = new ToParentBlockJoinQuery(
+ new FilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new BitDocIdSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+
+ // Same filtered join, ascending.
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(7));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(8));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
+
+ // Missing value 127: searching over ALL parents, parents without matching
+ // children sort with value 127.
+ nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(parentFilter, childFilter));
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(23));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(12));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(9));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8));
+
+ // Missing value -127, ascending: the parents without children come first.
+ nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter));
+ sort = new Sort(new SortField("field2", nestedComparatorSource));
+ topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
+ assertThat(topDocs.totalHits, equalTo(8));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(24));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(-127));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(28));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(7));
+
+ // Moved to method, because floating point based XFieldComparatorSource have different outcome for SortMode avg,
+ // than integral number based implementations...
+ assertAvgScoreMode(parentFilter, searcher);
+ searcher.getIndexReader().close();
+ }
+
+ // AVG sort-mode expectations for integral field types; floating-point
+ // subclasses override this with their own expected values.
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException {
+ MultiValueMode sortMode = MultiValueMode.AVG;
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter));
+ Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(4));
+ }
+
+ // Creates an indexable field of the concrete numeric type under test.
+ protected abstract IndexableField createField(String name, int value, Field.Store store);
+
+ // Creates the comparator source matching the concrete numeric type.
+ protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested);
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
new file mode 100644
index 0000000000..12776cec73
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
+import org.elasticsearch.index.fielddata.plain.DoubleArrayIndexFieldData;
+import org.elasticsearch.search.MultiValueMode;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Nested-sorting tests for the "double" field type. Overrides
+ * {@link #assertAvgScoreMode} because floating-point AVG truncation yields
+ * different expected values than the integral implementations.
+ */
+public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("double");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ DoubleArrayIndexFieldData fieldData = getForField(fieldName);
+ return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode, nested);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new DoubleField(name, value, store);
+ }
+
+ // Double-specific AVG expectations; intValue() truncates the double averages.
+ @Override
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException {
+ MultiValueMode sortMode = MultiValueMode.AVG;
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter));
+ Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
new file mode 100644
index 0000000000..12cd10a2cd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.fieldcomparator.FloatValuesComparatorSource;
+import org.elasticsearch.index.fielddata.plain.FloatArrayIndexFieldData;
+import org.elasticsearch.search.MultiValueMode;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/** Nested sorting tests for float-typed fields; inherits and adapts the
+ * test cases from {@link DoubleNestedSortingTests}. */
+public class FloatNestedSortingTests extends DoubleNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("float");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ FloatArrayIndexFieldData fieldData = getForField(fieldName);
+ return new FloatValuesComparatorSource(fieldData, missingValue, sortMode, nested);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new FloatField(name, value, store);
+ }
+
+ protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
+ MultiValueMode sortMode = MultiValueMode.AVG;
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter));
+ Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(11));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).intValue(), equalTo(2));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(3));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).intValue(), equalTo(3));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(3));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
new file mode 100644
index 0000000000..927aa67201
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongField;
+import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.fieldcomparator.LongValuesComparatorSource;
+import org.elasticsearch.index.fielddata.plain.PackedArrayIndexFieldData;
+import org.elasticsearch.search.MultiValueMode;
+
+/** Nested sorting tests for long-typed fields, built on
+ * {@link AbstractNumberNestedSortingTests}. */
+public class LongNestedSortingTests extends AbstractNumberNestedSortingTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("long");
+ }
+
+ @Override
+ protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
+ PackedArrayIndexFieldData fieldData = getForField(fieldName);
+ return new LongValuesComparatorSource(fieldData, missingValue, sortMode, nested);
+ }
+
+ @Override
+ protected IndexableField createField(String name, int value, Field.Store store) {
+ return new LongField(name, value, store);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
new file mode 100644
index 0000000000..287170dad3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java
@@ -0,0 +1,353 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.search.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopFieldDocs;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.ScoreMode;
+import org.apache.lucene.search.join.ToParentBlockJoinQuery;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.AbstractFieldDataTests;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource;
+import org.elasticsearch.index.fielddata.NoOrdinalsStringFieldDataTests;
+import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
+import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
+import org.elasticsearch.search.MultiValueMode;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/** Tests nested (block-join) sorting on string fields, including a randomized
+ * duel between ordinal-based and non-ordinal field data comparators. */
+public class NestedSortingTests extends AbstractFieldDataTests {
+
+ @Override
+ protected FieldDataType getFieldDataType() {
+ return new FieldDataType("string", Settings.builder().put("format", "paged_bytes"));
+ }
+
+ @Test
+ public void testDuel() throws Exception {
+ final int numDocs = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < numDocs; ++i) {
+ final int numChildren = randomInt(2);
+ List<Document> docs = new ArrayList<>(numChildren + 1);
+ for (int j = 0; j < numChildren; ++j) {
+ Document doc = new Document();
+ doc.add(new StringField("f", TestUtil.randomSimpleString(getRandom(), 2), Field.Store.NO));
+ doc.add(new StringField("__type", "child", Field.Store.NO));
+ docs.add(doc);
+ }
+ if (randomBoolean()) {
+ docs.add(new Document());
+ }
+ Document parent = new Document();
+ parent.add(new StringField("__type", "parent", Field.Store.NO));
+ docs.add(parent);
+ writer.addDocuments(docs);
+ if (rarely()) { // we need to have a bit more segments than what RandomIndexWriter would do by default
+ DirectoryReader.open(writer, false).close();
+ }
+ }
+ writer.commit();
+
+ MultiValueMode sortMode = randomFrom(Arrays.asList(MultiValueMode.MIN, MultiValueMode.MAX));
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ PagedBytesIndexFieldData indexFieldData1 = getForField("f");
+ IndexFieldData<?> indexFieldData2 = NoOrdinalsStringFieldDataTests.hideOrdinals(indexFieldData1);
+ final String missingValue = randomBoolean() ? null : TestUtil.randomSimpleString(getRandom(), 2);
+ final int n = randomIntBetween(1, numDocs + 2);
+ final boolean reverse = randomBoolean();
+
+ final TopDocs topDocs1 = getTopDocs(searcher, indexFieldData1, missingValue, sortMode, n, reverse);
+ final TopDocs topDocs2 = getTopDocs(searcher, indexFieldData2, missingValue, sortMode, n, reverse);
+ for (int i = 0; i < topDocs1.scoreDocs.length; ++i) {
+ final FieldDoc fieldDoc1 = (FieldDoc) topDocs1.scoreDocs[i];
+ final FieldDoc fieldDoc2 = (FieldDoc) topDocs2.scoreDocs[i];
+ assertEquals(fieldDoc1.doc, fieldDoc2.doc);
+ assertArrayEquals(fieldDoc1.fields, fieldDoc2.fields);
+ }
+
+ searcher.getIndexReader().close();
+ }
+
+ private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData<?> indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException {
+ Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent")));
+ Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "child")));
+ XFieldComparatorSource nestedComparatorSource = indexFieldData.comparatorSource(missingValue, sortMode, createNested(parentFilter, childFilter));
+ Query query = new ConstantScoreQuery(parentFilter);
+ Sort sort = new Sort(new SortField("f", nestedComparatorSource, reverse));
+ return searcher.search(query, n, sort);
+ }
+
+ @Test
+ public void testNestedSorting() throws Exception {
+ List<Document> docs = new ArrayList<>();
+ Document document = new Document();
+ document.add(new StringField("field2", "a", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "b", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "a", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "c", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "d", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "b", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "e", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "f", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "c", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "g", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "h", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "d", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "i", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "j", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "f", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "k", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "l", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "g", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+
+ // This doc will not be included, because it doesn't have nested docs
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "h", Field.Store.NO));
+ writer.addDocument(document);
+
+ docs.clear();
+ document = new Document();
+ document.add(new StringField("field2", "m", Field.Store.NO));
+ document.add(new StringField("filter_1", "T", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "n", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("field2", "o", Field.Store.NO));
+ document.add(new StringField("filter_1", "F", Field.Store.NO));
+ docs.add(document);
+ document = new Document();
+ document.add(new StringField("__type", "parent", Field.Store.NO));
+ document.add(new StringField("field1", "i", Field.Store.NO));
+ docs.add(document);
+ writer.addDocuments(docs);
+ writer.commit();
+
+ // Some garbage docs, just to check if the NestedFieldComparator can deal with this.
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+ document = new Document();
+ document.add(new StringField("fieldXXX", "x", Field.Store.NO));
+ writer.addDocument(document);
+
+ MultiValueMode sortMode = MultiValueMode.MIN;
+ IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
+ PagedBytesIndexFieldData indexFieldData = getForField("field2");
+ Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent")));
+ Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
+ BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter));
+ ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
+
+ Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
+ TopFieldDocs topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(3));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("a"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("c"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("e"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
+
+ sortMode = MultiValueMode.MAX;
+ nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter));
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(7));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("o"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(19));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("k"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("i"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("g"));
+
+
+ BooleanQuery bq = new BooleanQuery();
+ bq.add(parentFilter, Occur.MUST_NOT);
+ bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST);
+ childFilter = new QueryWrapperFilter(bq);
+ nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter));
+ query = new ToParentBlockJoinQuery(
+ new FilteredQuery(new MatchAllDocsQuery(), childFilter),
+ new BitDocIdSetCachingWrapperFilter(parentFilter),
+ ScoreMode.None
+ );
+ sort = new Sort(new SortField("field2", nestedComparatorSource, true));
+ topDocs = searcher.search(query, 5, sort);
+ assertThat(topDocs.totalHits, equalTo(6));
+ assertThat(topDocs.scoreDocs.length, equalTo(5));
+ assertThat(topDocs.scoreDocs[0].doc, equalTo(23));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[0]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[1].doc, equalTo(28));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[1]).fields[0]).utf8ToString(), equalTo("m"));
+ assertThat(topDocs.scoreDocs[2].doc, equalTo(11));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[2]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[3].doc, equalTo(15));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[3]).fields[0]).utf8ToString(), equalTo("g"));
+ assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
+ assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("e"));
+
+ searcher.getIndexReader().close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardModuleTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardModuleTests.java
new file mode 100644
index 0000000000..8238c159df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardModuleTests.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+/** Unit test(s) for IndexShardModule */
+public class IndexShardModuleTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDetermineShadowEngineShouldBeUsed() {
+ ShardId shardId = new ShardId("myindex", 0);
+ Settings regularSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .build();
+
+ Settings shadowSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
+ .build();
+
+ IndexShardModule ism1 = new IndexShardModule(shardId, true, regularSettings, null);
+ IndexShardModule ism2 = new IndexShardModule(shardId, false, regularSettings, null);
+ IndexShardModule ism3 = new IndexShardModule(shardId, true, shadowSettings, null);
+ IndexShardModule ism4 = new IndexShardModule(shardId, false, shadowSettings, null);
+
+ assertFalse("no shadow replicas for normal settings", ism1.useShadowEngine());
+ assertFalse("no shadow replicas for normal settings", ism2.useShadowEngine());
+ assertFalse("no shadow replicas for primary shard with shadow settings", ism3.useShadowEngine());
+ assertTrue("shadow replicas for replica shards with shadow settings", ism4.useShadowEngine());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
new file mode 100644
index 0000000000..45fed8a594
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -0,0 +1,379 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.env.ShardLock;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.VersionUtils;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Simple unit tests for IndexShard-related operations.
+ */
+public class IndexShardTests extends ElasticsearchSingleNodeTest {
+
+ public void testFlushOnDeleteSetting() throws Exception {
+ boolean initValue = randomBoolean();
+ createIndex("test", settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, initValue).build());
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService("test");
+ IndexShard shard = test.shard(0);
+ assertEquals(initValue, shard.isFlushOnClose());
+ final boolean newValue = !initValue;
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, newValue).build()));
+ assertEquals(newValue, shard.isFlushOnClose());
+
+ try {
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, "FOOBAR").build()));
+ fail("exception expected");
+ } catch (IllegalArgumentException ex) {
+
+ }
+ assertEquals(newValue, shard.isFlushOnClose());
+
+ }
+
+ public void testWriteShardState() throws Exception {
+ try (NodeEnvironment env = newNodeEnvironment()) {
+ ShardId id = new ShardId("foo", 1);
+ long version = between(1, Integer.MAX_VALUE / 2);
+ boolean primary = randomBoolean();
+ ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo");
+ write(state1, env.availableShardPaths(id));
+ ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id));
+ assertEquals(shardStateMetaData, state1);
+
+ ShardStateMetaData state2 = new ShardStateMetaData(version, primary, "foo");
+ write(state2, env.availableShardPaths(id));
+ shardStateMetaData = load(logger, env.availableShardPaths(id));
+ assertEquals(shardStateMetaData, state1);
+
+ ShardStateMetaData state3 = new ShardStateMetaData(version + 1, primary, "foo");
+ write(state3, env.availableShardPaths(id));
+ shardStateMetaData = load(logger, env.availableShardPaths(id));
+ assertEquals(shardStateMetaData, state3);
+ assertEquals("foo", state3.indexUUID);
+ }
+ }
+
+ @Test
+ public void testLockTryingToDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ //IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+ Path[] shardPaths = env.availableShardPaths(new ShardId("test", 0));
+ logger.info("--> paths: [{}]", shardPaths);
+ // Should not be able to acquire the lock because it's already open
+ try {
+ NodeEnvironment.acquireFSLockForPaths(Settings.EMPTY, shardPaths);
+ fail("should not have been able to acquire the lock");
+ } catch (ElasticsearchException e) {
+ assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+ }
+ // Test without holding the regular shard lock, assuming we could acquire it
+ // (worst case, meaning that the shard lock could be acquired and
+ // we're green to delete the shard's directory)
+ ShardLock sLock = new DummyShardLock(new ShardId("test", 0));
+ try {
+ env.deleteShardDirectoryUnderLock(sLock, Settings.builder().build());
+ fail("should not have been able to delete the directory");
+ } catch (ElasticsearchException e) {
+ assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
+ }
+ }
+
+ public void testPersistenceStateMetadataPersistence() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+ IndexService test = indicesService.indexService("test");
+ IndexShard shard = test.shard(0);
+ ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(getShardStateMetadata(shard), shardStateMetaData);
+ ShardRouting routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
+ shard.updateRoutingEntry(routing, true);
+
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(shardStateMetaData, getShardStateMetadata(shard));
+ assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+
+ routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
+ shard.updateRoutingEntry(routing, true);
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(shardStateMetaData, getShardStateMetadata(shard));
+ assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+
+ routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
+ shard.updateRoutingEntry(routing, true);
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(shardStateMetaData, getShardStateMetadata(shard));
+ assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+
+ // verify that the shard state is still written even when the shard is not active
+ MutableShardRouting inactiveRouting = new MutableShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(), shard.shardRouting.currentNodeId(), true, ShardRoutingState.INITIALIZING, shard.shardRouting.version() + 1);
+ shard.persistMetadata(inactiveRouting, shard.shardRouting);
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, getShardStateMetadata(shard));
+ assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+
+
+ shard.updateRoutingEntry(new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1), false);
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertFalse("shard state persisted despite of persist=false", shardStateMetaData.equals(getShardStateMetadata(shard)));
+ assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+
+
+ routing = new MutableShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
+ shard.updateRoutingEntry(routing, true);
+ shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(shardStateMetaData, getShardStateMetadata(shard));
+ assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+ }
+
+ public void testDeleteShardState() throws IOException {
+ createIndex("test");
+ ensureGreen();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
+ IndexService test = indicesService.indexService("test");
+ IndexShard shard = test.shard(0);
+ try {
+ shard.deleteShardState();
+ fail("shard is active metadata delete must fail");
+ } catch (IllegalStateException ex) {
+ // fine - only delete if non-active
+ }
+
+ ShardRouting routing = shard.routingEntry();
+ ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
+ assertEquals(shardStateMetaData, getShardStateMetadata(shard));
+
+ routing = new MutableShardRouting(shard.shardId.index().getName(), shard.shardId.id(), routing.currentNodeId(), routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.version() + 1);
+ shard.updateRoutingEntry(routing, true);
+ shard.deleteShardState();
+
+ assertNull("no shard state expected after delete on initializing", load(logger, env.availableShardPaths(shard.shardId)));
+
+
+ }
+
+ ShardStateMetaData getShardStateMetadata(IndexShard shard) {
+ ShardRouting shardRouting = shard.routingEntry();
+ if (shardRouting == null) {
+ return null;
+ } else {
+ return new ShardStateMetaData(shardRouting.version(), shardRouting.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID));
+ }
+ }
+
+ public void testShardStateMetaHashCodeEquals() {
+ ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10));
+
+ assertEquals(meta, new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID));
+ assertEquals(meta.hashCode(), new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID).hashCode());
+
+ assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID)));
+ assertFalse(meta.equals(new ShardStateMetaData(meta.version + 1, meta.primary, meta.indexUUID)));
+ assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID + "foo")));
+ Set<Integer> hashCodes = new HashSet<>();
+ for (int i = 0; i < 30; i++) { // just a sanity check that we implemented hashCode() (not identity-based)
+ meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10));
+ hashCodes.add(meta.hashCode());
+ }
+ assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1);
+
+ }
+
+ @Test
+ public void testDeleteIndexDecreasesCounter() throws InterruptedException, ExecutionException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe("test");
+ IndexShard indexShard = indexService.shard(0);
+ client().admin().indices().prepareDelete("test").get();
+ assertThat(indexShard.getOperationsCount(), equalTo(0));
+ try {
+ indexShard.incrementOperationCounter();
+ fail("we should not be able to increment anymore");
+ } catch (IndexShardClosedException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testIndexShardCounter() throws InterruptedException, ExecutionException, IOException {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe("test");
+ IndexShard indexShard = indexService.shard(0);
+ assertEquals(0, indexShard.getOperationsCount());
+ indexShard.incrementOperationCounter();
+ assertEquals(1, indexShard.getOperationsCount());
+ indexShard.incrementOperationCounter();
+ assertEquals(2, indexShard.getOperationsCount());
+ indexShard.decrementOperationCounter();
+ indexShard.decrementOperationCounter();
+ assertEquals(0, indexShard.getOperationsCount());
+ }
+
+ @Test
+ public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
+ client().prepareIndex("test", "test").setSource("{}").get();
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ indicesService.indexService("test").shard(0).markAsInactive();
+ assertBusy(new Runnable() { // should be very very quick
+ @Override
+ public void run() {
+ IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+ });
+ IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+
+ public static ShardStateMetaData load(ESLogger logger, Path... shardPaths) throws IOException {
+ return ShardStateMetaData.FORMAT.loadLatestState(logger, shardPaths);
+ }
+
+ public static void write(ShardStateMetaData shardStateMetaData,
+ Path... shardPaths) throws IOException {
+ ShardStateMetaData.FORMAT.write(shardStateMetaData, shardStateMetaData.version, shardPaths);
+ }
+
+ public void testDurableFlagHasEffect() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "bar", "1").setSource("{}").get();
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService("test");
+ IndexShard shard = test.shard(0);
+ setDurability(shard, Translog.Durabilty.REQUEST);
+ assertFalse(shard.engine().getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durabilty.ASYNC);
+ client().prepareIndex("test", "bar", "2").setSource("{}").get();
+ assertTrue(shard.engine().getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durabilty.REQUEST);
+ client().prepareDelete("test", "bar", "1").get();
+ assertFalse(shard.engine().getTranslog().syncNeeded());
+
+ setDurability(shard, Translog.Durabilty.ASYNC);
+ client().prepareDelete("test", "bar", "2").get();
+ assertTrue(shard.engine().getTranslog().syncNeeded());
+ setDurability(shard, Translog.Durabilty.REQUEST);
+ assertNoFailures(client().prepareBulk()
+ .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "1")).get());
+ assertFalse(shard.engine().getTranslog().syncNeeded());
+
+ setDurability(shard, Translog.Durabilty.ASYNC);
+ assertNoFailures(client().prepareBulk()
+ .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
+ .add(client().prepareDelete("test", "bar", "3")).get());
+ setDurability(shard, Translog.Durabilty.REQUEST);
+ assertTrue(shard.engine().getTranslog().syncNeeded());
+ }
+
+ private void setDurability(IndexShard shard, Translog.Durabilty durabilty) {
+ client().admin().indices().prepareUpdateSettings(shard.shardId.getIndex()).setSettings(settingsBuilder().put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, durabilty.name()).build()).get();
+ assertEquals(durabilty, shard.getTranslogDurability());
+ }
+
+ public void testDeleteByQueryBWC() {
+ Version version = VersionUtils.randomVersion(random());
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, IndexMetaData.SETTING_VERSION_CREATED, version.id));
+ ensureGreen("test");
+ client().prepareIndex("test", "person").setSource("{ \"user\" : \"kimchy\" }").get();
+
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexService test = indicesService.indexService("test");
+ IndexShard shard = test.shard(0);
+ int numDocs = 1;
+ shard.state = IndexShardState.RECOVERING;
+ try {
+ shard.recoveryState().getTranslog().totalOperations(1);
+ shard.engine().config().getTranslogRecoveryPerformer().performRecoveryOperation(shard.engine(), new Translog.DeleteByQuery(new Engine.DeleteByQuery(null, new BytesArray("{\"term\" : { \"user\" : \"kimchy\" }}"), null, null, null, Engine.Operation.Origin.RECOVERY, 0, "person")), false);
+ assertTrue(version.onOrBefore(Version.V_1_0_0_Beta2));
+ numDocs = 0;
+ } catch (QueryParsingException ex) {
+ assertTrue(version.after(Version.V_1_0_0_Beta2));
+ } finally {
+ shard.state = IndexShardState.STARTED;
+ }
+ shard.engine().refresh("foo");
+
+ try (Engine.Searcher searcher = shard.engine().acquireSearcher("foo")) {
+ assertEquals(numDocs, searcher.reader().numDocs());
+ }
+ }
+
+ public void testMinimumCompatVersion() {
+ Version versionCreated = VersionUtils.randomVersion(random());
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0, SETTING_VERSION_CREATED, versionCreated.id));
+ client().prepareIndex("test", "test").setSource("{}").get();
+ ensureGreen("test");
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+ IndexShard test = indicesService.indexService("test").shard(0);
+ assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
+ client().prepareIndex("test", "test").setSource("{}").get();
+ assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
+ test.engine().flush();
+ assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
new file mode 100644
index 0000000000..6186d8930b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ */
+public class ShardPathTests extends ElasticsearchTestCase {
+
+ public void testLoadShardPath() throws IOException {
+ try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
+ Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF");
+ Settings settings = builder.build();
+ ShardId shardId = new ShardId("foo", 0);
+ Path[] paths = env.availableShardPaths(shardId);
+ Path path = randomFrom(paths);
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF"), 2, path);
+ ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, settings);
+ assertEquals(path, shardPath.getDataPath());
+ assertEquals("0xDEADBEEF", shardPath.getIndexUUID());
+ assertEquals("foo", shardPath.getShardId().getIndex());
+ assertEquals(path.resolve("translog"), shardPath.resolveTranslog());
+ assertEquals(path.resolve("index"), shardPath.resolveIndex());
+ }
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailLoadShardPathOnMultiState() throws IOException {
+ try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
+ Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF");
+ Settings settings = builder.build();
+ ShardId shardId = new ShardId("foo", 0);
+ Path[] paths = env.availableShardPaths(shardId);
+ assumeTrue("This test tests multi data.path but we only got one", paths.length > 1);
+ int id = randomIntBetween(1, 10);
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF"), id, paths);
+ ShardPath.loadShardPath(logger, env, shardId, settings);
+ }
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException {
+ try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
+ Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "foobar");
+ Settings settings = builder.build();
+ ShardId shardId = new ShardId("foo", 0);
+ Path[] paths = env.availableShardPaths(shardId);
+ Path path = randomFrom(paths);
+ int id = randomIntBetween(1, 10);
+ ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF"), id, path);
+ ShardPath.loadShardPath(logger, env, shardId, settings);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java
new file mode 100644
index 0000000000..e2163fa89b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/shard/ShardUtilsTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.shard;
+
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.BaseDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+
+public class ShardUtilsTests extends ElasticsearchTestCase {
+
+ public void testExtractShardId() throws IOException {
+ BaseDirectoryWrapper dir = newDirectory();
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig());
+ writer.commit();
+ ShardId id = new ShardId("foo", random().nextInt());
+ try (DirectoryReader reader = DirectoryReader.open(writer, random().nextBoolean())) {
+ ElasticsearchDirectoryReader wrap = ElasticsearchDirectoryReader.wrap(reader, id);
+ assertEquals(id, ShardUtils.extractShardId(wrap));
+ }
+ final int numDocs = 1 + random().nextInt(5);
+ for (int i = 0; i < numDocs; i++) {
+ Document d = new Document();
+ d.add(newField("name", "foobar", StringField.TYPE_STORED));
+ writer.addDocument(d);
+ if (random().nextBoolean()) {
+ writer.commit();
+ }
+ }
+
+ try (DirectoryReader reader = DirectoryReader.open(writer, random().nextBoolean())) {
+ ElasticsearchDirectoryReader wrap = ElasticsearchDirectoryReader.wrap(reader, id);
+ assertEquals(id, ShardUtils.extractShardId(wrap));
+ CompositeReaderContext context = wrap.getContext();
+ for (LeafReaderContext leaf : context.leaves()) {
+ assertEquals(id, ShardUtils.extractShardId(leaf.reader()));
+ }
+ }
+ IOUtils.close(writer, dir);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
new file mode 100644
index 0000000000..3f4119b473
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.similarity;
+
+import org.apache.lucene.search.similarities.*;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+public class SimilarityTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testResolveDefaultSimilarities() {
+ SimilarityLookupService similarityLookupService = createIndex("foo").similarityService().similarityLookupService();
+ assertThat(similarityLookupService.similarity("default"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("default").get(), instanceOf(DefaultSimilarity.class));
+ assertThat(similarityLookupService.similarity("BM25"), instanceOf(PreBuiltSimilarityProvider.class));
+ assertThat(similarityLookupService.similarity("BM25").get(), instanceOf(BM25Similarity.class));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_default() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "default")
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DefaultSimilarityProvider.class));
+
+ DefaultSimilarity similarity = (DefaultSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_bm25() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "BM25")
+ .put("index.similarity.my_similarity.k1", 2.0f)
+ .put("index.similarity.my_similarity.b", 1.5f)
+ .put("index.similarity.my_similarity.discount_overlaps", false)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(BM25SimilarityProvider.class));
+
+ BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getK1(), equalTo(2.0f));
+ assertThat(similarity.getB(), equalTo(1.5f));
+ assertThat(similarity.getDiscountOverlaps(), equalTo(false));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_DFR() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "DFR")
+ .put("index.similarity.my_similarity.basic_model", "g")
+ .put("index.similarity.my_similarity.after_effect", "l")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DFRSimilarityProvider.class));
+
+ DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class));
+ assertThat(similarity.getAfterEffect(), instanceOf(AfterEffectL.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_IB() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "IB")
+ .put("index.similarity.my_similarity.distribution", "spl")
+ .put("index.similarity.my_similarity.lambda", "ttf")
+ .put("index.similarity.my_similarity.normalization", "h2")
+ .put("index.similarity.my_similarity.normalization.h2.c", 3f)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(IBSimilarityProvider.class));
+
+ IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class));
+ assertThat(similarity.getLambda(), instanceOf(LambdaTTF.class));
+ assertThat(similarity.getNormalization(), instanceOf(NormalizationH2.class));
+ assertThat(((NormalizationH2) similarity.getNormalization()).getC(), equalTo(3f));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_LMDirichlet() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "LMDirichlet")
+ .put("index.similarity.my_similarity.mu", 3000f)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMDirichletSimilarityProvider.class));
+
+ LMDirichletSimilarity similarity = (LMDirichletSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getMu(), equalTo(3000f));
+ }
+
+ @Test
+ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOException {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("similarity", "my_similarity").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings indexSettings = Settings.settingsBuilder()
+ .put("index.similarity.my_similarity.type", "LMJelinekMercer")
+ .put("index.similarity.my_similarity.lambda", 0.7f)
+ .build();
+ SimilarityService similarityService = createIndex("foo", indexSettings).similarityService();
+ DocumentMapper documentMapper = similarityService.mapperService().documentMapperParser().parse(mapping);
+ assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMJelinekMercerSimilarityProvider.class));
+
+ LMJelinekMercerSimilarity similarity = (LMJelinekMercerSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get();
+ assertThat(similarity.getLambda(), equalTo(0.7f));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTest.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTest.java
new file mode 100644
index 0000000000..c8d127667d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTest.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.snapshots.blobstore;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ */
+public class FileInfoTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testToFromXContent() throws IOException {
+ final int iters = scaledRandomIntBetween(1, 10);
+ for (int iter = 0; iter < iters; iter++) {
+ final BytesRef hash = new BytesRef(scaledRandomIntBetween(0, 1024 * 1024));
+ hash.length = hash.bytes.length;
+ for (int i = 0; i < hash.length; i++) {
+ hash.bytes[i] = randomByte();
+ }
+ StoreFileMetaData meta = new StoreFileMetaData("foobar", randomInt(), randomAsciiOfLengthBetween(1, 10), Version.LATEST, hash);
+ ByteSizeValue size = new ByteSizeValue(Math.max(0,Math.abs(randomLong())));
+ BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size);
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
+ BlobStoreIndexShardSnapshot.FileInfo.toXContent(info, builder, ToXContent.EMPTY_PARAMS);
+ byte[] xcontent = builder.bytes().toBytes();
+
+ final BlobStoreIndexShardSnapshot.FileInfo parsedInfo;
+ try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xcontent)) {
+ parser.nextToken();
+ parsedInfo = BlobStoreIndexShardSnapshot.FileInfo.fromXContent(parser);
+ }
+ assertThat(info.name(), equalTo(parsedInfo.name()));
+ assertThat(info.physicalName(), equalTo(parsedInfo.physicalName()));
+ assertThat(info.length(), equalTo(parsedInfo.length()));
+ assertThat(info.checksum(), equalTo(parsedInfo.checksum()));
+ assertThat(info.partBytes(), equalTo(parsedInfo.partBytes()));
+ assertThat(parsedInfo.metadata().hash().length, equalTo(hash.length));
+ assertThat(parsedInfo.metadata().hash(), equalTo(hash));
+ assertThat(parsedInfo.metadata().writtenBy(), equalTo(Version.LATEST));
+ assertThat(parsedInfo.isSame(info.metadata()), is(true));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTest.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTest.java
new file mode 100644
index 0000000000..ea2b51ab97
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStreamTest.java
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.snapshots.blobstore;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class SlicedInputStreamTest extends ElasticsearchTestCase {
+
+ @Test
+ public void readRandom() throws IOException {
+ int parts = randomIntBetween(1, 20);
+ ByteArrayOutputStream stream = new ByteArrayOutputStream();
+ int numWriteOps = scaledRandomIntBetween(1000, 10000);
+ final long seed = randomLong();
+ Random random = new Random(seed);
+ for (int i = 0; i < numWriteOps; i++) {
+ switch(random.nextInt(5)) {
+ case 1:
+ stream.write(random.nextInt(Byte.MAX_VALUE));
+ break;
+ default:
+ stream.write(randomBytes(random));
+ break;
+ }
+ }
+
+ final CheckClosedInputStream[] streams = new CheckClosedInputStream[parts];
+ byte[] bytes = stream.toByteArray();
+ int slice = bytes.length / parts;
+ int offset = 0;
+ int length;
+ for (int i = 0; i < parts; i++) {
+ length = i == parts-1 ? bytes.length-offset : slice;
+ streams[i] = new CheckClosedInputStream(new ByteArrayInputStream(bytes, offset, length));
+ offset += length;
+ }
+
+ SlicedInputStream input = new SlicedInputStream(parts) {
+
+ @Override
+ protected InputStream openSlice(long slice) throws IOException {
+ return streams[(int)slice];
+ }
+ };
+ random = new Random(seed);
+ assertThat(input.available(), equalTo(streams[0].available()));
+ for (int i = 0; i < numWriteOps; i++) {
+ switch(random.nextInt(5)) {
+ case 1:
+ assertThat(random.nextInt(Byte.MAX_VALUE), equalTo(input.read()));
+ break;
+ default:
+ byte[] b = randomBytes(random);
+ byte[] buffer = new byte[b.length];
+ int read = readFully(input, buffer);
+ assertThat(b.length, equalTo(read));
+ assertArrayEquals(b, buffer);
+ break;
+ }
+ }
+
+ assertThat(input.available(), equalTo(0));
+ for (int i =0; i < streams.length-1; i++) {
+ assertTrue(streams[i].closed);
+ }
+ input.close();
+
+ for (int i =0; i < streams.length; i++) {
+ assertTrue(streams[i].closed);
+ }
+
+ }
+
+ private int readFully(InputStream stream, byte[] buffer) throws IOException {
+ for (int i = 0; i < buffer.length;) {
+ int read = stream.read(buffer, i, buffer.length-i);
+ if (read == -1) {
+ if (i == 0) {
+ return -1;
+ } else {
+ return i;
+ }
+ }
+ i+= read;
+ }
+ return buffer.length;
+ }
+
+ private byte[] randomBytes(Random random) {
+ int length = RandomInts.randomIntBetween(random, 1, 10);
+ byte[] data = new byte[length];
+ random.nextBytes(data);
+ return data;
+ }
+
+ private static final class CheckClosedInputStream extends FilterInputStream {
+
+ public boolean closed = false;
+
+ public CheckClosedInputStream(InputStream in) {
+ super(in);
+ }
+
+ @Override
+ public void close() throws IOException {
+ closed = true;
+ super.close();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java
new file mode 100644
index 0000000000..70fdbd28d9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileTest.java
@@ -0,0 +1,748 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.store;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Charsets;
+import com.google.common.base.Predicate;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.store.*;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardException;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.TranslogService;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
+import org.elasticsearch.test.store.MockFSDirectoryService;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class CorruptedFileTest extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ // we really need local GW here since this also checks for corruption etc.
+ // and we need to make sure primaries are not just trashed if we don't have replicas
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ // speed up recoveries
+ .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, 10)
+ .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 10)
+ .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5)
+ .build();
+ }
+
+ /**
+ * Tests that we can actually recover from a corruption on the primary given that we have replica shards around.
+ */
+ @Test
+ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedException, IOException {
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ // have enough space for 3 copies
+ internalCluster().ensureAtLeastNumDataNodes(3);
+ if (cluster().numDataNodes() == 3) {
+ logger.info("--> cluster has [3] data nodes, corrupted primary will be overwritten");
+ }
+
+ assertThat(cluster().numDataNodes(), greaterThanOrEqualTo(3));
+
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
+ .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
+ .put("indices.recovery.concurrent_streams", 10)
+ ));
+ ensureGreen();
+ disableAllocation("test");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+ }
+ indexRandom(true, builders);
+ ensureGreen();
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ // we have to flush at least once here since we don't corrupt the translog
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+
+ final int numShards = numShards("test");
+ ShardRouting corruptedShardRouting = corruptRandomPrimaryFile();
+ logger.info("--> {} corrupted", corruptedShardRouting);
+ enableAllocation("test");
+ /*
+ * we corrupted the primary shard - now lets make sure we never recover from it successfully
+ */
+ Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "2").build();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
+ ClusterHealthResponse health = client().admin().cluster()
+ .health(Requests.clusterHealthRequest("test").waitForGreenStatus()
+ .timeout("5m") // sometimes due to cluster rebalancing and random settings default timeout is just not enough.
+ .waitForRelocatingShards(0)).actionGet();
+ if (health.isTimedOut()) {
+ logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", health.isTimedOut(), equalTo(false));
+ }
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ final int numIterations = scaledRandomIntBetween(5, 20);
+ for (int i = 0; i < numIterations; i++) {
+ SearchResponse response = client().prepareSearch().setSize(numDocs).get();
+ assertHitCount(response, numDocs);
+ }
+
+
+
+ /*
+ * now hook into the IndicesService and register a close listener to
+ * run the checkindex. if the corruption is still there we will catch it.
+ */
+ final CountDownLatch latch = new CountDownLatch(numShards * 3); // primary + 2 replicas
+ final CopyOnWriteArrayList<Throwable> exception = new CopyOnWriteArrayList<>();
+ final IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() {
+ @Override
+ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, @IndexSettings Settings indexSettings) {
+ if (indexShard != null) {
+ Store store = ((IndexShard) indexShard).store();
+ store.incRef();
+ try {
+ if (!Lucene.indexExists(store.directory()) && indexShard.state() == IndexShardState.STARTED) {
+ return;
+ }
+ try (CheckIndex checkIndex = new CheckIndex(store.directory())) {
+ BytesStreamOutput os = new BytesStreamOutput();
+ PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
+ checkIndex.setInfoStream(out);
+ out.flush();
+ CheckIndex.Status status = checkIndex.checkIndex();
+ if (!status.clean) {
+ logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
+ throw new IndexShardException(sid, "index check failure");
+ }
+ }
+ } catch (Throwable t) {
+ exception.add(t);
+ } finally {
+ store.decRef();
+ latch.countDown();
+ }
+ }
+ }
+ };
+
+ for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
+ service.indicesLifecycle().addListener(listener);
+ }
+ try {
+ client().admin().indices().prepareDelete("test").get();
+ latch.await();
+ assertThat(exception, empty());
+ } finally {
+ for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
+ service.indicesLifecycle().removeListener(listener);
+ }
+ }
+ }
+
+ /**
+ * Tests corruption that happens on a single shard when no replicas are present. We make sure that the primary stays unassigned
+ * and that the replicas for the healthy shards are allocated
+ */
+ @Test
+ public void testCorruptPrimaryNoReplica() throws ExecutionException, InterruptedException, IOException {
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ internalCluster().ensureAtLeastNumDataNodes(2);
+
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
+ .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
+ .put("indices.recovery.concurrent_streams", 10)
+ ));
+ ensureGreen();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+ }
+ indexRandom(true, builders);
+ ensureGreen();
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ // we have to flush at least once here since we don't corrupt the translog
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+
+ ShardRouting shardRouting = corruptRandomPrimaryFile();
+ /*
+ * we corrupted the primary shard - now lets make sure we never recover from it successfully
+ */
+ Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").build();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
+ client().admin().cluster().prepareReroute().get();
+
+ boolean didClusterTurnRed = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ ClusterHealthStatus test = client().admin().cluster()
+ .health(Requests.clusterHealthRequest("test")).actionGet().getStatus();
+ return test == ClusterHealthStatus.RED;
+ }
+ }, 5, TimeUnit.MINUTES);// sometimes on slow nodes the replication / recovery is just dead slow
+ final ClusterHealthResponse response = client().admin().cluster()
+ .health(Requests.clusterHealthRequest("test")).get();
+ if (response.getStatus() != ClusterHealthStatus.RED) {
+ logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed);
+ logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ }
+ assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
+ for (ShardIterator iterator : shardIterators) {
+ ShardRouting routing;
+ while ((routing = iterator.nextOrNull()) != null) {
+ if (routing.getId() == shardRouting.getId()) {
+ assertThat(routing.state(), equalTo(ShardRoutingState.UNASSIGNED));
+ } else {
+ assertThat(routing.state(), anyOf(equalTo(ShardRoutingState.RELOCATING), equalTo(ShardRoutingState.STARTED)));
+ }
+ }
+ }
+ final List<Path> files = listShardFiles(shardRouting);
+ Path corruptedFile = null;
+ for (Path file : files) {
+ if (file.getFileName().toString().startsWith("corrupted_")) {
+ corruptedFile = file;
+ break;
+ }
+ }
+ assertThat(corruptedFile, notNullValue());
+ }
+
+ /**
+ * This test triggers a corrupt index exception during recovery finalization if an empty commit point is transferred
+ * during recovery we don't know the version of the segments_N file because it has no segments we can take it from.
+ * This simulates recoveries from old indices or even without checksums and makes sure if we fail during finalization
+ * we also check if the primary is ok. Without the relevant checks this test fails with a RED cluster
+ */
+ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionException, InterruptedException, IOException {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
+ List<NodeStats> dataNodeStats = new ArrayList<>();
+ for (NodeStats stat : nodeStats.getNodes()) {
+ if (stat.getNode().isDataNode()) {
+ dataNodeStats.add(stat);
+ }
+ }
+
+ assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
+ Collections.shuffle(dataNodeStats, getRandom());
+ NodeStats primariesNode = dataNodeStats.get(0);
+ NodeStats unluckyNode = dataNodeStats.get(1);
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put("index.routing.allocation.include._name", primariesNode.getNode().name())
+ .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
+
+ ));
+ ensureGreen(); // allocated with empty commit
+ final AtomicBoolean corrupt = new AtomicBoolean(true);
+ final CountDownLatch hasCorrupted = new CountDownLatch(1);
+ for (NodeStats dataNode : dataNodeStats) {
+ MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
+ mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, unluckyNode.getNode().name()).localNode(), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (corrupt.get() && action.equals(RecoveryTarget.Actions.FILE_CHUNK)) {
+ RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
+ byte[] array = req.content().array();
+ int i = randomIntBetween(0, req.content().length() - 1);
+ array[i] = (byte) ~array[i]; // flip one byte in the content
+ hasCorrupted.countDown();
+ }
+ super.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ Settings build = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
+ .put("index.routing.allocation.include._name", primariesNode.getNode().name() + "," + unluckyNode.getNode().name()).build();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
+ client().admin().cluster().prepareReroute().get();
+ hasCorrupted.await();
+ corrupt.set(false);
+ ensureGreen();
+ }
+
+ /**
+ * Tests corruption that happens on the network layer and that the primary does not get affected by corruption that happens on the way
+ * to the replica. The file on disk stays uncorrupted
+ */
+ @Test
+ public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ if (cluster().numDataNodes() < 3) {
+ internalCluster().startNode(Settings.builder().put("node.data", true).put("node.client", false).put("node.master", false));
+ }
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
+ List<NodeStats> dataNodeStats = new ArrayList<>();
+ for (NodeStats stat : nodeStats.getNodes()) {
+ if (stat.getNode().isDataNode()) {
+ dataNodeStats.add(stat);
+ }
+ }
+
+ assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
+ Collections.shuffle(dataNodeStats, getRandom());
+ NodeStats primariesNode = dataNodeStats.get(0);
+ NodeStats unluckyNode = dataNodeStats.get(1);
+
+
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4)) // don't go crazy here, it must recover fast
+ // This does corrupt files on the replica, so we can't check:
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)
+ .put("index.routing.allocation.include._name", primariesNode.getNode().name())
+ .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
+ ));
+ ensureGreen();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+ }
+ indexRandom(true, builders);
+ ensureGreen();
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ // we have to flush at least once here since we don't corrupt the translog
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ final boolean truncate = randomBoolean();
+ for (NodeStats dataNode : dataNodeStats) {
+ MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
+ mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, unluckyNode.getNode().name()).localNode(), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) {
+ RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
+ if (truncate && req.length() > 1) {
+ BytesArray array = new BytesArray(req.content().array(), req.content().arrayOffset(), (int) req.length() - 1);
+ request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
+ } else {
+ byte[] array = req.content().array();
+ int i = randomIntBetween(0, req.content().length() - 1);
+ array[i] = (byte) ~array[i]; // flip one byte in the content
+ }
+ }
+ super.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ Settings build = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
+ .put("index.routing.allocation.include._name", "*").build();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
+ client().admin().cluster().prepareReroute().get();
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ // we are green so the primaries did not get corrupted.
+ // ensure that no shard is actually allocated on the unlucky node
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+ for (IndexShardRoutingTable table : clusterStateResponse.getState().routingNodes().getRoutingTable().index("test")) {
+ for (ShardRouting routing : table) {
+ if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
+ assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
+ assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
+ }
+ }
+ }
+ final int numIterations = scaledRandomIntBetween(5, 20);
+ for (int i = 0; i < numIterations; i++) {
+ SearchResponse response = client().prepareSearch().setSize(numDocs).get();
+ assertHitCount(response, numDocs);
+ }
+
+ }
+
+
+ /**
+ * Tests that restoring of a corrupted shard fails and we get a partial snapshot.
+ * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several
+ * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard.
+ */
+ @Test
+ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
+ // Single-copy index: zero replicas, so the corrupted primary is the only copy of the data.
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ internalCluster().ensureAtLeastNumDataNodes(2);
+
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
+ .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
+ .put("indices.recovery.concurrent_streams", 10)
+ ));
+ ensureGreen();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+ }
+ indexRandom(true, builders);
+ ensureGreen();
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ // we have to flush at least once here since we don't corrupt the translog
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+
+ // Corrupt a per-segment file only (argument false excludes per-commit files).
+ ShardRouting shardRouting = corruptRandomPrimaryFile(false);
+ // we don't corrupt segments.gen since S/R doesn't snapshot this file
+ // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before
+ // it snapshots and that will write a new segments.X+1 file
+ logger.info("--> creating repository");
+ assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(settingsBuilder()
+ .put("location", randomRepoPath().toAbsolutePath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+ logger.info("--> snapshot");
+ // The snapshot must detect the corruption and end up PARTIAL rather than SUCCESS.
+ CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL));
+ logger.info("failed during snapshot -- maybe SI file got corrupted");
+ // The store is expected to have renamed the broken file with a "corrupted_" marker prefix.
+ final List<Path> files = listShardFiles(shardRouting);
+ Path corruptedFile = null;
+ for (Path file : files) {
+ if (file.getFileName().toString().startsWith("corrupted_")) {
+ corruptedFile = file;
+ break;
+ }
+ }
+ assertThat(corruptedFile, notNullValue());
+ }
+
+ /**
+ * This test verifies that if we corrupt a replica, we can still get to green, even though
+ * listing its store fails. Note, we need to make sure that replicas are allocated on all data
+ * nodes, so that replica won't be sneaky and allocated on a node that doesn't have a corrupted
+ * replica.
+ */
+ @Test
+ public void testReplicaCorruption() throws Exception {
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ internalCluster().ensureAtLeastNumDataNodes(2);
+
+ // One replica per remaining data node, so every data node holds a copy of each shard.
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(GatewayAllocator.INDEX_RECOVERY_INITIAL_SHARDS, "one")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1)
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
+ .put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true) // no translog based flush - it might change the .liv / segments.N files
+ .put("indices.recovery.concurrent_streams", 10)
+ ));
+ ensureGreen();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
+ }
+ indexRandom(true, builders);
+ ensureGreen();
+ assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).execute().actionGet());
+ // we have to flush at least once here since we don't corrupt the translog
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+
+ // Overwrite each replica's segments_N file while its node is stopped, then
+ // verify the cluster still reaches green (recovering replicas from primaries).
+ final Map<String, List<Path>> filesToCorrupt = findFilesToCorruptForReplica();
+ internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ List<Path> paths = filesToCorrupt.get(nodeName);
+ if (paths != null) {
+ for (Path path : paths) {
+ // Files.newOutputStream truncates by default, replacing the file with a single zero byte.
+ try (OutputStream os = Files.newOutputStream(path)) {
+ os.write(0);
+ }
+ logger.info("corrupting file {} on node {}", path, nodeName);
+ }
+ }
+ return null;
+ }
+ });
+ ensureGreen();
+ }
+
+ /** Returns the number of active primary shards of the given indices. */
+ private int numShards(String... index) {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(index, false);
+ return shardIterators.size();
+ }
+
+ /**
+ * Collects, for every replica shard of the "test" index, the segments_N files on the
+ * node holding it. Returns a map of node name to the files that should be corrupted
+ * while that node is stopped.
+ */
+ private Map<String, List<Path>> findFilesToCorruptForReplica() throws IOException {
+ Map<String, List<Path>> filesToNodes = new HashMap<>();
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ for (ShardRouting shardRouting : state.getRoutingTable().allShards("test")) {
+ // only replicas are of interest here; skip primaries
+ if (shardRouting.primary() == true) {
+ continue;
+ }
+ assertTrue(shardRouting.assignedToNode());
+ NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(shardRouting.currentNodeId()).setFs(true).get();
+ NodeStats nodeStats = nodeStatses.getNodes()[0];
+ List<Path> files = new ArrayList<>();
+ filesToNodes.put(nodeStats.getNode().getName(), files);
+ // check every data path reported by the node's FS stats
+ for (FsStats.Info info : nodeStats.getFs()) {
+ String path = info.getPath();
+ final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index";
+ Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
+ if (Files.exists(file)) { // multi data path might only have one path in use
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
+ for (Path item : stream) {
+ // only the commit point files (segments_N) are targeted
+ if (item.getFileName().toString().startsWith("segments_")) {
+ files.add(item);
+ }
+ }
+ }
+ }
+ }
+ }
+ return filesToNodes;
+ }
+
+ /** Corrupts a random primary shard file, per-commit files (segments_N / .liv) included. */
+ private ShardRouting corruptRandomPrimaryFile() throws IOException {
+ return corruptRandomPrimaryFile(true);
+ }
+
+ /**
+ * Flips a single byte at a random position in a randomly chosen index file of a random
+ * active primary of the "test" index, then verifies via the Lucene checksum footer that
+ * the corruption is actually observable (otherwise the test is skipped via assumeTrue).
+ *
+ * @param includePerCommitFiles whether segments_N / .liv files may be chosen as well
+ * @return the routing entry of the shard whose file was corrupted
+ */
+ private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
+ List<ShardIterator> iterators = Lists.newArrayList(shardIterators);
+ ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators);
+ ShardRouting shardRouting = shardIterator.nextOrNull();
+ assertNotNull(shardRouting);
+ assertTrue(shardRouting.primary());
+ assertTrue(shardRouting.assignedToNode());
+ String nodeId = shardRouting.currentNodeId();
+ NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
+ Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
+ for (FsStats.Info info : nodeStatses.getNodes()[0].getFs()) {
+ String path = info.getPath();
+ final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index";
+ Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
+ if (Files.exists(file)) { // multi data path might only have one path in use
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
+ for (Path item : stream) {
+ // skip the lock file; optionally skip per-commit files (segments_N / .liv)
+ if (Files.isRegularFile(item) && "write.lock".equals(item.getFileName().toString()) == false) {
+ if (includePerCommitFiles || isPerSegmentFile(item.getFileName().toString())) {
+ files.add(item);
+ }
+ }
+ }
+ }
+ }
+ }
+ // drop superseded .liv generations so we never corrupt a file Lucene no longer reads
+ pruneOldDeleteGenerations(files);
+ Path fileToCorrupt = null;
+ if (!files.isEmpty()) {
+ fileToCorrupt = RandomPicks.randomFrom(getRandom(), files);
+ try (Directory dir = FSDirectory.open(fileToCorrupt.toAbsolutePath().getParent())) {
+ // remember the checksum recorded in the file's footer before flipping anything
+ long checksumBeforeCorruption;
+ try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
+ }
+ try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+ // read
+ raf.position(randomIntBetween(0, (int) Math.min(Integer.MAX_VALUE, raf.size() - 1)));
+ long filePointer = raf.position();
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ raf.read(bb);
+ bb.flip();
+
+ // corrupt
+ byte oldValue = bb.get(0);
+ byte newValue = (byte) (oldValue + 1);
+ bb.put(0, newValue);
+
+ // rewrite
+ raf.position(filePointer);
+ raf.write(bb);
+ logger.info("Corrupting file for shard {} -- flipping at position {} from {} to {} file: {}", shardRouting, filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue), fileToCorrupt.getFileName());
+ }
+ long checksumAfterCorruption;
+ long actualChecksumAfterCorruption;
+ try (ChecksumIndexInput input = dir.openChecksumInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ assertThat(input.getFilePointer(), is(0l));
+ input.seek(input.length() - 8); // one long is the checksum... 8 bytes
+ checksumAfterCorruption = input.getChecksum();
+ actualChecksumAfterCorruption = input.readLong();
+ }
+ // we need to add assumptions here that the checksums actually really don't match there is a small chance to get collisions
+ // in the checksum which is ok though....
+ StringBuilder msg = new StringBuilder();
+ msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
+ msg.append(" after: [").append(checksumAfterCorruption).append("]");
+ msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
+ msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
+ logger.info(msg.toString());
+ assumeTrue("Checksum collision - " + msg.toString(),
+ checksumAfterCorruption != checksumBeforeCorruption // collision
+ || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
+ }
+ }
+ assertThat("no file corrupted", fileToCorrupt, notNullValue());
+ return shardRouting;
+ }
+
+ /**
+ * Returns true for per-commit files (segments_N and .liv), which may legitimately be
+ * rewritten between commits and therefore must be excluded when a stable checksum is needed.
+ */
+ // NOTE: 'final' removed — it is redundant on a private static method (cannot be overridden).
+ private static boolean isPerCommitFile(String fileName) {
+ // .liv and segments_N are per commit files and might change after corruption
+ return fileName.startsWith("segments") || fileName.endsWith(".liv");
+ }
+
+ /** Returns true for files belonging to a single segment, i.e. everything that is not per-commit. */
+ private static boolean isPerSegmentFile(String fileName) {
+ return isPerCommitFile(fileName) == false;
+ }
+
+ /**
+ * prunes the list of index files such that only the latest del generation files are contained.
+ * For each segment that has several .liv (live-docs) files, every generation except the
+ * newest is removed from {@code files} in place; non-.liv entries are left untouched.
+ */
+ private void pruneOldDeleteGenerations(Set<Path> files) {
+ final TreeSet<Path> delFiles = new TreeSet<>();
+ for (Path file : files) {
+ if (file.getFileName().toString().endsWith(".liv")) {
+ delFiles.add(file);
+ }
+ }
+ // TreeSet iteration is sorted, so files of the same segment appear consecutively
+ Path last = null;
+ for (Path current : delFiles) {
+ if (last != null) {
+ final String newSegmentName = IndexFileNames.parseSegmentName(current.getFileName().toString());
+ final String oldSegmentName = IndexFileNames.parseSegmentName(last.getFileName().toString());
+ if (newSegmentName.equals(oldSegmentName)) {
+ // generation suffix is base-36 (Character.MAX_RADIX); keep only the higher generation
+ int oldGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
+ int newGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
+ if (newGen > oldGen) {
+ files.remove(last);
+ } else {
+ files.remove(current);
+ continue;
+ }
+ }
+ }
+ last = current;
+ }
+ }
+
+ /**
+ * Lists all files in the index directory of the given shard, across every data path
+ * reported by the node that currently holds the shard.
+ */
+ public List<Path> listShardFiles(ShardRouting routing) throws IOException {
+ NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(routing.currentNodeId()).setFs(true).get();
+
+ assertThat(routing.toString(), nodeStatses.getNodes().length, equalTo(1));
+ List<Path> files = new ArrayList<>();
+ for (FsStats.Info info : nodeStatses.getNodes()[0].getFs()) {
+ String path = info.getPath();
+ Path file = PathUtils.get(path).resolve("indices/test/" + Integer.toString(routing.getId()) + "/index");
+ if (Files.exists(file)) { // multi data path might only have one path in use
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
+ for (Path item : stream) {
+ files.add(item);
+ }
+ }
+ }
+ }
+ return files;
+ }
+
+ /** Disables shard allocation for the given index ("index.routing.allocation.enable" = none). */
+ private void disableAllocation(String index) {
+ client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(
+ "index.routing.allocation.enable", "none"
+ )).get();
+ }
+
+ /** Re-enables shard allocation for the given index ("index.routing.allocation.enable" = all). */
+ private void enableAllocation(String index) {
+ client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(
+ "index.routing.allocation.enable", "all"
+ )).get();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogTests.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogTests.java
new file mode 100644
index 0000000000..b03c7d9032
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedTranslogTests.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.monitor.fs.FsStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.engine.MockEngineSupport;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.TransportModule;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.*;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration test for corrupted translog files
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope= ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0)
+public class CorruptedTranslogTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ // we really need local GW here since this also checks for corruption etc.
+ // and we need to make sure primaries are not just trashed if we don't have replicas
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName()).build();
+ }
+
+ @Test
+ @TestLogging("index.translog:TRACE,index.gateway:TRACE")
+ public void testCorruptTranslogFiles() throws Exception {
+ internalCluster().startNodesAsync(1, Settings.EMPTY).get();
+
+ // Never flush, so every indexed document must be replayed from the translog on restart.
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("index.refresh_interval", "-1")
+ .put(MockEngineSupport.FLUSH_ON_CLOSE_RATIO, 0.0d) // never flush - always recover from translog
+ .put(IndexShard.INDEX_FLUSH_ON_CLOSE, false) // never flush - always recover from translog
+ .put(TranslogConfig.INDEX_TRANSLOG_SYNC_INTERVAL, "1s") // fsync the translog every second
+ ));
+ ensureYellow();
+
+ // Index some documents
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type").setSource("foo", "bar");
+ }
+ disableTranslogFlush("test");
+ indexRandom(false, false, false, Arrays.asList(builders)); // this one
+
+ // Corrupt the translog file(s)
+ corruptRandomTranslogFiles();
+
+ // Restart the single node
+ internalCluster().fullRestart();
+ // node needs time to start recovery and discover the translog corruption
+ // NOTE(review): a fixed 1s sleep is timing-dependent and may be flaky on slow machines;
+ // consider polling for the shard-failure condition instead.
+ Thread.sleep(1000);
+ enableTranslogFlush("test");
+
+ try {
+ client().prepareSearch("test").setQuery(matchAllQuery()).get();
+ fail("all shards should be failed due to a corrupted translog");
+ } catch (SearchPhaseExecutionException e) {
+ // Good, all shards should be failed because there is only a
+ // single shard and its translog is corrupt
+ }
+ }
+
+
+ /**
+ * Flips a single byte at 5-20 random positions in randomly chosen translog-* files of
+ * the primary shard of the "test" index. Fails the test if no translog file was found.
+ */
+ private void corruptRandomTranslogFiles() throws IOException {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
+ List<ShardIterator> iterators = Lists.newArrayList(shardIterators);
+ ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators);
+ ShardRouting shardRouting = shardIterator.nextOrNull();
+ assertNotNull(shardRouting);
+ assertTrue(shardRouting.primary());
+ assertTrue(shardRouting.assignedToNode());
+ String nodeId = shardRouting.currentNodeId();
+ NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
+ Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
+ for (FsStats.Info info : nodeStatses.getNodes()[0].getFs()) {
+ String path = info.getPath();
+ final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/translog";
+ Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
+ if (Files.exists(file)) {
+ logger.info("--> path: {}", file);
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
+ for (Path item : stream) {
+ logger.info("--> File: {}", item);
+ if (Files.isRegularFile(item) && item.getFileName().toString().startsWith("translog-")) {
+ files.add(item);
+ }
+ }
+ }
+ }
+ }
+ Path fileToCorrupt = null;
+ if (!files.isEmpty()) {
+ int corruptions = randomIntBetween(5, 20);
+ for (int i = 0; i < corruptions; i++) {
+ fileToCorrupt = RandomPicks.randomFrom(getRandom(), files);
+ try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+ // read
+ raf.position(randomIntBetween(0, (int) Math.min(Integer.MAX_VALUE, raf.size() - 1)));
+ long filePointer = raf.position();
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ raf.read(bb);
+ bb.flip();
+
+ // corrupt
+ byte oldValue = bb.get(0);
+ byte newValue = (byte) (oldValue + 1);
+ bb.put(0, newValue);
+
+ // rewrite
+ raf.position(filePointer);
+ raf.write(bb);
+ logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
+ fileToCorrupt, filePointer, Integer.toHexString(oldValue),
+ Integer.toHexString(newValue), fileToCorrupt);
+ }
+ }
+ }
+ assertThat("no file corrupted", fileToCorrupt, notNullValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTest.java b/core/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTest.java
new file mode 100644
index 0000000000..950de0e27a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.store.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collections;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.*;
+
+public class DirectoryUtilsTest extends ElasticsearchTestCase {
+
+ // NOTE(review): method name is presumably a typo for "testGetLeaf"; left as-is because
+ // renaming a public test method changes the externally visible test name.
+ @Test
+ public void testGetLeave() throws IOException {
+ Path file = createTempDir();
+ final int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ {
+ // a single FilterDirectory wrapper is unwrapped to the FSDirectory leaf
+ BaseDirectoryWrapper dir = newFSDirectory(file);
+ FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(dir) {}, FSDirectory.class, null);
+ assertThat(directory, notNullValue());
+ assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
+ dir.close();
+ }
+
+ {
+ // no wrapper at all: the directory itself is the leaf
+ BaseDirectoryWrapper dir = newFSDirectory(file);
+ FSDirectory directory = DirectoryUtils.getLeaf(dir, FSDirectory.class, null);
+ assertThat(directory, notNullValue());
+ assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
+ dir.close();
+ }
+
+ {
+ // FileSwitchDirectory wrapping the same dir twice still resolves to that leaf
+ Set<String> stringSet = Collections.emptySet();
+ BaseDirectoryWrapper dir = newFSDirectory(file);
+ FSDirectory directory = DirectoryUtils.getLeaf(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean()), FSDirectory.class, null);
+ assertThat(directory, notNullValue());
+ assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
+ dir.close();
+ }
+
+ {
+ // two levels of wrapping: FilterDirectory around FileSwitchDirectory
+ Set<String> stringSet = Collections.emptySet();
+ BaseDirectoryWrapper dir = newFSDirectory(file);
+ FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, FSDirectory.class, null);
+ assertThat(directory, notNullValue());
+ assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null)));
+ dir.close();
+ }
+
+ {
+ // asking for a RAMDirectory leaf when the leaf is FS-based yields null
+ Set<String> stringSet = Collections.emptySet();
+ BaseDirectoryWrapper dir = newFSDirectory(file);
+ RAMDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, RAMDirectory.class, null);
+ assertThat(directory, nullValue());
+ dir.close();
+ }
+
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java
new file mode 100644
index 0000000000..916e7310b6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/ExceptionRetryTests.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.store;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.bulk.TransportShardBulkAction;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class ExceptionRetryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .build();
+ }
+
+ @Override
+ protected void beforeIndexDeletion() {
+ // a write operation might still be in flight when the test has finished
+ // so we should not check the operation counter here
+ }
+
+ /**
+ * Tests retry mechanism when indexing. If an exception occurs when indexing then the indexing request is tried again before finally failing.
+ * If auto generated ids are used this must not lead to duplicate ids
+ * see https://github.com/elasticsearch/elasticsearch/issues/8788
+ */
+ @Test
+ public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
+ final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
+ NodeStats unluckyNode = randomFrom(nodeStats.getNodes());
+ assertAcked(client().admin().indices().prepareCreate("index"));
+ ensureGreen("index");
+
+ //create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry.
+ for (NodeStats dataNode : nodeStats.getNodes()) {
+ MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
+ mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, unluckyNode.getNode().name()).localNode(), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ super.sendRequest(node, requestId, action, request, options);
+ if (action.equals(TransportShardBulkAction.ACTION_NAME) && !exceptionThrown.get()) {
+ logger.debug("Throw ConnectTransportException");
+ exceptionThrown.set(true);
+ throw new ConnectTransportException(node, action);
+ }
+ }
+ });
+ }
+
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ for (int i = 0; i < numDocs; i++) {
+ XContentBuilder doc = null;
+ doc = jsonBuilder().startObject().field("foo", "bar").endObject();
+ bulkBuilder.add(client().prepareIndex("index", "type").setSource(doc));
+ }
+
+ BulkResponse response = bulkBuilder.get();
+ if (response.hasFailures()) {
+ for (BulkItemResponse singleIndexRespons : response.getItems()) {
+ if (singleIndexRespons.isFailed()) {
+ fail("None of the bulk items should fail but got " + singleIndexRespons.getFailureMessage());
+ }
+ }
+ }
+
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("index").setSize(numDocs * 2).addField("_id").get();
+
+ Set<String> uniqueIds = new HashSet();
+ long dupCounter = 0;
+ boolean found_duplicate_already = false;
+ for (int i = 0; i < searchResponse.getHits().getHits().length; i++) {
+ if (!uniqueIds.add(searchResponse.getHits().getHits()[i].getId())) {
+ if (!found_duplicate_already) {
+ SearchResponse dupIdResponse = client().prepareSearch("index").setQuery(termQuery("_id", searchResponse.getHits().getHits()[i].getId())).setExplain(true).get();
+ assertThat(dupIdResponse.getHits().totalHits(), greaterThan(1l));
+ logger.info("found a duplicate id:");
+ for (SearchHit hit : dupIdResponse.getHits()) {
+ logger.info("Doc {} was found on shard {}", hit.getId(), hit.getShard().getShardId());
+ }
+ logger.info("will not print anymore in case more duplicates are found.");
+ found_duplicate_already = true;
+ }
+ dupCounter++;
+ }
+ }
+ assertSearchResponse(searchResponse);
+ assertThat(dupCounter, equalTo(0l));
+ assertHitCount(searchResponse, numDocs);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java
new file mode 100644
index 0000000000..1286a7322f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.store;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+/**
+ */
+public class IndexStoreTests extends ElasticsearchTestCase {
+
+ /** Verifies that each configured store type maps to the expected Lucene Directory implementation. */
+ public void testStoreDirectory() throws IOException {
+ final Path tempDir = createTempDir();
+ final IndexStoreModule.Type[] values = IndexStoreModule.Type.values();
+ final IndexStoreModule.Type type = RandomPicks.randomFrom(random(), values);
+ Settings settings = Settings.settingsBuilder().put(IndexStoreModule.STORE_TYPE, type.name()).build();
+ FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0)));
+ try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
+ switch (type) {
+ case NIOFS:
+ assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory);
+ break;
+ case MMAPFS:
+ assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory);
+ break;
+ case SIMPLEFS:
+ assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
+ break;
+ case FS:
+ case DEFAULT:
+ // FS/DEFAULT pick an implementation based on platform and mmap-unmap support
+ if (Constants.WINDOWS) {
+ if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+ assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory);
+ } else {
+ assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
+ }
+ } else if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
+ assertTrue(type + " " + directory.toString(), directory instanceof FileSwitchDirectory);
+ } else {
+ assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory);
+ }
+ break;
+ }
+ }
+ }
+
+ /** Verifies the Directory implementation chosen when no store type is configured at all. */
+ public void testStoreDirectoryDefault() throws IOException {
+ final Path tempDir = createTempDir();
+ Settings settings = Settings.EMPTY;
+ FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0)));
+ try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
+ if (Constants.WINDOWS) {
+ assertTrue(directory.toString(), directory instanceof MMapDirectory || directory instanceof SimpleFSDirectory);
+ } else {
+ assertTrue(directory.toString(), directory instanceof FileSwitchDirectory);
+ }
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/LegacyVerificationTests.java b/core/src/test/java/org/elasticsearch/index/store/LegacyVerificationTests.java
new file mode 100644
index 0000000000..f870cfa123
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/LegacyVerificationTests.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store;
+
+import java.nio.charset.StandardCharsets;
+import java.util.zip.Adler32;
+
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+/**
+ * Simple tests for LegacyVerification (old segments)
+ * @deprecated remove this test when support for lucene 4.x
+ * segments is no longer needed.
+ */
+@Deprecated
+public class LegacyVerificationTests extends ElasticsearchTestCase {
+
+ public void testAdler32() throws Exception {
+ Adler32 expected = new Adler32();
+ byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
+ expected.update(bytes);
+ String expectedString = Store.digestToString(expected.getValue());
+
+ Directory dir = newDirectory();
+
+ IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
+ VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
+ out.writeBytes(bytes, 0, bytes.length);
+ out.verify();
+ out.close();
+ out.verify();
+
+ dir.close();
+ }
+
+ public void testAdler32Corrupt() throws Exception {
+ Adler32 expected = new Adler32();
+ byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
+ expected.update(bytes);
+ String expectedString = Store.digestToString(expected.getValue());
+
+ byte corruptBytes[] = "abcdefch".getBytes(StandardCharsets.UTF_8);
+ Directory dir = newDirectory();
+
+ IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
+ VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
+ out.writeBytes(corruptBytes, 0, bytes.length);
+ try {
+ out.verify();
+ fail();
+ } catch (CorruptIndexException e) {
+ // expected exception
+ }
+ out.close();
+
+ try {
+ out.verify();
+ fail();
+ } catch (CorruptIndexException e) {
+ // expected exception
+ }
+
+ dir.close();
+ }
+
+ public void testLengthOnlyOneByte() throws Exception {
+ Directory dir = newDirectory();
+
+ IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
+ VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 1);
+ out.writeByte((byte) 3);
+ out.verify();
+ out.close();
+ out.verify();
+
+ dir.close();
+ }
+
+ public void testLengthOnlyCorrupt() throws Exception {
+ Directory dir = newDirectory();
+
+ IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
+ VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 2);
+ out.writeByte((byte) 3);
+ try {
+ out.verify();
+ fail();
+ } catch (CorruptIndexException expected) {
+ // expected exception
+ }
+
+ out.close();
+
+ try {
+ out.verify();
+ fail();
+ } catch (CorruptIndexException expected) {
+ // expected exception
+ }
+
+ dir.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTest.java b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java
new file mode 100644
index 0000000000..95c043ae70
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java
@@ -0,0 +1,1270 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.FilterCodec;
+import org.apache.lucene.codecs.SegmentInfoFormat;
+import org.apache.lucene.codecs.lucene50.Lucene50Codec;
+import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.*;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.ShardLock;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.deletionpolicy.KeepOnlyLastDeletionPolicy;
+import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import org.elasticsearch.test.DummyShardLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.zip.Adler32;
+
+import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.*;
+
+public class StoreTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testRefCount() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ int incs = randomIntBetween(1, 100);
+ for (int i = 0; i < incs; i++) {
+ if (randomBoolean()) {
+ store.incRef();
+ } else {
+ assertTrue(store.tryIncRef());
+ }
+ store.ensureOpen();
+ }
+
+ for (int i = 0; i < incs; i++) {
+ store.decRef();
+ store.ensureOpen();
+ }
+
+ store.incRef();
+ final AtomicBoolean called = new AtomicBoolean(false);
+ store.close();
+ for (int i = 0; i < incs; i++) {
+ if (randomBoolean()) {
+ store.incRef();
+ } else {
+ assertTrue(store.tryIncRef());
+ }
+ store.ensureOpen();
+ }
+
+ for (int i = 0; i < incs; i++) {
+ store.decRef();
+ store.ensureOpen();
+ }
+
+ store.decRef();
+ assertThat(store.refCount(), Matchers.equalTo(0));
+ assertFalse(store.tryIncRef());
+ try {
+ store.incRef();
+ fail(" expected exception");
+ } catch (AlreadyClosedException ex) {
+
+ }
+ try {
+ store.ensureOpen();
+ fail(" expected exception");
+ } catch (AlreadyClosedException ex) {
+
+ }
+ }
+
+ @Test
+ public void testVerifyingIndexOutput() throws IOException {
+ Directory dir = newDirectory();
+ IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ }
+ CodecUtil.writeFooter(output);
+ output.close();
+ IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
+ String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput));
+ indexInput.seek(0);
+ BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
+ long length = indexInput.length();
+ IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT));
+ while (length > 0) {
+ if (random().nextInt(10) == 0) {
+ verifyingOutput.writeByte(indexInput.readByte());
+ length--;
+ } else {
+ int min = (int) Math.min(length, ref.bytes.length);
+ indexInput.readBytes(ref.bytes, ref.offset, min);
+ verifyingOutput.writeBytes(ref.bytes, ref.offset, min);
+ length -= min;
+ }
+ }
+ Store.verify(verifyingOutput);
+ verifyingOutput.writeByte((byte) 0x0);
+ try {
+ Store.verify(verifyingOutput);
+ fail("should be a corrupted index");
+ } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
+ // ok
+ }
+ IOUtils.close(indexInput, verifyingOutput, dir);
+ }
+
+ @Test
+ public void testVerifyingIndexOutputWithBogusInput() throws IOException {
+ Directory dir = newDirectory();
+ int length = scaledRandomIntBetween(10, 1024);
+ IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, ""), dir.createOutput("foo1.bar", IOContext.DEFAULT));
+ try {
+ while (length > 0) {
+ verifyingOutput.writeByte((byte) random().nextInt());
+ length--;
+ }
+ fail("should be a corrupted index");
+ } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
+ // ok
+ }
+ IOUtils.close(verifyingOutput, dir);
+ }
+
+ // TODO: remove this, its too fragile. just use a static old index instead.
+ private static final class OldSIMockingCodec extends FilterCodec {
+
+ protected OldSIMockingCodec() {
+ super(new Lucene50Codec().getName(), new Lucene50Codec());
+ }
+
+ @Override
+ public SegmentInfoFormat segmentInfoFormat() {
+ final SegmentInfoFormat segmentInfoFormat = super.segmentInfoFormat();
+ return new SegmentInfoFormat() {
+ @Override
+ public SegmentInfo read(Directory directory, String segmentName, byte[] segmentID, IOContext context) throws IOException {
+ return segmentInfoFormat.read(directory, segmentName, segmentID, context);
+ }
+
+ // this sucks it's a full copy of Lucene50SegmentInfoFormat but hey I couldn't find a way to make it write 4_5_0 versions
+ // somebody was too paranoid when implementing this. ey rmuir, was that you? - go fix it :P
+ @Override
+ public void write(Directory dir, SegmentInfo si, IOContext ioContext) throws IOException {
+ final String fileName = IndexFileNames.segmentFileName(si.name, "", Lucene50SegmentInfoFormat.SI_EXTENSION);
+ si.addFile(fileName);
+
+ boolean success = false;
+ try (IndexOutput output = dir.createOutput(fileName, ioContext)) {
+ CodecUtil.writeIndexHeader(output,
+ "Lucene50SegmentInfo",
+ 0,
+ si.getId(),
+ "");
+ Version version = Version.LUCENE_4_5_0; // FOOOOOO!!
+ // Write the Lucene version that created this segment, since 3.1
+ output.writeInt(version.major);
+ output.writeInt(version.minor);
+ output.writeInt(version.bugfix);
+ assert version.prerelease == 0;
+ output.writeInt(si.maxDoc());
+
+ output.writeByte((byte) (si.getUseCompoundFile() ? SegmentInfo.YES : SegmentInfo.NO));
+ output.writeStringStringMap(si.getDiagnostics());
+ Set<String> files = si.files();
+ for (String file : files) {
+ if (!IndexFileNames.parseSegmentName(file).equals(si.name)) {
+ throw new IllegalArgumentException("invalid files: expected segment=" + si.name + ", got=" + files);
+ }
+ }
+ output.writeStringSet(files);
+ output.writeStringStringMap(si.getAttributes());
+ CodecUtil.writeFooter(output);
+ success = true;
+ } finally {
+ if (!success) {
+ // TODO: are we doing this outside of the tracking wrapper? why must SIWriter cleanup like this?
+ IOUtils.deleteFilesIgnoringExceptions(si.dir, fileName);
+ }
+ }
+ }
+ };
+ }
+ }
+
+ // IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ @Test
+ public void testWriteLegacyChecksums() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ // set default codec - all segments need checksums
+ final boolean usesOldCodec = randomBoolean();
+ IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(usesOldCodec ? new OldSIMockingCodec() : TestUtil.getDefaultCodec()));
+ int docs = 1 + random().nextInt(100);
+
+ for (int i = 0; i < docs; i++) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+ }
+ if (random().nextBoolean()) {
+ for (int i = 0; i < docs; i++) {
+ if (random().nextBoolean()) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.updateDocument(new Term("id", "" + i), doc);
+ }
+ }
+ }
+ if (random().nextBoolean()) {
+ DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
+ }
+ Store.MetadataSnapshot metadata;
+ // check before we committed
+ try {
+ store.getMetadata();
+ fail("no index present - expected exception");
+ } catch (IndexNotFoundException ex) {
+ // expected
+ }
+ assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
+
+ writer.close();
+ Store.LegacyChecksums checksums = new Store.LegacyChecksums();
+ Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
+ for (String file : store.directory().listAll()) {
+ if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
+ continue;
+ }
+ BytesRef hash = new BytesRef();
+ if (file.startsWith("segments")) {
+ hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
+ }
+ StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
+ legacyMeta.put(file, storeFileMetaData);
+ checksums.add(storeFileMetaData);
+ }
+ checksums.write(store);
+
+ metadata = store.getMetadata();
+ Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
+ assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
+ if (usesOldCodec) {
+ for (StoreFileMetaData meta : legacyMeta.values()) {
+ assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
+ assertEquals(meta.name() + "checksum", meta.checksum());
+ assertTrue(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
+ }
+ } else {
+
+ // even if we have a legacy checksum - if we use a new codec we should reuse
+ for (StoreFileMetaData meta : legacyMeta.values()) {
+ assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
+ assertFalse(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
+ StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
+ try (IndexInput input = store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
+ assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
+ input.seek(meta.length());
+ Store.verify(input);
+ }
+ }
+ }
+ assertDeleteContent(store, directoryService);
+ IOUtils.close(store);
+
+ }
+
+ @Test
+ public void testNewChecksums() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ // set default codec - all segments need checksums
+ IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
+ int docs = 1 + random().nextInt(100);
+
+ for (int i = 0; i < docs; i++) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+ }
+ if (random().nextBoolean()) {
+ for (int i = 0; i < docs; i++) {
+ if (random().nextBoolean()) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.updateDocument(new Term("id", "" + i), doc);
+ }
+ }
+ }
+ if (random().nextBoolean()) {
+ DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
+ }
+ Store.MetadataSnapshot metadata;
+ // check before we committed
+ try {
+ store.getMetadata();
+ fail("no index present - expected exception");
+ } catch (IndexNotFoundException ex) {
+ // expected
+ }
+ assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
+ writer.commit();
+ writer.close();
+ metadata = store.getMetadata();
+ assertThat(metadata.asMap().isEmpty(), is(false));
+ for (StoreFileMetaData meta : metadata) {
+ try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
+ String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
+ assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
+ assertThat(meta.hasLegacyChecksum(), equalTo(false));
+ assertThat(meta.writtenBy(), equalTo(Version.LATEST));
+ if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
+ assertThat(meta.hash().length, greaterThan(0));
+ }
+ }
+ }
+ assertConsistent(store, metadata);
+
+ TestUtil.checkIndex(store.directory());
+ assertDeleteContent(store, directoryService);
+ IOUtils.close(store);
+ }
+
+ @Test
+ public void testMixedChecksums() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ // this time random codec....
+ IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
+ int docs = 1 + random().nextInt(100);
+
+ for (int i = 0; i < docs; i++) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+ }
+ if (random().nextBoolean()) {
+ for (int i = 0; i < docs; i++) {
+ if (random().nextBoolean()) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.updateDocument(new Term("id", "" + i), doc);
+ }
+ }
+ }
+ if (random().nextBoolean()) {
+ DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
+ }
+ Store.MetadataSnapshot metadata;
+ // check before we committed
+ try {
+ store.getMetadata();
+ fail("no index present - expected exception");
+ } catch (IndexNotFoundException ex) {
+ // expected
+ }
+ assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
+ writer.commit();
+ writer.close();
+ Store.LegacyChecksums checksums = new Store.LegacyChecksums();
+ metadata = store.getMetadata();
+ assertThat(metadata.asMap().isEmpty(), is(false));
+ for (StoreFileMetaData meta : metadata) {
+ try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
+ if (meta.checksum() == null) {
+ String checksum = null;
+ try {
+ CodecUtil.retrieveChecksum(input);
+ fail("expected a corrupt index - posting format has not checksums");
+ } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
+ try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
+ checksumIndexInput.seek(meta.length());
+ checksum = Store.digestToString(checksumIndexInput.getChecksum());
+ }
+ // fine - it's a postings format without checksums
+ checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
+ }
+ } else {
+ String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
+ assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
+ assertThat(meta.hasLegacyChecksum(), equalTo(false));
+ assertThat(meta.writtenBy(), equalTo(Version.LATEST));
+ }
+ }
+ }
+ assertConsistent(store, metadata);
+ checksums.write(store);
+ metadata = store.getMetadata();
+ assertThat(metadata.asMap().isEmpty(), is(false));
+ for (StoreFileMetaData meta : metadata) {
+ assertThat("file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
+ if (meta.hasLegacyChecksum()) {
+ try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
+ checksumIndexInput.seek(meta.length());
+ assertThat(meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
+ }
+ } else {
+ try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
+ String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
+ assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
+ assertThat(meta.hasLegacyChecksum(), equalTo(false));
+ assertThat(meta.writtenBy(), equalTo(Version.LATEST));
+ }
+ }
+ }
+ assertConsistent(store, metadata);
+ TestUtil.checkIndex(store.directory());
+ assertDeleteContent(store, directoryService);
+ IOUtils.close(store);
+ }
+
+ @Test
+ public void testRenameFile() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ {
+ IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ }
+ CodecUtil.writeFooter(output);
+ output.close();
+ }
+ store.renameFile("foo.bar", "bar.foo");
+ assertThat(numNonExtraFiles(store), is(1));
+ final long lastChecksum;
+ try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
+ lastChecksum = CodecUtil.checksumEntireFile(input);
+ }
+
+ try {
+ store.directory().openInput("foo.bar", IOContext.DEFAULT);
+ fail("file was renamed");
+ } catch (FileNotFoundException | NoSuchFileException ex) {
+ // expected
+ }
+ {
+ IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ }
+ CodecUtil.writeFooter(output);
+ output.close();
+ }
+ store.renameFile("foo.bar", "bar.foo");
+ assertThat(numNonExtraFiles(store), is(1));
+ assertDeleteContent(store, directoryService);
+ IOUtils.close(store);
+ }
+
+ public void testCheckIntegrity() throws IOException {
+ Directory dir = newDirectory();
+ long luceneFileLength = 0;
+
+ try (IndexOutput output = dir.createOutput("lucene_checksum.bin", IOContext.DEFAULT)) {
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ luceneFileLength += bytesRef.length;
+ }
+ CodecUtil.writeFooter(output);
+ luceneFileLength += CodecUtil.footerLength();
+
+ }
+
+ final Adler32 adler32 = new Adler32();
+ long legacyFileLength = 0;
+ try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ legacyFileLength += bytesRef.length;
+ }
+ }
+ final long luceneChecksum;
+ final long adler32LegacyChecksum = adler32.getValue();
+ try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
+ assertEquals(luceneFileLength, indexInput.length());
+ luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
+ }
+
+ { // positive check
+ StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
+ StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
+ assertTrue(legacy.hasLegacyChecksum());
+ assertFalse(lucene.hasLegacyChecksum());
+ assertTrue(Store.checkIntegrityNoException(lucene, dir));
+ assertTrue(Store.checkIntegrityNoException(legacy, dir));
+ }
+
+ { // negative check - wrong checksum
+ StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum + 1), Version.LUCENE_4_8_0);
+ StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
+ assertTrue(legacy.hasLegacyChecksum());
+ assertFalse(lucene.hasLegacyChecksum());
+ assertFalse(Store.checkIntegrityNoException(lucene, dir));
+ assertFalse(Store.checkIntegrityNoException(legacy, dir));
+ }
+
+ { // negative check - wrong length
+ StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength + 1, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
+ StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
+ assertTrue(legacy.hasLegacyChecksum());
+ assertFalse(lucene.hasLegacyChecksum());
+ assertFalse(Store.checkIntegrityNoException(lucene, dir));
+ assertFalse(Store.checkIntegrityNoException(legacy, dir));
+ }
+
+ { // negative check - wrong file
+ StoreFileMetaData lucene = new StoreFileMetaData("legacy.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
+ StoreFileMetaData legacy = new StoreFileMetaData("lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
+ assertTrue(legacy.hasLegacyChecksum());
+ assertFalse(lucene.hasLegacyChecksum());
+ assertFalse(Store.checkIntegrityNoException(lucene, dir));
+ assertFalse(Store.checkIntegrityNoException(legacy, dir));
+ }
+ dir.close();
+
+ }
+
+ @Test
+ public void testVerifyingIndexInput() throws IOException {
+ Directory dir = newDirectory();
+ IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ }
+ CodecUtil.writeFooter(output);
+ output.close();
+
+ // Check file
+ IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
+ long checksum = CodecUtil.retrieveChecksum(indexInput);
+ indexInput.seek(0);
+ IndexInput verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo.bar", IOContext.DEFAULT));
+ readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
+ Store.verify(verifyingIndexInput);
+ assertThat(checksum, equalTo(((ChecksumIndexInput) verifyingIndexInput).getChecksum()));
+ IOUtils.close(indexInput, verifyingIndexInput);
+
+ // Corrupt file and check again
+ corruptFile(dir, "foo.bar", "foo1.bar");
+ verifyingIndexInput = new Store.VerifyingIndexInput(dir.openInput("foo1.bar", IOContext.DEFAULT));
+ readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
+ try {
+ Store.verify(verifyingIndexInput);
+ fail("should be a corrupted index");
+ } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
+ // ok
+ }
+ IOUtils.close(verifyingIndexInput);
+ IOUtils.close(dir);
+ }
+
+ private void readIndexInputFullyWithRandomSeeks(IndexInput indexInput) throws IOException {
+ BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
+ long pos = 0;
+ while (pos < indexInput.length()) {
+ assertEquals(pos, indexInput.getFilePointer());
+ int op = random().nextInt(5);
+ if (op == 0) {
+ int shift = 100 - randomIntBetween(0, 200);
+ pos = Math.min(indexInput.length() - 1, Math.max(0, pos + shift));
+ indexInput.seek(pos);
+ } else if (op == 1) {
+ indexInput.readByte();
+ pos++;
+ } else {
+ int min = (int) Math.min(indexInput.length() - pos, ref.bytes.length);
+ indexInput.readBytes(ref.bytes, ref.offset, min);
+ pos += min;
+ }
+ }
+ }
+
+ private void corruptFile(Directory dir, String fileIn, String fileOut) throws IOException {
+ IndexInput input = dir.openInput(fileIn, IOContext.READONCE);
+ IndexOutput output = dir.createOutput(fileOut, IOContext.DEFAULT);
+ long len = input.length();
+ byte[] b = new byte[1024];
+ long broken = randomInt((int) len);
+ long pos = 0;
+ while (pos < len) {
+ int min = (int) Math.min(input.length() - pos, b.length);
+ input.readBytes(b, 0, min);
+ if (broken >= pos && broken < pos + min) {
+ // Flip one byte
+ int flipPos = (int) (broken - pos);
+ b[flipPos] = (byte) (b[flipPos] ^ 42);
+ }
+ output.writeBytes(b, min);
+ pos += min;
+ }
+ IOUtils.close(input, output);
+
+ }
+
+ /**
+ * Deletes everything in the store's directory and asserts that the directory
+ * listing, the reported store size and a freshly obtained directory are all empty.
+ */
+ public void assertDeleteContent(Store store, DirectoryService service) throws IOException {
+ deleteContent(store.directory());
+ assertThat(Arrays.toString(store.directory().listAll()), store.directory().listAll().length, equalTo(0));
+ assertThat(store.stats().sizeInBytes(), equalTo(0l));
+ assertThat(service.newDirectory().listAll().length, equalTo(0));
+ }
+
+ /**
+ * {@link DirectoryService} backed by a single Lucene test-framework directory.
+ * {@link #newDirectory()} always returns the same instance, and the throttle
+ * time is a random value below 1000 nanoseconds.
+ */
+ private static final class LuceneManagedDirectoryService extends DirectoryService {
+ private final Directory dir;
+ private final Random random;
+
+ // defaults to preventing double writes on the wrapped MockDirectoryWrapper
+ public LuceneManagedDirectoryService(Random random) {
+ this(random, true);
+ }
+
+ public LuceneManagedDirectoryService(Random random, boolean preventDoubleWrite) {
+ super(new ShardId("fake", 1), Settings.EMPTY);
+ dir = StoreTest.newDirectory(random);
+ if (dir instanceof MockDirectoryWrapper) {
+ ((MockDirectoryWrapper) dir).setPreventDoubleWrite(preventDoubleWrite);
+ // TODO: fix this test to handle virus checker
+ ((MockDirectoryWrapper) dir).setEnableVirusScanner(false);
+ }
+ this.random = random;
+ }
+
+ @Override
+ public Directory newDirectory() throws IOException {
+ return dir;
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return random.nextInt(1000);
+ }
+ }
+
+ /**
+ * Asserts that the metadata snapshot agrees with the directory listing: every
+ * regular index file is present in the snapshot, while the write lock, the
+ * legacy segments.gen file, checksum files and extraFS files are absent.
+ */
+ public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
+ for (String file : store.directory().listAll()) {
+ if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && !Store.isChecksum(file) && file.startsWith("extra") == false) {
+ assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
+ } else {
+ assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
+ }
+ }
+ }
+
+ /**
+ * Legacy indices without lucene CRC32 never wrote or calculated checksums for segments_N files,
+ * only for other files. As a consequence both entries of an identical legacy snapshot must be
+ * reported as "different" by the recovery diff.
+ */
+ @Test
+ public void testRecoveryDiffWithLegacyCommit() {
+ Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
+ // segments_1 has no checksum, only a hash; _0_1.del has a legacy checksum string
+ metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
+ metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
+ Store.MetadataSnapshot first = new Store.MetadataSnapshot(metaDataMap, Collections.EMPTY_MAP, 0);
+
+ Store.MetadataSnapshot second = new Store.MetadataSnapshot(metaDataMap, Collections.EMPTY_MAP, 0);
+ Store.RecoveryDiff recoveryDiff = first.recoveryDiff(second);
+ assertEquals(recoveryDiff.toString(), recoveryDiff.different.size(), 2);
+ }
+
+
+ @Test
+ /**
+ * Builds the same set of documents into two stores using an identical random seed,
+ * then checks Store.MetadataSnapshot#recoveryDiff: the two commits must be fully
+ * "different" (Lucene writes random ids into file headers), a self diff must be
+ * fully "identical", and subsequent delete/append commits must be classified into
+ * identical/different/missing as asserted below.
+ */
+ public void testRecoveryDiff() throws IOException, InterruptedException {
+ int numDocs = 2 + random().nextInt(100);
+ List<Document> docs = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ docs.add(doc);
+ }
+ // same seed for both index builds so both stores get the same sequence of commits
+ long seed = random().nextLong();
+ Store.MetadataSnapshot first;
+ {
+ Random random = new Random(seed);
+ IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setUseCompoundFile(random.nextBoolean());
+ iwc.setMaxThreadStates(1);
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random);
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ IndexWriter writer = new IndexWriter(store.directory(), iwc);
+ final boolean lotsOfSegments = rarely(random);
+ for (Document d : docs) {
+ writer.addDocument(d);
+ if (lotsOfSegments && random.nextBoolean()) {
+ writer.commit();
+ } else if (rarely(random)) {
+ writer.commit();
+ }
+ }
+ writer.commit();
+ writer.close();
+ first = store.getMetadata();
+ assertDeleteContent(store, directoryService);
+ store.close();
+ }
+ // make sure the second index is written at a later wall-clock time than the first
+ long time = new Date().getTime();
+ while (time == new Date().getTime()) {
+ Thread.sleep(10); // bump the time
+ }
+ Store.MetadataSnapshot second;
+ Store store;
+ {
+ Random random = new Random(seed);
+ IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setUseCompoundFile(random.nextBoolean());
+ iwc.setMaxThreadStates(1);
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random);
+ store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ IndexWriter writer = new IndexWriter(store.directory(), iwc);
+ final boolean lotsOfSegments = rarely(random);
+ for (Document d : docs) {
+ writer.addDocument(d);
+ if (lotsOfSegments && random.nextBoolean()) {
+ writer.commit();
+ } else if (rarely(random)) {
+ writer.commit();
+ }
+ }
+ writer.commit();
+ writer.close();
+ second = store.getMetadata();
+ }
+ Store.RecoveryDiff diff = first.recoveryDiff(second);
+ assertThat(first.size(), equalTo(second.size()));
+ for (StoreFileMetaData md : first) {
+ assertThat(second.get(md.name()), notNullValue());
+ // si files are different - containing timestamps etc
+ assertThat(second.get(md.name()).isSame(md), equalTo(false));
+ }
+ assertThat(diff.different.size(), equalTo(first.size()));
+ assertThat(diff.identical.size(), equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
+ assertThat(diff.missing, empty());
+
+ // check the self diff
+ Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
+ assertThat(selfDiff.identical.size(), equalTo(first.size()));
+ assertThat(selfDiff.different, empty());
+ assertThat(selfDiff.missing, empty());
+
+
+ // lets add some deletes
+ Random random = new Random(seed);
+ IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setUseCompoundFile(random.nextBoolean());
+ iwc.setMaxThreadStates(1);
+ iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+ IndexWriter writer = new IndexWriter(store.directory(), iwc);
+ writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
+ writer.commit();
+ writer.close();
+ Store.MetadataSnapshot metadata = store.getMetadata();
+ StoreFileMetaData delFile = null;
+ // a .liv file only exists if the deleted doc did not empty its whole segment
+ for (StoreFileMetaData md : metadata) {
+ if (md.name().endsWith(".liv")) {
+ delFile = md;
+ break;
+ }
+ }
+ Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
+ if (delFile != null) {
+ assertThat(afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
+ assertThat(afterDeleteDiff.different.size(), equalTo(0));
+ assertThat(afterDeleteDiff.missing.size(), equalTo(2));
+ } else {
+ // an entire segment must be missing (single doc segment got dropped)
+ assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
+ assertThat(afterDeleteDiff.different.size(), equalTo(0));
+ assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
+ }
+
+ // check the self diff
+ selfDiff = metadata.recoveryDiff(metadata);
+ assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
+ assertThat(selfDiff.different, empty());
+ assertThat(selfDiff.missing, empty());
+
+ // add a new commit
+ iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec());
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files
+ iwc.setMaxThreadStates(1);
+ iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
+ writer = new IndexWriter(store.directory(), iwc);
+ writer.addDocument(docs.get(0));
+ writer.close();
+
+ Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
+ Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
+ if (delFile != null) {
+ assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
+ assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
+ assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
+ assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
+ } else {
+ assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
+ assertThat(newCommitDiff.different.size(), equalTo(0));
+ assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different
+ }
+
+ deleteContent(store.directory());
+ IOUtils.close(store);
+ }
+
+ @Test
+ /**
+ * Writes an index with multiple commits (NoDeletionPolicy keeps them all), then
+ * cleans the store against one of two metadata snapshots and verifies that only
+ * the snapshot's files, the write lock and legacy checksum files survive.
+ */
+ public void testCleanupFromSnapshot() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ // this time random codec....
+ IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
+ // we keep all commits and that allows us clean based on multiple snapshots
+ indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
+ IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
+ int docs = 1 + random().nextInt(100);
+ int numCommits = 0;
+ for (int i = 0; i < docs; i++) {
+ if (i > 0 && randomIntBetween(0, 10) == 0) {
+ writer.commit();
+ numCommits++;
+ }
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+
+ }
+ // guarantee at least two commits so the two snapshots below differ
+ if (numCommits < 1) {
+ writer.commit();
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ writer.addDocument(doc);
+ }
+
+ Store.MetadataSnapshot firstMeta = store.getMetadata();
+
+ if (random().nextBoolean()) {
+ for (int i = 0; i < docs; i++) {
+ if (random().nextBoolean()) {
+ Document doc = new Document();
+ doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.updateDocument(new Term("id", "" + i), doc);
+ }
+ }
+ }
+ writer.commit();
+ writer.close();
+
+ Store.MetadataSnapshot secondMeta = store.getMetadata();
+
+ // build legacy (pre-CRC32) metadata for all real index files; segments files also get a hash
+ Store.LegacyChecksums checksums = new Store.LegacyChecksums();
+ Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
+ for (String file : store.directory().listAll()) {
+ if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
+ continue;
+ }
+ BytesRef hash = new BytesRef();
+ if (file.startsWith("segments")) {
+ hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
+ }
+ StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
+ legacyMeta.put(file, storeFileMetaData);
+ checksums.add(storeFileMetaData);
+ }
+ checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups
+
+ // randomly clean against either snapshot; in both branches the other snapshot
+ // must have at least one file that is gone afterwards, and the checksum survives
+ if (randomBoolean()) {
+ store.cleanupAndVerify("test", firstMeta);
+ String[] strings = store.directory().listAll();
+ int numChecksums = 0;
+ int numNotFound = 0;
+ for (String file : strings) {
+ if (file.startsWith("extra")) {
+ continue;
+ }
+ assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
+ if (Store.isChecksum(file)) {
+ numChecksums++;
+ } else if (secondMeta.contains(file) == false) {
+ numNotFound++;
+ }
+
+ }
+ assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
+ assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
+ } else {
+ store.cleanupAndVerify("test", secondMeta);
+ String[] strings = store.directory().listAll();
+ int numChecksums = 0;
+ int numNotFound = 0;
+ for (String file : strings) {
+ if (file.startsWith("extra")) {
+ continue;
+ }
+ assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
+ if (Store.isChecksum(file)) {
+ numChecksums++;
+ } else if (firstMeta.contains(file) == false) {
+ numNotFound++;
+ }
+
+ }
+ assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
+ assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
+ }
+
+ deleteContent(store.directory());
+ IOUtils.close(store);
+ }
+
+ @Test
+ /**
+ * Writes files matching a legacy (pre-CRC32) metadata snapshot into the store and
+ * verifies that verifyAfterCleanup accepts them when source and target snapshots
+ * are identical.
+ */
+ public void testCleanUpWithLegacyChecksums() throws IOException {
+ Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
+ metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
+ metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
+ Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(metaDataMap, Collections.EMPTY_MAP, 0);
+
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ // create each referenced file with random content plus a codec footer
+ for (String file : metaDataMap.keySet()) {
+ try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ CodecUtil.writeFooter(output);
+ }
+ }
+
+ store.verifyAfterCleanup(snapshot, snapshot);
+ deleteContent(store.directory());
+ IOUtils.close(store);
+ }
+
+ /**
+ * Verifies that the Store.OnClose callback fires exactly once with the store's
+ * shard lock, no matter how many times close() is invoked.
+ */
+ public void testOnCloseCallback() throws IOException {
+ final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10)), randomIntBetween(0, 100));
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ final AtomicInteger count = new AtomicInteger(0);
+ final ShardLock lock = new DummyShardLock(shardId);
+
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, lock, new Store.OnClose() {
+ @Override
+ public void handle(ShardLock theLock) {
+ assertEquals(shardId, theLock.getShardId());
+ assertEquals(lock, theLock);
+ count.incrementAndGet();
+ }
+ });
+ assertEquals(count.get(), 0);
+
+ // closing repeatedly must still trigger the callback only once
+ final int iters = randomIntBetween(1, 10);
+ for (int i = 0; i < iters; i++) {
+ store.close();
+ }
+
+ assertEquals(count.get(), 1);
+ }
+
+ @Test
+ /**
+ * Checks that store stats report the directory size: initially only extraFS
+ * files, and after writing a new file the size grows by exactly its length.
+ * The stats refresh interval is set to zero so stats are never cached.
+ */
+ public void testStoreStats() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Settings settings = Settings.builder().put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, TimeValue.timeValueMinutes(0)).build();
+ Store store = new Store(shardId, settings, directoryService, new DummyShardLock(shardId));
+ long initialStoreSize = 0;
+ for (String extraFiles : store.directory().listAll()) {
+ assertTrue("expected extraFS file but got: " + extraFiles, extraFiles.startsWith("extra"));
+ initialStoreSize += store.directory().fileLength(extraFiles);
+ }
+ StoreStats stats = store.stats();
+ assertEquals(stats.getSize().bytes(), initialStoreSize);
+
+ // write a file with random content and remember its final length
+ Directory dir = store.directory();
+ final long length;
+ try (IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT)) {
+ int iters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
+ output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+ }
+ length = output.getFilePointer();
+ }
+
+ assertTrue(numNonExtraFiles(store) > 0);
+ stats = store.stats();
+ assertEquals(stats.getSizeInBytes(), length + initialStoreSize);
+
+ deleteContent(store.directory());
+ IOUtils.close(store);
+ }
+
+
+ /**
+ * Deletes every file in the directory. Already-missing files are ignored; any
+ * other IOExceptions are collected and rethrown together at the end so one
+ * failure does not stop the remaining deletions.
+ */
+ public static void deleteContent(Directory directory) throws IOException {
+ final String[] files = directory.listAll();
+ final List<IOException> exceptions = new ArrayList<>();
+ for (String file : files) {
+ try {
+ directory.deleteFile(file);
+ } catch (NoSuchFileException | FileNotFoundException e) {
+ // ignore
+ } catch (IOException e) {
+ exceptions.add(e);
+ }
+ }
+ ExceptionsHelper.rethrowAndSuppress(exceptions);
+ }
+
+ /**
+ * Counts the files in the store's directory whose names do not start with
+ * "extra" (i.e. everything that is not an extraFS artifact).
+ */
+ public int numNonExtraFiles(Store store) throws IOException {
+ int numNonExtra = 0;
+ for (String file : store.directory().listAll()) {
+ if (file.startsWith("extra") == false) {
+ numNonExtra++;
+ }
+ }
+ return numNonExtra;
+ }
+
+ @Test
+ /**
+ * Round-trips a MetadataSnapshot through the stream serialization layer at a
+ * random wire version and verifies all file entries and the commit user data
+ * survive the trip.
+ */
+ public void testMetadataSnapshotStreaming() throws Exception {
+
+ Store.MetadataSnapshot outMetadataSnapshot = createMetaDataSnapshot();
+ org.elasticsearch.Version targetNodeVersion = randomVersion(random());
+
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ out.setVersion(targetNodeVersion);
+ outMetadataSnapshot.writeTo(out);
+
+ ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
+ in.setVersion(targetNodeVersion);
+ Store.MetadataSnapshot inMetadataSnapshot = new Store.MetadataSnapshot(in);
+ Map<String, StoreFileMetaData> origEntries = new HashMap<>();
+ origEntries.putAll(outMetadataSnapshot.asMap());
+ // every deserialized entry must match an original entry; removing as we go
+ // ensures there are no duplicates or extras on either side
+ for (Map.Entry<String, StoreFileMetaData> entry : inMetadataSnapshot.asMap().entrySet()) {
+ assertThat(entry.getValue().name(), equalTo(origEntries.remove(entry.getKey()).name()));
+ }
+ assertThat(origEntries.size(), equalTo(0));
+ assertThat(inMetadataSnapshot.getCommitUserData(), equalTo(outMetadataSnapshot.getCommitUserData()));
+ }
+
+ /**
+ * Builds a small fixed MetadataSnapshot with two file entries and two commit
+ * user data entries, used by the streaming round-trip tests.
+ */
+ protected Store.MetadataSnapshot createMetaDataSnapshot() {
+ StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1);
+ StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1);
+ Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
+ storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
+ storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
+ Map<String, String> commitUserData = new HashMap<>();
+ commitUserData.put("userdata_1", "test");
+ commitUserData.put("userdata_2", "test");
+ return new Store.MetadataSnapshot(storeFileMetaDataMap, commitUserData, 0);
+ }
+
+ @Test
+ /**
+ * Commits with explicit commit user data (sync id and translog generation) and
+ * verifies the store metadata exposes it, whether read from the last commit or
+ * from a deletion-policy snapshot.
+ */
+ public void testUserDataRead() throws IOException {
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
+ SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, EMPTY_SETTINGS));
+ config.setIndexDeletionPolicy(deletionPolicy);
+ IndexWriter writer = new IndexWriter(store.directory(), config);
+ Document doc = new Document();
+ doc.add(new TextField("id", "1", Field.Store.NO));
+ writer.addDocument(doc);
+ Map<String, String> commitData = new HashMap<>(2);
+ String syncId = "a sync id";
+ String translogId = "a translog id";
+ commitData.put(Engine.SYNC_COMMIT_ID, syncId);
+ commitData.put(Translog.TRANSLOG_GENERATION_KEY, translogId);
+ writer.setCommitData(commitData);
+ writer.commit();
+ writer.close();
+ Store.MetadataSnapshot metadata;
+ // both code paths must surface the same commit user data
+ if (randomBoolean()) {
+ metadata = store.getMetadata();
+ } else {
+ metadata = store.getMetadata(deletionPolicy.snapshot());
+ }
+ assertFalse(metadata.asMap().isEmpty());
+ // do not check for correct files, we have enough tests for that above
+ assertThat(metadata.getCommitUserData().get(Engine.SYNC_COMMIT_ID), equalTo(syncId));
+ assertThat(metadata.getCommitUserData().get(Translog.TRANSLOG_GENERATION_KEY), equalTo(translogId));
+ TestUtil.checkIndex(store.directory());
+ assertDeleteContent(store, directoryService);
+ IOUtils.close(store);
+ }
+
+ @Test
+ /**
+ * Round-trips a StoreFilesMetaData (snapshot wrapper used by shard-store
+ * listing) through stream serialization at a random wire version and checks
+ * file names and the sync id are preserved.
+ */
+ public void testStreamStoreFilesMetaData() throws Exception {
+ Store.MetadataSnapshot metadataSnapshot = createMetaDataSnapshot();
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = new TransportNodesListShardStoreMetaData.StoreFilesMetaData(randomBoolean(), new ShardId("test", 0),metadataSnapshot);
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ org.elasticsearch.Version targetNodeVersion = randomVersion(random());
+ out.setVersion(targetNodeVersion);
+ outStoreFileMetaData.writeTo(out);
+ ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
+ in.setVersion(targetNodeVersion);
+ TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
+ Iterator<StoreFileMetaData> outFiles = outStoreFileMetaData.iterator();
+ for (StoreFileMetaData inFile : inStoreFileMetaData) {
+ assertThat(inFile.name(), equalTo(outFiles.next().name()));
+ }
+ assertThat(outStoreFileMetaData.syncId(), equalTo(inStoreFileMetaData.syncId()));
+ }
+
+ /**
+ * Truncates the current segments_N file to zero bytes and verifies that reading
+ * the commit (via getMetadata or readLastCommittedSegmentsInfo) throws
+ * CorruptIndexException and marks the store as corrupted.
+ */
+ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException {
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new LuceneManagedDirectoryService(random());
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ IndexWriter writer = new IndexWriter(store.directory(), iwc);
+
+ int numDocs = 1 + random().nextInt(10);
+ List<Document> docs = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
+ docs.add(doc);
+ }
+ for (Document d : docs) {
+ writer.addDocument(d);
+ }
+ writer.commit();
+ writer.close();
+ // allow overwriting segments_N below, which MockDirectoryWrapper normally forbids
+ MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
+ if (leaf != null) {
+ leaf.setPreventDoubleWrite(false); // I do this on purpose
+ }
+ SegmentInfos segmentCommitInfos = store.readLastCommittedSegmentsInfo();
+ try (IndexOutput out = store.directory().createOutput(segmentCommitInfos.getSegmentsFileName(), IOContext.DEFAULT)) {
+ // empty file
+ }
+
+ try {
+ if (randomBoolean()) {
+ store.getMetadata();
+ } else {
+ store.readLastCommittedSegmentsInfo();
+ }
+ fail("corrupted segments_N file");
+ } catch (CorruptIndexException ex) {
+ // expected
+ }
+ assertTrue(store.isMarkedCorrupted());
+ Lucene.cleanLuceneIndex(store.directory()); // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call
+ store.close();
+ }
+
+ /**
+ * Verifies Store.canOpenIndex: false for an empty directory, true after a
+ * commit, and false again once the store is explicitly marked corrupted.
+ */
+ public void testCanOpenIndex() throws IOException {
+ IndexWriterConfig iwc = newIndexWriterConfig();
+ Path tempDir = createTempDir();
+ final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
+ assertFalse(Store.canOpenIndex(logger, tempDir));
+ IndexWriter writer = new IndexWriter(dir, iwc);
+ Document doc = new Document();
+ doc.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
+ writer.addDocument(doc);
+ writer.commit();
+ writer.close();
+ assertTrue(Store.canOpenIndex(logger, tempDir));
+
+ // inline DirectoryService that hands out the directory created above
+ final ShardId shardId = new ShardId(new Index("index"), 1);
+ DirectoryService directoryService = new DirectoryService(shardId, Settings.EMPTY) {
+ @Override
+ public long throttleTimeInNanos() {
+ return 0;
+ }
+
+ @Override
+ public Directory newDirectory() throws IOException {
+ return dir;
+ }
+ };
+ Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId));
+ store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
+ assertFalse(Store.canOpenIndex(logger, tempDir));
+ store.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java
new file mode 100644
index 0000000000..7609de746f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.suggest.stats;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for per-index and per-node suggest statistics: issues suggest
+ * requests against two indices and checks counts, current in-flight value and
+ * accumulated time in both the indices stats and the node stats APIs.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class SuggestStatsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ @Test
+ public void testSimpleStats() throws Exception {
+ // clear all stats first
+ client().admin().indices().prepareStats().clear().execute().actionGet();
+ final int numNodes = cluster().numDataNodes();
+ assertThat(numNodes, greaterThanOrEqualTo(2));
+ final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard...
+ final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10));
+ final int totalShards = shardsIdx1 + shardsIdx2;
+ assertThat(numNodes, lessThanOrEqualTo(totalShards));
+ assertAcked(prepareCreate("test1").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, shardsIdx1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("type", "f", "type=string"));
+ assertAcked(prepareCreate("test2").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, shardsIdx2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("type", "f", "type=string"));
+ assertThat(shardsIdx1 + shardsIdx2, equalTo(numAssignedShards("test1", "test2")));
+ assertThat(numAssignedShards("test1", "test2"), greaterThanOrEqualTo(2));
+ ensureGreen();
+
+ // index documents alternating between the two indices
+ for (int i = 0; i < randomIntBetween(20, 100); i++) {
+ index("test" + ((i % 2) + 1), "type", "" + i, "f", "test" + i);
+ }
+ refresh();
+
+ int suggestAllIdx = scaledRandomIntBetween(20, 50);
+ int suggestIdx1 = scaledRandomIntBetween(20, 50);
+ int suggestIdx2 = scaledRandomIntBetween(20, 50);
+
+ // fire suggest requests against all indices, then each index individually,
+ // timing the whole phase to bound the reported suggest time below
+ long startTime = System.currentTimeMillis();
+ for (int i = 0; i < suggestAllIdx; i++) {
+ SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest(), i).get();
+ assertAllSuccessful(suggestResponse);
+ }
+ for (int i = 0; i < suggestIdx1; i++) {
+ SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest("test1"), i).get();
+ assertAllSuccessful(suggestResponse);
+ }
+ for (int i = 0; i < suggestIdx2; i++) {
+ SuggestResponse suggestResponse = addSuggestions(internalCluster().clientNodeClient().prepareSuggest("test2"), i).get();
+ assertAllSuccessful(suggestResponse);
+ }
+ long endTime = System.currentTimeMillis();
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+
+ // check current
+ assertThat(indicesStats.getTotal().getSuggest().getCurrent(), equalTo(0l));
+
+ // check suggest count: each request counts once per shard it touched
+ assertThat(indicesStats.getTotal().getSuggest().getCount(), equalTo((long) (suggestAllIdx * totalShards + suggestIdx1 * shardsIdx1 + suggestIdx2 * shardsIdx2)));
+ assertThat(indicesStats.getIndices().get("test1").getTotal().getSuggest().getCount(), equalTo((long) ((suggestAllIdx + suggestIdx1) * shardsIdx1)));
+ assertThat(indicesStats.getIndices().get("test2").getTotal().getSuggest().getCount(), equalTo((long) ((suggestAllIdx + suggestIdx2) * shardsIdx2)));
+
+ logger.info("iter {}, iter1 {}, iter2 {}, {}", suggestAllIdx, suggestIdx1, suggestIdx2, endTime - startTime);
+ // check suggest time
+ assertThat(indicesStats.getTotal().getSuggest().getTimeInMillis(), greaterThan(0l));
+ // the upperbound is num shards * total time since we do searches in parallel
+ assertThat(indicesStats.getTotal().getSuggest().getTimeInMillis(), lessThanOrEqualTo(totalShards * (endTime - startTime)));
+
+ // node-level stats: only nodes holding shards of the two indices may report activity
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ NodeStats[] nodes = nodeStats.getNodes();
+ Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2");
+ int num = 0;
+ for (NodeStats stat : nodes) {
+ SuggestStats suggestStats = stat.getIndices().getSuggest();
+ logger.info("evaluating {}", stat.getNode());
+ if (nodeIdsWithIndex.contains(stat.getNode().getId())) {
+ assertThat(suggestStats.getCount(), greaterThan(0l));
+ assertThat(suggestStats.getTimeInMillis(), greaterThan(0l));
+ num++;
+ } else {
+ assertThat(suggestStats.getCount(), equalTo(0l));
+ assertThat(suggestStats.getTimeInMillis(), equalTo(0l));
+ }
+ }
+
+ assertThat(num, greaterThan(0));
+
+ }
+
+ // adds 2-10 random phrase/term suggestions on field "f" to the request
+ private SuggestRequestBuilder addSuggestions(SuggestRequestBuilder request, int i) {
+ for (int s = 0; s < randomIntBetween(2, 10); s++) {
+ if (randomBoolean()) {
+ request.addSuggestion(new PhraseSuggestionBuilder("s" + s).field("f").text("test" + i + " test" + (i - 1)));
+ } else {
+ request.addSuggestion(new TermSuggestionBuilder("s" + s).field("f").text("test" + i));
+ }
+ }
+ return request;
+ }
+
+ // ids of all nodes that host at least one active shard of the given indices
+ private Set<String> nodeIdsWithIndex(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ Set<String> nodes = new HashSet<>();
+ for (ShardIterator shardIterator : allAssignedShardsGrouped) {
+ for (ShardRouting routing : shardIterator.asUnordered()) {
+ if (routing.active()) {
+ nodes.add(routing.currentNodeId());
+ }
+
+ }
+ }
+ return nodes;
+ }
+
+
+ // number of assigned shard groups across the given indices
+ protected int numAssignedShards(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ return allAssignedShardsGrouped.size();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java
new file mode 100644
index 0000000000..2e5375efd5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.util.BigArrays;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+/**
+ *
+ */
+public class BufferedTranslogTests extends TranslogTests {
+
+ @Override
+ protected Translog create(Path path) throws IOException {
+ Settings build = Settings.settingsBuilder()
+ .put("index.translog.fs.type", TranslogWriter.Type.BUFFERED.name())
+ .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024), ByteSizeUnit.BYTES)
+ .build();
+ TranslogConfig translogConfig = new TranslogConfig(shardId, path, build, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
+ return new Translog(translogConfig);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java b/core/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java
new file mode 100644
index 0000000000..c45da660b0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/translog/SnapshotMatchers.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.ElasticsearchException;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+
+public final class SnapshotMatchers {
+ private SnapshotMatchers() {
+
+ }
+
+ /**
+     * Consumes a snapshot and makes sure its size is as expected
+ */
+ public static Matcher<Translog.Snapshot> size(int size) {
+ return new SizeMatcher(size);
+ }
+
+ /**
+     * Consumes a snapshot and makes sure its content is as expected
+ */
+ public static Matcher<Translog.Snapshot> equalsTo(Translog.Operation... ops) {
+ return new EqualMatcher(ops);
+ }
+
+ /**
+     * Consumes a snapshot and makes sure its content is as expected
+ */
+ public static Matcher<Translog.Snapshot> equalsTo(ArrayList<Translog.Operation> ops) {
+ return new EqualMatcher(ops.toArray(new Translog.Operation[ops.size()]));
+ }
+
+ public static class SizeMatcher extends TypeSafeMatcher<Translog.Snapshot> {
+
+ private final int size;
+
+ public SizeMatcher(int size) {
+ this.size = size;
+ }
+
+ @Override
+ public boolean matchesSafely(Translog.Snapshot snapshot) {
+ int count = 0;
+ try {
+ while (snapshot.next() != null) {
+ count++;
+ }
+ } catch (IOException ex) {
+ throw new ElasticsearchException("failed to advance snapshot", ex);
+ }
+ return size == count;
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("a snapshot with size ").appendValue(size);
+ }
+ }
+
+ public static class EqualMatcher extends TypeSafeMatcher<Translog.Snapshot> {
+
+ private final Translog.Operation[] expectedOps;
+ String failureMsg = null;
+
+ public EqualMatcher(Translog.Operation[] expectedOps) {
+ this.expectedOps = expectedOps;
+ }
+
+ @Override
+ protected boolean matchesSafely(Translog.Snapshot snapshot) {
+ try {
+ Translog.Operation op;
+ int i;
+ for (i = 0, op = snapshot.next(); op != null && i < expectedOps.length; i++, op = snapshot.next()) {
+ if (expectedOps[i].equals(op) == false) {
+ failureMsg = "position [" + i + "] expected [" + expectedOps[i] + "] but found [" + op + "]";
+ return false;
+ }
+ }
+
+ if (i < expectedOps.length) {
+ failureMsg = "expected [" + expectedOps.length + "] ops but only found [" + i + "]";
+ return false;
+ }
+
+ if (op != null) {
+ int count = 1; // to account for the op we already read
+ while (snapshot.next() != null) {
+ count++;
+ }
+ failureMsg = "expected [" + expectedOps.length + "] ops but got [" + (expectedOps.length + count) + "]";
+ return false;
+ }
+ return true;
+ } catch (IOException ex) {
+ throw new ElasticsearchException("failed to read snapshot content", ex);
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(failureMsg);
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
new file mode 100644
index 0000000000..98c1d3917e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -0,0 +1,1246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityTests;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.charset.Charset;
+import java.nio.file.*;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class TranslogTests extends ElasticsearchTestCase {
+
+ protected final ShardId shardId = new ShardId(new Index("index"), 1);
+
+ protected Translog translog;
+ protected Path translogDir;
+
+ @Override
+ protected void afterIfSuccessful() throws Exception {
+ super.afterIfSuccessful();
+
+ if (translog.isOpen()) {
+ if (translog.currentFileGeneration() > 1) {
+ translog.commit();
+ assertFileDeleted(translog, translog.currentFileGeneration() - 1);
+ }
+ translog.close();
+ }
+ assertFileIsPresent(translog, translog.currentFileGeneration());
+ IOUtils.rm(translog.location()); // delete all the locations
+
+ }
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ // if a previous test failed we clean up things here
+ translogDir = createTempDir();
+ translog = create(translogDir);
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ try {
+ assertEquals("there are still open views", 0, translog.getNumOpenViews());
+ translog.close();
+ } finally {
+ super.tearDown();
+ }
+ }
+
+ protected Translog create(Path path) throws IOException {
+ Settings build = Settings.settingsBuilder()
+ .put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.SIMPLE.name())
+ .build();
+ TranslogConfig translogConfig = new TranslogConfig(shardId, path, build, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
+ return new Translog(translogConfig);
+ }
+
+ protected void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) {
+ list.add(op);
+ translog.add(op);
+ }
+
+ public void testIdParsingFromFile() {
+ long id = randomIntBetween(0, Integer.MAX_VALUE);
+ Path file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id);
+ assertThat(Translog.parseIdFromFileName(file), equalTo(id));
+
+ file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id + ".recovering");
+ assertThat(Translog.parseIdFromFileName(file), equalTo(id));
+
+ file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + randomNonTranslogPatternString(1, 10) + id);
+ assertThat(Translog.parseIdFromFileName(file), equalTo(-1l));
+
+ file = translogDir.resolve(randomNonTranslogPatternString(1, Translog.TRANSLOG_FILE_PREFIX.length() - 1));
+ assertThat(Translog.parseIdFromFileName(file), equalTo(-1l));
+ }
+
+ private String randomNonTranslogPatternString(int min, int max) {
+ String string;
+ boolean validPathString;
+ do {
+ validPathString = false;
+ string = randomRealisticUnicodeOfCodepointLength(randomIntBetween(min, max));
+ try {
+ translogDir.resolve(string);
+ validPathString = true;
+ } catch (InvalidPathException ex) {
+ // some FS don't like our random file names -- let's just skip these random choices
+ }
+ } while (Translog.PARSE_ID_PATTERN.matcher(string).matches() || validPathString == false);
+ return string;
+ }
+
+ @Test
+ public void testRead() throws IOException {
+ Translog.Location loc1 = translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Location loc2 = translog.add(new Translog.Create("test", "2", new byte[]{2}));
+ assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ translog.sync();
+ assertThat(translog.read(loc1).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{1})));
+ assertThat(translog.read(loc2).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{2})));
+ Translog.Location loc3 = translog.add(new Translog.Create("test", "2", new byte[]{3}));
+ assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ translog.sync();
+ assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ translog.prepareCommit();
+ assertThat(translog.read(loc3).getSource().source.toBytesArray(), equalTo(new BytesArray(new byte[]{3})));
+ translog.commit();
+ assertNull(translog.read(loc1));
+ assertNull(translog.read(loc2));
+ assertNull(translog.read(loc3));
+ try {
+ translog.read(new Translog.Location(translog.currentFileGeneration() + 1, 17, 35));
+ fail("generation is greater than the current");
+ } catch (IllegalStateException ex) {
+ // expected
+ }
+ }
+
+ @Test
+ public void testSimpleOperations() throws IOException {
+ ArrayList<Translog.Operation> ops = new ArrayList<>();
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.size(0));
+ snapshot.close();
+
+ addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1}));
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.close();
+
+ addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2}));
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size()));
+ snapshot.close();
+
+ addToTranslogAndList(translog, ops, new Translog.Delete(newUid("3")));
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size()));
+ snapshot.close();
+
+ snapshot = translog.newSnapshot();
+
+ Translog.Create create = (Translog.Create) snapshot.next();
+ assertThat(create != null, equalTo(true));
+ assertThat(create.source().toBytes(), equalTo(new byte[]{1}));
+
+ Translog.Index index = (Translog.Index) snapshot.next();
+ assertThat(index != null, equalTo(true));
+ assertThat(index.source().toBytes(), equalTo(new byte[]{2}));
+
+ Translog.Delete delete = (Translog.Delete) snapshot.next();
+ assertThat(delete != null, equalTo(true));
+ assertThat(delete.uid(), equalTo(newUid("3")));
+
+ assertThat(snapshot.next(), equalTo(null));
+
+ snapshot.close();
+
+ long firstId = translog.currentFileGeneration();
+ translog.prepareCommit();
+ assertThat(translog.currentFileGeneration(), Matchers.not(equalTo(firstId)));
+
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(ops.size()));
+ snapshot.close();
+
+ translog.commit();
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.size(0));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(0));
+ snapshot.close();
+ }
+
+ protected TranslogStats stats() throws IOException {
+ // force flushing and updating of stats
+ translog.sync();
+ TranslogStats stats = translog.stats();
+ if (randomBoolean()) {
+ BytesStreamOutput out = new BytesStreamOutput();
+ stats.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ stats = new TranslogStats();
+ stats.readFrom(in);
+ }
+ return stats;
+ }
+
+ @Test
+ public void testStats() throws IOException {
+ final long firstOperationPosition = translog.getFirstOperationPosition();
+ TranslogStats stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(0l));
+ long lastSize = stats.translogSizeInBytes().bytes();
+ assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC)));
+ assertThat(lastSize, equalTo(firstOperationPosition));
+
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(1l));
+ assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+ lastSize = stats.translogSizeInBytes().bytes();
+
+ translog.add(new Translog.Index("test", "2", new byte[]{2}));
+ stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(2l));
+ assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+ lastSize = stats.translogSizeInBytes().bytes();
+
+ translog.add(new Translog.Delete(newUid("3")));
+ stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(3l));
+ assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+ lastSize = stats.translogSizeInBytes().bytes();
+
+ translog.add(new Translog.Delete(newUid("4")));
+ translog.prepareCommit();
+ stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(4l));
+ assertThat(stats.translogSizeInBytes().bytes(), greaterThan(lastSize));
+
+ translog.commit();
+ stats = stats();
+ assertThat(stats.estimatedNumberOfOperations(), equalTo(0l));
+ assertThat(stats.translogSizeInBytes().bytes(), equalTo(firstOperationPosition));
+ }
+
+ @Test
+ public void testSnapshot() {
+ ArrayList<Translog.Operation> ops = new ArrayList<>();
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.size(0));
+ snapshot.close();
+
+ addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1}));
+
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ snapshot.close();
+
+ snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+
+ // snapshot while another is open
+ Translog.Snapshot snapshot1 = translog.newSnapshot();
+ assertThat(snapshot1, SnapshotMatchers.size(1));
+ assertThat(snapshot1.estimatedTotalOperations(), equalTo(1));
+
+ snapshot.close();
+ snapshot1.close();
+ }
+
+ @Test
+ public void testSnapshotWithNewTranslog() throws IOException {
+ ArrayList<Translog.Operation> ops = new ArrayList<>();
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.size(0));
+ snapshot.close();
+
+ addToTranslogAndList(translog, ops, new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Snapshot snapshot1 = translog.newSnapshot();
+
+ addToTranslogAndList(translog, ops, new Translog.Index("test", "2", new byte[]{2}));
+
+ translog.prepareCommit();
+ addToTranslogAndList(translog, ops, new Translog.Index("test", "3", new byte[]{3}));
+
+ Translog.Snapshot snapshot2 = translog.newSnapshot();
+ translog.commit();
+ assertThat(snapshot2, SnapshotMatchers.equalsTo(ops));
+ assertThat(snapshot2.estimatedTotalOperations(), equalTo(ops.size()));
+
+
+ assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0)));
+ snapshot1.close();
+ snapshot2.close();
+ }
+
+ public void testSnapshotOnClosedTranslog() throws IOException {
+ assertTrue(Files.exists(translogDir.resolve(translog.getFilename(1))));
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ translog.close();
+ try {
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ fail("translog is closed");
+ } catch (AlreadyClosedException ex) {
+ assertThat(ex.getMessage(), containsString("translog-1.tlog is already closed can't increment"));
+ }
+ }
+
+ @Test
+ public void deleteOnSnapshotRelease() throws Exception {
+ ArrayList<Translog.Operation> firstOps = new ArrayList<>();
+ addToTranslogAndList(translog, firstOps, new Translog.Create("test", "1", new byte[]{1}));
+
+ Translog.Snapshot firstSnapshot = translog.newSnapshot();
+ assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1));
+ translog.commit();
+ assertFileIsPresent(translog, 1);
+
+
+ ArrayList<Translog.Operation> secOps = new ArrayList<>();
+ addToTranslogAndList(translog, secOps, new Translog.Index("test", "2", new byte[]{2}));
+ assertThat(firstSnapshot.estimatedTotalOperations(), equalTo(1));
+
+ Translog.Snapshot secondSnapshot = translog.newSnapshot();
+ translog.add(new Translog.Index("test", "3", new byte[]{3}));
+ assertThat(secondSnapshot, SnapshotMatchers.equalsTo(secOps));
+ assertThat(secondSnapshot.estimatedTotalOperations(), equalTo(1));
+ assertFileIsPresent(translog, 1);
+ assertFileIsPresent(translog, 2);
+
+ firstSnapshot.close();
+ assertFileDeleted(translog, 1);
+ assertFileIsPresent(translog, 2);
+ secondSnapshot.close();
+        assertFileIsPresent(translog, 2); // it's the current file, nothing should be deleted
+ translog.commit();
+        assertFileIsPresent(translog, 3); // it's the current file, nothing should be deleted
+ assertFileDeleted(translog, 2);
+
+ }
+
+
+ public void assertFileIsPresent(Translog translog, long id) {
+ if (Files.exists(translogDir.resolve(translog.getFilename(id)))) {
+ return;
+ }
+ fail(translog.getFilename(id) + " is not present in any location: " + translog.location());
+ }
+
+ public void assertFileDeleted(Translog translog, long id) {
+ assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(translog.getFilename(id))));
+ }
+
+ static class LocationOperation {
+ final Translog.Operation operation;
+ final Translog.Location location;
+
+ public LocationOperation(Translog.Operation operation, Translog.Location location) {
+ this.operation = operation;
+ this.location = location;
+ }
+
+ }
+
+ @Test
+ public void testConcurrentWritesWithVaryingSize() throws Throwable {
+ final int opsPerThread = randomIntBetween(10, 200);
+ int threadCount = 2 + randomInt(5);
+
+ logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread);
+ final BlockingQueue<LocationOperation> writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread);
+
+ Thread[] threads = new Thread[threadCount];
+ final Throwable[] threadExceptions = new Throwable[threadCount];
+ final CountDownLatch downLatch = new CountDownLatch(1);
+ for (int i = 0; i < threadCount; i++) {
+ final int threadId = i;
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ downLatch.await();
+ for (int opCount = 0; opCount < opsPerThread; opCount++) {
+ Translog.Operation op;
+ switch (randomFrom(Translog.Operation.Type.values())) {
+ case CREATE:
+ op = new Translog.Create("test", threadId + "_" + opCount,
+ randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
+ break;
+ case SAVE:
+ op = new Translog.Index("test", threadId + "_" + opCount,
+ randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
+ break;
+ case DELETE:
+ op = new Translog.Delete(new Term("_uid", threadId + "_" + opCount),
+ 1 + randomInt(100000),
+ randomFrom(VersionType.values()));
+ break;
+ case DELETE_BY_QUERY:
+ // deprecated
+ continue;
+ default:
+ throw new ElasticsearchException("not supported op type");
+ }
+
+ Translog.Location loc = translog.add(op);
+ writtenOperations.add(new LocationOperation(op, loc));
+ }
+ } catch (Throwable t) {
+ threadExceptions[threadId] = t;
+ }
+ }
+ });
+ threads[i].setDaemon(true);
+ threads[i].start();
+ }
+
+ downLatch.countDown();
+
+ for (int i = 0; i < threadCount; i++) {
+ if (threadExceptions[i] != null) {
+ throw threadExceptions[i];
+ }
+ threads[i].join(60 * 1000);
+ }
+
+ for (LocationOperation locationOperation : writtenOperations) {
+ Translog.Operation op = translog.read(locationOperation.location);
+ Translog.Operation expectedOp = locationOperation.operation;
+ assertEquals(expectedOp.opType(), op.opType());
+ switch (op.opType()) {
+ case SAVE:
+ Translog.Index indexOp = (Translog.Index) op;
+ Translog.Index expIndexOp = (Translog.Index) expectedOp;
+ assertEquals(expIndexOp.id(), indexOp.id());
+ assertEquals(expIndexOp.routing(), indexOp.routing());
+ assertEquals(expIndexOp.type(), indexOp.type());
+ assertEquals(expIndexOp.source(), indexOp.source());
+ assertEquals(expIndexOp.version(), indexOp.version());
+ assertEquals(expIndexOp.versionType(), indexOp.versionType());
+ break;
+ case CREATE:
+ Translog.Create createOp = (Translog.Create) op;
+ Translog.Create expCreateOp = (Translog.Create) expectedOp;
+ assertEquals(expCreateOp.id(), createOp.id());
+ assertEquals(expCreateOp.routing(), createOp.routing());
+ assertEquals(expCreateOp.type(), createOp.type());
+ assertEquals(expCreateOp.source(), createOp.source());
+ assertEquals(expCreateOp.version(), createOp.version());
+ assertEquals(expCreateOp.versionType(), createOp.versionType());
+ break;
+ case DELETE:
+ Translog.Delete delOp = (Translog.Delete) op;
+ Translog.Delete expDelOp = (Translog.Delete) expectedOp;
+ assertEquals(expDelOp.uid(), delOp.uid());
+ assertEquals(expDelOp.version(), delOp.version());
+ assertEquals(expDelOp.versionType(), delOp.versionType());
+ break;
+ default:
+ throw new ElasticsearchException("unsupported opType");
+ }
+
+ }
+
+ }
+
+ @Test
+ public void testTranslogChecksums() throws Exception {
+ List<Translog.Location> locations = newArrayList();
+
+ int translogOperations = randomIntBetween(10, 100);
+ for (int op = 0; op < translogOperations; op++) {
+ String ascii = randomAsciiOfLengthBetween(1, 50);
+ locations.add(translog.add(new Translog.Create("test", "" + op, ascii.getBytes("UTF-8"))));
+ }
+ translog.sync();
+
+ corruptTranslogs(translogDir);
+
+ AtomicInteger corruptionsCaught = new AtomicInteger(0);
+ for (Translog.Location location : locations) {
+ try {
+ translog.read(location);
+ } catch (TranslogCorruptedException e) {
+ corruptionsCaught.incrementAndGet();
+ }
+ }
+ assertThat("at least one corruption was caused and caught", corruptionsCaught.get(), greaterThanOrEqualTo(1));
+ }
+
+ @Test
+ public void testTruncatedTranslogs() throws Exception {
+ List<Translog.Location> locations = newArrayList();
+
+ int translogOperations = randomIntBetween(10, 100);
+ for (int op = 0; op < translogOperations; op++) {
+ String ascii = randomAsciiOfLengthBetween(1, 50);
+ locations.add(translog.add(new Translog.Create("test", "" + op, ascii.getBytes("UTF-8"))));
+ }
+ translog.sync();
+
+ truncateTranslogs(translogDir);
+
+ AtomicInteger truncations = new AtomicInteger(0);
+ for (Translog.Location location : locations) {
+ try {
+ translog.read(location);
+ } catch (ElasticsearchException e) {
+ if (e.getCause() instanceof EOFException) {
+ truncations.incrementAndGet();
+ } else {
+ throw e;
+ }
+ }
+ }
+ assertThat("at least one truncation was caused and caught", truncations.get(), greaterThanOrEqualTo(1));
+ }
+
+ /**
+ * Randomly truncate some bytes in the translog files
+ */
+ private void truncateTranslogs(Path directory) throws Exception {
+ Path[] files = FileSystemUtils.files(directory, "translog-*");
+ for (Path file : files) {
+ try (FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+ long prevSize = f.size();
+ long newSize = prevSize - randomIntBetween(1, (int) prevSize / 2);
+ logger.info("--> truncating {}, prev: {}, now: {}", file, prevSize, newSize);
+ f.truncate(newSize);
+ }
+ }
+ }
+
+
+ /**
+ * Randomly overwrite some bytes in the translog files
+ */
+ private void corruptTranslogs(Path directory) throws Exception {
+ Path[] files = FileSystemUtils.files(directory, "translog-*");
+ for (Path file : files) {
+ logger.info("--> corrupting {}...", file);
+ FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE);
+ int corruptions = scaledRandomIntBetween(10, 50);
+ for (int i = 0; i < corruptions; i++) {
+ // note: with the current logic, this will sometimes be a no-op
+ long pos = randomIntBetween(0, (int) f.size());
+ ByteBuffer junk = ByteBuffer.wrap(new byte[]{randomByte()});
+ f.write(junk, pos);
+ }
+ f.close();
+ }
+ }
+
+ private Term newUid(String id) {
+ return new Term("_uid", id);
+ }
+
+
+ @Test
+ public void testVerifyTranslogIsNotDeleted() throws IOException {
+ assertFileIsPresent(translog, 1);
+ translog.add(new Translog.Create("test", "1", new byte[]{1}));
+ Translog.Snapshot snapshot = translog.newSnapshot();
+ assertThat(snapshot, SnapshotMatchers.size(1));
+ assertFileIsPresent(translog, 1);
+ assertThat(snapshot.estimatedTotalOperations(), equalTo(1));
+ if (randomBoolean()) {
+ translog.close();
+ snapshot.close();
+ } else {
+ snapshot.close();
+ translog.close();
+ }
+
+ assertFileIsPresent(translog, 1);
+ }
+
+ /** Tests that concurrent readers and writes maintain view and snapshot semantics */
+ @Test
+ public void testConcurrentWriteViewsAndSnapshot() throws Throwable {
+ final Thread[] writers = new Thread[randomIntBetween(1, 10)];
+ final Thread[] readers = new Thread[randomIntBetween(1, 10)];
+ final int flushEveryOps = randomIntBetween(5, 100);
+        // used to notify the main thread that enough operations have been written for it to simulate a flush
+ final AtomicReference<CountDownLatch> writtenOpsLatch = new AtomicReference<>(new CountDownLatch(0));
+ final AtomicLong idGenerator = new AtomicLong();
+ final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1);
+
+ // a map of all written ops and their returned location.
+ final Map<Translog.Operation, Translog.Location> writtenOps = ConcurrentCollections.newConcurrentMap();
+
+ // a signal for all threads to stop
+ final AtomicBoolean run = new AtomicBoolean(true);
+
+ // any errors on threads
+ final List<Throwable> errors = new CopyOnWriteArrayList<>();
+ logger.debug("using [{}] readers. [{}] writers. flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps);
+ for (int i = 0; i < writers.length; i++) {
+ final String threadId = "writer_" + i;
+ writers[i] = new Thread(new AbstractRunnable() {
+ @Override
+ public void doRun() throws BrokenBarrierException, InterruptedException {
+ barrier.await();
+ int counter = 0;
+ while (run.get()) {
+ long id = idGenerator.incrementAndGet();
+ final Translog.Operation op;
+ switch (Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type.values().length))]) {
+ case CREATE:
+ op = new Translog.Create("type", "" + id, new byte[]{(byte) id});
+ break;
+ case SAVE:
+ op = new Translog.Index("type", "" + id, new byte[]{(byte) id});
+ break;
+ case DELETE:
+ op = new Translog.Delete(newUid("" + id));
+ break;
+ case DELETE_BY_QUERY:
+ // deprecated
+ continue;
+ default:
+ throw new ElasticsearchException("unknown type");
+ }
+ Translog.Location location = translog.add(op);
+ Translog.Location existing = writtenOps.put(op, location);
+ if (existing != null) {
+ fail("duplicate op [" + op + "], old entry at " + location);
+ }
+ writtenOpsLatch.get().countDown();
+ counter++;
+ }
+ logger.debug("--> [{}] done. wrote [{}] ops.", threadId, counter);
+ }
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.error("--> writer [{}] had an error", t, threadId);
+ errors.add(t);
+ }
+ }, threadId);
+ writers[i].start();
+ }
+
+ for (int i = 0; i < readers.length; i++) {
+ final String threadId = "reader_" + i;
+ readers[i] = new Thread(new AbstractRunnable() {
+ Translog.View view = null;
+ Set<Translog.Operation> writtenOpsAtView;
+
+ @Override
+ public void onFailure(Throwable t) {
+ logger.error("--> reader [{}] had an error", t, threadId);
+ errors.add(t);
+ closeView();
+ }
+
+ void closeView() {
+ if (view != null) {
+ view.close();
+ }
+ }
+
+ void newView() {
+ closeView();
+ view = translog.newView();
+ // captures the currently written ops so we know what to expect from the view
+ writtenOpsAtView = new HashSet<>(writtenOps.keySet());
+ logger.debug("--> [{}] opened view from [{}]", threadId, view.minTranslogGeneration());
+ }
+
+ @Override
+ protected void doRun() throws Exception {
+ barrier.await();
+ int iter = 0;
+ while (run.get()) {
+ if (iter++ % 10 == 0) {
+ newView();
+ }
+
+                        // captures all ops that are written since the view was created (with a small caveat, see below)
+ // these are what we expect the snapshot to return (and potentially some more).
+ Set<Translog.Operation> expectedOps = new HashSet<>(writtenOps.keySet());
+ expectedOps.removeAll(writtenOpsAtView);
+ try (Translog.Snapshot snapshot = view.snapshot()) {
+ Translog.Operation op;
+ while ((op = snapshot.next()) != null) {
+ expectedOps.remove(op);
+ }
+ }
+ if (expectedOps.isEmpty() == false) {
+ StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size()).append(" operations");
+ boolean failed = false;
+ for (Translog.Operation op : expectedOps) {
+ final Translog.Location loc = writtenOps.get(op);
+ if (loc.generation < view.minTranslogGeneration()) {
+ // writtenOps is only updated after the op was written to the translog. This mean
+ // that ops written to the translog before the view was taken (and will be missing from the view)
+ // may yet be available in writtenOpsAtView, meaning we will erroneously expect them
+ continue;
+ }
+ failed = true;
+ missed.append("\n --> [").append(op).append("] written at ").append(loc);
+ }
+ if (failed) {
+ fail(missed.toString());
+ }
+ }
+ // slow down things a bit and spread out testing..
+ writtenOpsLatch.get().await(200, TimeUnit.MILLISECONDS);
+ }
+ closeView();
+ logger.debug("--> [{}] done. tested [{}] snapshots", threadId, iter);
+ }
+ }, threadId);
+ readers[i].start();
+ }
+
+ // Main thread: run a random number of commit cycles while writers/readers are active, then
+ // stop all threads, join them, and rethrow the first recorded error (others as suppressed).
+ barrier.await();
+ try {
+ for (int iterations = scaledRandomIntBetween(10, 200); iterations > 0 && errors.isEmpty(); iterations--) {
+ // wait until flushEveryOps more ops were written (or an error occurred), then commit
+ writtenOpsLatch.set(new CountDownLatch(flushEveryOps));
+ while (writtenOpsLatch.get().await(200, TimeUnit.MILLISECONDS) == false) {
+ if (errors.size() > 0) {
+ break;
+ }
+ }
+ translog.commit();
+ }
+ } finally {
+ run.set(false);
+ logger.debug("--> waiting for threads to stop");
+ for (Thread thread : writers) {
+ thread.join();
+ }
+ for (Thread thread : readers) {
+ thread.join();
+ }
+ if (errors.size() > 0) {
+ Throwable e = errors.get(0);
+ for (Throwable suppress : errors.subList(1, errors.size())) {
+ e.addSuppressed(suppress);
+ }
+ throw e;
+ }
+ logger.info("--> test done. total ops written [{}]", writtenOps.size());
+ }
+ }
+
+
+ // ensureSynced(location) must sync (returning true) only when the given location has not
+ // been persisted yet, and must become a no-op (returning false) once sync()/commit()
+ // already covered that location.
+ public void testSyncUpTo() throws IOException {
+ int translogOperations = randomIntBetween(10, 100);
+ int count = 0;
+ for (int op = 0; op < translogOperations; op++) {
+ final Translog.Location location = translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
+ if (randomBoolean()) {
+ assertTrue("at least one operation pending", translog.syncNeeded());
+ assertTrue("this operation has not been synced", translog.ensureSynced(location));
+ assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
+ translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
+ assertTrue("one pending operation", translog.syncNeeded());
+ assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
+ assertTrue("we only synced a previous operation yet", translog.syncNeeded());
+ }
+ if (rarely()) {
+ translog.commit();
+ assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now
+ assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
+ }
+
+ if (randomBoolean()) {
+ translog.sync();
+ assertFalse("translog has been synced already", translog.ensureSynced(location));
+ }
+ }
+ }
+
+ // Location#compareTo must order locations by write order: the maximum of a shuffled list of
+ // locations has to point at the last op written, which lives in the current file generation.
+ public void testLocationComparison() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ int count = 0;
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
+ if (rarely() && translogOperations > op+1) {
+ translog.commit();
+ }
+ }
+ Collections.shuffle(locations, random());
+ Translog.Location max = locations.get(0);
+ for (Translog.Location location : locations) {
+ max = max(max, location);
+ }
+
+ // the last op is always in the current generation, and reading it back yields the last payload
+ assertEquals(max.generation, translog.currentFileGeneration());
+ final Translog.Operation read = translog.read(max);
+ assertEquals(read.getSource().source.toUtf8(), Integer.toString(count));
+ }
+
+ /** Returns the greater of two locations per {@code Location#compareTo}; {@code b} wins on ties. */
+ public static Translog.Location max(Translog.Location a, Translog.Location b) {
+ if (a.compareTo(b) > 0) {
+ return a;
+ }
+ return b;
+ }
+
+
+ // A reader opened against the on-disk checkpoint must expose exactly the ops that were synced
+ // before the checkpoint was read; reading past the checkpoint must hit EOF.
+ public void testBasicCheckpoint() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ int lastSynced = -1;
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ if (frequently()) {
+ translog.sync();
+ lastSynced = op;
+ }
+ }
+ assertEquals(translogOperations, translog.totalOperations());
+ // written after the last sync, so it must be invisible through the checkpointed reader
+ final Translog.Location lastLocation = translog.add(new Translog.Create("test", "" + translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
+
+ final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
+ try (final ImmutableTranslogReader reader = translog.openReader(translog.location().resolve(translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
+ assertEquals(lastSynced + 1, reader.totalOperations());
+ for (int op = 0; op < translogOperations; op++) {
+ Translog.Location location = locations.get(op);
+ if (op <= lastSynced) {
+ final Translog.Operation read = reader.read(location);
+ assertEquals(Integer.toString(op), read.getSource().source.toUtf8());
+ } else {
+ try {
+ reader.read(location);
+ fail("read past checkpoint");
+ } catch (EOFException ex) {
+ // expected - op was not synced when the checkpoint was taken
+ }
+ }
+ }
+ try {
+ reader.read(lastLocation);
+ fail("read past checkpoint");
+ } catch (EOFException ex) {
+ }
+ }
+ assertEquals(translogOperations + 1, translog.totalOperations());
+ translog.close();
+ }
+
+ // Exercises TranslogWriter directly: writes raw 4-byte ints, syncs, and reads them back
+ // through either the live writer or an immutable reader. The immutable reader is bounded by
+ // the checkpoint taken at sync time; the live writer can see ops added afterwards.
+ public void testTranslogWriter() throws IOException {
+ final TranslogWriter writer = translog.createWriter(0);
+ final int numOps = randomIntBetween(10, 100);
+ byte[] bytes = new byte[4];
+ ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
+ for (int i = 0; i < numOps; i++) {
+ out.reset(bytes);
+ out.writeInt(i);
+ writer.add(new BytesArray(bytes));
+ }
+ writer.sync();
+
+ final TranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
+ for (int i = 0; i < numOps; i++) {
+ ByteBuffer buffer = ByteBuffer.allocate(4);
+ reader.readBytes(buffer, reader.getFirstOperationOffset() + 4*i);
+ buffer.flip();
+ final int value = buffer.getInt();
+ assertEquals(i, value);
+ }
+
+ // one more op written after the sync / checkpoint
+ out.reset(bytes);
+ out.writeInt(2048);
+ writer.add(new BytesArray(bytes));
+
+ if (reader instanceof ImmutableTranslogReader) {
+ ByteBuffer buffer = ByteBuffer.allocate(4);
+ try {
+ reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * numOps);
+ fail("read past EOF?");
+ } catch (EOFException ex) {
+ // expected
+ }
+ } else {
+ // live reader!
+ ByteBuffer buffer = ByteBuffer.allocate(4);
+ final long pos = reader.getFirstOperationOffset() + 4 * numOps;
+ reader.readBytes(buffer, pos);
+ buffer.flip();
+ final int value = buffer.getInt();
+ assertEquals(2048, value);
+ }
+ IOUtils.close(writer, reader);
+ }
+
+ // Writes ops with occasional commits, reopens the translog from the last committed
+ // generation, and verifies that exactly the uncommitted ops are recovered.
+ public void testBasicRecovery() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ Translog.TranslogGeneration translogGeneration = null;
+ int minUncommittedOp = -1;
+ final boolean commitOften = randomBoolean();
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ final boolean commit = commitOften ? frequently() : rarely();
+ if (commit && op < translogOperations-1) {
+ translog.commit();
+ minUncommittedOp = op+1;
+ translogGeneration = translog.getGeneration();
+ }
+ }
+ translog.sync();
+ TranslogConfig config = translog.getConfig();
+
+ translog.close();
+ config.setTranslogGeneration(translogGeneration);
+ translog = new Translog(config);
+ if (translogGeneration == null) {
+ // no commit ever happened - reopening starts from scratch with an empty translog
+ assertEquals(0, translog.stats().estimatedNumberOfOperations());
+ assertEquals(1, translog.currentFileGeneration());
+ assertFalse(translog.syncNeeded());
+ try (Translog.Snapshot snapshot = translog.newSnapshot()) {
+ assertNull(snapshot.next());
+ }
+ } else {
+ assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
+ assertFalse(translog.syncNeeded());
+ try (Translog.Snapshot snapshot = translog.newSnapshot()) {
+ for (int i = minUncommittedOp; i < translogOperations; i++) {
+ assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation);
+ Translog.Operation next = snapshot.next();
+ assertNotNull("operation " + i + " must be non-null", next);
+ assertEquals(i, Integer.parseInt(next.getSource().source.toUtf8()));
+ }
+ }
+ }
+ }
+
+ // Simulates a crash between prepareCommit() and commit(): recovery must replay the ops of the
+ // interrupted commit, and each recovery run bumps the file generation by one.
+ public void testRecoveryUncommitted() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ final int prepareOp = randomIntBetween(0, translogOperations-1);
+ Translog.TranslogGeneration translogGeneration = null;
+ final boolean sync = randomBoolean();
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ if (op == prepareOp) {
+ translogGeneration = translog.getGeneration();
+ translog.prepareCommit();
+ assertEquals("expected this to be the first commit", 1l, translogGeneration.translogFileGeneration);
+ assertNotNull(translogGeneration.translogUUID);
+ }
+ }
+ if (sync) {
+ translog.sync();
+ }
+ // we intentionally don't close the tlog that is in the prepareCommit stage since we try to recovery the uncommitted
+ // translog here as well.
+ TranslogConfig config = translog.getConfig();
+ config.setTranslogGeneration(translogGeneration);
+ try (Translog translog = new Translog(config)) {
+ assertNotNull(translogGeneration);
+ assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
+ assertFalse(translog.syncNeeded());
+ try (Translog.Snapshot snapshot = translog.newSnapshot()) {
+ // without a sync only the ops up to the prepareCommit are guaranteed to be durable
+ int upTo = sync ? translogOperations : prepareOp;
+ for (int i = 0; i < upTo; i++) {
+ Translog.Operation next = snapshot.next();
+ assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
+ assertEquals("payload missmatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.toUtf8()));
+ }
+ }
+ }
+ if (randomBoolean()) { // recover twice
+ try (Translog translog = new Translog(config)) {
+ assertNotNull(translogGeneration);
+ assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
+ assertFalse(translog.syncNeeded());
+ try (Translog.Snapshot snapshot = translog.newSnapshot()) {
+ int upTo = sync ? translogOperations : prepareOp;
+ for (int i = 0; i < upTo; i++) {
+ Translog.Operation next = snapshot.next();
+ assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
+ assertEquals("payload missmatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.toUtf8()));
+ }
+ }
+ }
+ }
+
+ }
+
+ // Round-trips a list of operations through Translog.writeOperations/readOperations
+ // and verifies the serialized form deserializes to an equal list.
+ public void testSnapshotFromStreamInput() throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ List<Translog.Operation> ops = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ for (int op = 0; op < translogOperations; op++) {
+ Translog.Create test = new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")));
+ ops.add(test);
+ }
+ Translog.writeOperations(out, ops);
+ final List<Translog.Operation> readOperations = Translog.readOperations(StreamInput.wrap(out.bytes()));
+ assertEquals(ops.size(), readOperations.size());
+ assertEquals(ops, readOperations);
+ }
+
+ // Verifies the equals/hashCode contract of Translog.Location: identity implies equality,
+ // distinct locations within one translog are unequal, and the same write sequence in two
+ // independent translogs yields pairwise-equal locations with equal hash codes.
+ public void testLocationHashCodeEquals() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ List<Translog.Location> locations2 = newArrayList();
+ int translogOperations = randomIntBetween(10, 100);
+ try(Translog translog2 = create(createTempDir())) {
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ locations2.add(translog2.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ }
+ int iters = randomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ Translog.Location location = RandomPicks.randomFrom(random(), locations);
+ for (Translog.Location loc : locations) {
+ if (loc == location) {
+ assertTrue(loc.equals(location));
+ assertEquals(loc.hashCode(), location.hashCode());
+ } else {
+ assertFalse(loc.equals(location));
+ }
+ }
+ for (int j = 0; j < translogOperations; j++) {
+ assertTrue(locations.get(j).equals(locations2.get(j)));
+ assertEquals(locations.get(j).hashCode(), locations2.get(j).hashCode());
+ }
+ }
+ }
+ }
+
+ // Opening a translog with a generation whose UUID does not match the files on disk must fail
+ // with TranslogCorruptedException; with the correct UUID, uncommitted ops are recovered.
+ public void testOpenForeignTranslog() throws IOException {
+ List<Translog.Location> locations = newArrayList();
+ int translogOperations = randomIntBetween(1, 10);
+ int firstUncommitted = 0;
+ for (int op = 0; op < translogOperations; op++) {
+ locations.add(translog.add(new Translog.Create("test", "" + op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
+ if (randomBoolean()) {
+ translog.commit();
+ firstUncommitted = op + 1;
+ }
+ }
+ TranslogConfig config = translog.getConfig();
+ Translog.TranslogGeneration translogGeneration = translog.getGeneration();
+ translog.close();
+
+ // same generation number, but a random (foreign) UUID - must be rejected
+ config.setTranslogGeneration(new Translog.TranslogGeneration(randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()),translogGeneration.translogFileGeneration));
+ try {
+ new Translog(config);
+ fail("translog doesn't belong to this UUID");
+ } catch (TranslogCorruptedException ex) {
+
+ }
+ config.setTranslogGeneration(translogGeneration);
+ this.translog = new Translog(config);
+ try (Translog.Snapshot snapshot = this.translog.newSnapshot()) {
+ for (int i = firstUncommitted; i < translogOperations; i++) {
+ Translog.Operation next = snapshot.next();
+ assertNotNull("" + i, next);
+ assertEquals(Integer.parseInt(next.getSource().source.toUtf8()), i);
+ }
+ assertNull(snapshot.next());
+ }
+ }
+
+ // Upgrades translog files shipped inside pre-2.0 backwards-compatibility index zips and checks
+ // that upgradeLegacyTranslog rejects missing / already-upgraded generations, bumps the
+ // generation, picks the right legacy reader class per version, and recovers the expected ops.
+ public void testUpgradeOldTranslogFiles() throws IOException {
+ List<Path> indexes = new ArrayList<>();
+ Path dir = getDataPath("/" + OldIndexBackwardsCompatibilityTests.class.getPackage().getName().replace('.', '/')); // the files are in the same pkg as the OldIndexBackwardsCompatibilityTests test
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, "index-*.zip")) {
+ for (Path path : stream) {
+ indexes.add(path);
+ }
+ }
+ TranslogConfig config = this.translog.getConfig();
+ Translog.TranslogGeneration gen = translog.getGeneration();
+ this.translog.close();
+ try {
+ Translog.upgradeLegacyTranslog(logger, translog.getConfig());
+ fail("no generation set");
+ } catch (IllegalArgumentException ex) {
+
+ }
+ translog.getConfig().setTranslogGeneration(gen);
+ try {
+ Translog.upgradeLegacyTranslog(logger, translog.getConfig());
+ fail("already upgraded generation set");
+ } catch (IllegalArgumentException ex) {
+
+ }
+
+ for (Path indexFile : indexes) {
+ final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
+ Version version = Version.fromString(indexName.replace("index-", ""));
+ if (version.onOrAfter(Version.V_2_0_0)) {
+ // only pre-2.0 indices carry legacy translogs
+ continue;
+ }
+ Path unzipDir = createTempDir();
+ Path unzipDataDir = unzipDir.resolve("data");
+ // decompress the index
+ try (InputStream stream = Files.newInputStream(indexFile)) {
+ TestUtil.unzip(stream, unzipDir);
+ }
+ // check it is unique
+ assertTrue(Files.exists(unzipDataDir));
+ Path[] list = FileSystemUtils.files(unzipDataDir);
+ if (list.length != 1) {
+ throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
+ }
+ // the bwc scripts packs the indices under this path
+ Path src = list[0].resolve("nodes/0/indices/" + indexName);
+ Path translog = list[0].resolve("nodes/0/indices/" + indexName).resolve("0").resolve("translog");
+
+ assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
+ assertTrue("[" + indexFile + "] missing translog dir: " + translog.toString(), Files.exists(translog));
+ Path[] tlogFiles = FileSystemUtils.files(translog);
+ assertEquals(tlogFiles.length, 1);
+ final long size = Files.size(tlogFiles[0]);
+
+ final long generation = Translog.parseIdFromFileName(tlogFiles[0]);
+ assertTrue(generation >= 1);
+ logger.debug("upgrading index {} file: {} size: {}", indexName, tlogFiles[0].getFileName(), size);
+ TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), translog, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool());
+ upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation));
+ Translog.upgradeLegacyTranslog(logger, upgradeConfig);
+ try (Translog upgraded = new Translog(upgradeConfig)) {
+ assertEquals(generation + 1, upgraded.getGeneration().translogFileGeneration);
+ assertEquals(upgraded.getRecoveredReaders().size(), 1);
+ final long headerSize;
+ if (version.before(Version.V_1_4_0)) {
+ // pre-1.4 translogs have no header / checksums
+ assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReader.class);
+ headerSize = 0;
+ } else {
+ assertTrue(upgraded.getRecoveredReaders().get(0).getClass().toString(), upgraded.getRecoveredReaders().get(0).getClass() == LegacyTranslogReaderBase.class);
+ headerSize = CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC);
+ }
+ List<Translog.Operation> operations = new ArrayList<>();
+ try (Translog.Snapshot snapshot = upgraded.newSnapshot()) {
+ Translog.Operation op = null;
+ while ((op = snapshot.next()) != null) {
+ operations.add(op);
+ }
+ }
+ // a file larger than its header must have contained at least one operation
+ if (size > headerSize) {
+ assertFalse(operations.toString(), operations.isEmpty());
+ } else {
+ assertTrue(operations.toString(), operations.isEmpty());
+ }
+ }
+ }
+ }
+
+ /**
+ * this tests a set of files that has some of the operations flushed with a buffered translog such that tlogs are truncated.
+ * 3 of the 6 files are created with ES 1.3 and the rest is created with ES 1.4 such that both the checksummed as well as the
+ * super old version of the translog without a header is tested.
+ */
+ public void testOpenAndReadTruncatedLegacyTranslogs() throws IOException {
+ Path zip = getDataPath("/org/elasticsearch/index/translog/legacy_translogs.zip");
+ Path unzipDir = createTempDir();
+ try (InputStream stream = Files.newInputStream(zip)) {
+ TestUtil.unzip(stream, unzipDir);
+ }
+ TranslogConfig config = this.translog.getConfig();
+ int count = 0;
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(unzipDir)) {
+
+ for (Path legacyTranslog : stream) {
+ logger.debug("upgrading {} ", legacyTranslog.getFileName());
+ // copy each legacy file into its own directory so it can be upgraded in isolation
+ Path directory = legacyTranslog.resolveSibling("translog_" + count++);
+ Files.createDirectories(directory);
+ Files.copy(legacyTranslog, directory.resolve(legacyTranslog.getFileName()));
+ TranslogConfig upgradeConfig = new TranslogConfig(config.getShardId(), directory, config.getIndexSettings(), config.getDurabilty(), config.getBigArrays(), config.getThreadPool());
+ try {
+ Translog.upgradeLegacyTranslog(logger, upgradeConfig);
+ fail("no generation set");
+ } catch (IllegalArgumentException ex) {
+ // expected
+ }
+ long generation = Translog.parseIdFromFileName(legacyTranslog);
+ upgradeConfig.setTranslogGeneration(new Translog.TranslogGeneration(null, generation));
+ Translog.upgradeLegacyTranslog(logger, upgradeConfig);
+ try (Translog tlog = new Translog(upgradeConfig)) {
+ List<Translog.Operation> operations = new ArrayList<>();
+ try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
+ Translog.Operation op = null;
+ while ((op = snapshot.next()) != null) {
+ operations.add(op);
+ }
+ }
+ logger.debug("num ops recovered: {} for file {} ", operations.size(), legacyTranslog.getFileName());
+ assertFalse(operations.isEmpty());
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java
new file mode 100644
index 0000000000..b5603a2fa4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for reading old and new translog files
+ */
+public class TranslogVersionTests extends ElasticsearchTestCase {
+
+ // Reads a checked-in v0 (pre-checksum) translog file and verifies the single decoded op.
+ @Test
+ public void testV0LegacyTranslogVersion() throws Exception {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v0.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
+ assertThat("a version0 stream is returned", reader instanceof LegacyTranslogReader, equalTo(true));
+ try (final Translog.Snapshot snapshot = reader.newSnapshot()) {
+ final Translog.Operation operation = snapshot.next();
+ assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.SAVE, equalTo(true));
+ Translog.Index op = (Translog.Index) operation;
+ assertThat(op.id(), equalTo("1"));
+ assertThat(op.type(), equalTo("doc"));
+ assertThat(op.source().toUtf8(), equalTo("{\"body\": \"worda wordb wordc wordd \\\"worde\\\" wordf\"}"));
+ assertThat(op.routing(), equalTo(null));
+ assertThat(op.parent(), equalTo(null));
+ assertThat(op.version(), equalTo(1L));
+ assertThat(op.timestamp(), equalTo(1407312091791L));
+ assertThat(op.ttl(), equalTo(-1L));
+ assertThat(op.versionType(), equalTo(VersionType.INTERNAL));
+
+ assertNull(snapshot.next());
+ }
+ }
+ }
+
+ // Reads a checked-in v1 (checksummed) translog file: verifies the first decoded op in detail
+ // and that the file holds 5 operations in total.
+ @Test
+ public void testV1ChecksummedTranslogVersion() throws Exception {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
+ try (final Translog.Snapshot snapshot = reader.newSnapshot()) {
+
+ assertThat("a version1 stream is returned", reader instanceof ImmutableTranslogReader, equalTo(true));
+
+ Translog.Operation operation = snapshot.next();
+
+ assertThat("operation is the correct type correctly", operation.opType() == Translog.Operation.Type.CREATE, equalTo(true));
+ Translog.Create op = (Translog.Create) operation;
+ assertThat(op.id(), equalTo("Bwiq98KFSb6YjJQGeSpeiw"));
+ assertThat(op.type(), equalTo("doc"));
+ assertThat(op.source().toUtf8(), equalTo("{\"body\": \"foo\"}"));
+ assertThat(op.routing(), equalTo(null));
+ assertThat(op.parent(), equalTo(null));
+ assertThat(op.version(), equalTo(1L));
+ assertThat(op.timestamp(), equalTo(1408627184844L));
+ assertThat(op.ttl(), equalTo(-1L));
+ assertThat(op.versionType(), equalTo(VersionType.INTERNAL));
+
+ // There are more operations
+ int opNum = 1;
+ while (snapshot.next() != null) {
+ opNum++;
+ }
+ assertThat("there should be 5 translog operations", opNum, equalTo(5));
+ }
+ }
+ }
+
+ // Corrupted header / invalid first byte / corrupted body must all surface as
+ // TranslogCorruptedException with a descriptive message.
+ @Test
+ public void testCorruptedTranslogs() throws Exception {
+ try {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ openReader(translogFile, 0);
+ fail("should have thrown an exception about the header being corrupt");
+ } catch (TranslogCorruptedException e) {
+ assertThat("translog corruption from header: " + e.getMessage(),
+ e.getMessage().contains("translog looks like version 1 or later, but has corrupted header"), equalTo(true));
+ }
+
+ try {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-invalid-first-byte.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ openReader(translogFile, 0);
+ fail("should have thrown an exception about the header being corrupt");
+ } catch (TranslogCorruptedException e) {
+ assertThat("translog corruption from header: " + e.getMessage(),
+ e.getMessage().contains("Invalid first byte in translog file, got: 1, expected 0x00 or 0x3f"), equalTo(true));
+ }
+
+ try {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
+ try (final Translog.Snapshot snapshot = reader.newSnapshot()) {
+ // body corruption is only detected while iterating
+ while(snapshot.next() != null) {
+
+ }
+ }
+ }
+ fail("should have thrown an exception about the body being corrupted");
+ } catch (TranslogCorruptedException e) {
+ assertThat("translog corruption from body: " + e.getMessage(),
+ e.getMessage().contains("translog corruption while reading from stream"), equalTo(true));
+ }
+
+ }
+
+ // A truncated translog must be reported as corruption (bad operation size), not as EOF.
+ @Test
+ public void testTruncatedTranslog() throws Exception {
+ try {
+ Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-truncated.binary");
+ assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
+ try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
+ try (final Translog.Snapshot snapshot = reader.newSnapshot()) {
+ while(snapshot.next() != null) {
+
+ }
+ }
+ }
+ fail("should have thrown an exception about the body being truncated");
+ } catch (TranslogCorruptedException e) {
+ assertThat("translog truncated: " + e.getMessage(),
+ e.getMessage().contains("operation size is corrupted must be"), equalTo(true));
+ }
+ }
+
+ // Opens an immutable reader on a raw translog file; on success the channel's ownership moves
+ // to the reader (channel is nulled so the finally block doesn't close it), on failure it is closed.
+ public ImmutableTranslogReader openReader(Path path, long id) throws IOException {
+ FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
+ try {
+ final ChannelReference raf = new ChannelReference(path, id, channel, null);
+ ImmutableTranslogReader reader = ImmutableTranslogReader.open(raf, new Checkpoint(Files.size(path), TranslogReader.UNKNOWN_OP_COUNT, id), null);
+ channel = null;
+ return reader;
+ } finally {
+ IOUtils.close(channel);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indexing/IndexActionTests.java b/core/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
new file mode 100644
index 0000000000..9201e1c381
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indexing/IndexActionTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indexing;
+
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicIntegerArray;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Integration tests for the index action: auto-generated ids and the "created" flag semantics.
+ */
+public class IndexActionTests extends ElasticsearchIntegrationTest {
+
+    /**
+     * This test tries to simulate load while creating an index and indexing documents
+     * while the index is being created.
+     */
+    @Test @Slow
+    public void testAutoGenerateIdNoDuplicates() throws Exception {
+        int numberOfIterations = scaledRandomIntBetween(10, 50);
+        for (int i = 0; i < numberOfIterations; i++) {
+            Throwable firstError = null; // remember only the first failure; later ones are logged but not rethrown
+            createIndex("test");
+            int numOfDocs = randomIntBetween(10, 100);
+            logger.info("indexing [{}] docs", numOfDocs);
+            List<IndexRequestBuilder> builders = new ArrayList<>(numOfDocs);
+            for (int j = 0; j < numOfDocs; j++) {
+                builders.add(client().prepareIndex("test", "type").setSource("field", "value")); // no explicit id -> id is auto-generated
+            }
+            indexRandom(true, builders);
+            ensureYellow("test");
+            logger.info("verifying indexed content");
+            int numOfChecks = randomIntBetween(8, 12);
+            for (int j = 0; j < numOfChecks; j++) {
+                try {
+                    logger.debug("running search with all types");
+                    assertHitCount(client().prepareSearch("test").get(), numOfDocs); // duplicate auto-ids would change the hit count
+                } catch (Throwable t) {
+                    logger.error("search for all docs types failed", t);
+                    if (firstError == null) {
+                        firstError = t;
+                    }
+                }
+                try {
+                    logger.debug("running search with a specific type");
+                    assertHitCount(client().prepareSearch("test").setTypes("type").get(), numOfDocs);
+                } catch (Throwable t) {
+                    logger.error("search for all docs of a specific type failed", t);
+                    if (firstError == null) {
+                        firstError = t;
+                    }
+                }
+            }
+            if (firstError != null) {
+                fail(firstError.getMessage());
+            }
+            internalCluster().wipeIndices("test"); // clean slate for the next iteration
+        }
+    }
+
+    @Test
+    public void testCreatedFlag() throws Exception { // isCreated() must be true only when the document did not exist before
+        createIndex("test");
+        ensureGreen();
+
+        IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+        assertTrue(indexResponse.isCreated()); // first index of id "1" -> created
+
+        indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+        assertFalse(indexResponse.isCreated()); // same id again -> update, not create
+
+        client().prepareDelete("test", "type", "1").execute().actionGet();
+
+        indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+        assertTrue(indexResponse.isCreated()); // re-index after delete -> created again
+
+    }
+
+    @Test
+    public void testCreatedFlagWithFlush() throws Exception { // like testCreatedFlag, but the delete is flushed before re-indexing
+        createIndex("test");
+        ensureGreen();
+
+        IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+        assertTrue(indexResponse.isCreated());
+
+        client().prepareDelete("test", "type", "1").execute().actionGet();
+
+        flush(); // persist the delete so the subsequent create-check cannot be answered from in-memory state alone
+
+        indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").execute().actionGet();
+        assertTrue(indexResponse.isCreated());
+    }
+
+    @Test
+    public void testCreatedFlagParallelExecution() throws Exception { // races many writers over a small id space; "created" must fire at most once per id
+        createIndex("test");
+        ensureGreen();
+
+        int threadCount = 20;
+        final int docCount = 300;
+        int taskCount = docCount * threadCount;
+
+        final AtomicIntegerArray createdCounts = new AtomicIntegerArray(docCount); // one slot per doc id, counting "created" responses
+        ExecutorService threadPool = Executors.newFixedThreadPool(threadCount);
+        List<Callable<Void>> tasks = new ArrayList<>(taskCount);
+        final Random random = getRandom();
+        for (int i=0;i< taskCount; i++ ) {
+            tasks.add(new Callable<Void>() {
+                @Override
+                public Void call() throws Exception {
+                    int docId = random.nextInt(docCount);
+                    IndexResponse indexResponse = index("test", "type", Integer.toString(docId), "field1", "value");
+                    if (indexResponse.isCreated()) createdCounts.incrementAndGet(docId);
+                    return null;
+                }
+            });
+        }
+
+        threadPool.invokeAll(tasks); // blocks until every task has completed
+
+        for (int i=0;i<docCount;i++) {
+            assertThat(createdCounts.get(i), lessThanOrEqualTo(1)); // at most one "created" per id despite concurrent writers
+        }
+        terminate(threadPool);
+    }
+
+    @Test
+    public void testCreatedFlagWithExternalVersioning() throws Exception { // an externally supplied version must not suppress the created flag
+        createIndex("test");
+        ensureGreen();
+
+        IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(123)
+                .setVersionType(VersionType.EXTERNAL).execute().actionGet();
+        assertTrue(indexResponse.isCreated());
+    }
+
+    @Test
+    public void testCreateFlagWithBulk() { // the created flag must also be reported through bulk item responses
+        createIndex("test");
+        ensureGreen();
+
+        BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+        assertThat(bulkResponse.hasFailures(), equalTo(false));
+        assertThat(bulkResponse.getItems().length, equalTo(1));
+        IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+        assertTrue(indexResponse.isCreated());
+    }
+
+    @Test
+    public void testCreateIndexWithLongName() { // the length limit is measured in bytes, not characters
+        int min = MetaDataCreateIndexService.MAX_INDEX_NAME_BYTES + 1;
+        int max = MetaDataCreateIndexService.MAX_INDEX_NAME_BYTES * 2;
+        try {
+            createIndex(randomAsciiOfLengthBetween(min, max).toLowerCase(Locale.ROOT)); // explicit create API
+            fail("exception should have been thrown on too-long index name");
+        } catch (InvalidIndexNameException e) {
+            assertThat("exception contains message about index name too long: " + e.getMessage(),
+                    e.getMessage().contains("index name is too long,"), equalTo(true));
+        }
+
+        try {
+            client().prepareIndex(randomAsciiOfLengthBetween(min, max).toLowerCase(Locale.ROOT), "mytype").setSource("foo", "bar").get(); // implicit create via indexing
+            fail("exception should have been thrown on too-long index name");
+        } catch (InvalidIndexNameException e) {
+            assertThat("exception contains message about index name too long: " + e.getMessage(),
+                    e.getMessage().contains("index name is too long,"), equalTo(true));
+        }
+
+        try {
+            // Catch chars that are more than a single byte
+            client().prepareIndex(randomAsciiOfLength(MetaDataCreateIndexService.MAX_INDEX_NAME_BYTES -1).toLowerCase(Locale.ROOT) +
+                            "Ïž".toLowerCase(Locale.ROOT),
+                    "mytype").setSource("foo", "bar").get();
+            fail("exception should have been thrown on too-long index name");
+        } catch (InvalidIndexNameException e) {
+            assertThat("exception contains message about index name too long: " + e.getMessage(),
+                    e.getMessage().contains("index name is too long,"), equalTo(true));
+        }
+
+        // we can create an index of max length
+        createIndex(randomAsciiOfLength(MetaDataCreateIndexService.MAX_INDEX_NAME_BYTES).toLowerCase(Locale.ROOT));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
new file mode 100644
index 0000000000..7cc8142e5b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indexlifecycle/IndexLifecycleActionTests.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indexlifecycle;
+
+import com.google.common.base.Function;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.createIndexRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ * Integration tests for shard allocation and cleanup as nodes join and leave the cluster.
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class IndexLifecycleActionTests extends ElasticsearchIntegrationTest {
+
+    @Slow
+    @Test
+    public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception { // 11 primaries + 1 replica each = 22 shards tracked throughout
+        Settings settings = settingsBuilder()
+                .put(SETTING_NUMBER_OF_SHARDS, 11)
+                .put(SETTING_NUMBER_OF_REPLICAS, 1)
+                .put("cluster.routing.schedule", "20ms") // reroute every 20ms so we identify new nodes fast
+                .build();
+
+        // start one server
+        logger.info("Starting sever1");
+        final String server_1 = internalCluster().startNode(settings);
+        final String node1 = getLocalNodeId(server_1);
+
+        logger.info("Creating index [test]");
+        CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test")).actionGet();
+        assertThat(createIndexResponse.isAcknowledged(), equalTo(true));
+
+        logger.info("Running Cluster Health");
+        ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); // yellow: replicas cannot be assigned on a single node
+
+        ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+        RoutingNode routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+        assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11)); // all primaries on the only node
+
+        logger.info("Starting server2");
+        // start another server
+        String server_2 = internalCluster().startNode(settings);
+
+        // first wait for 2 nodes in the cluster
+        logger.info("Running Cluster Health");
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+        final String node2 = getLocalNodeId(server_2);
+
+        // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+        client().admin().cluster().prepareReroute().execute().actionGet();
+
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForRelocatingShards(0)).actionGet();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+        assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
+        assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+        assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+        assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+        assertThat(clusterHealth.getActiveShards(), equalTo(22)); // 11 primaries + 11 replicas now all assigned
+        assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+        clusterState = client().admin().cluster().prepareState().get().getState();
+        assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2);
+        routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+        assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+        assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
+        RoutingNode routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+        assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
+        assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11)); // even split: 11 shards per node
+
+        logger.info("Starting server3");
+        // start another server
+        String server_3 = internalCluster().startNode(settings);
+
+        // first wait for 3 nodes in the cluster
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+
+        final String node3 = getLocalNodeId(server_3);
+
+
+        // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
+        client().admin().cluster().prepareReroute().execute().actionGet();
+
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForRelocatingShards(0)).actionGet();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+        assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
+        assertThat(clusterHealth.getInitializingShards(), equalTo(0));
+        assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
+        assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+        assertThat(clusterHealth.getActiveShards(), equalTo(22));
+        assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+
+        clusterState = client().admin().cluster().prepareState().get().getState();
+        assertNodesPresent(clusterState.readOnlyRoutingNodes(), node1, node2, node3);
+
+        routingNodeEntry1 = clusterState.readOnlyRoutingNodes().node(node1);
+        routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+        RoutingNode routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+        assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+        assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
+        assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8))); // 22 shards over 3 nodes cannot split evenly
+
+        assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+        assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
+
+        assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
+        assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7));
+
+        logger.info("Closing server1");
+        // kill the first server
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(server_1));
+        // verify health
+        logger.info("Running Cluster Health");
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
+        logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN)); // green again: replicas cover the lost node's shards
+
+        client().admin().cluster().prepareReroute().get();
+
+        clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForRelocatingShards(0).waitForNodes("2")).actionGet();
+        assertThat(clusterHealth.isTimedOut(), equalTo(false));
+        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+        assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
+        assertThat(clusterHealth.getActiveShards(), equalTo(22));
+        assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
+
+        clusterState = client().admin().cluster().prepareState().get().getState();
+        assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+        routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+        routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+
+        assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
+
+        assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
+        assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
+
+        assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
+        assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
+
+
+        logger.info("Deleting index [test]");
+        // last, lets delete the index
+        DeleteIndexResponse deleteIndexResponse = client().admin().indices().prepareDelete("test").execute().actionGet();
+        assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
+
+        clusterState = client().admin().cluster().prepareState().get().getState();
+        assertNodesPresent(clusterState.readOnlyRoutingNodes(), node3, node2);
+        routingNodeEntry2 = clusterState.readOnlyRoutingNodes().node(node2);
+        assertThat(routingNodeEntry2.isEmpty(), equalTo(true)); // no shards remain after the delete
+
+        routingNodeEntry3 = clusterState.readOnlyRoutingNodes().node(node3);
+        assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
+    }
+
+    private String getLocalNodeId(String name) { // resolves a node *name* to its discovery node id
+        Discovery discovery = internalCluster().getInstance(Discovery.class, name);
+        String nodeId = discovery.localNode().getId();
+        assertThat(nodeId, not(nullValue()));
+        return nodeId;
+    }
+
+    private void assertNodesPresent(RoutingNodes routingNodes, String... nodes) { // asserts the routing table contains exactly these node ids
+        final Set<String> keySet = Sets.newHashSet(Iterables.transform(routingNodes, new Function<RoutingNode, String>() {
+            @Override
+            public String apply(RoutingNode input) {
+                return input.nodeId();
+            }
+        }));
+        assertThat(keySet, containsInAnyOrder(nodes));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesCustomDataPathTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesCustomDataPathTests.java
new file mode 100644
index 0000000000..a6513b6968
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesCustomDataPathTests.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for custom data path locations and templates
+ */
+public class IndicesCustomDataPathTests extends ElasticsearchIntegrationTest {
+
+ private String path;
+
+    @Before
+    public void setup() {
+        path = createTempDir().toAbsolutePath().toString(); // fresh custom data path for every test
+    }
+
+    @After
+    public void teardown() throws Exception {
+        IOUtils.deleteFilesIgnoringExceptions(PathUtils.get(path)); // best-effort cleanup; failures are deliberately ignored
+    }
+
+    @Test
+    @TestLogging("_root:DEBUG,index:TRACE")
+    public void testDataPathCanBeChanged() throws Exception { // close index -> move data on disk -> update data_path setting -> reopen
+        final String INDEX = "idx";
+        Path root = createTempDir();
+        Path startDir = root.resolve("start");
+        Path endDir = root.resolve("end");
+        logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
+        logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
+        // temp dirs are automatically created, but the end dir is what
+        // startDir is going to be renamed as, so it needs to be deleted
+        // otherwise we get all sorts of errors about the directory
+        // already existing
+        IOUtils.rm(endDir);
+
+        Settings.Builder sb = Settings.builder().put(IndexMetaData.SETTING_DATA_PATH,
+                startDir.toAbsolutePath().toString());
+        Settings.Builder sb2 = Settings.builder().put(IndexMetaData.SETTING_DATA_PATH,
+                endDir.toAbsolutePath().toString());
+
+        logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
+        client().admin().indices().prepareCreate(INDEX).setSettings(sb).get();
+        ensureGreen(INDEX);
+
+        indexRandom(true, client().prepareIndex(INDEX, "doc", "1").setSource("{\"body\": \"foo\"}"));
+
+        SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+        assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+
+        logger.info("--> closing the index [{}]", INDEX);
+        client().admin().indices().prepareClose(INDEX).get();
+        logger.info("--> index closed, re-opening...");
+        client().admin().indices().prepareOpen(INDEX).get();
+        logger.info("--> index re-opened");
+        ensureGreen(INDEX);
+
+        resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+        assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); // sanity: close/open alone does not lose data
+
+        // Now, try closing and changing the settings
+
+        logger.info("--> closing the index [{}]", INDEX);
+        client().admin().indices().prepareClose(INDEX).get();
+
+        logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
+        assert Files.exists(endDir) == false : "end directory should not exist!";
+        Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
+
+        logger.info("--> updating settings...");
+        client().admin().indices().prepareUpdateSettings(INDEX)
+                .setSettings(sb2)
+                .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) // the index is closed, so closed indices must be targeted
+                .get();
+
+        assert Files.exists(startDir) == false : "start dir shouldn't exist";
+
+        logger.info("--> settings updated and files moved, re-opening index");
+        client().admin().indices().prepareOpen(INDEX).get();
+        logger.info("--> index re-opened");
+        ensureGreen(INDEX);
+
+        resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+        assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); // data survived the path change
+
+        assertAcked(client().admin().indices().prepareDelete(INDEX));
+        assertPathHasBeenCleared(startDir.toAbsolutePath().toString());
+        assertPathHasBeenCleared(endDir.toAbsolutePath().toString());
+    }
+
+    @Test
+    public void testIndexCreatedWithCustomPathAndTemplate() throws Exception { // an index created on a custom data_path must be fully cleaned up on delete
+        final String INDEX = "myindex2";
+
+        logger.info("--> creating an index with data_path [{}]", path);
+        Settings.Builder sb = Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, path);
+
+        client().admin().indices().prepareCreate(INDEX).setSettings(sb).get();
+        ensureGreen(INDEX);
+
+        indexRandom(true, client().prepareIndex(INDEX, "doc", "1").setSource("{\"body\": \"foo\"}"));
+
+        SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
+        assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
+        assertAcked(client().admin().indices().prepareDelete(INDEX));
+        assertPathHasBeenCleared(path); // the custom path must not retain stale shard data
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
new file mode 100644
index 0000000000..910902b7b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+public class IndicesLifecycleListenerSingleNodeTests extends ElasticsearchSingleNodeTest {
+
+    @Override
+    protected boolean resetNodeAfterTest() {
+        return true; // the listener registered in the test below must not leak into other tests
+    }
+
+    @Test
+    public void testCloseDeleteCallback() throws Throwable {
+        // the counter encodes the expected callback order: beforeClose(1) -> beforeDelete(2) -> beforeShardDeleted(3)
+        final AtomicInteger counter = new AtomicInteger(1);
+        assertAcked(client().admin().indices().prepareCreate("test")
+                .setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0)); // single shard so each shard callback fires exactly once
+        ensureGreen();
+        getInstanceFromNode(IndicesLifecycle.class).addListener(new IndicesLifecycle.Listener() {
+            @Override
+            public void afterIndexClosed(Index index, @IndexSettings Settings indexSettings) {
+                assertEquals(counter.get(), 5); // NOTE(review): args are (actual, expected) — swapped vs JUnit convention; only affects failure messages
+                counter.incrementAndGet();
+            }
+
+            @Override
+            public void beforeIndexClosed(IndexService indexService) {
+                assertEquals(counter.get(), 1); // first callback on delete of an open index
+                counter.incrementAndGet();
+            }
+
+            @Override
+            public void afterIndexDeleted(Index index, @IndexSettings Settings indexSettings) {
+                assertEquals(counter.get(), 6); // last callback
+                counter.incrementAndGet();
+            }
+
+            @Override
+            public void beforeIndexDeleted(IndexService indexService) {
+                assertEquals(counter.get(), 2);
+                counter.incrementAndGet();
+            }
+
+            @Override
+            public void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {
+                assertEquals(counter.get(), 3);
+                counter.incrementAndGet();
+            }
+
+            @Override
+            public void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) {
+                assertEquals(counter.get(), 4);
+                counter.incrementAndGet();
+            }
+        });
+        assertAcked(client().admin().indices().prepareDelete("test").get());
+        assertEquals(7, counter.get()); // all six callbacks fired, each exactly once
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
new file mode 100644
index 0000000000..5cccd4436b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Maps;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION;
+import static org.elasticsearch.common.settings.Settings.builder;
+import static org.elasticsearch.index.shard.IndexShardState.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class IndicesLifecycleListenerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBeforeIndexAddedToCluster() throws Exception {
+ String node1 = internalCluster().startNode();
+ String node2 = internalCluster().startNode();
+ String node3 = internalCluster().startNode();
+
+ final AtomicInteger beforeAddedCount = new AtomicInteger(0);
+ final AtomicInteger allCreatedCount = new AtomicInteger(0);
+
+ IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() {
+ @Override
+ public void beforeIndexAddedToCluster(Index index, @IndexSettings Settings indexSettings) {
+ beforeAddedCount.incrementAndGet();
+ if (indexSettings.getAsBoolean("index.fail", false)) {
+ throw new ElasticsearchException("failing on purpose");
+ }
+ }
+
+ @Override
+ public void beforeIndexCreated(Index index, @IndexSettings Settings indexSettings) {
+ allCreatedCount.incrementAndGet();
+ }
+ };
+
+ internalCluster().getInstance(IndicesLifecycle.class, node1).addListener(listener);
+ internalCluster().getInstance(IndicesLifecycle.class, node2).addListener(listener);
+ internalCluster().getInstance(IndicesLifecycle.class, node3).addListener(listener);
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get();
+ ensureGreen("test");
+ assertThat("beforeIndexAddedToCluster called only once", beforeAddedCount.get(), equalTo(1));
+ assertThat("beforeIndexCreated called on each data node", allCreatedCount.get(), greaterThanOrEqualTo(3));
+
+ try {
+ client().admin().indices().prepareCreate("failed").setSettings("index.fail", true).get();
+ fail("should have thrown an exception during creation");
+ } catch (Exception e) {
+ assertTrue(e.getMessage().contains("failing on purpose"));
+ ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+ assertFalse(resp.getState().routingTable().indicesRouting().keySet().contains("failed"));
+ }
+ }
+
+ /**
+ * Tests that if an *index* structure creation fails on relocation to a new node, the shard
+ * is not stuck but properly failed.
+ */
+ @Test
+ public void testIndexShardFailedOnRelocation() throws Throwable {
+ String node1 = internalCluster().startNode();
+ client().admin().indices().prepareCreate("index1").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen("index1");
+ String node2 = internalCluster().startNode();
+ internalCluster().getInstance(IndicesLifecycle.class, node2).addListener(new IndexShardStateChangeListener() {
+ @Override
+ public void beforeIndexCreated(Index index, @IndexSettings Settings indexSettings) {
+ throw new RuntimeException("FAIL");
+ }
+ });
+ client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("index1", 0), node1, node2)).get();
+ ensureGreen("index1");
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ List<MutableShardRouting> shard = state.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED);
+ assertThat(shard, hasSize(1));
+ assertThat(state.nodes().resolveNode(shard.get(0).currentNodeId()).getName(), Matchers.equalTo(node1));
+ }
+
+ @Test
+ public void testIndexStateShardChanged() throws Throwable {
+
+ //start with a single node
+ String node1 = internalCluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode1 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ internalCluster().getInstance(IndicesLifecycle.class, node1).addListener(stateChangeListenerNode1);
+
+ //create an index that should fail
+ try {
+ client().admin().indices().prepareCreate("failed").setSettings(SETTING_NUMBER_OF_SHARDS, 1, "index.fail", true).get();
+ fail("should have thrown an exception");
+ } catch (ElasticsearchException e) {
+ assertTrue(e.getMessage().contains("failing on purpose"));
+ ClusterStateResponse resp = client().admin().cluster().prepareState().get();
+ assertFalse(resp.getState().routingTable().indicesRouting().keySet().contains("failed"));
+ }
+
+
+ //create an index
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 6, SETTING_NUMBER_OF_REPLICAS, 0));
+ ensureGreen();
+ assertThat(stateChangeListenerNode1.creationSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6));
+ assertThat(stateChangeListenerNode1.creationSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(0));
+
+ //new shards got started
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+
+ //add a node: 3 out of the 6 shards will be relocated to it
+ //disable allocation before starting a new node, as we need to register the listener first
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, true)));
+ String node2 = internalCluster().startNode();
+ IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener();
+ //add a listener that keeps track of the shard state changes
+ internalCluster().getInstance(IndicesLifecycle.class, node2).addListener(stateChangeListenerNode2);
+ //re-enable allocation
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setPersistentSettings(builder().put(CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION, false)));
+ ensureGreen();
+
+ //the 3 relocated shards get closed on the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED);
+ //the 3 relocated shards get created on the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+
+ //increase replicas from 0 to 1
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder().put(SETTING_NUMBER_OF_REPLICAS, 1)));
+ ensureGreen();
+
+ //3 replicas are allocated to the first node
+ assertShardStatesMatch(stateChangeListenerNode1, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+ //3 replicas are allocated to the second node
+ assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);
+
+
+ //close the index
+ assertAcked(client().admin().indices().prepareClose("test"));
+
+ assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6));
+ assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(0));
+
+ assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED);
+ assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED);
+ }
+
+ private static void assertShardStatesMatch(final IndexShardStateChangeListener stateChangeListener, final int numShards, final IndexShardState... shardStates)
+ throws InterruptedException {
+
+ Predicate<Object> waitPredicate = new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ if (stateChangeListener.shardStates.size() != numShards) {
+ return false;
+ }
+ for (List<IndexShardState> indexShardStates : stateChangeListener.shardStates.values()) {
+ if (indexShardStates == null || indexShardStates.size() != shardStates.length) {
+ return false;
+ }
+ for (int i = 0; i < shardStates.length; i++) {
+ if (indexShardStates.get(i) != shardStates[i]) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+ };
+ if (!awaitBusy(waitPredicate, 1, TimeUnit.MINUTES)) {
+ fail("failed to observe expected shard states\n" +
+ "expected: [" + numShards + "] shards with states: " + Strings.arrayToCommaDelimitedString(shardStates) + "\n" +
+ "observed:\n" + stateChangeListener);
+ }
+
+ stateChangeListener.shardStates.clear();
+ }
+
+ private static class IndexShardStateChangeListener extends IndicesLifecycle.Listener {
+ //we keep track of all the states (ordered) a shard goes through
+ final ConcurrentMap<ShardId, List<IndexShardState>> shardStates = Maps.newConcurrentMap();
+ Settings creationSettings = Settings.EMPTY;
+ Settings afterCloseSettings = Settings.EMPTY;
+
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState newState, @Nullable String reason) {
+ List<IndexShardState> shardStates = this.shardStates.putIfAbsent(indexShard.shardId(),
+ new CopyOnWriteArrayList<>(new IndexShardState[]{newState}));
+ if (shardStates != null) {
+ shardStates.add(newState);
+ }
+ }
+
+ @Override
+ public void beforeIndexCreated(Index index, @IndexSettings Settings indexSettings) {
+ this.creationSettings = indexSettings;
+ if (indexSettings.getAsBoolean("index.fail", false)) {
+ throw new ElasticsearchException("failing on purpose");
+ }
+ }
+
+ @Override
+ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, @IndexSettings Settings indexSettings) {
+ this.afterCloseSettings = indexSettings;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ for (Map.Entry<ShardId, List<IndexShardState>> entry : shardStates.entrySet()) {
+ sb.append(entry.getKey()).append(" --> ").append(Strings.collectionToCommaDelimitedString(entry.getValue())).append("\n");
+ }
+ return sb.toString();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java
new file mode 100644
index 0000000000..baec5760b7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java
@@ -0,0 +1,915 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistRequestBuilder;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeRequestBuilder;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequestBuilder;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder;
+import org.elasticsearch.action.count.CountRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.MultiSearchRequestBuilder;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.suggest.SuggestBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSpecifiedIndexUnavailable_multipleIndices() throws Exception {
+ createIndex("test1");
+ ensureYellow();
+
+ // Verify defaults
+ verify(search("test1", "test2"), true);
+ verify(msearch(null, "test1", "test2"), true);
+ verify(count("test1", "test2"), true);
+ verify(clearCache("test1", "test2"), true);
+ verify(_flush("test1", "test2"),true);
+ verify(segments("test1", "test2"), true);
+ verify(stats("test1", "test2"), true);
+ verify(optimize("test1", "test2"), true);
+ verify(refresh("test1", "test2"), true);
+ verify(validateQuery("test1", "test2"), true);
+ verify(aliasExists("test1", "test2"), true);
+ verify(typesExists("test1", "test2"), true);
+ verify(percolate("test1", "test2"), true);
+ verify(mpercolate(null, "test1", "test2"), false);
+ verify(suggest("test1", "test2"), true);
+ verify(getAliases("test1", "test2"), true);
+ verify(getFieldMapping("test1", "test2"), true);
+ verify(getMapping("test1", "test2"), true);
+ verify(getWarmer("test1", "test2"), true);
+ verify(getSettings("test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strictExpandOpen();
+ verify(search("test1", "test2").setIndicesOptions(options), true);
+ verify(msearch(options, "test1", "test2"), true);
+ verify(count("test1", "test2").setIndicesOptions(options), true);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), true);
+ verify(_flush("test1", "test2").setIndicesOptions(options),true);
+ verify(segments("test1", "test2").setIndicesOptions(options), true);
+ verify(stats("test1", "test2").setIndicesOptions(options), true);
+ verify(optimize("test1", "test2").setIndicesOptions(options), true);
+ verify(refresh("test1", "test2").setIndicesOptions(options), true);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), true);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), true);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), true);
+ verify(percolate("test1", "test2").setIndicesOptions(options), true);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), true);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), true);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), true);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), true);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenientExpandOpen();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options), false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strictExpandOpen();
+ assertAcked(prepareCreate("test2"));
+ ensureYellow();
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(msearch(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+ verify(clearCache("test1", "test2").setIndicesOptions(options), false);
+ verify(_flush("test1", "test2").setIndicesOptions(options),false);
+ verify(segments("test1", "test2").setIndicesOptions(options), false);
+ verify(stats("test1", "test2").setIndicesOptions(options), false);
+ verify(optimize("test1", "test2").setIndicesOptions(options), false);
+ verify(refresh("test1", "test2").setIndicesOptions(options), false);
+ verify(validateQuery("test1", "test2").setIndicesOptions(options), false);
+ verify(aliasExists("test1", "test2").setIndicesOptions(options), false);
+ verify(typesExists("test1", "test2").setIndicesOptions(options), false);
+ verify(percolate("test1", "test2").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false);
+ verify(suggest("test1", "test2").setIndicesOptions(options), false);
+ verify(getAliases("test1", "test2").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getMapping("test1", "test2").setIndicesOptions(options), false);
+ verify(getWarmer("test1", "test2").setIndicesOptions(options), false);
+ verify(getSettings("test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testSpecifiedIndexUnavailable_singleIndexThatIsClosed() throws Exception {
+ assertAcked(prepareCreate("test1"));
+ // we need to wait until all shards are allocated since recovery from
+ // gateway will fail unless the majority of the replicas were allocated
+ // pre-closing. With lots of replicas this will fail.
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareClose("test1"));
+
+ IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();
+ verify(search("test1").setIndicesOptions(options), true);
+ verify(msearch(options, "test1"), true);
+ verify(count("test1").setIndicesOptions(options), true);
+ verify(clearCache("test1").setIndicesOptions(options), true);
+ verify(_flush("test1").setIndicesOptions(options),true);
+ verify(segments("test1").setIndicesOptions(options), true);
+ verify(stats("test1").setIndicesOptions(options), true);
+ verify(optimize("test1").setIndicesOptions(options), true);
+ verify(refresh("test1").setIndicesOptions(options), true);
+ verify(validateQuery("test1").setIndicesOptions(options), true);
+ verify(aliasExists("test1").setIndicesOptions(options), true);
+ verify(typesExists("test1").setIndicesOptions(options), true);
+ verify(percolate("test1").setIndicesOptions(options), true);
+ verify(mpercolate(options, "test1").setIndicesOptions(options), true);
+ verify(suggest("test1").setIndicesOptions(options), true);
+ verify(getAliases("test1").setIndicesOptions(options), true);
+ verify(getFieldMapping("test1").setIndicesOptions(options), true);
+ verify(getMapping("test1").setIndicesOptions(options), true);
+ verify(getWarmer("test1").setIndicesOptions(options), true);
+ verify(getSettings("test1").setIndicesOptions(options), true);
+
+ options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
+ verify(search("test1").setIndicesOptions(options), false);
+ verify(msearch(options, "test1"), false);
+ verify(count("test1").setIndicesOptions(options), false);
+ verify(clearCache("test1").setIndicesOptions(options), false);
+ verify(_flush("test1").setIndicesOptions(options),false);
+ verify(segments("test1").setIndicesOptions(options), false);
+ verify(stats("test1").setIndicesOptions(options), false);
+ verify(optimize("test1").setIndicesOptions(options), false);
+ verify(refresh("test1").setIndicesOptions(options), false);
+ verify(validateQuery("test1").setIndicesOptions(options), false);
+ verify(aliasExists("test1").setIndicesOptions(options), false);
+ verify(typesExists("test1").setIndicesOptions(options), false);
+ verify(percolate("test1").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1").setIndicesOptions(options), false);
+ verify(suggest("test1").setIndicesOptions(options), false);
+ verify(getAliases("test1").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1").setIndicesOptions(options), false);
+ verify(getMapping("test1").setIndicesOptions(options), false);
+ verify(getWarmer("test1").setIndicesOptions(options), false);
+ verify(getSettings("test1").setIndicesOptions(options), false);
+
+ assertAcked(client().admin().indices().prepareOpen("test1"));
+ ensureYellow();
+
+ options = IndicesOptions.strictExpandOpenAndForbidClosed();
+ verify(search("test1").setIndicesOptions(options), false);
+ verify(msearch(options, "test1"), false);
+ verify(count("test1").setIndicesOptions(options), false);
+ verify(clearCache("test1").setIndicesOptions(options), false);
+ verify(_flush("test1").setIndicesOptions(options),false);
+ verify(segments("test1").setIndicesOptions(options), false);
+ verify(stats("test1").setIndicesOptions(options), false);
+ verify(optimize("test1").setIndicesOptions(options), false);
+ verify(refresh("test1").setIndicesOptions(options), false);
+ verify(validateQuery("test1").setIndicesOptions(options), false);
+ verify(aliasExists("test1").setIndicesOptions(options), false);
+ verify(typesExists("test1").setIndicesOptions(options), false);
+ verify(percolate("test1").setIndicesOptions(options), false);
+ verify(mpercolate(options, "test1").setIndicesOptions(options), false);
+ verify(suggest("test1").setIndicesOptions(options), false);
+ verify(getAliases("test1").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1").setIndicesOptions(options), false);
+ verify(getMapping("test1").setIndicesOptions(options), false);
+ verify(getWarmer("test1").setIndicesOptions(options), false);
+ verify(getSettings("test1").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testSpecifiedIndexUnavailable_singleIndex() throws Exception {
+ IndicesOptions options = IndicesOptions.strictExpandOpenAndForbidClosed();
+ verify(search("test1").setIndicesOptions(options), true);
+ verify(msearch(options, "test1"), true);
+ verify(count("test1").setIndicesOptions(options), true);
+ verify(clearCache("test1").setIndicesOptions(options), true);
+ verify(_flush("test1").setIndicesOptions(options),true);
+ verify(segments("test1").setIndicesOptions(options), true);
+ verify(stats("test1").setIndicesOptions(options), true);
+ verify(optimize("test1").setIndicesOptions(options), true);
+ verify(refresh("test1").setIndicesOptions(options), true);
+ verify(validateQuery("test1").setIndicesOptions(options), true);
+ verify(aliasExists("test1").setIndicesOptions(options), true);
+ verify(typesExists("test1").setIndicesOptions(options), true);
+ verify(percolate("test1").setIndicesOptions(options), true);
+ verify(suggest("test1").setIndicesOptions(options), true);
+ verify(getAliases("test1").setIndicesOptions(options), true);
+ verify(getFieldMapping("test1").setIndicesOptions(options), true);
+ verify(getMapping("test1").setIndicesOptions(options), true);
+ verify(getWarmer("test1").setIndicesOptions(options), true);
+ verify(getSettings("test1").setIndicesOptions(options), true);
+
+ options = IndicesOptions.fromOptions(true, options.allowNoIndices(), options.expandWildcardsOpen(), options.expandWildcardsClosed(), options);
+ verify(search("test1").setIndicesOptions(options), false);
+ verify(msearch(options, "test1"), false);
+ verify(count("test1").setIndicesOptions(options), false);
+ verify(clearCache("test1").setIndicesOptions(options), false);
+ verify(_flush("test1").setIndicesOptions(options),false);
+ verify(segments("test1").setIndicesOptions(options), false);
+ verify(stats("test1").setIndicesOptions(options), false);
+ verify(optimize("test1").setIndicesOptions(options), false);
+ verify(refresh("test1").setIndicesOptions(options), false);
+ verify(validateQuery("test1").setIndicesOptions(options), false);
+ verify(aliasExists("test1").setIndicesOptions(options), false);
+ verify(typesExists("test1").setIndicesOptions(options), false);
+ verify(percolate("test1").setIndicesOptions(options), false);
+ verify(suggest("test1").setIndicesOptions(options), false);
+ verify(getAliases("test1").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1").setIndicesOptions(options), false);
+ verify(getMapping("test1").setIndicesOptions(options), false);
+ verify(getWarmer("test1").setIndicesOptions(options), false);
+ verify(getSettings("test1").setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("test1"));
+ ensureYellow();
+
+ options = IndicesOptions.strictExpandOpenAndForbidClosed();
+ verify(search("test1").setIndicesOptions(options), false);
+ verify(msearch(options, "test1"), false);
+ verify(count("test1").setIndicesOptions(options), false);
+ verify(clearCache("test1").setIndicesOptions(options), false);
+ verify(_flush("test1").setIndicesOptions(options),false);
+ verify(segments("test1").setIndicesOptions(options), false);
+ verify(stats("test1").setIndicesOptions(options), false);
+ verify(optimize("test1").setIndicesOptions(options), false);
+ verify(refresh("test1").setIndicesOptions(options), false);
+ verify(validateQuery("test1").setIndicesOptions(options), false);
+ verify(aliasExists("test1").setIndicesOptions(options), false);
+ verify(typesExists("test1").setIndicesOptions(options), false);
+ verify(percolate("test1").setIndicesOptions(options), false);
+ verify(suggest("test1").setIndicesOptions(options), false);
+ verify(getAliases("test1").setIndicesOptions(options), false);
+ verify(getFieldMapping("test1").setIndicesOptions(options), false);
+ verify(getMapping("test1").setIndicesOptions(options), false);
+ verify(getWarmer("test1").setIndicesOptions(options), false);
+ verify(getSettings("test1").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testSpecifiedIndexUnavailable_snapshotRestore() throws Exception {
+ createIndex("test1");
+ ensureGreen("test1");
+ waitForRelocation();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
+ verify(snapshot("snap2", "test1", "test2"), true);
+ verify(restore("snap1", "test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.strictExpandOpen();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), true);
+ verify(restore("snap1", "test1", "test2").setIndicesOptions(options), true);
+
+ options = IndicesOptions.lenientExpandOpen();
+ verify(snapshot("snap2", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap2", "test1", "test2").setIndicesOptions(options), false);
+
+ options = IndicesOptions.strictExpandOpen();
+ createIndex("test2");
+ //TODO: temporary work-around for #5531
+ ensureGreen("test2");
+ waitForRelocation();
+ verify(snapshot("snap3", "test1", "test2").setIndicesOptions(options), false);
+ verify(restore("snap3", "test1", "test2").setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour() throws Exception {
+ // Verify defaults for wildcards, when specifying no indices (*, _all, /)
+ String[] indices = Strings.EMPTY_ARRAY;
+ verify(search(indices), false);
+ verify(msearch(null, indices), false);
+ verify(count(indices), false);
+ verify(clearCache(indices), false);
+ verify(_flush(indices),false);
+ verify(segments(indices), false);
+ verify(stats(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices), false);
+
+ // Now force allow_no_indices=true
+ IndicesOptions options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false);
+ verify(msearch(options, indices).setIndicesOptions(options), false);
+ verify(count(indices).setIndicesOptions(options), false);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options),false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("foobar"));
+ client().prepareIndex("foobar", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+
+ // Verify defaults for wildcards, with one wildcard expression and one existing index
+ indices = new String[]{"foo*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices),false);
+ verify(segments(indices), false);
+ verify(stats(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), false);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+
+ // Verify defaults for wildcards, with two wildcard expressions and one existing index
+ indices = new String[]{"foo*", "bar*"};
+ verify(search(indices), false, 1);
+ verify(msearch(null, indices), false, 1);
+ verify(count(indices), false, 1);
+ verify(clearCache(indices), false);
+ verify(_flush(indices),false);
+ verify(segments(indices), false);
+ verify(stats(indices), false);
+ verify(optimize(indices), false);
+ verify(refresh(indices), false);
+ verify(validateQuery(indices), true);
+ verify(aliasExists(indices), false);
+ verify(typesExists(indices), false);
+ verify(percolate(indices), false);
+ verify(mpercolate(null, indices), false);
+ verify(suggest(indices), false);
+ verify(getAliases(indices), false);
+ verify(getFieldMapping(indices), false);
+ verify(getMapping(indices), false);
+ verify(getWarmer(indices), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+
+ // Now force allow_no_indices=true
+ options = IndicesOptions.fromOptions(false, true, true, false);
+ verify(search(indices).setIndicesOptions(options), false, 1);
+ verify(msearch(options, indices).setIndicesOptions(options), false, 1);
+ verify(count(indices).setIndicesOptions(options), false, 1);
+ verify(clearCache(indices).setIndicesOptions(options), false);
+ verify(_flush(indices).setIndicesOptions(options),false);
+ verify(segments(indices).setIndicesOptions(options), false);
+ verify(stats(indices).setIndicesOptions(options), false);
+ verify(optimize(indices).setIndicesOptions(options), false);
+ verify(refresh(indices).setIndicesOptions(options), false);
+ verify(validateQuery(indices).setIndicesOptions(options), false);
+ verify(aliasExists(indices).setIndicesOptions(options), false);
+ verify(typesExists(indices).setIndicesOptions(options), false);
+ verify(percolate(indices).setIndicesOptions(options), false);
+ verify(mpercolate(options, indices), false);
+ verify(suggest(indices).setIndicesOptions(options), false);
+ verify(getAliases(indices).setIndicesOptions(options), false);
+ verify(getFieldMapping(indices).setIndicesOptions(options), false);
+ verify(getMapping(indices).setIndicesOptions(options), false);
+ verify(getWarmer(indices).setIndicesOptions(options), false);
+ verify(getSettings(indices).setIndicesOptions(options), false);
+ }
+
+ @Test
+ public void testWildcardBehaviour_snapshotRestore() throws Exception {
+ createIndex("foobar");
+ ensureGreen("foobar");
+ waitForRelocation();
+
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("dummy-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+ client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get();
+
+ IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true);
+ verify(restore("snap1", "foo*", "bar*").setIndicesOptions(options), true);
+
+ options = IndicesOptions.strictExpandOpen();
+ verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap2", "foo*", "bar*").setIndicesOptions(options), false);
+
+ assertAcked(prepareCreate("barbaz"));
+ //TODO: temporary work-around for #5531
+ ensureGreen("barbaz");
+ waitForRelocation();
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+ verify(restore("snap3", "foo*", "bar*").setIndicesOptions(options), false);
+
+ options = IndicesOptions.fromOptions(false, false, true, false);
+ verify(snapshot("snap4", "foo*", "baz*").setIndicesOptions(options), true);
+ verify(restore("snap3", "foo*", "baz*").setIndicesOptions(options), true);
+ }
+
+ @Test
+ public void testAllMissing_lenient() throws Exception {
+ createIndex("test1");
+ client().prepareIndex("test1", "type", "1").setSource("k", "v").setRefresh(true).execute().actionGet();
+ SearchResponse response = client().prepareSearch("test2")
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 0l);
+
+ response = client().prepareSearch("test2","test3").setQuery(matchAllQuery())
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .execute().actionGet();
+ assertHitCount(response, 0l);
+
+ //you should still be able to run empty searches without things blowing up
+ response = client().prepareSearch()
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ assertHitCount(response, 1l);
+ }
+
+ @Test
+ public void testAllMissing_strict() throws Exception {
+ createIndex("test1");
+ ensureYellow();
+ try {
+ client().prepareSearch("test2")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
+ }
+
+ try {
+ client().prepareSearch("test2","test3")
+ .setQuery(matchAllQuery())
+ .execute().actionGet();
+ fail("Exception should have been thrown.");
+ } catch (IndexMissingException e) {
+ }
+
+ //you should still be able to run empty searches without things blowing up
+ client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ }
+
+ @Test
+ // For now don't handle closed indices
+ public void testCloseApi_specifiedIndices() throws Exception {
+ createIndex("test1", "test2");
+ ensureGreen();
+ verify(search("test1", "test2"), false);
+ verify(count("test1", "test2"), false);
+ assertAcked(client().admin().indices().prepareClose("test2").get());
+
+ verify(search("test1", "test2"), true);
+ verify(count("test1", "test2"), true);
+
+ IndicesOptions options = IndicesOptions.fromOptions(true, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed());
+ verify(search("test1", "test2").setIndicesOptions(options), false);
+ verify(count("test1", "test2").setIndicesOptions(options), false);
+
+ verify(search(), false);
+ verify(count(), false);
+
+ verify(search("t*"), false);
+ verify(count("t*"), false);
+ }
+
+ @Test
+ public void testCloseApi_wildcards() throws Exception {
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureGreen();
+
+ verify(client().admin().indices().prepareClose("bar*"), false);
+ verify(client().admin().indices().prepareClose("bar*"), true);
+
+ verify(client().admin().indices().prepareClose("foo*"), false);
+ verify(client().admin().indices().prepareClose("foo*"), true);
+ verify(client().admin().indices().prepareClose("_all"), true);
+
+ verify(client().admin().indices().prepareOpen("bar*"), false);
+ verify(client().admin().indices().prepareOpen("_all"), false);
+ verify(client().admin().indices().prepareOpen("_all"), true);
+ }
+
+ @Test
+ public void testDeleteIndex() throws Exception {
+ createIndex("foobar");
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo"), true);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+ verify(client().admin().indices().prepareDelete("foobar"), false);
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testDeleteIndex_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDelete("_all"), false);
+
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(true));
+
+ verify(client().admin().indices().prepareDelete("foo*"), false);
+
+ verify(client().admin().indices().prepareDelete("_all"), false);
+ assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(false));
+ assertThat(client().admin().indices().prepareExists("barbaz").get().isExists(), equalTo(false));
+ }
+
+ @Test
+ public void testPutWarmer() throws Exception {
+ createIndex("foobar");
+ ensureYellow();
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foobar").setQuery(QueryBuilders.matchAllQuery())), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+
+ }
+
+ @Test
+ public void testPutWarmer_wildcard() throws Exception {
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutWarmer("warmer1").setSearchRequest(client().prepareSearch().setIndices("foo*").setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer1").get().getWarmers().size(), equalTo(0));
+
+ verify(client().admin().indices().preparePutWarmer("warmer2").setSearchRequest(client().prepareSearch().setIndices().setQuery(QueryBuilders.matchAllQuery())), false);
+
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("warmer2").get().getWarmers().size(), equalTo(1));
+
+ }
+
+ @Test
+ public void testPutAlias() throws Exception {
+ createIndex("foobar");
+ ensureYellow();
+ verify(client().admin().indices().prepareAliases().addAlias("foobar", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+
+ }
+
+ @Test
+ public void testPutAlias_wildcard() throws Exception {
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureYellow();
+
+ verify(client().admin().indices().prepareAliases().addAlias("foo*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(false));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(false));
+
+ verify(client().admin().indices().prepareAliases().addAlias("*", "foobar_alias"), false);
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foo").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("foobar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("bar").get().exists(), equalTo(true));
+ assertThat(client().admin().indices().prepareAliasesExist("foobar_alias").setIndices("barbaz").get().exists(), equalTo(true));
+
+ }
+
+ @Test
+ public void testDeleteWarmer() throws Exception {
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"typ1"}, false, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo").setNames("test1"), true);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foobar").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void testDeleteWarmer_wildcard() throws Exception {
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), true);
+
+ IndexWarmersMetaData.Entry entry = new IndexWarmersMetaData.Entry(
+ "test1", new String[]{"type1"}, false, new BytesArray("{\"query\" : { \"match_all\" : {}}}")
+ );
+ assertAcked(prepareCreate("foo").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("foobar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("bar").addCustom(new IndexWarmersMetaData(entry)));
+ assertAcked(prepareCreate("barbaz").addCustom(new IndexWarmersMetaData(entry)));
+ ensureYellow();
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("foo").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("foobar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(1));
+
+ assertAcked(client().admin().indices().prepareDelete("foo*"));
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("foo*").setNames("test1"), true);
+
+ verify(client().admin().indices().prepareDeleteWarmer().setIndices("_all").setNames("test1"), false);
+ assertThat(client().admin().indices().prepareGetWarmers("bar").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void testPutMapping() throws Exception {
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true);
+ verify(client().admin().indices().preparePutMapping("_all").setType("type1").setSource("field", "type=string"), true);
+
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureYellow();
+
+ verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("b*").setType("type1").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type1"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type1"), notNullValue());
+ verify(client().admin().indices().preparePutMapping("_all").setType("type2").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type2"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type2"), notNullValue());
+ verify(client().admin().indices().preparePutMapping().setType("type3").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("foo").get().mappings().get("foo").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("foobar").get().mappings().get("foobar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("bar").get().mappings().get("bar").get("type3"), notNullValue());
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type3"), notNullValue());
+
+
+ verify(client().admin().indices().preparePutMapping("c*").setType("type1").setSource("field", "type=string"), true);
+
+ assertAcked(client().admin().indices().prepareClose("barbaz").get());
+ verify(client().admin().indices().preparePutMapping("barbaz").setType("type4").setSource("field", "type=string"), false);
+ assertThat(client().admin().indices().prepareGetMappings("barbaz").get().mappings().get("barbaz").get("type4"), notNullValue());
+ }
+
+ @Test
+ public void testUpdateSettings() throws Exception {
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put("a", "b")), true);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(Settings.builder().put("a", "b")), true);
+
+ createIndex("foo", "foobar", "bar", "barbaz");
+ ensureGreen();
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+
+ verify(client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("bar*").setSettings(Settings.builder().put("a", "b")), false);
+ verify(client().admin().indices().prepareUpdateSettings("_all").setSettings(Settings.builder().put("c", "d")), false);
+
+ GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("foo").get();
+ assertThat(settingsResponse.getSetting("foo", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("bar*").get();
+ assertThat(settingsResponse.getSetting("bar", "index.a"), equalTo("b"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.a"), equalTo("b"));
+ settingsResponse = client().admin().indices().prepareGetSettings("_all").get();
+ assertThat(settingsResponse.getSetting("foo", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("foobar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("bar", "index.c"), equalTo("d"));
+ assertThat(settingsResponse.getSetting("barbaz", "index.c"), equalTo("d"));
+
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ try {
+ verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(Settings.builder().put("e", "f")), false);
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices [[barbaz]]"));
+ }
+ verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(Settings.builder().put("a", "b")), true);
+ }
+
+ private static SearchRequestBuilder search(String... indices) {
+ return client().prepareSearch(indices).setQuery(matchAllQuery());
+ }
+
+ private static MultiSearchRequestBuilder msearch(IndicesOptions options, String... indices) {
+ MultiSearchRequestBuilder multiSearchRequestBuilder = client().prepareMultiSearch();
+ if (options != null) {
+ multiSearchRequestBuilder.setIndicesOptions(options);
+ }
+ return multiSearchRequestBuilder.add(client().prepareSearch(indices).setQuery(matchAllQuery()));
+ }
+
+ private static CountRequestBuilder count(String... indices) {
+ return client().prepareCount(indices).setQuery(matchAllQuery());
+ }
+
+ private static ClearIndicesCacheRequestBuilder clearCache(String... indices) {
+ return client().admin().indices().prepareClearCache(indices);
+ }
+
+ private static FlushRequestBuilder _flush(String... indices) {
+ return client().admin().indices().prepareFlush(indices);
+ }
+
+ private static IndicesSegmentsRequestBuilder segments(String... indices) {
+ return client().admin().indices().prepareSegments(indices);
+ }
+
+ private static IndicesStatsRequestBuilder stats(String... indices) {
+ return client().admin().indices().prepareStats(indices);
+ }
+
+ private static OptimizeRequestBuilder optimize(String... indices) {
+ return client().admin().indices().prepareOptimize(indices);
+ }
+
+ private static RefreshRequestBuilder refresh(String... indices) {
+ return client().admin().indices().prepareRefresh(indices);
+ }
+
+ private static ValidateQueryRequestBuilder validateQuery(String... indices) {
+ return client().admin().indices().prepareValidateQuery(indices);
+ }
+
+ private static AliasesExistRequestBuilder aliasExists(String... indices) {
+ return client().admin().indices().prepareAliasesExist("dummy").addIndices(indices);
+ }
+
+ private static TypesExistsRequestBuilder typesExists(String... indices) {
+ return client().admin().indices().prepareTypesExists(indices).setTypes("dummy");
+ }
+
+ private static PercolateRequestBuilder percolate(String... indices) {
+ return client().preparePercolate().setIndices(indices)
+ .setSource(new PercolateSourceBuilder().setDoc(docBuilder().setDoc("k", "v")))
+ .setDocumentType("type");
+ }
+
+ private static MultiPercolateRequestBuilder mpercolate(IndicesOptions options, String... indices) {
+ MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+ if (options != null) {
+ builder.setIndicesOptions(options);
+ }
+ return builder.add(percolate(indices));
+ }
+
+ private static SuggestRequestBuilder suggest(String... indices) {
+ return client().prepareSuggest(indices).addSuggestion(SuggestBuilders.termSuggestion("name").field("a"));
+ }
+
+ private static GetAliasesRequestBuilder getAliases(String... indices) {
+ return client().admin().indices().prepareGetAliases("dummy").addIndices(indices);
+ }
+
+ private static GetFieldMappingsRequestBuilder getFieldMapping(String... indices) {
+ return client().admin().indices().prepareGetFieldMappings(indices);
+ }
+
+ private static GetMappingsRequestBuilder getMapping(String... indices) {
+ return client().admin().indices().prepareGetMappings(indices);
+ }
+
+ private static GetWarmersRequestBuilder getWarmer(String... indices) {
+ return client().admin().indices().prepareGetWarmers(indices);
+ }
+
+ private static GetSettingsRequestBuilder getSettings(String... indices) {
+ return client().admin().indices().prepareGetSettings(indices);
+ }
+
+ private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) {
+ return client().admin().cluster().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices);
+ }
+
+ private static RestoreSnapshotRequestBuilder restore(String name, String... indices) {
+ return client().admin().cluster().prepareRestoreSnapshot("dummy-repo", name)
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy-" + name)
+ .setWaitForCompletion(true)
+ .setIndices(indices);
+ }
+
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail) {
+ verify(requestBuilder, fail, 0);
+ }
+
+ private static void verify(ActionRequestBuilder requestBuilder, boolean fail, long expectedCount) {
+ if (fail) {
+ if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), nullValue());
+ } else {
+ try {
+ requestBuilder.get();
+ fail("IndexMissingException or IndexClosedException was expected");
+ } catch (IndexMissingException | IndexClosedException e) {}
+ }
+ } else {
+ if (requestBuilder instanceof SearchRequestBuilder) {
+ SearchRequestBuilder searchRequestBuilder = (SearchRequestBuilder) requestBuilder;
+ assertHitCount(searchRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof CountRequestBuilder) {
+ CountRequestBuilder countRequestBuilder = (CountRequestBuilder) requestBuilder;
+ assertHitCount(countRequestBuilder.get(), expectedCount);
+ } else if (requestBuilder instanceof MultiSearchRequestBuilder) {
+ MultiSearchResponse multiSearchResponse = ((MultiSearchRequestBuilder) requestBuilder).get();
+ assertThat(multiSearchResponse.getResponses().length, equalTo(1));
+ assertThat(multiSearchResponse.getResponses()[0].getResponse(), notNullValue());
+ } else {
+ requestBuilder.get();
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java
new file mode 100644
index 0000000000..415d806278
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices;
+
+import org.apache.lucene.store.LockObtainFailedException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.GatewayMetaState;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+public class IndicesServiceTest extends ElasticsearchSingleNodeTest {
+
+ public IndicesService getIndicesService() {
+ return getInstanceFromNode(IndicesService.class);
+ }
+ public NodeEnvironment getNodeEnvironment() {
+ return getInstanceFromNode(NodeEnvironment.class);
+ }
+
+ @Override
+ protected boolean resetNodeAfterTest() {
+ return true;
+ }
+
+ public void testCanDeleteShardContent() {
+ IndicesService indicesService = getIndicesService();
+ IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(
+ 1).build();
+ assertFalse("no shard location", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+ IndexService test = createIndex("test");
+ assertTrue(test.hasShard(0));
+ assertFalse("shard is allocated", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+ test.removeShard(0, "boom");
+ assertTrue("shard is removed", indicesService.canDeleteShardContent(new ShardId("test", 0), meta));
+ }
+
+ public void testDeleteIndexStore() throws Exception {
+ IndicesService indicesService = getIndicesService();
+ IndexService test = createIndex("test");
+ ClusterService clusterService = getInstanceFromNode(ClusterService.class);
+ IndexMetaData firstMetaData = clusterService.state().metaData().index("test");
+ assertTrue(test.hasShard(0));
+
+ try {
+ indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state());
+ fail();
+ } catch (IllegalStateException ex) {
+ // all good
+ }
+
+ GatewayMetaState gwMetaState = getInstanceFromNode(GatewayMetaState.class);
+ MetaData meta = gwMetaState.loadMetaState();
+ assertNotNull(meta);
+ assertNotNull(meta.index("test"));
+ assertAcked(client().admin().indices().prepareDelete("test"));
+
+ meta = gwMetaState.loadMetaState();
+ assertNotNull(meta);
+ assertNull(meta.index("test"));
+
+
+ test = createIndex("test");
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).get();
+ client().admin().indices().prepareFlush("test").get();
+ assertHitCount(client().prepareSearch("test").get(), 1);
+ IndexMetaData secondMetaData = clusterService.state().metaData().index("test");
+ assertAcked(client().admin().indices().prepareClose("test"));
+ ShardPath path = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings());
+ assertTrue(path.exists());
+
+ try {
+ indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state());
+ fail();
+ } catch (IllegalStateException ex) {
+ // all good
+ }
+
+ assertTrue(path.exists());
+
+ // now delete the old one and make sure we resolve against the name
+ try {
+ indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state());
+ fail();
+ } catch (IllegalStateException ex) {
+ // all good
+ }
+ assertAcked(client().admin().indices().prepareOpen("test"));
+ ensureGreen("test");
+ }
+
+ public void testPendingTasks() throws IOException {
+ IndicesService indicesService = getIndicesService();
+ IndexService test = createIndex("test");
+
+ assertTrue(test.hasShard(0));
+ ShardPath path = test.shard(0).shardPath();
+ assertTrue(test.shard(0).routingEntry().started());
+ ShardPath shardPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings());
+ assertEquals(shardPath, path);
+ try {
+ indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
+ fail("can't get lock");
+ } catch (LockObtainFailedException ex) {
+
+ }
+ assertTrue(path.exists());
+
+ int numPending = 1;
+ if (randomBoolean()) {
+ indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
+ } else {
+ if (randomBoolean()) {
+ numPending++;
+ indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
+ }
+ indicesService.addPendingDelete(test.index(), test.getIndexSettings());
+ }
+ assertAcked(client().admin().indices().prepareClose("test"));
+ assertTrue(path.exists());
+
+ assertEquals(indicesService.numPendingDeletes(test.index()), numPending);
+
+ // shard lock released... we can now delete
+ indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
+ assertEquals(indicesService.numPendingDeletes(test.index()), 0);
+ assertFalse(path.exists());
+
+ if (randomBoolean()) {
+ indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
+ indicesService.addPendingDelete(new ShardId(test.index(), 1), test.getIndexSettings());
+ indicesService.addPendingDelete(new ShardId("bogus", 1), test.getIndexSettings());
+ assertEquals(indicesService.numPendingDeletes(test.index()), 2);
+ // shard lock released... we can now delete
+ indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
+ assertEquals(indicesService.numPendingDeletes(test.index()), 0);
+ }
+ assertAcked(client().admin().indices().prepareOpen("test"));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
new file mode 100644
index 0000000000..fdb5ab05bd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisBinderProcessor.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.AnalysisModule;
+
+/**
+ */
+public class DummyAnalysisBinderProcessor extends AnalysisModule.AnalysisBinderProcessor {
+
+ @Override
+ public void processAnalyzers(AnalyzersBindings analyzersBindings) {
+ analyzersBindings.processAnalyzer("dummy", DummyAnalyzerProvider.class);
+ }
+
+ @Override
+ public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
+ tokenFiltersBindings.processTokenFilter("dummy_token_filter", DummyTokenFilterFactory.class);
+ }
+
+ @Override
+ public void processTokenizers(TokenizersBindings tokenizersBindings) {
+ tokenizersBindings.processTokenizer("dummy_tokenizer", DummyTokenizerFactory.class);
+ }
+
+ @Override
+ public void processCharFilters(CharFiltersBindings charFiltersBindings) {
+ charFiltersBindings.processCharFilter("dummy_char_filter", DummyCharFilterFactory.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
new file mode 100644
index 0000000000..55d22eb8c9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalysisPlugin.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.index.analysis.AnalysisModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+
+import java.util.Collection;
+
+public class DummyAnalysisPlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "analysis-dummy";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "Analysis Dummy Plugin";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ return ImmutableList.<Class<? extends Module>>of(DummyIndicesAnalysisModule.class);
+ }
+
+ public void onModule(AnalysisModule module) {
+ module.addProcessor(new DummyAnalysisBinderProcessor());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
new file mode 100644
index 0000000000..7034d5b439
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzer.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
+import org.apache.lucene.util.Version;
+
+import java.io.Reader;
+
+public class DummyAnalyzer extends StopwordAnalyzerBase {
+
+ protected DummyAnalyzer() {
+ }
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ return null;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
new file mode 100644
index 0000000000..68beb817d7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyAnalyzerProvider.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.AnalyzerProvider;
+import org.elasticsearch.index.analysis.AnalyzerScope;
+
+public class DummyAnalyzerProvider implements AnalyzerProvider<DummyAnalyzer> {
+ @Override
+ public String name() {
+ return "dummy";
+ }
+
+ @Override
+ public AnalyzerScope scope() {
+ return AnalyzerScope.INDICES;
+ }
+
+ @Override
+ public DummyAnalyzer get() {
+ return new DummyAnalyzer();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
new file mode 100644
index 0000000000..8c5896e59e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyCharFilterFactory.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.index.analysis.CharFilterFactory;
+
+import java.io.Reader;
+
+public class DummyCharFilterFactory implements CharFilterFactory {
+ @Override
+ public String name() {
+ return "dummy_char_filter";
+ }
+
+ @Override
+ public Reader create(Reader reader) {
+ return null;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
new file mode 100644
index 0000000000..9642b610f6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysis.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.*;
+
+public class DummyIndicesAnalysis extends AbstractComponent {
+
+ @Inject
+ public DummyIndicesAnalysis(Settings settings, IndicesAnalysisService indicesAnalysisService) {
+ super(settings);
+ indicesAnalysisService.analyzerProviderFactories().put("dummy",
+ new PreBuiltAnalyzerProviderFactory("dummy", AnalyzerScope.INDICES,
+ new DummyAnalyzer()));
+ indicesAnalysisService.tokenFilterFactories().put("dummy_token_filter",
+ new PreBuiltTokenFilterFactoryFactory(new DummyTokenFilterFactory()));
+ indicesAnalysisService.charFilterFactories().put("dummy_char_filter",
+ new PreBuiltCharFilterFactoryFactory(new DummyCharFilterFactory()));
+ indicesAnalysisService.tokenizerFactories().put("dummy_tokenizer",
+ new PreBuiltTokenizerFactoryFactory(new DummyTokenizerFactory()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
new file mode 100644
index 0000000000..9d14f67ec6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyIndicesAnalysisModule.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class DummyIndicesAnalysisModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(DummyIndicesAnalysis.class).asEagerSingleton();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
new file mode 100644
index 0000000000..489e4dce7b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenFilterFactory.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+
+public class DummyTokenFilterFactory implements TokenFilterFactory {
+ @Override public String name() {
+ return "dummy_token_filter";
+ }
+
+ @Override public TokenStream create(TokenStream tokenStream) {
+ return null;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java b/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
new file mode 100644
index 0000000000..a27c6ae7db
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/DummyTokenizerFactory.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import org.apache.lucene.analysis.Tokenizer;
+import org.elasticsearch.index.analysis.TokenizerFactory;
+
+public class DummyTokenizerFactory implements TokenizerFactory {
+ @Override
+ public String name() {
+ return "dummy_tokenizer";
+ }
+
+ @Override
+ public Tokenizer create() {
+ return null;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
new file mode 100644
index 0000000000..1bffc1ed4a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzerIntegrationTests.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.analysis;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.lucene.analysis.Analyzer;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+@ElasticsearchBackwardsCompatIntegrationTest.CompatibilityVersion(version = Version.V_1_2_0_ID) // we throw an exception if we create an index with _field_names that is 1.3
+public class PreBuiltAnalyzerIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", DummyAnalysisPlugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testThatPreBuiltAnalyzersAreNotClosedOnIndexClose() throws Exception {
+ Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers = Maps.newHashMap();
+ List<String> indexNames = Lists.newArrayList();
+ final int numIndices = scaledRandomIntBetween(2, 4);
+ for (int i = 0; i < numIndices; i++) {
+ String indexName = randomAsciiOfLength(10).toLowerCase(Locale.ROOT);
+ indexNames.add(indexName);
+
+ int randomInt = randomInt(PreBuiltAnalyzers.values().length-1);
+ PreBuiltAnalyzers preBuiltAnalyzer = PreBuiltAnalyzers.values()[randomInt];
+ String name = preBuiltAnalyzer.name().toLowerCase(Locale.ROOT);
+
+ Version randomVersion = randomVersion(random());
+ if (!loadedAnalyzers.containsKey(preBuiltAnalyzer)) {
+ loadedAnalyzers.put(preBuiltAnalyzer, Lists.<Version>newArrayList());
+ }
+ loadedAnalyzers.get(preBuiltAnalyzer).add(randomVersion);
+
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", name)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Settings versionSettings = settings(randomVersion).build();
+ client().admin().indices().prepareCreate(indexName).addMapping("type", mapping).setSettings(versionSettings).get();
+ }
+
+ ensureGreen();
+
+ final int numDocs = randomIntBetween(10, 100);
+ // index some amount of data
+ for (int i = 0; i < numDocs; i++) {
+ String randomIndex = indexNames.get(randomInt(indexNames.size()-1));
+ String randomId = randomInt() + "";
+
+ Map<String, Object> data = Maps.newHashMap();
+ data.put("foo", randomAsciiOfLength(scaledRandomIntBetween(5, 50)));
+
+ index(randomIndex, "type", randomId, data);
+ }
+
+ refresh();
+
+ // close some of the indices
+ int amountOfIndicesToClose = randomInt(numIndices-1);
+ for (int i = 0; i < amountOfIndicesToClose; i++) {
+ String indexName = indexNames.get(i);
+ client().admin().indices().prepareClose(indexName).execute().actionGet();
+ }
+
+ ensureGreen();
+
+ // check that all above configured analyzers have been loaded
+ assertThatAnalyzersHaveBeenLoaded(loadedAnalyzers);
+
+        // check that all of the pre-built analyzers are still open
+ assertLuceneAnalyzersAreNotClosed(loadedAnalyzers);
+ }
+
+ /**
+ * Test case for #5030: Upgrading analysis plugins fails
+ * See https://github.com/elasticsearch/elasticsearch/issues/5030
+ */
+ @Test
+ public void testThatPluginAnalyzersCanBeUpdated() throws Exception {
+ final XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .field("analyzer", "dummy")
+ .endObject()
+ .startObject("bar")
+ .field("type", "string")
+ .field("analyzer", "my_dummy")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ Settings versionSettings = settings(randomVersion(random()))
+ .put("index.analysis.analyzer.my_dummy.type", "custom")
+ .put("index.analysis.analyzer.my_dummy.filter", "my_dummy_token_filter")
+ .put("index.analysis.analyzer.my_dummy.char_filter", "my_dummy_char_filter")
+ .put("index.analysis.analyzer.my_dummy.tokenizer", "my_dummy_tokenizer")
+ .put("index.analysis.tokenizer.my_dummy_tokenizer.type", "dummy_tokenizer")
+ .put("index.analysis.filter.my_dummy_token_filter.type", "dummy_token_filter")
+ .put("index.analysis.char_filter.my_dummy_char_filter.type", "dummy_char_filter")
+ .build();
+
+ client().admin().indices().prepareCreate("test-analysis-dummy").addMapping("type", mapping).setSettings(versionSettings).get();
+
+ ensureGreen();
+ }
+
+ private void assertThatAnalyzersHaveBeenLoaded(Map<PreBuiltAnalyzers, List<Version>> expectedLoadedAnalyzers) {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> entry : expectedLoadedAnalyzers.entrySet()) {
+ for (Version version : entry.getValue()) {
+ // if it is not null in the cache, it has been loaded
+ assertThat(entry.getKey().getCache().get(version), is(notNullValue()));
+ }
+ }
+ }
+
+ // the close() method of a lucene analyzer sets the storedValue field to null
+ // we simply check this via reflection - ugly but works
+ private void assertLuceneAnalyzersAreNotClosed(Map<PreBuiltAnalyzers, List<Version>> loadedAnalyzers) throws IllegalAccessException, NoSuchFieldException {
+ for (Map.Entry<PreBuiltAnalyzers, List<Version>> preBuiltAnalyzerEntry : loadedAnalyzers.entrySet()) {
+ PreBuiltAnalyzers preBuiltAnalyzer = preBuiltAnalyzerEntry.getKey();
+ for (Version version : preBuiltAnalyzerEntry.getValue()) {
+ Analyzer analyzer = preBuiltAnalyzerEntry.getKey().getCache().get(version);
+
+ Field field = getFieldFromClass("storedValue", analyzer);
+ boolean currentAccessible = field.isAccessible();
+ field.setAccessible(true);
+ Object storedValue = field.get(analyzer);
+ field.setAccessible(currentAccessible);
+
+ assertThat(String.format(Locale.ROOT, "Analyzer %s in version %s seems to be closed", preBuiltAnalyzer.name(), version), storedValue, is(notNullValue()));
+ }
+ }
+ }
+
+ /**
+     * Searches for a field by name, walking up through all superclasses until it is found
+ */
+ private Field getFieldFromClass(String fieldName, Object obj) {
+ Field field = null;
+ boolean storedValueFieldFound = false;
+ Class clazz = obj.getClass();
+ while (!storedValueFieldFound) {
+ try {
+ field = clazz.getDeclaredField(fieldName);
+ storedValueFieldFound = true;
+ } catch (NoSuchFieldException e) {
+ clazz = clazz.getSuperclass();
+ }
+
+ if (Object.class.equals(clazz)) throw new RuntimeException("Could not find storedValue field in class" + clazz);
+ }
+
+ return field;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
new file mode 100644
index 0000000000..2537b53b0f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java
@@ -0,0 +1,280 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the indices analyze API (_analyze): analyzing against an
+ * index or alias, custom char filters, field-based analyzer resolution,
+ * multi-valued text, and parsing analyze requests from REST content.
+ */
+public class AnalyzeActionTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void simpleAnalyzerTests() throws Exception {
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+        ensureGreen();
+
+        // repeat so both the concrete index name and the alias get exercised
+        for (int i = 0; i < 10; i++) {
+            AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "this is a test").get();
+            assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+            AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
+            assertThat(token.getTerm(), equalTo("this"));
+            assertThat(token.getStartOffset(), equalTo(0));
+            assertThat(token.getEndOffset(), equalTo(4));
+            assertThat(token.getPosition(), equalTo(0));
+            token = analyzeResponse.getTokens().get(1);
+            assertThat(token.getTerm(), equalTo("is"));
+            assertThat(token.getStartOffset(), equalTo(5));
+            assertThat(token.getEndOffset(), equalTo(7));
+            assertThat(token.getPosition(), equalTo(1));
+            token = analyzeResponse.getTokens().get(2);
+            assertThat(token.getTerm(), equalTo("a"));
+            assertThat(token.getStartOffset(), equalTo(8));
+            assertThat(token.getEndOffset(), equalTo(9));
+            assertThat(token.getPosition(), equalTo(2));
+            token = analyzeResponse.getTokens().get(3);
+            assertThat(token.getTerm(), equalTo("test"));
+            assertThat(token.getStartOffset(), equalTo(10));
+            assertThat(token.getEndOffset(), equalTo(14));
+            assertThat(token.getPosition(), equalTo(3));
+        }
+    }
+
+    @Test
+    public void analyzeNumericField() throws IOException {
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "long", "type=long", "double", "type=double"));
+        ensureGreen("test");
+
+        // analyzing numeric fields is not supported and must be rejected
+        try {
+            client().admin().indices().prepareAnalyze(indexOrAlias(), "123").setField("long").get();
+            fail("shouldn't get here");
+        } catch (IllegalArgumentException ex) {
+            //all good
+        }
+        try {
+            client().admin().indices().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get();
+            fail("shouldn't get here");
+        } catch (IllegalArgumentException ex) {
+            //all good
+        }
+    }
+
+    @Test
+    public void analyzeWithNoIndex() throws Exception {
+
+        // without an index, analyzer/tokenizer/filters are resolved globally
+        AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setAnalyzer("simple").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+
+        analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("keyword").setTokenFilters("lowercase").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+        assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
+
+        analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST").setTokenizer("standard").setTokenFilters("lowercase", "reverse").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+        AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
+        assertThat(token.getTerm(), equalTo("siht"));
+        token = analyzeResponse.getTokens().get(1);
+        assertThat(token.getTerm(), equalTo("si"));
+        token = analyzeResponse.getTokens().get(2);
+        assertThat(token.getTerm(), equalTo("a"));
+        token = analyzeResponse.getTokens().get(3);
+        assertThat(token.getTerm(), equalTo("tset"));
+
+        // stop filter removes "of" but preserves position/offsets of "course"
+        analyzeResponse = client().admin().indices().prepareAnalyze("of course").setTokenizer("standard").setTokenFilters("stop").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+        assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("course"));
+        assertThat(analyzeResponse.getTokens().get(0).getPosition(), equalTo(1));
+        assertThat(analyzeResponse.getTokens().get(0).getStartOffset(), equalTo(3));
+        assertThat(analyzeResponse.getTokens().get(0).getEndOffset(), equalTo(9));
+
+    }
+
+    @Test
+    public void analyzeWithCharFilters() throws Exception {
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+                .setSettings(settingsBuilder().put(indexSettings())
+                        .put("index.analysis.char_filter.custom_mapping.type", "mapping")
+                        .putArray("index.analysis.char_filter.custom_mapping.mappings", "ph=>f", "qu=>q")
+                        .put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
+                        .putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "custom_mapping")));
+        ensureGreen();
+
+        AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("<h2><b>THIS</b> IS A</h2> <a href=\"#\">TEST</a>").setTokenizer("standard").setCharFilters("html_strip").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+
+        analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A <b>TEST</b>").setTokenizer("keyword").setTokenFilters("lowercase").setCharFilters("html_strip").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+        assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
+
+        // custom_mapping maps "qu"->"q" and "ph"->"f" (see index settings above)
+        analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "jeff quit phish").setTokenizer("keyword").setTokenFilters("lowercase").setCharFilters("custom_mapping").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(1));
+        assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("jeff qit fish"));
+
+        // multiple char filters are applied in the given order
+        analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "<a href=\"#\">jeff quit fish</a>").setTokenizer("standard").setCharFilters("html_strip", "custom_mapping").get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(3));
+        AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
+        assertThat(token.getTerm(), equalTo("jeff"));
+        token = analyzeResponse.getTokens().get(1);
+        assertThat(token.getTerm(), equalTo("qit"));
+        token = analyzeResponse.getTokens().get(2);
+        assertThat(token.getTerm(), equalTo("fish"));
+    }
+
+    @Test
+    public void analyzerWithFieldOrTypeTests() throws Exception {
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+        ensureGreen();
+
+        client().admin().indices().preparePutMapping("test")
+                .setType("document").setSource("simple", "type=string,analyzer=simple").get();
+
+        // "document.simple" resolves the analyzer from the mapping of that field
+        for (int i = 0; i < 10; i++) {
+            final AnalyzeRequestBuilder requestBuilder = client().admin().indices().prepareAnalyze("THIS IS A TEST");
+            requestBuilder.setIndex(indexOrAlias());
+            requestBuilder.setField("document.simple");
+            AnalyzeResponse analyzeResponse = requestBuilder.get();
+            assertThat(analyzeResponse.getTokens().size(), equalTo(4));
+            AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3);
+            assertThat(token.getTerm(), equalTo("test"));
+            assertThat(token.getStartOffset(), equalTo(10));
+            assertThat(token.getEndOffset(), equalTo(14));
+        }
+    }
+
+    @Test // issue #5974
+    public void testThatStandardAndDefaultAnalyzersAreSame() throws Exception {
+        AnalyzeResponse response = client().admin().indices().prepareAnalyze("this is a test").setAnalyzer("standard").get();
+        assertTokens(response, "this", "is", "a", "test");
+
+        response = client().admin().indices().prepareAnalyze("this is a test").setAnalyzer("default").get();
+        assertTokens(response, "this", "is", "a", "test");
+
+        // no analyzer set at all must behave like "standard"/"default" too
+        response = client().admin().indices().prepareAnalyze("this is a test").get();
+        assertTokens(response, "this", "is", "a", "test");
+    }
+
+    // asserts that the response terms match the expected tokens, in order
+    private void assertTokens(AnalyzeResponse response, String ... tokens) {
+        assertThat(response.getTokens(), hasSize(tokens.length));
+        for (int i = 0; i < tokens.length; i++) {
+            assertThat(response.getTokens().get(i).getTerm(), is(tokens[i]));
+        }
+    }
+
+    // randomly target the index either directly or through its alias
+    private static String indexOrAlias() {
+        return randomBoolean() ? "test" : "alias";
+    }
+
+    @Test
+    public void testParseXContentForAnalyzeRequest() throws Exception {
+        BytesReference content = XContentFactory.jsonBuilder()
+                .startObject()
+                .field("text", "THIS IS A TEST")
+                .field("tokenizer", "keyword")
+                .array("filters", "lowercase")
+                .endObject().bytes();
+
+        AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
+
+        RestAnalyzeAction.buildFromContent(content, analyzeRequest);
+
+        // body content overrides the text the request was constructed with
+        assertThat(analyzeRequest.text().length, equalTo(1));
+        assertThat(analyzeRequest.text(), equalTo(new String[]{"THIS IS A TEST"}));
+        assertThat(analyzeRequest.tokenizer(), equalTo("keyword"));
+        assertThat(analyzeRequest.tokenFilters(), equalTo(new String[]{"lowercase"}));
+    }
+
+    @Test
+    public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() throws Exception {
+        AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
+        BytesReference invalidContent = XContentFactory.jsonBuilder().startObject().value("invalid_json").endObject().bytes();
+
+        try {
+            RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest);
+            fail("shouldn't get here");
+        } catch (Exception e) {
+            assertThat(e, instanceOf(IllegalArgumentException.class));
+            assertThat(e.getMessage(), equalTo("Failed to parse request body"));
+        }
+    }
+
+    @Test
+    public void testParseXContentForAnalyzeRequestWithUnknownParamThrowsException() throws Exception {
+        AnalyzeRequest analyzeRequest = new AnalyzeRequest("for test");
+        BytesReference invalidContent = XContentFactory.jsonBuilder()
+                .startObject()
+                .field("text", "THIS IS A TEST")
+                .field("unknown", "keyword")
+                .endObject().bytes();
+
+        try {
+            RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest);
+            fail("shouldn't get here");
+        } catch (Exception e) {
+            assertThat(e, instanceOf(IllegalArgumentException.class));
+            assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
+        }
+    }
+
+    @Test
+    public void analyzerWithMultiValues() throws Exception {
+
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+        ensureGreen();
+
+        // position_offset_gap=100 separates the two values in the position stream
+        client().admin().indices().preparePutMapping("test")
+                .setType("document").setSource("simple", "type=string,analyzer=simple,position_offset_gap=100").get();
+
+        String[] texts = new String[]{"THIS IS A TEST", "THE SECOND TEXT"};
+
+        final AnalyzeRequestBuilder requestBuilder = client().admin().indices().prepareAnalyze();
+        requestBuilder.setText(texts);
+        requestBuilder.setIndex(indexOrAlias());
+        requestBuilder.setField("simple");
+        AnalyzeResponse analyzeResponse = requestBuilder.get();
+        assertThat(analyzeResponse.getTokens().size(), equalTo(7));
+        AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(3);
+        assertThat(token.getTerm(), equalTo("test"));
+        assertThat(token.getPosition(), equalTo(3));
+        assertThat(token.getStartOffset(), equalTo(10));
+        assertThat(token.getEndOffset(), equalTo(14));
+
+        // second value: position jumps by the configured gap (4 + 100 + 1 = 105)
+        token = analyzeResponse.getTokens().get(5);
+        assertThat(token.getTerm(), equalTo("second"));
+        assertThat(token.getPosition(), equalTo(105));
+        assertThat(token.getStartOffset(), equalTo(19));
+        assertThat(token.getEndOffset(), equalTo(25));
+
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java b/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
new file mode 100644
index 0000000000..46a6505a51
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/analyze/HunspellServiceTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.analyze;
+
+import org.apache.lucene.analysis.hunspell.Dictionary;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.analysis.HunspellService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+
+import static org.elasticsearch.indices.analysis.HunspellService.*;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Integration tests for loading Hunspell dictionaries from a node's
+ * configuration directory ({@code path.conf}), including node-level vs
+ * dictionary-specific settings and malformed dictionary layouts.
+ */
+@ClusterScope(scope= Scope.TEST, numDataNodes=0)
+public class HunspellServiceTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testLocaleDirectoryWithNodeLevelConfig() throws Exception {
+        Settings settings = Settings.settingsBuilder()
+                .put("path.conf", getDataPath("/indices/analyze/conf_dir"))
+                .put(HUNSPELL_LAZY_LOAD, randomBoolean())
+                .put(HUNSPELL_IGNORE_CASE, true)
+                .build();
+
+        internalCluster().startNode(settings);
+        Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
+        assertThat(dictionary, notNullValue());
+        // node-level HUNSPELL_IGNORE_CASE=true must be picked up by the dictionary
+        assertIgnoreCase(true, dictionary);
+    }
+
+    @Test
+    public void testLocaleDirectoryWithLocaleSpecificConfig() throws Exception {
+        Settings settings = Settings.settingsBuilder()
+                .put("path.conf", getDataPath("/indices/analyze/conf_dir"))
+                .put(HUNSPELL_LAZY_LOAD, randomBoolean())
+                .put(HUNSPELL_IGNORE_CASE, true)
+                .put("indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing", false)
+                .put("indices.analysis.hunspell.dictionary.en_US.ignore_case", false)
+                .build();
+
+        internalCluster().startNode(settings);
+        // en_US has a dictionary-specific ignore_case=false overriding the node level
+        Dictionary dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
+        assertThat(dictionary, notNullValue());
+        assertIgnoreCase(false, dictionary);
+
+
+
+        // testing that dictionary specific settings override node level settings
+        dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US_custom");
+        assertThat(dictionary, notNullValue());
+        assertIgnoreCase(true, dictionary);
+    }
+
+    @Test
+    public void testDicWithNoAff() throws Exception {
+        // conf dir contains a .dic file but no matching .aff file
+        Settings settings = Settings.settingsBuilder()
+                .put("path.conf", getDataPath("/indices/analyze/no_aff_conf_dir"))
+                .put(HUNSPELL_LAZY_LOAD, randomBoolean())
+                .build();
+
+        Dictionary dictionary = null;
+        try {
+            internalCluster().startNode(settings);
+            dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
+            fail("Missing affix file didn't throw an error");
+        }
+        catch (Throwable t) {
+            // with lazy loading the failure may surface on getDictionary rather
+            // than node start, hence the broad catch and the unwrap below
+            assertNull(dictionary);
+            assertThat(ExceptionsHelper.unwrap(t, ElasticsearchException.class).toString(), Matchers.containsString("Missing affix file"));
+        }
+    }
+
+    @Test
+    public void testDicWithTwoAffs() throws Exception {
+        // conf dir contains two .aff files for the same dictionary — ambiguous
+        Settings settings = Settings.settingsBuilder()
+                .put("path.conf", getDataPath("/indices/analyze/two_aff_conf_dir"))
+                .put(HUNSPELL_LAZY_LOAD, randomBoolean())
+                .build();
+
+        Dictionary dictionary = null;
+        try {
+            internalCluster().startNode(settings);
+            dictionary = internalCluster().getInstance(HunspellService.class).getDictionary("en_US");
+            fail("Multiple affix files didn't throw an error");
+        } catch (Throwable t) {
+            assertNull(dictionary);
+            assertThat(ExceptionsHelper.unwrap(t, ElasticsearchException.class).toString(), Matchers.containsString("Too many affix files"));
+        }
+    }
+
+    // TODO: open up a getter on Dictionary
+    // Lucene's Dictionary exposes no accessor for ignoreCase, so read the
+    // private field via reflection.
+    private void assertIgnoreCase(boolean expected, Dictionary dictionary) throws Exception {
+        Field f = Dictionary.class.getDeclaredField("ignoreCase");
+        f.setAccessible(true);
+        assertEquals(expected, f.getBoolean(dictionary));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
new file mode 100644
index 0000000000..9013156a59
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.cache.query;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Integration test for the shard query cache: repeated identical aggregation
+ * requests must hit the cache and still return identical results.
+ */
+public class IndicesQueryCacheTests extends ElasticsearchIntegrationTest {
+
+    // One of the primary purposes of the query cache is to cache aggs results
+    // NOTE(review): no @Test annotation, unlike sibling test classes — the
+    // "test" method-name prefix is presumably picked up by the runner; confirm.
+    public void testCacheAggs() throws Exception {
+        assertAcked(client().admin().indices().prepareCreate("index")
+                .addMapping("type", "f", "type=date")
+                .setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get());
+        indexRandom(true,
+                client().prepareIndex("index", "type").setSource("f", "2014-03-10T00:00:00.000Z"),
+                client().prepareIndex("index", "type").setSource("f", "2014-05-13T00:00:00.000Z"));
+        ensureSearchable("index");
+
+        // This is not a random example: serialization with time zones writes shared strings
+        // which used to not work well with the query cache because of the handles stream output
+        // see #9500
+        final SearchResponse r1 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH)
+                .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00").minDocCount(0).interval(DateHistogramInterval.MONTH)).get();
+        assertSearchResponse(r1);
+
+        // The cached is actually used: memory footprint must be non-zero
+        // (0L instead of the easily-misread lowercase 0l literal)
+        assertThat(client().admin().indices().prepareStats("index").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0L));
+
+        // repeated identical requests must return bucket-for-bucket equal results
+        for (int i = 0; i < 10; ++i) {
+            final SearchResponse r2 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH)
+                    .addAggregation(dateHistogram("histo").field("f").timeZone("+01:00").minDocCount(0).interval(DateHistogramInterval.MONTH)).get();
+            assertSearchResponse(r2);
+            Histogram h1 = r1.getAggregations().get("histo");
+            Histogram h2 = r2.getAggregations().get("histo");
+            final List<? extends Bucket> buckets1 = h1.getBuckets();
+            final List<? extends Bucket> buckets2 = h2.getBuckets();
+            assertEquals(buckets1.size(), buckets2.size());
+            for (int j = 0; j < buckets1.size(); ++j) {
+                final Bucket b1 = buckets1.get(j);
+                final Bucket b2 = buckets2.get(j);
+                assertEquals(b1.getKey(), b2.getKey());
+                assertEquals(b1.getDocCount(), b2.getDocCount());
+            }
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java
new file mode 100644
index 0000000000..b0a549f3cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.exists.indices;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndicesExistsTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    // Indices exists never throws IndexMissingException, the indices options control its behaviour (return true or false)
+    public void testIndicesExists() throws Exception {
+        // lenient options turn "missing index" into exists=true instead of false
+        assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false));
+        assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false));
+        assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+        createIndex("foo", "foobar", "bar", "barbaz");
+        ensureYellow();
+
+        // exact names, wildcards and _all all resolve once the indices exist
+        assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true));
+        assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true));
+    }
+
+    @Test
+    public void testIndicesExistsWithBlocks() {
+        createIndex("ro");
+        ensureYellow();
+
+        // Request is not blocked: read/write/read-only blocks don't affect exists
+        for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+            try {
+                enableIndexBlock("ro", blockSetting);
+                assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true));
+            } finally {
+                // always lift the block so later iterations/tests see a clean index
+                disableIndexBlock("ro", blockSetting);
+            }
+        }
+
+        // Request is blocked: exists is a metadata read, so a metadata block rejects it
+        try {
+            enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+            assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true));
+            fail("Exists should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true");
+        } catch (ClusterBlockException e) {
+            // Ok, a ClusterBlockException is expected
+        } finally {
+            disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
new file mode 100644
index 0000000000..f72609298e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.exists.types;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class TypesExistsTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testSimple() throws Exception {
+        // test1 has type1+type2 (and alias1), test2 has only type1
+        Client client = client();
+        client.admin().indices().prepareCreate("test1")
+                .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+                .addMapping("type2", jsonBuilder().startObject().startObject("type2").endObject().endObject())
+                .execute().actionGet();
+        client.admin().indices().prepareCreate("test2")
+                .addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())
+                .execute().actionGet();
+        client.admin().indices().prepareAliases().addAlias("test1", "alias1").execute().actionGet();
+        ClusterHealthResponse healthResponse = client.admin().cluster()
+                .prepareHealth("test1", "test2").setWaitForYellowStatus().execute().actionGet();
+        assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+        TypesExistsResponse response = client.admin().indices().prepareTypesExists("test1").setTypes("type1").execute().actionGet();
+        assertThat(response.isExists(), equalTo(true));
+        response = client.admin().indices().prepareTypesExists("test1").setTypes("type2").execute().actionGet();
+        assertThat(response.isExists(), equalTo(true));
+        response = client.admin().indices().prepareTypesExists("test1").setTypes("type3").execute().actionGet();
+        assertThat(response.isExists(), equalTo(false));
+        // unlike indices-exists, types-exists throws when the index itself is missing
+        try {
+            client.admin().indices().prepareTypesExists("notExist").setTypes("type1").execute().actionGet();
+            fail("Exception should have been thrown");
+        } catch (IndexMissingException e) {}
+        try {
+            client.admin().indices().prepareTypesExists("notExist").setTypes("type0").execute().actionGet();
+            fail("Exception should have been thrown");
+        } catch (IndexMissingException e) {}
+        // aliases and wildcards resolve like indices do
+        response = client.admin().indices().prepareTypesExists("alias1").setTypes("type1").execute().actionGet();
+        assertThat(response.isExists(), equalTo(true));
+        response = client.admin().indices().prepareTypesExists("*").setTypes("type1").execute().actionGet();
+        assertThat(response.isExists(), equalTo(true));
+        response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type1").execute().actionGet();
+        assertThat(response.isExists(), equalTo(true));
+        // with multiple indices the type must exist in ALL of them
+        response = client.admin().indices().prepareTypesExists("test1", "test2").setTypes("type2").execute().actionGet();
+        assertThat(response.isExists(), equalTo(false));
+    }
+
+    @Test
+    public void testTypesExistsWithBlocks() throws IOException {
+        assertAcked(prepareCreate("ro").addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject()));
+        ensureGreen("ro");
+
+        // Request is not blocked: read/write/read-only blocks don't affect types-exists
+        for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+            try {
+                enableIndexBlock("ro", block);
+                assertThat(client().admin().indices().prepareTypesExists("ro").setTypes("type1").execute().actionGet().isExists(), equalTo(true));
+            } finally {
+                // always lift the block so later iterations/tests see a clean index
+                disableIndexBlock("ro", block);
+            }
+        }
+
+        // Request is blocked: types-exists reads metadata, so a metadata block rejects it
+        try {
+            enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+            assertBlocked(client().admin().indices().prepareTypesExists("ro").setTypes("type1"));
+        } finally {
+            disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushTest.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushTest.java
new file mode 100644
index 0000000000..5ce34ca4fa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/flush/FlushTest.java
@@ -0,0 +1,226 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.flush;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+public class FlushTest extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testWaitIfOngoing() throws InterruptedException {
+ createIndex("test");
+ ensureGreen("test");
+ final int numIters = scaledRandomIntBetween(10, 30);
+ for (int i = 0; i < numIters; i++) {
+ for (int j = 0; j < 10; j++) {
+ client().prepareIndex("test", "test").setSource("{}").get();
+ }
+ final CountDownLatch latch = new CountDownLatch(10);
+ final CopyOnWriteArrayList<Throwable> errors = new CopyOnWriteArrayList<>();
+ for (int j = 0; j < 10; j++) {
+ client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute(new ActionListener<FlushResponse>() {
+ @Override
+ public void onResponse(FlushResponse flushResponse) {
+ try {
+ // don't use assertAllSuccessful; it uses a randomized context that belongs to a different thread
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(flushResponse.getShardFailures()), flushResponse.getFailedShards(), equalTo(0));
+ latch.countDown();
+ } catch (Throwable ex) {
+ onFailure(ex);
+ }
+
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ errors.add(e);
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ assertThat(errors, emptyIterable());
+ }
+ }
+
+ @TestLogging("indices:TRACE")
+ public void testSyncedFlush() throws ExecutionException, InterruptedException, IOException {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).get();
+ ensureGreen();
+
+ IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ for (ShardStats shardStats : indexStats.getShards()) {
+ assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+
+ ShardsSyncedFlushResult result;
+ if (randomBoolean()) {
+ logger.info("--> sync flushing shard 0");
+ result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId("test", 0));
+ } else {
+ logger.info("--> sync flushing index [test]");
+ IndicesSyncedFlushResult indicesResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test");
+ result = indicesResult.getShardsResultPerIndex().get("test").get(0);
+ }
+ assertFalse(result.failed());
+ assertThat(result.totalShards(), equalTo(indexStats.getShards().length));
+ assertThat(result.successfulShards(), equalTo(indexStats.getShards().length));
+
+ indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ String syncId = result.syncId();
+ for (ShardStats shardStats : indexStats.getShards()) {
+ final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
+ assertThat(shardSyncId, equalTo(syncId));
+ }
+
+ // now, start new node and relocate a shard there and see if sync id still there
+ String newNodeName = internalCluster().startNode();
+ ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
+ ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next();
+ String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).name();
+ assertFalse(currentNodeName.equals(newNodeName));
+ internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), currentNodeName, newNodeName)).get();
+
+ client().admin().cluster().prepareHealth()
+ .setWaitForRelocatingShards(0)
+ .get();
+ indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ for (ShardStats shardStats : indexStats.getShards()) {
+ assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()).get();
+ ensureGreen("test");
+ indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ for (ShardStats shardStats : indexStats.getShards()) {
+ assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()).get();
+ ensureGreen("test");
+ indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ for (ShardStats shardStats : indexStats.getShards()) {
+ assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+ }
+
+ @TestLogging("indices:TRACE")
+ public void testSyncedFlushWithConcurrentIndexing() throws Exception {
+
+ internalCluster().ensureAtLeastNumDataNodes(3);
+ createIndex("test");
+
+ client().admin().indices().prepareUpdateSettings("test").setSettings(
+ Settings.builder().put("index.translog.disable_flush", true).put("index.refresh_interval", -1).put("index.number_of_replicas", internalCluster().numDataNodes() - 1))
+ .get();
+ ensureGreen();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicInteger numDocs = new AtomicInteger(0);
+ Thread indexingThread = new Thread() {
+ @Override
+ public void run() {
+ while (stop.get() == false) {
+ client().prepareIndex().setIndex("test").setType("doc").setSource("{}").get();
+ numDocs.incrementAndGet();
+ }
+ }
+ };
+ indexingThread.start();
+
+ IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ for (ShardStats shardStats : indexStats.getShards()) {
+ assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+ logger.info("--> trying sync flush");
+ IndicesSyncedFlushResult syncedFlushResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test");
+ logger.info("--> sync flush done");
+ stop.set(true);
+ indexingThread.join();
+ indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
+ assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test"));
+ refresh();
+ assertThat(client().prepareCount().get().getCount(), equalTo((long) numDocs.get()));
+ logger.info("indexed {} docs", client().prepareCount().get().getCount());
+ logClusterState();
+ internalCluster().fullRestart();
+ ensureGreen();
+ assertThat(client().prepareCount().get().getCount(), equalTo((long) numDocs.get()));
+ }
+
+ private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List<ShardsSyncedFlushResult> syncedFlushResults) {
+
+ for (final ShardStats shardStats : shardsStats) {
+ for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) {
+ if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) {
+ for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) {
+ if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) {
+ if (singleResponse.getValue().success()) {
+ logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId());
+ assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ } else {
+ logger.info("{} sync flush failed for on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId());
+ assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testUnallocatedShardsDoesNotHang() throws InterruptedException {
+ // create an index but disallow allocation
+ prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent")).get();
+
+ // this should not hang but instead immediately return with empty result set
+ List<ShardsSyncedFlushResult> shardsResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").getShardsResultPerIndex().get("test");
+ // just to make sure the test actually tests the right thing
+ int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1);
+ assertThat(shardsResult.size(), equalTo(numShards));
+ assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java
new file mode 100644
index 0000000000..5d65c1acb7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTest.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.flush;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ */
+public class SyncedFlushSingleNodeTest extends ElasticsearchSingleNodeTest {
+
+ public void testModificationPreventsFlushing() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ final ShardId shardId = shard.shardId();
+ final ClusterState state = getInstanceFromNode(ClusterService.class).state();
+ final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
+ final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
+ assertEquals("exactly one active shard", 1, activeShards.size());
+ Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
+ assertEquals("exactly one commit id", 1, commitIds.size());
+ client().prepareIndex("test", "test", "2").setSource("{}").get();
+ String syncId = Strings.base64UUID();
+ SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
+ flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ ShardsSyncedFlushResult syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(0, syncedFlushResult.successfulShards());
+ assertEquals(1, syncedFlushResult.totalShards());
+ assertEquals(syncId, syncedFlushResult.syncId());
+ assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
+ assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
+ assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
+
+ SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); // pull another commit and make sure we can't sync-flush with the old one
+ listener = new SyncedFlushUtil.LatchedListener();
+ flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(0, syncedFlushResult.successfulShards());
+ assertEquals(1, syncedFlushResult.totalShards());
+ assertEquals(syncId, syncedFlushResult.syncId());
+ assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
+ assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
+ assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
+ }
+
+ public void testSingleShardSuccess() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ final ShardId shardId = shard.shardId();
+ SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener();
+ flushService.attemptSyncedFlush(shardId, listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ ShardsSyncedFlushResult syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(1, syncedFlushResult.successfulShards());
+ assertEquals(1, syncedFlushResult.totalShards());
+ SyncedFlushService.SyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next();
+ assertTrue(response.success());
+ }
+
+ public void testSyncFailsIfOperationIsInFlight() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ final ShardId shardId = shard.shardId();
+ shard.incrementOperationCounter();
+ try {
+ SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
+ flushService.attemptSyncedFlush(shardId, listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ ShardsSyncedFlushResult syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(0, syncedFlushResult.successfulShards());
+ assertNotEquals(0, syncedFlushResult.totalShards());
+ assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason());
+ } finally {
+ shard.decrementOperationCounter();
+ }
+ }
+
+ public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException {
+ createIndex("test");
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener();
+ flushService.attemptSyncedFlush(new ShardId("test", 1), listener);
+ listener.latch.await();
+ assertNotNull(listener.error);
+ assertNull(listener.result);
+ assertEquals("missing", listener.error.getMessage());
+
+ final ShardId shardId = shard.shardId();
+
+ client().admin().indices().prepareClose("test").get();
+ listener = new SyncedFlushUtil.LatchedListener();
+ flushService.attemptSyncedFlush(shardId, listener);
+ listener.latch.await();
+ assertNotNull(listener.error);
+ assertNull(listener.result);
+ assertEquals("closed", listener.error.getMessage());
+
+ listener = new SyncedFlushUtil.LatchedListener();
+ flushService.attemptSyncedFlush(new ShardId("nosuchindex", 0), listener);
+ listener.latch.await();
+ assertNotNull(listener.error);
+ assertNull(listener.result);
+ assertEquals("no such index", listener.error.getMessage());
+ }
+
+ public void testFailAfterIntermediateCommit() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ final ShardId shardId = shard.shardId();
+ final ClusterState state = getInstanceFromNode(ClusterService.class).state();
+ final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
+ final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
+ assertEquals("exactly one active shard", 1, activeShards.size());
+ Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
+ assertEquals("exactly one commit id", 1, commitIds.size());
+ if (randomBoolean()) {
+ client().prepareIndex("test", "test", "2").setSource("{}").get();
+ }
+ client().admin().indices().prepareFlush("test").setForce(true).get();
+ String syncId = Strings.base64UUID();
+ final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener();
+ flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ ShardsSyncedFlushResult syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(0, syncedFlushResult.successfulShards());
+ assertEquals(1, syncedFlushResult.totalShards());
+ assertEquals(syncId, syncedFlushResult.syncId());
+ assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
+ assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
+ assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
+ }
+
+ public void testFailWhenCommitIsMissing() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource("{}").get();
+ IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
+ IndexShard shard = test.shard(0);
+
+ SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
+ final ShardId shardId = shard.shardId();
+ final ClusterState state = getInstanceFromNode(ClusterService.class).state();
+ final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
+ final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
+ assertEquals("exactly one active shard", 1, activeShards.size());
+ Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
+ assertEquals("exactly one commit id", 1, commitIds.size());
+ commitIds.clear(); // wipe it...
+ String syncId = Strings.base64UUID();
+ SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener();
+ flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
+ listener.latch.await();
+ assertNull(listener.error);
+ ShardsSyncedFlushResult syncedFlushResult = listener.result;
+ assertNotNull(syncedFlushResult);
+ assertEquals(0, syncedFlushResult.successfulShards());
+ assertEquals(1, syncedFlushResult.totalShards());
+ assertEquals(syncId, syncedFlushResult.syncId());
+ assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
+ assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
+ assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java
new file mode 100644
index 0000000000..426ec36d60
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.flush;
+
+import com.carrotsearch.hppc.ObjectIntHashMap;
+import com.carrotsearch.hppc.ObjectIntMap;
+import org.elasticsearch.cluster.routing.ImmutableShardRouting;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.flush.IndicesSyncedFlushResult.ShardCounts;
+import org.elasticsearch.indices.flush.SyncedFlushService.SyncedFlushResponse;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.test.XContentTestUtils.convertToMap;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+public class SyncedFlushUnitTests extends ElasticsearchTestCase {
+
+
+ private static class TestPlan {
+ public ShardCounts totalCounts;
+ public Map<String, ShardCounts> countsPerIndex = new HashMap<>();
+ public ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
+
+ public IndicesSyncedFlushResult result;
+
+ }
+
+ public void testIndicesSyncedFlushResult() throws IOException {
+ final TestPlan testPlan = createTestPlan();
+ assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
+ assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
+ assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
+ assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
+ Map<String, Object> asMap = convertToMap(testPlan.result);
+ assertShardCount("_shards header", (Map<String, Object>) asMap.get("_shards"), testPlan.totalCounts);
+
+ assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header
+ for (String index : testPlan.countsPerIndex.keySet()) {
+ Map<String, Object> indexMap = (Map<String, Object>) asMap.get(index);
+ assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index));
+ List<Map<String, Object>> failureList = (List<Map<String, Object>>) indexMap.get("failures");
+ final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index);
+ if (expectedFailures == 0) {
+ assertNull(index + " has unexpected failures", failureList);
+ } else {
+ assertNotNull(index + " should have failures", failureList);
+ assertThat(failureList, hasSize(expectedFailures));
+ }
+ }
+ }
+
+ private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) {
+ assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total));
+ assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful));
+ assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed));
+ }
+
+ protected TestPlan createTestPlan() {
+ final TestPlan testPlan = new TestPlan();
+ final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
+ final int indexCount = randomIntBetween(1, 10);
+ int totalShards = 0;
+ int totalSuccesful = 0;
+ int totalFailed = 0;
+ for (int i = 0; i < indexCount; i++) {
+ final String index = "index_" + i;
+ int shards = randomIntBetween(1, 4);
+ int replicas = randomIntBetween(0, 2);
+ int successful = 0;
+ int failed = 0;
+ int failures = 0;
+ List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
+ for (int shard = 0; shard < shards; shard++) {
+ final ShardId shardId = new ShardId(index, shard);
+ if (randomInt(5) < 2) {
+ // total shard failure
+ failed += replicas + 1;
+ failures++;
+ shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
+ } else {
+ Map<ShardRouting, SyncedFlushResponse> shardResponses = new HashMap<>();
+ for (int copy = 0; copy < replicas + 1; copy++) {
+ final ShardRouting shardRouting = new ImmutableShardRouting(index, shard, "node_" + shardId + "_" + copy, null,
+ copy == 0, ShardRoutingState.STARTED, 0);
+ if (randomInt(5) < 2) {
+ // shard copy failure
+ failed++;
+ failures++;
+ shardResponses.put(shardRouting, new SyncedFlushResponse("copy failure " + shardId));
+ } else {
+ successful++;
+ shardResponses.put(shardRouting, new SyncedFlushResponse());
+ }
+ }
+ shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
+ }
+ }
+ indicesResults.put(index, shardsResults);
+ testPlan.countsPerIndex.put(index, new ShardCounts(shards * (replicas + 1), successful, failed));
+ testPlan.expectedFailuresPerIndex.put(index, failures);
+ totalFailed += failed;
+ totalShards += shards * (replicas + 1);
+ totalSuccesful += successful;
+ }
+ testPlan.result = new IndicesSyncedFlushResult(indicesResults);
+ testPlan.totalCounts = new ShardCounts(totalShards, totalSuccesful, totalFailed);
+ return testPlan;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java
new file mode 100644
index 0000000000..fef6c23231
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.flush;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+
+/** Utils for SyncedFlush */
+public class SyncedFlushUtil {
+
+ private SyncedFlushUtil() {
+
+ }
+
+ /**
+ * Blocking single index version of {@link SyncedFlushService#attemptSyncedFlush(String[], IndicesOptions, ActionListener)}
+ */
+ public static IndicesSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, String index) {
+ SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
+ LatchedListener<IndicesSyncedFlushResult> listener = new LatchedListener();
+ service.attemptSyncedFlush(new String[]{index}, IndicesOptions.lenientExpandOpen(), listener);
+ try {
+ listener.latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ if (listener.error != null) {
+ throw ExceptionsHelper.convertToElastic(listener.error);
+ }
+ return listener.result;
+ }
+
+
+ /**
+ * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)}
+ */
+ public static ShardsSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, ShardId shardId) {
+ SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
+ LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener();
+ service.attemptSyncedFlush(shardId, listener);
+ try {
+ listener.latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ if (listener.error != null) {
+ throw ExceptionsHelper.convertToElastic(listener.error);
+ }
+ return listener.result;
+ }
+
+ public static final class LatchedListener<T> implements ActionListener<T> {
+ public volatile T result;
+ public volatile Throwable error;
+ public final CountDownLatch latch = new CountDownLatch(1);
+
+ @Override
+ public void onResponse(T result) {
+ this.result = result;
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ error = e;
+ latch.countDown();
+ }
+ }
+
+ /**
+ * Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)}
+ */
+ public static Map<String, Engine.CommitId> sendPreSyncRequests(SyncedFlushService service, List<ShardRouting> activeShards, ClusterState state, ShardId shardId) {
+ LatchedListener<Map<String, Engine.CommitId>> listener = new LatchedListener<>();
+ service.sendPreSyncRequests(activeShards, state, shardId, listener);
+ try {
+ listener.latch.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ if (listener.error != null) {
+ throw ExceptionsHelper.convertToElastic(listener.error);
+ }
+ return listener.result;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java b/core/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
new file mode 100644
index 0000000000..3e73a52fb4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.leaks;
+
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+@ClusterScope(scope= Scope.TEST, numDataNodes =1)
+public class IndicesLeaksTests extends ElasticsearchIntegrationTest {
+
+
+ @SuppressWarnings({"ConstantConditions", "unchecked"})
+ @Test
+ @BadApple(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/3232")
+ public void testIndexShardLifecycleLeak() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
+ .execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+
+ IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class);
+ IndexService indexService = indicesService.indexServiceSafe("test");
+ Injector indexInjector = indexService.injector();
+ IndexShard shard = indexService.shardSafe(0);
+ Injector shardInjector = indexService.shardInjectorSafe(0);
+
+ performCommonOperations();
+
+ List<WeakReference> indexReferences = new ArrayList<>();
+ List<WeakReference> shardReferences = new ArrayList<>();
+
+ // TODO if we could iterate over the already created classes on the injector, we can just add them here to the list
+ // for now, we simple add some classes that make sense
+
+ // add index references
+ indexReferences.add(new WeakReference(indexService));
+ indexReferences.add(new WeakReference(indexInjector));
+ indexReferences.add(new WeakReference(indexService.mapperService()));
+ for (DocumentMapper documentMapper : indexService.mapperService().docMappers(true)) {
+ indexReferences.add(new WeakReference(documentMapper));
+ }
+ indexReferences.add(new WeakReference(indexService.aliasesService()));
+ indexReferences.add(new WeakReference(indexService.analysisService()));
+ indexReferences.add(new WeakReference(indexService.fieldData()));
+ indexReferences.add(new WeakReference(indexService.queryParserService()));
+
+
+ // add shard references
+ shardReferences.add(new WeakReference(shard));
+ shardReferences.add(new WeakReference(shardInjector));
+
+ indexService = null;
+ indexInjector = null;
+ shard = null;
+ shardInjector = null;
+
+ cluster().wipeIndices("test");
+
+ for (int i = 0; i < 100; i++) {
+ System.gc();
+ int indexNotCleared = 0;
+ for (WeakReference indexReference : indexReferences) {
+ if (indexReference.get() != null) {
+ indexNotCleared++;
+ }
+ }
+ int shardNotCleared = 0;
+ for (WeakReference shardReference : shardReferences) {
+ if (shardReference.get() != null) {
+ shardNotCleared++;
+ }
+ }
+ logger.info("round {}, indices {}/{}, shards {}/{}", i, indexNotCleared, indexReferences.size(), shardNotCleared, shardReferences.size());
+ if (indexNotCleared == 0 && shardNotCleared == 0) {
+ break;
+ }
+ }
+
+ //System.out.println("sleeping");Thread.sleep(1000000);
+
+ for (WeakReference indexReference : indexReferences) {
+ assertThat("dangling index reference: " + indexReference.get(), indexReference.get(), nullValue());
+ }
+
+ for (WeakReference shardReference : shardReferences) {
+ assertThat("dangling shard reference: " + shardReference.get(), shardReference.get(), nullValue());
+ }
+ }
+
+ private void performCommonOperations() {
+ client().prepareIndex("test", "type", "1").setSource("field1", "value", "field2", 2, "field3", 3.0f).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("field1:value")).execute().actionGet();
+ client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value")).execute().actionGet();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
new file mode 100644
index 0000000000..28bcde323d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.emptyIterable;
+
+@ElasticsearchIntegrationTest.ClusterScope(randomDynamicTemplates = false) // this test takes a long time to delete the idx if all fields are eager loading
+public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest {
+
+ private final String mappingType = "test-mapping";
+
+ @Test // see #3544
+ public void testConcurrentDynamicMapping() throws Exception {
+ final String fieldName = "field";
+ final String mapping = "{ \"" + mappingType + "\": {" +
+ "\"dynamic_templates\": ["
+ + "{ \"" + fieldName + "\": {" + "\"path_match\": \"*\"," + "\"mapping\": {" + "\"type\": \"string\"," + "\"store\": \"yes\","
+ + "\"index\": \"analyzed\", \"analyzer\": \"whitespace\" } } } ] } }";
+ // The 'fieldNames' array is used to help with retrieval of index terms
+ // after testing
+
+ int iters = scaledRandomIntBetween(5, 15);
+ for (int i = 0; i < iters; i++) {
+ cluster().wipeIndices("test");
+ assertAcked(prepareCreate("test")
+ .addMapping(mappingType, mapping));
+ ensureYellow();
+ int numDocs = scaledRandomIntBetween(10, 100);
+ final CountDownLatch latch = new CountDownLatch(numDocs);
+ final List<Throwable> throwable = new CopyOnWriteArrayList<>();
+ int currentID = 0;
+ for (int j = 0; j < numDocs; j++) {
+ Map<String, Object> source = new HashMap<>();
+ source.put(fieldName, "test-user");
+ client().prepareIndex("test", mappingType, Integer.toString(currentID++)).setSource(source).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ throwable.add(e);
+ latch.countDown();
+ }
+ });
+ }
+ latch.await();
+ assertThat(throwable, emptyIterable());
+ refresh();
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test-user")).get(), numDocs);
+ assertHitCount(client().prepareSearch("test").setQuery(QueryBuilders.matchQuery(fieldName, "test user")).get(), 0);
+
+ }
+ }
+
+ @Test
+ public void testDynamicMappingIntroductionPropagatesToAll() throws Exception {
+ int numDocs = randomIntBetween(100, 1000);
+ int numberOfFields = scaledRandomIntBetween(1, 50);
+ Set<Integer> fieldsIdx = Sets.newHashSet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+
+ createIndex("idx");
+ ensureGreen("idx");
+ for (int i = 0; i < numDocs; ++i) {
+ int fieldIdx = i % numberOfFields;
+ fieldsIdx.add(fieldIdx);
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("str_value_" + fieldIdx, "s" + i)
+ .field("l_value_" + fieldIdx, i)
+ .field("d_value_" + fieldIdx, (double)i + 0.01)
+ .endObject());
+ }
+ indexRandom(false, builders);
+ for (Integer fieldIdx : fieldsIdx) {
+ waitForConcreteMappingsOnAll("idx", "type", "str_value_" + fieldIdx, "l_value_" + fieldIdx, "d_value_" + fieldIdx);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java b/core/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
new file mode 100644
index 0000000000..2b7e7a0e77
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/DedicatedMasterGetFieldMappingTests.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.common.settings.Settings;
+import org.junit.Before;
+
+import static org.apache.lucene.util.LuceneTestCase.Slow;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+
+/**
+ */
+@Slow
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class DedicatedMasterGetFieldMappingTests extends SimpleGetFieldMappingsTests {
+
+ @Before
+ public void before1() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("node.data", false)
+ .build();
+ internalCluster().startNodesAsync(settings, Settings.EMPTY).get();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
new file mode 100644
index 0000000000..119157bcfc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest {
+
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ ensureYellow();
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().get();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+
+ assertThat(response.fieldMappings("index", "type", "field"), Matchers.nullValue());
+ }
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("obj").startObject("properties").startObject("subfield").field("type", "string").field("index", "not_analyzed").endObject().endObject().endObject()
+ .endObject().endObject().endObject();
+ }
+
+ public void simpleGetFieldMappings() throws Exception {
+
+ assertAcked(prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB")));
+ assertAcked(client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB")));
+
+ ensureYellow();
+
+ // Get mappings by full name
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.mappings().get("indexa"), not(hasKey("typeB")));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.mappings(), not(hasKey("indexb")));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // Get mappings by name
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setTypes("typeA").setFields("field1", "obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").fullName(), equalTo("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1").sourceAsMap(), hasKey("field1"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple indices
+ response = client().admin().indices().prepareGetFieldMappings().setTypes("typeA").setFields("obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue());
+
+ // get mappings by name across multiple types
+ response = client().admin().indices().prepareGetFieldMappings("indexa").setFields("obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ // get mappings by name across multiple types & indices
+ response = client().admin().indices().prepareGetFieldMappings().setFields("obj.subfield").get();
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeA", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexa", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeA", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+ assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").fullName(), equalTo("obj.subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "obj.subfield").sourceAsMap(), hasKey("subfield"));
+ assertThat(response.fieldMappings("indexb", "typeB", "field1"), nullValue());
+
+ }
+
+ @SuppressWarnings("unchecked")
+ public void simpleGetFieldMappingsWithDefaults() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", getMappingForType("type")));
+
+ client().prepareIndex("test", "type", "1").setSource("num", 1).get();
+ ensureYellow();
+ waitForConcreteMappingsOnAll("test", "type", "num"); // for num, we need to wait...
+
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings().setFields("num", "field1", "obj.subfield").includeDefaults(true).get();
+
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "num").sourceAsMap().get("num"), hasEntry("type", (Object) "long"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("index", (Object) "analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "field1").sourceAsMap().get("field1"), hasEntry("type", (Object) "string"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("index", (Object) "not_analyzed"));
+ assertThat((Map<String, Object>) response.fieldMappings("test", "type", "obj.subfield").sourceAsMap().get("subfield"), hasEntry("type", (Object) "string"));
+
+
+ }
+
+ //fix #6552
+ public void simpleGetFieldMappingsWithPretty() throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type", getMappingForType("type")));
+ Map<String, String> params = Maps.newHashMap();
+ params.put("pretty", "true");
+ ensureYellow();
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("index").setTypes("type").setFields("field1", "obj.subfield").get();
+ XContentBuilder responseBuilder = XContentFactory.jsonBuilder().prettyPrint();
+ responseBuilder.startObject();
+ response.toXContent(responseBuilder, new ToXContent.MapParams(params));
+ responseBuilder.endObject();
+ String responseStrings = responseBuilder.string();
+
+
+ XContentBuilder prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint();
+ prettyJsonBuilder.copyCurrentStructure(XContentFactory.xContent(responseStrings).createParser(responseStrings));
+ assertThat(responseStrings, equalTo(prettyJsonBuilder.string()));
+
+ params.put("pretty", "false");
+
+ response = client().admin().indices().prepareGetFieldMappings("index").setTypes("type").setFields("field1", "obj.subfield").get();
+ responseBuilder = XContentFactory.jsonBuilder().prettyPrint().lfAtEnd();
+ responseBuilder.startObject();
+ response.toXContent(responseBuilder, new ToXContent.MapParams(params));
+ responseBuilder.endObject();
+ responseStrings = responseBuilder.string();
+
+ prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint();
+ prettyJsonBuilder.copyCurrentStructure(XContentFactory.xContent(responseStrings).createParser(responseStrings));
+ assertThat(responseStrings, not(equalTo(prettyJsonBuilder.string())));
+
+ }
+
+ @Test
+ public void testGetFieldMappingsWithBlocks() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB")));
+ ensureYellow();
+
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test", block);
+ GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("typeA").setFields("field1", "obj.subfield").get();
+ assertThat(response.fieldMappings("test", "typeA", "field1").fullName(), equalTo("field1"));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+
+ try {
+ enableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ } finally {
+ disableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
new file mode 100644
index 0000000000..4757aea860
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+@ClusterScope(randomDynamicTemplates = false)
+public class SimpleGetMappingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void getMappingsWhereThereAreNone() {
+ createIndex("index");
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().containsKey("index"), equalTo(true));
+ assertThat(response.mappings().get("index").size(), equalTo(0));
+ }
+
+
+ private XContentBuilder getMappingForType(String type) throws IOException {
+ return jsonBuilder().startObject().startObject(type).startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .endObject().endObject().endObject();
+ }
+
+
+ @Test
+ public void simpleGetMappings() throws Exception {
+ client().admin().indices().prepareCreate("indexa")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+ client().admin().indices().prepareCreate("indexb")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .addMapping("Atype", getMappingForType("Atype"))
+ .addMapping("Btype", getMappingForType("Btype"))
+ .execute().actionGet();
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ // Get all mappings
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings, via wildcard support
+ response = client().admin().indices().prepareGetMappings("*").setTypes("*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(4));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all typeA mappings in all indices
+ response = client().admin().indices().prepareGetMappings("*").setTypes("typeA").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("typeA"), notNullValue());
+
+ // Get all mappings in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(4));
+ assertThat(response.mappings().get("indexa").get("typeA"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("typeB"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with A* in indexa
+ response = client().admin().indices().prepareGetMappings("indexa").setTypes("A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+
+ // Get all mappings beginning with B* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(1));
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(1));
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+
+ // Get all mappings beginning with B* and A* in all indices
+ response = client().admin().indices().prepareGetMappings().setTypes("B*", "A*").execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").size(), equalTo(2));
+ assertThat(response.mappings().get("indexa").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexa").get("Btype"), notNullValue());
+ assertThat(response.mappings().get("indexb").size(), equalTo(2));
+ assertThat(response.mappings().get("indexb").get("Atype"), notNullValue());
+ assertThat(response.mappings().get("indexb").get("Btype"), notNullValue());
+ }
+
+ @Test
+ public void testGetMappingsWithBlocks() throws IOException {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("typeA", getMappingForType("typeA"))
+ .addMapping("typeB", getMappingForType("typeB"))
+ .execute().actionGet();
+ ensureGreen();
+
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test", block);
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet();
+ assertThat(response.mappings().size(), equalTo(1));
+ assertThat(response.mappings().get("test").size(), equalTo(2));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+
+ try {
+ enableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareGetMappings(), INDEX_METADATA_BLOCK);
+ } finally {
+ disableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java
new file mode 100644
index 0000000000..36026e95b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.mapping;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(randomDynamicTemplates = false)
+public class UpdateMappingIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void dynamicUpdates() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ int recCount = randomIntBetween(200, 600);
+ int numberOfTypes = randomIntBetween(1, 5);
+ List<IndexRequestBuilder> indexRequests = Lists.newArrayList();
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
+ }
+ indexRandom(true, indexRequests);
+
+ logger.info("checking all the documents are there");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ CountResponse response = client().prepareCount("test").execute().actionGet();
+ assertThat(response.getCount(), equalTo((long) recCount));
+
+ logger.info("checking all the fields are in the mappings");
+
+ for (int rec = 0; rec < recCount; rec++) {
+ String type = "type" + (rec % numberOfTypes);
+ String fieldName = "field_" + type + "_" + rec;
+ waitForConcreteMappingsOnAll("test", type, fieldName);
+ }
+ }
+
+ @Test
+ public void updateMappingWithoutType() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"string\"},\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test
+ public void updateMappingWithoutTypeMultiObjects() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ ).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+ .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
+ assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
+ equalTo("{\"doc\":{\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithConflicts() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
+ .execute().actionGet();
+
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ @Test(expected = MergeMappingException.class)
+ public void updateMappingWithNormsConflicts() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": false }}}}}")
+ .execute().actionGet();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\", \"norms\": { \"enabled\": true }}}}}")
+ .execute().actionGet();
+ }
+
+ /*
+ Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
+ */
+ @Test
+ public void updateMappingNoChanges() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 0)
+ ).addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("type")
+ .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
+ .execute().actionGet();
+
+ //no changes, we return
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void updateDefaultMappingSettings() throws Exception {
+
+ logger.info("Creating index with _default_ mappings");
+ client().admin().indices().prepareCreate("test").addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .field("date_detection", false)
+ .endObject().endObject()
+ ).get();
+
+ GetMappingsResponse getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ Map<String, Object> defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, hasKey("date_detection"));
+
+
+ logger.info("Emptying _default_ mappings");
+ // now remove it
+ PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done Emptying _default_ mappings");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ assertThat(defaultMapping, not(hasKey("date_detection")));
+
+ // now test you can change stuff that are normally unchangeable
+ logger.info("Creating _default_ mappings with an analyzed field");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+
+
+ logger.info("Changing _default_ mappings field from analyzed to non-analyzed");
+ putResponse = client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "string").field("index", "not_analyzed").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+ assertThat(putResponse.isAcknowledged(), equalTo(true));
+ logger.info("Done changing _default_ mappings field from analyzed to non-analyzed");
+
+ getResponse = client().admin().indices().prepareGetMappings("test").addTypes(MapperService.DEFAULT_MAPPING).get();
+ defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap();
+ Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f");
+ assertThat(fieldSettings, hasEntry("index", (Object) "not_analyzed"));
+
+ // but we still validate the _default_ type
+ logger.info("Confirming _default_ mappings validation");
+ assertThrows(client().admin().indices().preparePutMapping("test").setType(MapperService.DEFAULT_MAPPING).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .startObject("properties").startObject("f").field("type", "DOESNT_EXIST").endObject().endObject()
+ .endObject().endObject()
+ ), MapperParsingException.class);
+
+ }
+
+ @Test
+ public void updateMappingConcurrently() throws Throwable {
+ createIndex("test1", "test2");
+
+ // This is important. The test assumes all nodes are aware of all indices. Due to initializing shard throttling
+ // not all shards are allocated with the initial create index. Wait for it..
+ ensureYellow();
+
+ final Throwable[] threadException = new Throwable[1];
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread[] threads = new Thread[3];
+ final CyclicBarrier barrier = new CyclicBarrier(threads.length);
+ final ArrayList<Client> clientArray = new ArrayList<>();
+ for (Client c : clients()) {
+ clientArray.add(c);
+ }
+
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread(new Runnable() {
+ @SuppressWarnings("unchecked")
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+
+ for (int i = 0; i < 100; i++) {
+ if (stop.get()) {
+ return;
+ }
+
+ Client client1 = clientArray.get(i % clientArray.size());
+ Client client2 = clientArray.get((i + 1) % clientArray.size());
+ String indexName = i % 2 == 0 ? "test2" : "test1";
+ String typeName = "type" + (i % 10);
+ String fieldName = Thread.currentThread().getName() + "_" + i;
+
+ PutMappingResponse response = client1.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(
+ JsonXContent.contentBuilder().startObject().startObject(typeName)
+ .startObject("properties").startObject(fieldName).field("type", "string").endObject().endObject()
+ .endObject().endObject()
+ ).get();
+
+ assertThat(response.isAcknowledged(), equalTo(true));
+ GetMappingsResponse getMappingResponse = client2.admin().indices().prepareGetMappings(indexName).get();
+ ImmutableOpenMap<String, MappingMetaData> mappings = getMappingResponse.getMappings().get(indexName);
+ assertThat(mappings.containsKey(typeName), equalTo(true));
+ assertThat(((Map<String, Object>) mappings.get(typeName).getSourceAsMap().get("properties")).keySet(), Matchers.hasItem(fieldName));
+ }
+ } catch (Throwable t) {
+ threadException[0] = t;
+ stop.set(true);
+ }
+ }
+ });
+
+ threads[j].setName("t_" + j);
+ threads[j].start();
+ }
+
+ for (Thread t : threads) t.join();
+
+ if (threadException[0] != null) {
+ throw threadException[0];
+ }
+
+ }
+
+ @Test
+ public void testPutMappingsWithBlocks() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", block);
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}"));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+
+ for (String block : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", block);
+ assertBlocked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}"));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java
new file mode 100644
index 0000000000..6914bcb95d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/memory/IndexingMemoryControllerTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class IndexingMemoryControllerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testIndexBufferSizeUpdateAfterCreationRemoval() throws InterruptedException {
+
+ createNode(Settings.EMPTY);
+
+ prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
+
+ ensureGreen();
+
+ final IndexShard shard1 = internalCluster().getInstance(IndicesService.class).indexService("test1").shard(0);
+
+ prepareCreate("test2").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
+
+ ensureGreen();
+
+ final IndexShard shard2 = internalCluster().getInstance(IndicesService.class).indexService("test2").shard(0);
+ final long expected1ShardSize = internalCluster().getInstance(IndexingMemoryController.class).indexingBufferSize().bytes();
+ final long expected2ShardsSize = expected1ShardSize / 2;
+
+ boolean success = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return shard1.engine().config().getIndexingBufferSize().bytes() <= expected2ShardsSize &&
+ shard2.engine().config().getIndexingBufferSize().bytes() <= expected2ShardsSize;
+ }
+ });
+
+ if (!success) {
+ fail("failed to update shard indexing buffer size. expected [" + expected2ShardsSize + "] shard1 [" +
+ shard1.engine().config().getIndexingBufferSize().bytes() + "] shard2 [" +
+ shard2.engine().config().getIndexingBufferSize().bytes() + "]"
+ );
+ }
+
+ client().admin().indices().prepareDelete("test2").get();
+ success = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return shard1.engine().config().getIndexingBufferSize().bytes() >= expected1ShardSize;
+ }
+ });
+
+ if (!success) {
+ fail("failed to update shard indexing buffer size after deleting shards. expected [" + expected1ShardSize + "] got [" +
+ shard1.engine().config().getIndexingBufferSize().bytes() + "]"
+ );
+ }
+
+ }
+
+ @Test
+ public void testIndexBufferSizeUpdateInactiveShard() throws InterruptedException {
+
+ createNode(Settings.builder().put("indices.memory.shard_inactive_time", "100ms").build());
+
+ prepareCreate("test1").setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).get();
+
+ ensureGreen();
+
+ final IndexShard shard1 = internalCluster().getInstance(IndicesService.class).indexService("test1").shard(0);
+ boolean success = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return shard1.engine().config().getIndexingBufferSize().bytes() == EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
+ }
+ });
+ if (!success) {
+ fail("failed to update shard indexing buffer size due to inactive state. expected [" + EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
+ shard1.engine().config().getIndexingBufferSize().bytes() + "]"
+ );
+ }
+
+ index("test1", "type", "1", "f", 1);
+
+ success = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return shard1.engine().config().getIndexingBufferSize().bytes() > EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
+ }
+ });
+ if (!success) {
+ fail("failed to update shard indexing buffer size due to inactive state. expected something larger then [" + EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
+ shard1.engine().config().getIndexingBufferSize().bytes() + "]"
+ );
+ }
+
+ flush(); // clean translogs
+
+ success = awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return shard1.engine().config().getIndexingBufferSize().bytes() == EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER.bytes();
+ }
+ });
+ if (!success) {
+ fail("failed to update shard indexing buffer size due to inactive state. expected [" + EngineConfig.INACTIVE_SHARD_INDEXING_BUFFER + "] got [" +
+ shard1.engine().config().getIndexingBufferSize().bytes() + "]"
+ );
+ }
+ }
+
+ private void createNode(Settings settings) {
+ internalCluster().startNode(Settings.builder()
+ .put(ClusterName.SETTING, "IndexingMemoryControllerTests")
+ .put("node.name", "IndexingMemoryControllerTests")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(EsExecutors.PROCESSORS, 1) // limit the number of threads created
+ .put("http.enabled", false)
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // make sure we get what we set :)
+ .put("indices.memory.interval", "100ms")
+ .put(settings)
+ );
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopTests.java
new file mode 100644
index 0000000000..23ff0709c6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory.breaker;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+/** Tests for the noop breakers, which are non-dynamic settings */
+@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes=0)
+public class CircuitBreakerNoopTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop")
+ // This is set low, because if the "noop" is not a noop, it will break
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop")
+ // This is set low, because if the "noop" is not a noop, it will break
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
+ .build();
+ }
+
+ @Test
+ public void testNoopRequestBreaker() throws Exception {
+ assertAcked(prepareCreate("cb-test", 1, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ Client client = client();
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", id));
+ }
+ indexRandom(true, reqs);
+
+ // A cardinality aggregation uses BigArrays and thus the REQUEST breaker
+ client.prepareSearch("cb-test").setQuery(matchAllQuery()).addAggregation(cardinality("card").field("test")).get();
+ // no exception because the breaker is a noop
+ }
+
+ @Test
+ public void testNoopFielddataBreaker() throws Exception {
+ assertAcked(prepareCreate("cb-test", 1, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ Client client = client();
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", id));
+ }
+ indexRandom(true, reqs);
+
+ // Sorting using fielddata and thus the FIELDDATA breaker
+ client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
+ // no exception because the breaker is a noop
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java
new file mode 100644
index 0000000000..901f9d74bb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java
@@ -0,0 +1,350 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory.breaker;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.indices.breaker.BreakerSettings;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.CircuitBreakerStats;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ * Integration tests for HierarchyCircuitBreakerService
+ */
+@ClusterScope(scope = TEST, randomDynamicTemplates = false)
+public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest {
+
+ /** Reset all breaker settings back to their defaults */
+ private void reset() {
+ logger.info("--> resetting breaker settings");
+ Settings resetSettings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_BREAKER_LIMIT)
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_OVERHEAD_CONSTANT)
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.DEFAULT_REQUEST_BREAKER_LIMIT)
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
+ }
+
+ @Before
+ public void setup() {
+ reset();
+ }
+
+ @After
+ public void teardown() {
+ reset();
+ }
+
+ private String randomRidiculouslySmallLimit() {
+ return randomFrom(Arrays.asList("100b", "100"));
+ }
+
+ /** Returns true if any of the nodes used a noop breaker */
+ private boolean noopBreakerUsed() {
+ NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().setBreaker(true).get();
+ for (NodeStats nodeStats : stats) {
+ if (nodeStats.getBreaker().getStats(CircuitBreaker.REQUEST).getLimit() == 0) {
+ return true;
+ }
+ if (nodeStats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getLimit() == 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/8710")
+ public void testMemoryBreaker() throws Exception {
+ if (noopBreakerUsed()) {
+ logger.info("--> noop breakers used, skipping test");
+ return;
+ }
+ assertAcked(prepareCreate("cb-test", 1, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ final Client client = client();
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", "value" + id));
+ }
+ indexRandom(true, false, true, reqs);
+
+ // execute a search that loads field data (sorting on the "test" field)
+ SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC);
+ searchRequest.get();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ clearFieldData();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, randomRidiculouslySmallLimit())
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR,
+ containsString("Data too large, data for [test] would be larger than limit of [100/100b]"));
+
+ NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setBreaker(true).get();
+ int breaks = 0;
+ for (NodeStats stat : stats.getNodes()) {
+ CircuitBreakerStats breakerStats = stat.getBreaker().getStats(CircuitBreaker.FIELDDATA);
+ breaks += breakerStats.getTrippedCount();
+ }
+ assertThat(breaks, greaterThanOrEqualTo(1));
+ }
+
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270")
+ public void testRamAccountingTermsEnum() throws Exception {
+ if (noopBreakerUsed()) {
+ logger.info("--> noop breakers used, skipping test");
+ return;
+ }
+ final Client client = client();
+
+ // Create an index where the mappings have a field data filter
+ assertAcked(prepareCreate("ramtest").setSource("{\"mappings\": {\"type\": {\"properties\": {\"test\": " +
+ "{\"type\": \"string\",\"fielddata\": {\"filter\": {\"regex\": {\"pattern\": \"^value.*\"}}}}}}}}"));
+
+ ensureGreen(TimeValue.timeValueSeconds(10), "ramtest");
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("ramtest", "type", Long.toString(id)).setSource("test", "value" + id));
+ }
+ indexRandom(true, reqs);
+
+ // execute a search that loads field data (sorting on the "test" field)
+ client.prepareSearch("ramtest").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
+
+ // clear field data cache (thus setting the loaded field data back to 0)
+ clearFieldData();
+
+ // Update circuit breaker settings
+ Settings settings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, randomRidiculouslySmallLimit())
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+ .build();
+ assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ // execute a search that loads field data (sorting on the "test" field)
+ // again, this time it should trip the breaker
+ assertFailures(client.prepareSearch("ramtest").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC),
+ RestStatus.INTERNAL_SERVER_ERROR,
+ containsString("Data too large, data for [test] would be larger than limit of [100/100b]"));
+
+ NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().setBreaker(true).get();
+ int breaks = 0;
+ for (NodeStats stat : stats.getNodes()) {
+ CircuitBreakerStats breakerStats = stat.getBreaker().getStats(CircuitBreaker.FIELDDATA);
+ breaks += breakerStats.getTrippedCount();
+ }
+ assertThat(breaks, greaterThanOrEqualTo(1));
+ }
+
+ /**
+ * Test that a breaker correctly redistributes to a different breaker, in
+ * this case, the fielddata breaker borrows space from the request breaker
+ */
+ @Test
+ public void testParentChecking() throws Exception {
+ if (noopBreakerUsed()) {
+ logger.info("--> noop breakers used, skipping test");
+ return;
+ }
+ assertAcked(prepareCreate("cb-test", 1, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ Client client = client();
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", "value" + id));
+ }
+ indexRandom(true, reqs);
+
+ // We need the request limit beforehand, just from a single node because the limit should always be the same
+ long beforeReqLimit = client.admin().cluster().prepareNodesStats().setBreaker(true).get()
+ .getNodes()[0].getBreaker().getStats(CircuitBreaker.REQUEST).getLimit();
+
+ Settings resetSettings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0)
+ .build();
+ assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
+
+ // Perform a search to load field data for the "test" field
+ try {
+ client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
+ fail("should have thrown an exception");
+ } catch (Exception e) {
+ String errMsg = "[fielddata] Data too large, data for [test] would be larger than limit of [10/10b]";
+ assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException",
+ e.toString().contains(errMsg), equalTo(true));
+ }
+
+ assertFailures(client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC),
+ RestStatus.INTERNAL_SERVER_ERROR,
+ containsString("Data too large, data for [test] would be larger than limit of [10/10b]"));
+
+ // Adjust settings so the parent breaker will fail, but the fielddata breaker doesn't
+ resetSettings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "15b")
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "90%")
+ .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0)
+ .build();
+ client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
+
+ // Perform a search to load field data for the "test" field
+ try {
+ client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
+ fail("should have thrown an exception");
+ } catch (Exception e) {
+ String errMsg = "[parent] Data too large, data for [test] would be larger than limit of [15/15b]";
+            assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException",
+ e.toString().contains(errMsg), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRequestBreaker() throws Exception {
+ if (noopBreakerUsed()) {
+ logger.info("--> noop breakers used, skipping test");
+ return;
+ }
+ assertAcked(prepareCreate("cb-test", 1, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ Client client = client();
+
+ // Make request breaker limited to a small amount
+ Settings resetSettings = settingsBuilder()
+ .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
+ .build();
+ assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
+
+ // index some different terms so we have some field data for loading
+ int docCount = scaledRandomIntBetween(300, 1000);
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (long id = 0; id < docCount; id++) {
+ reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", id));
+ }
+ indexRandom(true, reqs);
+
+ // A cardinality aggregation uses BigArrays and thus the REQUEST breaker
+ try {
+ client.prepareSearch("cb-test").setQuery(matchAllQuery()).addAggregation(cardinality("card").field("test")).get();
+ fail("aggregation should have tripped the breaker");
+ } catch (Exception e) {
+ String errMsg = "CircuitBreakingException[[request] Data too large, data for [<reused_arrays>] would be larger than limit of [10/10b]]";
+ assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException",
+ e.toString().contains(errMsg), equalTo(true));
+ }
+ }
+
+ /** Issues a cache clear and waits 30 seconds for the field data breaker to be cleared */
+ public void clearFieldData() throws Exception {
+ client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ NodesStatsResponse resp = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).get(new TimeValue(15, TimeUnit.SECONDS));
+ for (NodeStats nStats : resp.getNodes()) {
+ assertThat("fielddata breaker never reset back to 0",
+ nStats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(),
+ equalTo(0L));
+ }
+ }
+ }, 30, TimeUnit.SECONDS);
+ }
+
+ @Test
+ public void testCustomCircuitBreakerRegistration() throws Exception {
+ Iterable<CircuitBreakerService> serviceIter = internalCluster().getInstances(CircuitBreakerService.class);
+
+ final String breakerName = "customBreaker";
+ BreakerSettings breakerSettings = new BreakerSettings(breakerName, 8, 1.03);
+ CircuitBreaker breaker = null;
+
+ for (CircuitBreakerService s : serviceIter) {
+ s.registerBreaker(breakerSettings);
+ breaker = s.getBreaker(breakerSettings.getName());
+ }
+
+ if (breaker != null) {
+ try {
+ breaker.addEstimateBytesAndMaybeBreak(16, "test");
+ } catch (CircuitBreakingException e) {
+ // ignore, we forced a circuit break
+ }
+ }
+
+ NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().clear().setBreaker(true).get();
+ int breaks = 0;
+ for (NodeStats stat : stats.getNodes()) {
+ CircuitBreakerStats breakerStats = stat.getBreaker().getStats(breakerName);
+ breaks += breakerStats.getTrippedCount();
+ }
+ assertThat(breaks, greaterThanOrEqualTo(1));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java
new file mode 100644
index 0000000000..c2296c363f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory.breaker;
+
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.breaker.BreakerSettings;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Unit tests for the circuit breaker
+ */
+public class CircuitBreakerUnitTests extends ElasticsearchTestCase {
+
+ public static long pctBytes(String percentString) {
+ return Settings.EMPTY.getAsMemory("", percentString).bytes();
+ }
+
+ @Test
+ public void testBreakerSettingsValidationWithValidSettings() {
+ // parent: {:limit 70}, fd: {:limit 50}, request: {:limit 20}
+ BreakerSettings fd = new BreakerSettings(CircuitBreaker.FIELDDATA, pctBytes("50%"), 1.0);
+ BreakerSettings request = new BreakerSettings(CircuitBreaker.REQUEST, pctBytes("20%"), 1.0);
+ HierarchyCircuitBreakerService.validateSettings(new BreakerSettings[]{fd, request});
+
+ // parent: {:limit 70}, fd: {:limit 40}, request: {:limit 30}
+ fd = new BreakerSettings(CircuitBreaker.FIELDDATA, pctBytes("40%"), 1.0);
+ request = new BreakerSettings(CircuitBreaker.REQUEST, pctBytes("30%"), 1.0);
+ HierarchyCircuitBreakerService.validateSettings(new BreakerSettings[]{fd, request});
+ }
+
+ @Test
+ public void testBreakerSettingsValidationNegativeOverhead() {
+ // parent: {:limit 70}, fd: {:limit 50}, request: {:limit 20}
+ BreakerSettings fd = new BreakerSettings(CircuitBreaker.FIELDDATA, pctBytes("50%"), -0.1);
+ BreakerSettings request = new BreakerSettings(CircuitBreaker.REQUEST, pctBytes("20%"), 1.0);
+ try {
+ HierarchyCircuitBreakerService.validateSettings(new BreakerSettings[]{fd, request});
+ fail("settings are invalid but validate settings did not throw an exception");
+ } catch (Exception e) {
+ assertThat("Incorrect message: " + e.getMessage(),
+ e.getMessage().contains("must be non-negative"), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testRegisterCustomBreaker() throws Exception {
+ CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY));
+ String customName = "custom";
+ BreakerSettings settings = new BreakerSettings(customName, 20, 1.0);
+ service.registerBreaker(settings);
+
+ CircuitBreaker breaker = service.getBreaker(customName);
+ assertThat(breaker, notNullValue());
+ assertThat(breaker, instanceOf(CircuitBreaker.class));
+ assertThat(breaker.getName(), is(customName));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerTests.java
new file mode 100644
index 0000000000..c2da93b9ab
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerTests.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.memory.breaker;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.LeafReader;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.engine.MockEngineSupport;
+import org.elasticsearch.test.engine.ThrowingLeafReaderWrapper;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the circuit breaker while random exceptions are happening
+ */
+public class RandomExceptionCircuitBreakerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBreakerWithRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ for (NodeStats node : client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet().getNodes()) {
+ assertThat("Breaker is not set to 0", node.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ }
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("test-str")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .field("doc_values", randomBoolean())
+ .startObject("fielddata")
+ .field("format", randomBytesFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-str
+ .startObject("test-num")
+ // I don't use randomNumericType() here because I don't want "byte", and I want "float" and "double"
+ .field("type", randomFrom(Arrays.asList("float", "long", "double", "short", "integer")))
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject() // fielddata
+ .endObject() // test-num
+ .endObject() // properties
+ .endObject() // type
+ .endObject() // {}
+ .string();
+ final double topLevelRate;
+ final double lowLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0 / between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0 / between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0 / between(2, 10);
+ topLevelRate = 1.0 / between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
+
+ Settings.Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put(MockEngineSupport.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockEngineSupport.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ if (clusterHealthResponse.isTimedOut()) {
+ /* some seeds just won't let you create the index at all and we enter a ping-pong mode
+ * trying one node after another etc. that is ok but we need to make sure we don't wait
+ * forever when indexing documents so we set numDocs = 1 and expect all shards to fail
+ * when we search below.*/
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ } else {
+ numDocs = between(10, 100);
+ }
+ for (int i = 0; i < numDocs; i++) {
+ try {
+ client().prepareIndex("test", "type", "" + i)
+ .setTimeout(TimeValue.timeValueSeconds(1)).setSource("test-str", randomUnicodeOfLengthBetween(5, 25), "test-num", i).get();
+ } catch (ElasticsearchException ex) {
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
+ refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
+ refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = scaledRandomIntBetween(50, 150);
+ NodesStatsResponse resp = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : resp.getNodes()) {
+            assertThat("Breaker is not set to 0", stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ }
+
+ for (int i = 0; i < numSearches; i++) {
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery());
+ switch (randomIntBetween(0, 5)) {
+ case 5:
+ case 4:
+ case 3:
+ searchRequestBuilder.addSort("test-str", SortOrder.ASC);
+ // fall through - sometimes get both fields
+ case 2:
+ case 1:
+ default:
+ searchRequestBuilder.addSort("test-num", SortOrder.ASC);
+
+ }
+ boolean success = false;
+ try {
+ // Sort by the string and numeric fields, to load them into field data
+ searchRequestBuilder.get();
+ success = true;
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+
+ if (frequently()) {
+ // Now, clear the cache and check that the circuit breaker has been
+ // successfully set back to zero. If there is a bug in the circuit
+ // breaker adjustment code, it should show up here by the breaker
+ // estimate being either positive or negative.
+ ensureGreen("test"); // make sure all shards are there - there could be shards that are still starting up.
+ assertAllSuccessful(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).execute().actionGet());
+
+ // Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually
+ for (String node : internalCluster().getNodeNames()) {
+ final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesFieldDataCache.class, node);
+ // Clean up the cache, ensuring that entries' listeners have been called
+ fdCache.getCache().cleanUp();
+ }
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Breaker reset to 0 last search success: " + success + " mapping: " + mapping,
+ stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ }
+ }
+ }
+ }
+
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+ // TODO: Generalize this class and add it as a utility
+ public static class RandomExceptionDirectoryReaderWrapper extends MockEngineSupport.DirectoryReaderWrapper {
+ private final Settings settings;
+
+ static class ThrowingSubReaderWrapper extends SubReaderWrapper implements ThrowingLeafReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+                final long seed = settings.getAsLong(SETTING_INDEX_SEED, 0L);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public LeafReader wrap(LeafReader reader) {
+ return new ThrowingLeafReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingLeafReaderWrapper.Flags flag) throws IOException {
+ switch (flag) {
+ case Fields:
+ break;
+ case TermVectors:
+ break;
+ case Terms:
+ case TermsEnum:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
+ case Intersect:
+ break;
+ case Norms:
+ break;
+ case NumericDocValues:
+ break;
+ case BinaryDocValues:
+ break;
+ case SortedDocValues:
+ break;
+ case SortedSetDocValues:
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
+ @Override
+ public boolean wrapTerms(String field) {
+ return field.startsWith("test");
+ }
+ }
+
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) throws IOException {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java
new file mode 100644
index 0000000000..85de661e08
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java
@@ -0,0 +1,628 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.util.concurrent.ListenableFuture;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.index.recovery.RecoveryStats;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.RecoveryState.Stage;
+import org.elasticsearch.indices.recovery.RecoveryState.Type;
+import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockFSDirectoryService;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for index recovery status reporting (gateway, replica, relocation and snapshot recoveries) and recovery throttling.
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class IndexRecoveryTests extends ElasticsearchIntegrationTest {
+
+ private static final String INDEX_NAME = "test-idx-1";
+ private static final String INDEX_TYPE = "test-type-1";
+ private static final String REPO_NAME = "test-repo-1";
+ private static final String SNAP_NAME = "test-snap-1";
+
+ private static final int MIN_DOC_COUNT = 500;
+ private static final int MAX_DOC_COUNT = 1000;
+ private static final int SHARD_COUNT = 1;
+ private static final int REPLICA_COUNT = 0;
+
+
+ private void assertRecoveryStateWithoutStage(RecoveryState state, int shardId, Type type,
+ String sourceNode, String targetNode, boolean hasRestoreSource) {
+ assertThat(state.getShardId().getId(), equalTo(shardId));
+ assertThat(state.getType(), equalTo(type));
+ if (sourceNode == null) {
+ assertNull(state.getSourceNode());
+ } else {
+ assertNotNull(state.getSourceNode());
+ assertThat(state.getSourceNode().getName(), equalTo(sourceNode));
+ }
+ if (targetNode == null) {
+ assertNull(state.getTargetNode());
+ } else {
+ assertNotNull(state.getTargetNode());
+ assertThat(state.getTargetNode().getName(), equalTo(targetNode));
+ }
+ if (hasRestoreSource) {
+ assertNotNull(state.getRestoreSource());
+ } else {
+ assertNull(state.getRestoreSource());
+ }
+
+ }
+
+ private void assertRecoveryState(RecoveryState state, int shardId, Type type, Stage stage,
+ String sourceNode, String targetNode, boolean hasRestoreSource) {
+ assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
+ assertThat(state.getStage(), equalTo(stage));
+ }
+
+ private void assertOnGoingRecoveryState(RecoveryState state, int shardId, Type type,
+ String sourceNode, String targetNode, boolean hasRestoreSource) {
+ assertRecoveryStateWithoutStage(state, shardId, type, sourceNode, targetNode, hasRestoreSource);
+ assertThat(state.getStage(), not(equalTo(Stage.DONE)));
+ }
+
+ private void slowDownRecovery(ByteSizeValue shardSize) {
+ long chunkSize = shardSize.bytes() / 10;
+ assertTrue(client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder()
+ // throttle transfers to roughly one chunk per second
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES)
+ .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, chunkSize, ByteSizeUnit.BYTES)
+ )
+ .get().isAcknowledged());
+ }
+
+ private void restoreRecoverySpeed() {
+ assertTrue(client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder()
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb")
+ .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, "512kb")
+ )
+ .get().isAcknowledged());
+ }
+
+ @Test
+ public void gatewayRecoveryTest() throws Exception {
+ logger.info("--> start nodes");
+ String node = internalCluster().startNode();
+
+ createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
+
+ logger.info("--> restarting cluster");
+ internalCluster().fullRestart();
+ ensureGreen();
+
+ logger.info("--> request recoveries");
+ RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+ assertThat(response.shardResponses().size(), equalTo(SHARD_COUNT));
+ assertThat(response.shardResponses().get(INDEX_NAME).size(), equalTo(1));
+
+ List<ShardRecoveryResponse> shardResponses = response.shardResponses().get(INDEX_NAME);
+ assertThat(shardResponses.size(), equalTo(1));
+
+ ShardRecoveryResponse shardResponse = shardResponses.get(0);
+ RecoveryState state = shardResponse.recoveryState();
+
+ assertRecoveryState(state, 0, Type.GATEWAY, Stage.DONE, node, node, false);
+
+ validateIndexRecoveryState(state.getIndex());
+ }
+
+ @Test
+ public void gatewayRecoveryTestActiveOnly() throws Exception {
+ logger.info("--> start nodes");
+ internalCluster().startNode();
+
+ createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
+
+ logger.info("--> restarting cluster");
+ internalCluster().fullRestart();
+ ensureGreen();
+
+ logger.info("--> request recoveries");
+ RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).setActiveOnly(true).execute().actionGet();
+
+ List<ShardRecoveryResponse> shardResponses = response.shardResponses().get(INDEX_NAME);
+ assertThat(shardResponses.size(), equalTo(0)); // Should not expect any responses back
+ }
+
+ @Test
+ public void replicaRecoveryTest() throws Exception {
+ logger.info("--> start node A");
+ String nodeA = internalCluster().startNode();
+
+ logger.info("--> create index on node: {}", nodeA);
+ createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
+
+ logger.info("--> start node B");
+ String nodeB = internalCluster().startNode();
+ ensureGreen();
+
+ // force a shard recovery from nodeA to nodeB
+ logger.info("--> bump replica count");
+ client().admin().indices().prepareUpdateSettings(INDEX_NAME)
+ .setSettings(settingsBuilder().put("number_of_replicas", 1)).execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> request recoveries");
+ RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+
+ // we should now have two total shards, one primary and one replica
+ List<ShardRecoveryResponse> shardResponses = response.shardResponses().get(INDEX_NAME);
+ assertThat(shardResponses.size(), equalTo(2));
+
+ List<ShardRecoveryResponse> nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses);
+ assertThat(nodeAResponses.size(), equalTo(1));
+ List<ShardRecoveryResponse> nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses);
+ assertThat(nodeBResponses.size(), equalTo(1));
+
+ // validate node A recovery
+ ShardRecoveryResponse nodeAShardResponse = nodeAResponses.get(0);
+ assertRecoveryState(nodeAShardResponse.recoveryState(), 0, Type.GATEWAY, Stage.DONE, nodeA, nodeA, false);
+ validateIndexRecoveryState(nodeAShardResponse.recoveryState().getIndex());
+
+ // validate node B recovery
+ ShardRecoveryResponse nodeBShardResponse = nodeBResponses.get(0);
+ assertRecoveryState(nodeBShardResponse.recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeA, nodeB, false);
+ validateIndexRecoveryState(nodeBShardResponse.recoveryState().getIndex());
+ }
+
+ @Test
+ @TestLogging("indices.recovery:TRACE")
+ public void rerouteRecoveryTest() throws Exception {
+ logger.info("--> start node A");
+ final String nodeA = internalCluster().startNode();
+
+ logger.info("--> create index on node: {}", nodeA);
+ ByteSizeValue shardSize = createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT).getShards()[0].getStats().getStore().size();
+
+ logger.info("--> start node B");
+ final String nodeB = internalCluster().startNode();
+
+ ensureGreen();
+
+ logger.info("--> slowing down recoveries");
+ slowDownRecovery(shardSize);
+
+ logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId(INDEX_NAME, 0), nodeA, nodeB))
+ .execute().actionGet().getState();
+
+ logger.info("--> waiting for recovery to start both on source and target");
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA);
+ assertThat(indicesService.indexServiceSafe(INDEX_NAME).shardSafe(0).recoveryStats().currentAsSource(),
+ equalTo(1));
+ indicesService = internalCluster().getInstance(IndicesService.class, nodeB);
+ assertThat(indicesService.indexServiceSafe(INDEX_NAME).shardSafe(0).recoveryStats().currentAsTarget(),
+ equalTo(1));
+ }
+ });
+
+ logger.info("--> request recoveries");
+ RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+
+ List<ShardRecoveryResponse> shardResponses = response.shardResponses().get(INDEX_NAME);
+ List<ShardRecoveryResponse> nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses);
+ assertThat(nodeAResponses.size(), equalTo(1));
+ List<ShardRecoveryResponse> nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses);
+ assertThat(nodeBResponses.size(), equalTo(1));
+
+ assertRecoveryState(nodeAResponses.get(0).recoveryState(), 0, Type.GATEWAY, Stage.DONE, nodeA, nodeA, false);
+ validateIndexRecoveryState(nodeAResponses.get(0).recoveryState().getIndex());
+
+ assertOnGoingRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, nodeA, nodeB, false);
+ validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex());
+
+ logger.info("--> request node recovery stats");
+ NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
+ long nodeAThrottling = Long.MAX_VALUE;
+ long nodeBThrottling = Long.MAX_VALUE;
+ for (NodeStats nodeStats : statsResponse.getNodes()) {
+ final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
+ if (nodeStats.getNode().name().equals(nodeA)) {
+ assertThat("node A should have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(1));
+ assertThat("node A should not have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(0));
+ nodeAThrottling = recoveryStats.throttleTime().millis();
+ }
+ if (nodeStats.getNode().name().equals(nodeB)) {
+ assertThat("node B should not have ongoing recovery as source", recoveryStats.currentAsSource(), equalTo(0));
+ assertThat("node B should have ongoing recovery as target", recoveryStats.currentAsTarget(), equalTo(1));
+ nodeBThrottling = recoveryStats.throttleTime().millis();
+ }
+ }
+
+ logger.info("--> checking throttling increases");
+ final long finalNodeAThrottling = nodeAThrottling;
+ final long finalNodeBThrottling = nodeBThrottling;
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ NodesStatsResponse statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
+ assertThat(statsResponse.getNodes(), arrayWithSize(2));
+ for (NodeStats nodeStats : statsResponse.getNodes()) {
+ final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
+ if (nodeStats.getNode().name().equals(nodeA)) {
+ assertThat("node A throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeAThrottling));
+ }
+ if (nodeStats.getNode().name().equals(nodeB)) {
+ assertThat("node B throttling should increase", recoveryStats.throttleTime().millis(), greaterThan(finalNodeBThrottling));
+ }
+ }
+ }
+ });
+
+
+ logger.info("--> speeding up recoveries");
+ restoreRecoverySpeed();
+
+ // wait for the relocation to finish
+ ensureGreen();
+
+ response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+
+ shardResponses = response.shardResponses().get(INDEX_NAME);
+ assertThat(shardResponses.size(), equalTo(1));
+
+ assertRecoveryState(shardResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ validateIndexRecoveryState(shardResponses.get(0).recoveryState().getIndex());
+
+ statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
+ assertThat(statsResponse.getNodes(), arrayWithSize(2));
+ for (NodeStats nodeStats : statsResponse.getNodes()) {
+ final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
+ assertThat(recoveryStats.currentAsSource(), equalTo(0));
+ assertThat(recoveryStats.currentAsTarget(), equalTo(0));
+ if (nodeStats.getNode().name().equals(nodeA)) {
+ assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0l));
+ }
+ if (nodeStats.getNode().name().equals(nodeB)) {
+ assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0l));
+ }
+ }
+
+ logger.info("--> bump replica count");
+ client().admin().indices().prepareUpdateSettings(INDEX_NAME)
+ .setSettings(settingsBuilder().put("number_of_replicas", 1)).execute().actionGet();
+ ensureGreen();
+
+ statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
+ assertThat(statsResponse.getNodes(), arrayWithSize(2));
+ for (NodeStats nodeStats : statsResponse.getNodes()) {
+ final RecoveryStats recoveryStats = nodeStats.getIndices().getRecoveryStats();
+ assertThat(recoveryStats.currentAsSource(), equalTo(0));
+ assertThat(recoveryStats.currentAsTarget(), equalTo(0));
+ if (nodeStats.getNode().name().equals(nodeA)) {
+ assertThat("node A throttling should be >0", recoveryStats.throttleTime().millis(), greaterThan(0l));
+ }
+ if (nodeStats.getNode().name().equals(nodeB)) {
+ assertThat("node B throttling should be >0 ", recoveryStats.throttleTime().millis(), greaterThan(0l));
+ }
+ }
+
+ logger.info("--> start node C");
+ String nodeC = internalCluster().startNode();
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").get().isTimedOut());
+
+ logger.info("--> slowing down recoveries");
+ slowDownRecovery(shardSize);
+
+ logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId(INDEX_NAME, 0), nodeA, nodeC))
+ .execute().actionGet().getState();
+
+ response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+ shardResponses = response.shardResponses().get(INDEX_NAME);
+
+ nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses);
+ assertThat(nodeAResponses.size(), equalTo(1));
+ nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses);
+ assertThat(nodeBResponses.size(), equalTo(1));
+ List<ShardRecoveryResponse> nodeCResponses = findRecoveriesForTargetNode(nodeC, shardResponses);
+ assertThat(nodeCResponses.size(), equalTo(1));
+
+ assertRecoveryState(nodeAResponses.get(0).recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false);
+ validateIndexRecoveryState(nodeAResponses.get(0).recoveryState().getIndex());
+
+ assertRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex());
+
+ // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
+ assertOnGoingRecoveryState(nodeCResponses.get(0).recoveryState(), 0, Type.REPLICA, nodeB, nodeC, false);
+ validateIndexRecoveryState(nodeCResponses.get(0).recoveryState().getIndex());
+
+ logger.info("--> speeding up recoveries");
+ restoreRecoverySpeed();
+ ensureGreen();
+
+ response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+ shardResponses = response.shardResponses().get(INDEX_NAME);
+
+ nodeAResponses = findRecoveriesForTargetNode(nodeA, shardResponses);
+ assertThat(nodeAResponses.size(), equalTo(0));
+ nodeBResponses = findRecoveriesForTargetNode(nodeB, shardResponses);
+ assertThat(nodeBResponses.size(), equalTo(1));
+ nodeCResponses = findRecoveriesForTargetNode(nodeC, shardResponses);
+ assertThat(nodeCResponses.size(), equalTo(1));
+
+ assertRecoveryState(nodeBResponses.get(0).recoveryState(), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
+ validateIndexRecoveryState(nodeBResponses.get(0).recoveryState().getIndex());
+
+ // relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
+ assertRecoveryState(nodeCResponses.get(0).recoveryState(), 0, Type.REPLICA, Stage.DONE, nodeB, nodeC, false);
+ validateIndexRecoveryState(nodeCResponses.get(0).recoveryState().getIndex());
+ }
+
+ @Test
+ public void snapshotRecoveryTest() throws Exception {
+ logger.info("--> start node A");
+ String nodeA = internalCluster().startNode();
+
+ logger.info("--> create repository");
+ assertAcked(client().admin().cluster().preparePutRepository(REPO_NAME)
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", false)
+ ).get());
+
+ ensureGreen();
+
+ logger.info("--> create index on node: {}", nodeA);
+ createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPO_NAME, SNAP_NAME)
+ .setWaitForCompletion(true).setIndices(INDEX_NAME).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client().admin().cluster().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get()
+ .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ client().admin().indices().prepareClose(INDEX_NAME).execute().actionGet();
+
+ logger.info("--> restore");
+ RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster()
+ .prepareRestoreSnapshot(REPO_NAME, SNAP_NAME).setWaitForCompletion(true).execute().actionGet();
+ int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards();
+ assertThat(totalShards, greaterThan(0));
+
+ ensureGreen();
+
+ logger.info("--> request recoveries");
+ RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+
+ for (Map.Entry<String, List<ShardRecoveryResponse>> shardRecoveryResponse : response.shardResponses().entrySet()) {
+
+ assertThat(shardRecoveryResponse.getKey(), equalTo(INDEX_NAME));
+ List<ShardRecoveryResponse> shardRecoveryResponses = shardRecoveryResponse.getValue();
+ assertThat(shardRecoveryResponses.size(), equalTo(totalShards));
+
+ for (ShardRecoveryResponse shardResponse : shardRecoveryResponses) {
+ assertRecoveryState(shardResponse.recoveryState(), 0, Type.SNAPSHOT, Stage.DONE, null, nodeA, true);
+ validateIndexRecoveryState(shardResponse.recoveryState().getIndex());
+ }
+ }
+ }
+
+ private List<ShardRecoveryResponse> findRecoveriesForTargetNode(String nodeName, List<ShardRecoveryResponse> responses) {
+ List<ShardRecoveryResponse> nodeResponses = new ArrayList<>();
+ for (ShardRecoveryResponse response : responses) {
+ if (response.recoveryState().getTargetNode().getName().equals(nodeName)) {
+ nodeResponses.add(response);
+ }
+ }
+ return nodeResponses;
+ }
+
+ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, int shardCount, int replicaCount)
+ throws ExecutionException, InterruptedException {
+
+ logger.info("--> creating test index: {}", name);
+ assertAcked(prepareCreate(name, nodeCount, settingsBuilder().put("number_of_shards", shardCount)
+ .put("number_of_replicas", replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0)));
+ ensureGreen();
+
+ logger.info("--> indexing sample data");
+ final int numDocs = between(MIN_DOC_COUNT, MAX_DOC_COUNT);
+ final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE).
+ setSource("foo-int", randomInt(),
+ "foo-string", randomAsciiOfLength(32),
+ "foo-float", randomFloat());
+ }
+
+ indexRandom(true, docs);
+ flush();
+ assertThat(client().prepareCount(INDEX_NAME).get().getCount(), equalTo((long) numDocs));
+ return client().admin().indices().prepareStats(INDEX_NAME).execute().actionGet();
+ }
+
+ private void validateIndexRecoveryState(RecoveryState.Index indexState) {
+ assertThat(indexState.time(), greaterThanOrEqualTo(0L));
+ assertThat(indexState.recoveredFilesPercent(), greaterThanOrEqualTo(0.0f));
+ assertThat(indexState.recoveredFilesPercent(), lessThanOrEqualTo(100.0f));
+ assertThat(indexState.recoveredBytesPercent(), greaterThanOrEqualTo(0.0f));
+ assertThat(indexState.recoveredBytesPercent(), lessThanOrEqualTo(100.0f));
+ }
+
+ @Test
+ public void disconnectsWhileRecoveringTest() throws Exception {
+ final String indexName = "test";
+ final Settings nodeSettings = Settings.builder()
+ .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, "100ms")
+ .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s")
+ .put("cluster.routing.schedule", "100ms") // reroute aggressively after shard failures
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again
+ .build();
+ // start a master node
+ internalCluster().startNode(nodeSettings);
+
+ ListenableFuture<String> blueFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "blue").put(nodeSettings).build());
+ ListenableFuture<String> redFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "red").put(nodeSettings).build());
+ final String blueNodeName = blueFuture.get();
+ final String redNodeName = redFuture.get();
+
+ ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
+ assertThat(response.isTimedOut(), is(false));
+
+
+ client().admin().indices().prepareCreate(indexName)
+ .setSettings(
+ Settings.builder()
+ .put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ).get();
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ int numDocs = scaledRandomIntBetween(25, 250);
+ for (int i = 0; i < numDocs; i++) {
+ requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
+ }
+ indexRandom(true, requests);
+ ensureSearchable(indexName);
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ final String blueNodeId = internalCluster().getInstance(DiscoveryService.class, blueNodeName).localNode().id();
+
+ assertFalse(stateResponse.getState().readOnlyRoutingNodes().node(blueNodeId).isEmpty());
+
+ SearchResponse searchResponse = client().prepareSearch(indexName).get();
+ assertHitCount(searchResponse, numDocs);
+
+ String[] recoveryActions = new String[]{
+ RecoverySource.Actions.START_RECOVERY,
+ RecoveryTarget.Actions.FILES_INFO,
+ RecoveryTarget.Actions.FILE_CHUNK,
+ RecoveryTarget.Actions.CLEAN_FILES,
+ //RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed
+ RecoveryTarget.Actions.PREPARE_TRANSLOG,
+ RecoveryTarget.Actions.FINALIZE
+ };
+ final String recoveryActionToBlock = randomFrom(recoveryActions);
+ final boolean dropRequests = randomBoolean();
+ logger.info("--> will {} between blue & red on [{}]", dropRequests ? "drop requests" : "break connection", recoveryActionToBlock);
+
+ MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName);
+ MockTransportService redMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName);
+ DiscoveryNode redDiscoNode = internalCluster().getInstance(ClusterService.class, redNodeName).localNode();
+ DiscoveryNode blueDiscoNode = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode();
+ final CountDownLatch requestBlocked = new CountDownLatch(1);
+
+ blueMockTransportService.addDelegate(redDiscoNode, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, blueMockTransportService.original(), requestBlocked));
+ redMockTransportService.addDelegate(blueDiscoNode, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, redMockTransportService.original(), requestBlocked));
+
+ logger.info("--> starting recovery from blue to red");
+ client().admin().indices().prepareUpdateSettings(indexName).setSettings(
+ Settings.builder()
+ .put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red,blue")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ ).get();
+
+ requestBlocked.await();
+
+ logger.info("--> stopping to block recovery");
+ blueMockTransportService.clearAllRules();
+ redMockTransportService.clearAllRules();
+
+ ensureGreen();
+ searchResponse = client(redNodeName).prepareSearch(indexName).setPreference("_local").get();
+ assertHitCount(searchResponse, numDocs);
+
+ }
+
+ private class RecoveryActionBlocker extends MockTransportService.DelegateTransport {
+ private final boolean dropRequests;
+ private final String recoveryActionToBlock;
+ private final CountDownLatch requestBlocked;
+
+ public RecoveryActionBlocker(boolean dropRequests, String recoveryActionToBlock, Transport delegate, CountDownLatch requestBlocked) {
+ super(delegate);
+ this.dropRequests = dropRequests;
+ this.recoveryActionToBlock = recoveryActionToBlock;
+ this.requestBlocked = requestBlocked;
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (recoveryActionToBlock.equals(action) || requestBlocked.getCount() == 0) {
+ logger.info("--> preventing {} request", action);
+ requestBlocked.countDown();
+ if (dropRequests) {
+ return;
+ }
+ throw new ConnectTransportException(node, "DISCONNECT: prevented " + action + " request");
+ }
+ transport.sendRequest(node, requestId, action, request, options);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java
new file mode 100644
index 0000000000..6d3c5fe6a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java
@@ -0,0 +1,528 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.recovery.RecoveryState.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.*;
+
+public class RecoveryStateTest extends ElasticsearchTestCase {
+
+ abstract class Streamer<T extends Streamable> extends Thread {
+
+ private T lastRead;
+ final private AtomicBoolean shouldStop;
+ final private T source;
+ final AtomicReference<Throwable> error = new AtomicReference<>();
+ final Version streamVersion;
+
+ Streamer(AtomicBoolean shouldStop, T source) {
+ this(shouldStop, source, randomVersion(random()));
+ }
+
+ Streamer(AtomicBoolean shouldStop, T source, Version streamVersion) {
+ this.shouldStop = shouldStop;
+ this.source = source;
+ this.streamVersion = streamVersion;
+ }
+
+ public T lastRead() throws Throwable {
+ Throwable t = error.get();
+ if (t != null) {
+ throw t;
+ }
+ return lastRead;
+ }
+
+ public T serializeDeserialize() throws IOException {
+ BytesStreamOutput out = new BytesStreamOutput();
+ source.writeTo(out);
+ out.close();
+ StreamInput in = StreamInput.wrap(out.bytes());
+ T obj = deserialize(in);
+ lastRead = obj;
+ return obj;
+ }
+
+ protected T deserialize(StreamInput in) throws IOException {
+ T obj = createObj();
+ obj.readFrom(in);
+ return obj;
+ }
+
+ abstract T createObj();
+
+ @Override
+ public void run() {
+ try {
+ while (shouldStop.get() == false) {
+ serializeDeserialize();
+ }
+ serializeDeserialize();
+ } catch (Throwable t) {
+ error.set(t);
+ }
+ }
+ }
+
+ public void testTimers() throws Throwable {
+ final Timer timer;
+ Streamer<Timer> streamer;
+ AtomicBoolean stop = new AtomicBoolean();
+ if (randomBoolean()) {
+ timer = new Timer();
+ streamer = new Streamer<Timer>(stop, timer) {
+ @Override
+ Timer createObj() {
+ return new Timer();
+ }
+ };
+ } else if (randomBoolean()) {
+ timer = new Index();
+ streamer = new Streamer<Timer>(stop, timer) {
+ @Override
+ Timer createObj() {
+ return new Index();
+ }
+ };
+ } else if (randomBoolean()) {
+ timer = new VerifyIndex();
+ streamer = new Streamer<Timer>(stop, timer) {
+ @Override
+ Timer createObj() {
+ return new VerifyIndex();
+ }
+ };
+ } else {
+ timer = new Translog();
+ streamer = new Streamer<Timer>(stop, timer) {
+ @Override
+ Timer createObj() {
+ return new Translog();
+ }
+ };
+ }
+
+ timer.start();
+ assertThat(timer.startTime(), greaterThan(0l));
+ assertThat(timer.stopTime(), equalTo(0l));
+ Timer lastRead = streamer.serializeDeserialize();
+ final long time = lastRead.time();
+ assertThat(time, lessThanOrEqualTo(timer.time()));
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat("timer timer should progress compared to captured one ", time, lessThan(timer.time()));
+ }
+ });
+ assertThat("captured time shouldn't change", lastRead.time(), equalTo(time));
+
+ if (randomBoolean()) {
+ timer.stop();
+ assertThat(timer.stopTime(), greaterThanOrEqualTo(timer.startTime()));
+ assertThat(timer.time(), equalTo(timer.stopTime() - timer.startTime()));
+ lastRead = streamer.serializeDeserialize();
+ assertThat(lastRead.startTime(), equalTo(timer.startTime()));
+ assertThat(lastRead.time(), equalTo(timer.time()));
+ assertThat(lastRead.stopTime(), equalTo(timer.stopTime()));
+ }
+
+ timer.reset();
+ assertThat(timer.startTime(), equalTo(0l));
+ assertThat(timer.time(), equalTo(0l));
+ assertThat(timer.stopTime(), equalTo(0l));
+ lastRead = streamer.serializeDeserialize();
+ assertThat(lastRead.startTime(), equalTo(0l));
+ assertThat(lastRead.time(), equalTo(0l));
+ assertThat(lastRead.stopTime(), equalTo(0l));
+
+ }
+
+ public void testIndex() throws Throwable {
+ File[] files = new File[randomIntBetween(1, 20)];
+ ArrayList<File> filesToRecover = new ArrayList<>();
+ long totalFileBytes = 0;
+ long totalReusedBytes = 0;
+ int totalReused = 0;
+ for (int i = 0; i < files.length; i++) {
+ final int fileLength = randomIntBetween(1, 1000);
+ final boolean reused = randomBoolean();
+ totalFileBytes += fileLength;
+ files[i] = new RecoveryState.File("f_" + i, fileLength, reused);
+ if (reused) {
+ totalReused++;
+ totalReusedBytes += fileLength;
+ } else {
+ filesToRecover.add(files[i]);
+ }
+ }
+
+ Collections.shuffle(Arrays.asList(files));
+ final RecoveryState.Index index = new RecoveryState.Index();
+
+ if (randomBoolean()) {
+ // initialize with some data and then reset
+ index.start();
+ for (int i = randomIntBetween(0, 10); i > 0; i--) {
+ index.addFileDetail("t_" + i, randomIntBetween(1, 100), randomBoolean());
+ if (randomBoolean()) {
+ index.addSourceThrottling(randomIntBetween(0, 20));
+ }
+ if (randomBoolean()) {
+ index.addTargetThrottling(randomIntBetween(0, 20));
+ }
+ }
+ if (randomBoolean()) {
+ index.stop();
+ }
+ index.reset();
+ }
+
+
+ // before recovery starts, all progress metrics must report 0
+ assertThat(index.recoveredFilesPercent(), equalTo((float) 0.0));
+ assertThat(index.recoveredBytesPercent(), equalTo((float) 0.0));
+ assertThat(index.sourceThrottling().nanos(), equalTo(Index.UNKNOWN));
+ assertThat(index.targetThrottling().nanos(), equalTo(Index.UNKNOWN));
+
+ index.start();
+ for (File file : files) {
+ index.addFileDetail(file.name(), file.length(), file.reused());
+ }
+
+ logger.info("testing initial information");
+ assertThat(index.totalBytes(), equalTo(totalFileBytes));
+ assertThat(index.reusedBytes(), equalTo(totalReusedBytes));
+ assertThat(index.totalRecoverBytes(), equalTo(totalFileBytes - totalReusedBytes));
+ assertThat(index.totalFileCount(), equalTo(files.length));
+ assertThat(index.reusedFileCount(), equalTo(totalReused));
+ assertThat(index.totalRecoverFiles(), equalTo(filesToRecover.size()));
+ assertThat(index.recoveredFileCount(), equalTo(0));
+ assertThat(index.recoveredBytes(), equalTo(0l));
+ assertThat(index.recoveredFilesPercent(), equalTo(filesToRecover.size() == 0 ? 100.0f : 0.0f));
+ assertThat(index.recoveredBytesPercent(), equalTo(filesToRecover.size() == 0 ? 100.0f : 0.0f));
+
+
+ long bytesToRecover = totalFileBytes - totalReusedBytes;
+ boolean completeRecovery = bytesToRecover == 0 || randomBoolean();
+ if (completeRecovery == false) {
+ bytesToRecover = randomIntBetween(1, (int) bytesToRecover);
+ logger.info("performing partial recovery ([{}] bytes of [{}])", bytesToRecover, totalFileBytes - totalReusedBytes);
+ }
+ AtomicBoolean streamShouldStop = new AtomicBoolean();
+
+ Streamer<Index> backgroundReader = new Streamer<RecoveryState.Index>(streamShouldStop, index) {
+ @Override
+ Index createObj() {
+ return new Index();
+ }
+ };
+
+ backgroundReader.start();
+
+ long recoveredBytes = 0;
+ long sourceThrottling = Index.UNKNOWN;
+ long targetThrottling = Index.UNKNOWN;
+ while (bytesToRecover > 0) {
+ File file = randomFrom(filesToRecover);
+ final long toRecover = Math.min(bytesToRecover, randomIntBetween(1, (int) (file.length() - file.recovered())));
+ final long throttledOnSource = rarely() ? randomIntBetween(10, 200) : 0;
+ index.addSourceThrottling(throttledOnSource);
+ if (sourceThrottling == Index.UNKNOWN) {
+ sourceThrottling = throttledOnSource;
+ } else {
+ sourceThrottling += throttledOnSource;
+ }
+ index.addRecoveredBytesToFile(file.name(), toRecover);
+ file.addRecoveredBytes(toRecover);
+ final long throttledOnTarget = rarely() ? randomIntBetween(10, 200) : 0;
+ if (targetThrottling == Index.UNKNOWN) {
+ targetThrottling = throttledOnTarget;
+ } else {
+ targetThrottling += throttledOnTarget;
+ }
+ index.addTargetThrottling(throttledOnTarget);
+ bytesToRecover -= toRecover;
+ recoveredBytes += toRecover;
+ if (file.reused() || file.fullyRecovered()) {
+ filesToRecover.remove(file);
+ }
+ }
+
+ if (completeRecovery) {
+ assertThat(filesToRecover.size(), equalTo(0));
+ index.stop();
+ assertThat(index.time(), equalTo(index.stopTime() - index.startTime()));
+ assertThat(index.time(), equalTo(index.stopTime() - index.startTime()));
+ }
+
+ logger.info("testing serialized information");
+ streamShouldStop.set(true);
+ backgroundReader.join();
+ final Index lastRead = backgroundReader.lastRead();
+ assertThat(lastRead.fileDetails().toArray(), arrayContainingInAnyOrder(index.fileDetails().toArray()));
+ assertThat(lastRead.startTime(), equalTo(index.startTime()));
+ if (completeRecovery) {
+ assertThat(lastRead.time(), equalTo(index.time()));
+ } else {
+ assertThat(lastRead.time(), lessThanOrEqualTo(index.time()));
+ }
+ assertThat(lastRead.stopTime(), equalTo(index.stopTime()));
+ assertThat(lastRead.targetThrottling(), equalTo(index.targetThrottling()));
+ assertThat(lastRead.sourceThrottling(), equalTo(index.sourceThrottling()));
+
+ logger.info("testing post recovery");
+ assertThat(index.totalBytes(), equalTo(totalFileBytes));
+ assertThat(index.reusedBytes(), equalTo(totalReusedBytes));
+ assertThat(index.totalRecoverBytes(), equalTo(totalFileBytes - totalReusedBytes));
+ assertThat(index.totalFileCount(), equalTo(files.length));
+ assertThat(index.reusedFileCount(), equalTo(totalReused));
+ assertThat(index.totalRecoverFiles(), equalTo(files.length - totalReused));
+ assertThat(index.recoveredFileCount(), equalTo(index.totalRecoverFiles() - filesToRecover.size()));
+ assertThat(index.recoveredBytes(), equalTo(recoveredBytes));
+ assertThat(index.targetThrottling().nanos(), equalTo(targetThrottling));
+ assertThat(index.sourceThrottling().nanos(), equalTo(sourceThrottling));
+ if (index.totalRecoverFiles() == 0) {
+ assertThat((double) index.recoveredFilesPercent(), equalTo(100.0));
+ assertThat((double) index.recoveredBytesPercent(), equalTo(100.0));
+ } else {
+ assertThat((double) index.recoveredFilesPercent(), closeTo(100.0 * index.recoveredFileCount() / index.totalRecoverFiles(), 0.1));
+ assertThat((double) index.recoveredBytesPercent(), closeTo(100.0 * index.recoveredBytes() / index.totalRecoverBytes(), 0.1));
+ }
+ }
+
+ public void testStageSequenceEnforcement() {
+ final DiscoveryNode discoveryNode = new DiscoveryNode("1", DummyTransportAddress.INSTANCE, Version.CURRENT);
+ Stage[] stages = Stage.values();
+ int i = randomIntBetween(0, stages.length - 1);
+ int j;
+ do {
+ j = randomIntBetween(0, stages.length - 1);
+ } while (j == i);
+ Stage t = stages[i];
+ stages[i] = stages[j];
+ stages[j] = t;
+ try {
+ RecoveryState state = new RecoveryState(new ShardId("bla", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode);
+ for (Stage stage : stages) {
+ state.setStage(stage);
+ }
+ fail("succeeded in performing the illegal sequence [" + Strings.arrayToCommaDelimitedString(stages) + "]");
+ } catch (IllegalStateException e) {
+ // cool
+ }
+
+ // but reset should be always possible.
+ stages = Stage.values();
+ i = randomIntBetween(1, stages.length - 1);
+ ArrayList<Stage> list = new ArrayList<>(Arrays.asList(Arrays.copyOfRange(stages, 0, i)));
+ list.addAll(Arrays.asList(stages));
+ RecoveryState state = new RecoveryState(new ShardId("bla", 0), randomBoolean(), randomFrom(Type.values()), discoveryNode, discoveryNode);
+ for (Stage stage : list) {
+ state.setStage(stage);
+ }
+
+ assertThat(state.getStage(), equalTo(Stage.DONE));
+ }
+
+ public void testTranslog() throws Throwable {
+ final Translog translog = new Translog();
+ AtomicBoolean stop = new AtomicBoolean();
+ Streamer<Translog> streamer = new Streamer<Translog>(stop, translog) {
+ @Override
+ Translog createObj() {
+ return new Translog();
+ }
+ };
+
+ // we don't need to test the time aspect, it's done in the timer test
+ translog.start();
+ assertThat(translog.recoveredOperations(), equalTo(0));
+ assertThat(translog.totalOperations(), equalTo(Translog.UNKNOWN));
+ assertThat(translog.totalOperationsOnStart(), equalTo(Translog.UNKNOWN));
+ streamer.start();
+ // force one serialize/deserialize round-trip so lastRead() is populated before the loop asserts on it
+ streamer.serializeDeserialize();
+ int ops = 0;
+ int totalOps = 0;
+ int totalOpsOnStart = randomIntBetween(10, 200);
+ translog.totalOperationsOnStart(totalOpsOnStart);
+ for (int i = scaledRandomIntBetween(10, 200); i > 0; i--) {
+ final int iterationOps = randomIntBetween(1, 10);
+ totalOps += iterationOps;
+ translog.totalOperations(totalOps);
+ assertThat((double) translog.recoveredPercent(), closeTo(100.0 * ops / totalOps, 0.1));
+ for (int j = iterationOps; j > 0; j--) {
+ ops++;
+ translog.incrementRecoveredOperations();
+ }
+ assertThat(translog.recoveredOperations(), equalTo(ops));
+ assertThat(translog.totalOperations(), equalTo(totalOps));
+ assertThat(translog.recoveredPercent(), equalTo(100.f));
+ assertThat(streamer.lastRead().recoveredOperations(), greaterThanOrEqualTo(0));
+ assertThat(streamer.lastRead().recoveredOperations(), lessThanOrEqualTo(ops));
+ assertThat(streamer.lastRead().totalOperations(), lessThanOrEqualTo(totalOps));
+ assertThat(streamer.lastRead().totalOperationsOnStart(), lessThanOrEqualTo(totalOpsOnStart));
+ assertThat(streamer.lastRead().recoveredPercent(), either(greaterThanOrEqualTo(0.f)).or(equalTo(-1.f)));
+ }
+
+ boolean stopped = false;
+ if (randomBoolean()) {
+ translog.stop();
+ stopped = true;
+ }
+
+ if (randomBoolean()) {
+ translog.reset();
+ ops = 0;
+ totalOps = Translog.UNKNOWN;
+ totalOpsOnStart = Translog.UNKNOWN;
+ assertThat(translog.recoveredOperations(), equalTo(0));
+ assertThat(translog.totalOperationsOnStart(), equalTo(Translog.UNKNOWN));
+ assertThat(translog.totalOperations(), equalTo(Translog.UNKNOWN));
+ }
+
+ stop.set(true);
+ streamer.join();
+ final Translog lastRead = streamer.lastRead();
+ assertThat(lastRead.recoveredOperations(), equalTo(ops));
+ assertThat(lastRead.totalOperations(), equalTo(totalOps));
+ assertThat(lastRead.totalOperationsOnStart(), equalTo(totalOpsOnStart));
+ assertThat(lastRead.startTime(), equalTo(translog.startTime()));
+ assertThat(lastRead.stopTime(), equalTo(translog.stopTime()));
+
+ if (stopped) {
+ assertThat(lastRead.time(), equalTo(translog.time()));
+ } else {
+ assertThat(lastRead.time(), lessThanOrEqualTo(translog.time()));
+ }
+ }
+
+ public void testStart() throws IOException {
+ final VerifyIndex verifyIndex = new VerifyIndex();
+ AtomicBoolean stop = new AtomicBoolean();
+ Streamer<VerifyIndex> streamer = new Streamer<VerifyIndex>(stop, verifyIndex) {
+ @Override
+ VerifyIndex createObj() {
+ return new VerifyIndex();
+ }
+ };
+
+ // we don't need to test the time aspect, it's done in the timer test
+ verifyIndex.start();
+ assertThat(verifyIndex.checkIndexTime(), equalTo(0l));
+ // force one serialize/deserialize round-trip so we can assert on the freshly captured state
+ VerifyIndex lastRead = streamer.serializeDeserialize();
+ assertThat(lastRead.checkIndexTime(), equalTo(0l));
+
+ long took = randomLong();
+ if (took < 0) {
+ took = -took;
+ took = Math.max(0l, took);
+
+ }
+ verifyIndex.checkIndexTime(took);
+ assertThat(verifyIndex.checkIndexTime(), equalTo(took));
+
+ boolean stopped = false;
+ if (randomBoolean()) {
+ verifyIndex.stop();
+ stopped = true;
+ }
+
+ if (randomBoolean()) {
+ verifyIndex.reset();
+ took = 0;
+ assertThat(verifyIndex.checkIndexTime(), equalTo(took));
+ }
+
+ lastRead = streamer.serializeDeserialize();
+ assertThat(lastRead.checkIndexTime(), equalTo(took));
+ assertThat(lastRead.startTime(), equalTo(verifyIndex.startTime()));
+ assertThat(lastRead.stopTime(), equalTo(verifyIndex.stopTime()));
+
+ if (stopped) {
+ assertThat(lastRead.time(), equalTo(verifyIndex.time()));
+ } else {
+ assertThat(lastRead.time(), lessThanOrEqualTo(verifyIndex.time()));
+ }
+ }
+
+ @Test
+ public void testConcurrentModificationIndexFileDetailsMap() throws InterruptedException {
+ final Index index = new Index();
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Streamer<Index> readWriteIndex = new Streamer<Index>(stop, index) {
+ @Override
+ Index createObj() {
+ return new Index();
+ }
+ };
+ Thread modifyThread = new Thread() {
+ public void run() {
+ for (int i = 0; i < 1000; i++) {
+ index.addFileDetail(randomAsciiOfLength(10), 100, true);
+ }
+ stop.set(true);
+ }
+ };
+ readWriteIndex.start();
+ modifyThread.start();
+ modifyThread.join();
+ readWriteIndex.join();
+ assertThat(readWriteIndex.error.get(), equalTo(null));
+ }
+
+ @Test
+ public void testFileHashCodeAndEquals() {
+ File f = new File("foo", randomIntBetween(0, 100), randomBoolean());
+ File anotherFile = new File(f.name(), f.length(), f.reused());
+ assertEquals(f, anotherFile);
+ assertEquals(f.hashCode(), anotherFile.hashCode());
+ int iters = randomIntBetween(10, 100);
+ for (int i = 0; i < iters; i++) {
+ f = new File("foo", randomIntBetween(0, 100), randomBoolean());
+ anotherFile = new File(f.name(), randomIntBetween(0, 100), randomBoolean());
+ if (f.equals(anotherFile)) {
+ assertEquals(f.hashCode(), anotherFile.hashCode());
+ } else if (f.hashCode() != anotherFile.hashCode()) {
+ assertFalse(f.equals(anotherFile));
+ }
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java
new file mode 100644
index 0000000000..a86230248b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.recovery;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.store.IndexOutput;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+/**
+ */
+public class RecoveryStatusTests extends ElasticsearchSingleNodeTest {
+
+ public void testRenameTempFiles() throws IOException {
+ IndexService service = createIndex("foo");
+
+ IndexShard indexShard = service.shard(0);
+ DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), Version.CURRENT);
+ RecoveryStatus status = new RecoveryStatus(indexShard, node, new RecoveryTarget.RecoveryListener() {
+ @Override
+ public void onRecoveryDone(RecoveryState state) {
+ }
+
+ @Override
+ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
+ }
+ });
+ try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store())) {
+ indexOutput.writeInt(1);
+ IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar");
+ assertSame(openIndexOutput, indexOutput);
+ openIndexOutput.writeInt(1);
+ }
+ status.removeOpenIndexOutputs("foo.bar");
+ Set<String> strings = Sets.newHashSet(status.store().directory().listAll());
+ String expectedFile = null;
+ for (String file : strings) {
+ if (Pattern.compile("recovery[.]\\d+[.]foo[.]bar").matcher(file).matches()) {
+ expectedFile = file;
+ break;
+ }
+ }
+ assertNotNull(expectedFile);
+ indexShard.close("foo", false);// we have to close it here otherwise rename fails since the write.lock is held by the engine
+ status.renameAllTempFiles();
+ strings = Sets.newHashSet(status.store().directory().listAll());
+ assertTrue(strings.toString(), strings.contains("foo.bar"));
+ assertFalse(strings.toString(), strings.contains(expectedFile));
+ // we must fail the recovery because marking it as done will try to move the shard to POST_RECOVERY, which will fail because it's started
+ status.fail(new RecoveryFailedException(status.state(), "end of test. OK.", null), false);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTest.java b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTest.java
new file mode 100644
index 0000000000..4a1586e5c4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTest.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.recovery;
+
+import org.apache.lucene.index.IndexFileNames;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.StoreFileMetaData;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class StartRecoveryRequestTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testSerialization() throws Exception {
+ Version targetNodeVersion = randomVersion(random());
+ StartRecoveryRequest outRequest = new StartRecoveryRequest(
+ new ShardId("test", 0),
+ new DiscoveryNode("a", new LocalTransportAddress("1"), targetNodeVersion),
+ new DiscoveryNode("b", new LocalTransportAddress("1"), targetNodeVersion),
+ true,
+ Store.MetadataSnapshot.EMPTY,
+ RecoveryState.Type.RELOCATION,
+ 1l
+
+ );
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ out.setVersion(targetNodeVersion);
+ outRequest.writeTo(out);
+
+ ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
+ in.setVersion(targetNodeVersion);
+ StartRecoveryRequest inRequest = new StartRecoveryRequest();
+ inRequest.readFrom(in);
+
+ assertThat(outRequest.shardId(), equalTo(inRequest.shardId()));
+ assertThat(outRequest.sourceNode(), equalTo(inRequest.sourceNode()));
+ assertThat(outRequest.targetNode(), equalTo(inRequest.targetNode()));
+ assertThat(outRequest.markAsRelocated(), equalTo(inRequest.markAsRelocated()));
+ assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap()));
+ assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId()));
+ assertThat(outRequest.recoveryType(), equalTo(inRequest.recoveryType()));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java
new file mode 100644
index 0000000000..70783a8efb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+public class GetSettingsBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testGetSettingsWithBlocks() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.refresh_interval", -1)
+ .put("index.merge.policy.expunge_deletes_allowed", "30")
+ .put("index.mapper.dynamic", false)));
+
+ for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test", block);
+ GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(response.getIndexToSettings().size(), greaterThanOrEqualTo(1));
+ assertThat(response.getSetting("test", "index.refresh_interval"), equalTo("-1"));
+ assertThat(response.getSetting("test", "index.merge.policy.expunge_deletes_allowed"), equalTo("30"));
+ assertThat(response.getSetting("test", "index.mapper.dynamic"), equalTo("false"));
+ } finally {
+ disableIndexBlock("test", block);
+ }
+ }
+
+ try {
+ enableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareGetSettings("test"));
+ } finally {
+ disableIndexBlock("test", SETTING_BLOCKS_METADATA);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
new file mode 100644
index 0000000000..3eb35db9cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java
@@ -0,0 +1,285 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void simpleUpdateNumberOfReplicasTests() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test", 2));
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+
+ NumShards numShards = getNumShards("test");
+
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(numShards.numReplicas));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.totalNumShards));
+
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("value", "test" + i)
+ .endObject()).get();
+ }
+
+ refresh();
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+ assertHitCount(countResponse, 10l);
+ }
+
+ logger.info("Increasing the number of replicas from 1 to 2");
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 2)).execute().actionGet());
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ //only 2 copies allocated (1 replica) across 2 nodes
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
+
+ logger.info("starting another node to new replicas will be allocated to it");
+ allowNodes("test", 3);
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ //all 3 copies allocated across 3 nodes
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
+
+ for (int i = 0; i < 10; i++) {
+ CountResponse countResponse = client().prepareCount().setQuery(matchAllQuery()).get();
+ assertHitCount(countResponse, 10l);
+ }
+
+ logger.info("Decreasing number of replicas from 2 to 0");
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.number_of_replicas", 0)).get());
+
+ logger.info("Running Cluster Health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes(">=3").execute().actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ //a single copy is allocated (replica set to 0)
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries));
+
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 10);
+ }
+ }
+
+ @Test
+ public void testAutoExpandNumberOfReplicas0ToData() throws IOException {
+ internalCluster().ensureAtMostNumDataNodes(2);
+ logger.info("--> creating index test with auto expand replicas");
+ assertAcked(prepareCreate("test", 2, settingsBuilder().put("auto_expand_replicas", "0-all")));
+
+ NumShards numShards = getNumShards("test");
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).setWaitForNodes(">=3").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
+
+ logger.info("--> closing one node");
+ internalCluster().ensureAtMostNumDataNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).setWaitForNodes(">=2").execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
+
+ logger.info("--> closing another node");
+ internalCluster().ensureAtMostNumDataNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas1ToData() throws IOException {
+ logger.info("--> creating index test with auto expand replicas");
+ internalCluster().ensureAtMostNumDataNodes(2);
+ assertAcked(prepareCreate("test", 2, settingsBuilder().put("auto_expand_replicas", "1-all")));
+
+ NumShards numShards = getNumShards("test");
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
+
+ logger.info("--> add another node, should increase the number of replicas");
+ allowNodes("test", 3);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
+
+ logger.info("--> closing one node");
+ internalCluster().ensureAtMostNumDataNodes(2);
+ allowNodes("test", 2);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes(">=2").setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
+
+ logger.info("--> closing another node");
+ internalCluster().ensureAtMostNumDataNodes(1);
+ allowNodes("test", 1);
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes(">=1").setWaitForActiveShards(numShards.numPrimaries).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(1));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries));
+ }
+
+ @Test
+ public void testAutoExpandNumberReplicas2() {
+ logger.info("--> creating index test with auto expand replicas set to 0-2");
+ assertAcked(prepareCreate("test", 3, settingsBuilder().put("auto_expand_replicas", "0-2")));
+
+ NumShards numShards = getNumShards("test");
+
+ logger.info("--> running cluster health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 3).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
+
+ logger.info("--> add two more nodes");
+ allowNodes("test", 4);
+ allowNodes("test", 5);
+
+ logger.info("--> update the auto expand replicas to 0-3");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("auto_expand_replicas", "0-3")).execute().actionGet();
+
+ logger.info("--> running cluster health");
+ clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForActiveShards(numShards.numPrimaries * 4).execute().actionGet();
+ logger.info("--> done cluster health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
+ assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(3));
+ assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 4));
+ }
+
+ @Test
+ public void testUpdateWithInvalidNumberOfReplicas() {
+ createIndex("test");
+ try {
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1))
+ )
+ .execute().actionGet();
+ fail("should have thrown an exception about the replica shard count");
+ } catch (IllegalArgumentException e) {
+ assertThat("message contains error about shard count: " + e.getMessage(),
+ e.getMessage().contains("the value of the setting index.number_of_replicas must be a non negative integer"), equalTo(true));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
new file mode 100644
index 0000000000..e1ca345b68
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.settings;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateSettingsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testOpenCloseUpdateSettings() throws Exception {
+ createIndex("test");
+ try {
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ // all is well
+ }
+
+ IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), nullValue());
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), nullValue());
+
+ // Now verify via dedicated get settings api:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), nullValue());
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), nullValue());
+
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.refresh_interval", -1) // this one can change
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("-1"));
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("-1"));
+
+ // now close the index, change the non dynamic setting, and see that it applies
+
+ // Wait for the index to turn green before attempting to close it
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth().setTimeout("30s").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+ assertThat(health.isTimedOut(), equalTo(false));
+
+ client().admin().indices().prepareClose("test").execute().actionGet();
+
+ try {
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ )
+ .execute().actionGet();
+ fail("can't change number of replicas on a closed index");
+ } catch (IllegalArgumentException ex) {
+ assertEquals(ex.getMessage(), "Can't update [index.number_of_replicas] on closed indices [[test]] - can leave index in an unopenable state");
+ // expected
+ }
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.refresh_interval", "1s") // this one can change
+ .put("index.cache.filter.type", "none") // this one can't
+ )
+ .execute().actionGet();
+
+ indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
+ assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("1s"));
+ assertThat(indexMetaData.settings().get("index.cache.filter.type"), equalTo("none"));
+
+ // Now verify via dedicated get settings api:
+ getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("1s"));
+ assertThat(getSettingsResponse.getSetting("test", "index.cache.filter.type"), equalTo("none"));
+ }
+
+ @Test
+ public void testEngineGCDeletesSetting() throws InterruptedException {
+ createIndex("test");
+ client().prepareIndex("test", "type", "1").setSource("f", 1).get(); // set version to 1
+ client().prepareDelete("test", "type", "1").get(); // sets version to 2
+ client().prepareIndex("test", "type", "1").setSource("f", 2).setVersion(2).get(); // delete is still in cache this should work & set version to 3
+ client().admin().indices().prepareUpdateSettings("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.gc_deletes", 0)
+ ).get();
+
+ client().prepareDelete("test", "type", "1").get(); // sets version to 4
+ Thread.sleep(300); // wait for cache time to change TODO: this needs to be solved better. To be discussed.
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setVersion(4), VersionConflictEngineException.class); // delete is should not be in cache
+
+ }
+
+ // #6626: make sure we can update throttle settings and the changes take effect
+ @Test
+ @Slow
+ public void testUpdateThrottleSettings() {
+
+ // No throttling at first, only 1 non-replicated shard, force lots of merging:
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+ .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "2")
+ .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0) // get stats all the time - no caching
+ ));
+ ensureGreen();
+ long termUpto = 0;
+ for(int i=0;i<100;i++) {
+ // Provoke slowish merging by making many unique terms:
+ StringBuilder sb = new StringBuilder();
+ for(int j=0;j<100;j++) {
+ sb.append(' ');
+ sb.append(termUpto++);
+ }
+ client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+ if (i % 2 == 0) {
+ refresh();
+ }
+ }
+
+ // No merge IO throttling should have happened:
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+ for(NodeStats stats : nodesStats.getNodes()) {
+ assertThat(stats.getIndices().getStore().getThrottleTime().getMillis(), equalTo(0l));
+ }
+
+ logger.info("test: set low merge throttling");
+
+ // Now updates settings to turn on merge throttling lowish rate
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge")
+ .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, "1mb"))
+ .get();
+
+ // Make sure setting says it is in fact changed:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", IndexStore.INDEX_STORE_THROTTLE_TYPE), equalTo("merge"));
+
+ // Also make sure we see throttling kicking in:
+ boolean done = false;
+ while (done == false) {
+ // Provoke slowish merging by making many unique terms:
+ for(int i=0;i<5;i++) {
+ StringBuilder sb = new StringBuilder();
+ for(int j=0;j<100;j++) {
+ sb.append(' ');
+ sb.append(termUpto++);
+ sb.append(" some random text that keeps repeating over and over again hambone");
+ }
+ client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+ }
+ refresh();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+ for(NodeStats stats : nodesStats.getNodes()) {
+ long throttleMillis = stats.getIndices().getStore().getThrottleTime().getMillis();
+ if (throttleMillis > 0) {
+ done = true;
+ break;
+ }
+ }
+ }
+
+ logger.info("test: disable merge throttling");
+
+ // Now updates settings to disable merge throttling
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none"))
+ .get();
+
+ // Optimize does a waitForMerges, which we must do to make sure all in-flight (throttled) merges finish:
+ logger.info("test: optimize");
+ client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
+ logger.info("test: optimize done");
+
+ // Record current throttling so far
+ long sumThrottleTime = 0;
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+ for(NodeStats stats : nodesStats.getNodes()) {
+ sumThrottleTime += stats.getIndices().getStore().getThrottleTime().getMillis();
+ }
+
+ // Make sure no further throttling happens:
+ for(int i=0;i<100;i++) {
+ // Provoke slowish merging by making many unique terms:
+ StringBuilder sb = new StringBuilder();
+ for(int j=0;j<100;j++) {
+ sb.append(' ');
+ sb.append(termUpto++);
+ }
+ client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+ if (i % 2 == 0) {
+ refresh();
+ }
+ }
+ logger.info("test: done indexing after disabling throttling");
+
+ long newSumThrottleTime = 0;
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+ for(NodeStats stats : nodesStats.getNodes()) {
+ newSumThrottleTime += stats.getIndices().getStore().getThrottleTime().getMillis();
+ }
+
+ // No additional merge IO throttling should have happened:
+ assertEquals(sumThrottleTime, newSumThrottleTime);
+
+ // Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
+ // when ElasticsearchIntegrationTest.after tries to remove indices created by the test:
+
+ // Wait for merges to finish
+ client().admin().indices().prepareOptimize("test").get();
+ flush();
+
+ logger.info("test: test done");
+ }
+
+ private static class MockAppender extends AppenderSkeleton {
+ public boolean sawIndexWriterMessage;
+ public boolean sawFlushDeletes;
+ public boolean sawMergeThreadPaused;
+ public boolean sawUpdateMaxThreadCount;
+ public boolean sawUpdateAutoThrottle;
+
+ @Override
+ protected void append(LoggingEvent event) {
+ String message = event.getMessage().toString();
+ if (event.getLevel() == Level.TRACE &&
+ event.getLoggerName().endsWith("lucene.iw")) {
+ sawFlushDeletes |= message.contains("IW: apply all deletes during flush");
+ sawMergeThreadPaused |= message.contains("CMS: pause thread");
+ }
+ if (event.getLevel() == Level.INFO && message.contains("updating [index.merge.scheduler.max_thread_count] from [10000] to [1]")) {
+ sawUpdateMaxThreadCount = true;
+ }
+ if (event.getLevel() == Level.INFO && message.contains("updating [index.merge.scheduler.auto_throttle] from [true] to [false]")) {
+ sawUpdateAutoThrottle = true;
+ }
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ public void close() {
+ }
+ }
+
+ @Test
+ public void testUpdateAutoThrottleSettings() {
+
+ MockAppender mockAppender = new MockAppender();
+ Logger rootLogger = Logger.getRootLogger();
+ Level savedLevel = rootLogger.getLevel();
+ rootLogger.addAppender(mockAppender);
+ rootLogger.setLevel(Level.TRACE);
+
+ try {
+ // No throttling at first, only 1 non-replicated shard, force lots of merging:
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+ .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "2")
+ .put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, "true")
+ ));
+
+ // Disable auto throttle:
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, "no"))
+ .get();
+
+ // Make sure we log the change:
+ assertTrue(mockAppender.sawUpdateAutoThrottle);
+
+ // Make sure setting says it is in fact changed:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", ConcurrentMergeSchedulerProvider.AUTO_THROTTLE), equalTo("no"));
+ } finally {
+ rootLogger.removeAppender(mockAppender);
+ rootLogger.setLevel(savedLevel);
+ }
+ }
+
+ // #6882: make sure we can change index.merge.scheduler.max_thread_count live
+ @Test
+ public void testUpdateMergeMaxThreadCount() {
+
+ MockAppender mockAppender = new MockAppender();
+ Logger rootLogger = Logger.getRootLogger();
+ Level savedLevel = rootLogger.getLevel();
+ rootLogger.addAppender(mockAppender);
+ rootLogger.setLevel(Level.TRACE);
+
+ try {
+
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+ .put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, ConcurrentMergeSchedulerProvider.class)
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "10000")
+ .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "10000")
+ ));
+
+ assertFalse(mockAppender.sawUpdateMaxThreadCount);
+
+ // Now make a live change to reduce allowed merge threads:
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(Settings.builder()
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+ )
+ .get();
+
+ // Make sure we log the change:
+ assertTrue(mockAppender.sawUpdateMaxThreadCount);
+
+ // Make sure setting says it is in fact changed:
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getSetting("test", ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT), equalTo("1"));
+
+ } finally {
+ rootLogger.removeAppender(mockAppender);
+ rootLogger.setLevel(savedLevel);
+ }
+ }
+
+ @Test
+ public void testUpdateSettingsWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ Settings.Builder builder = Settings.builder().put("index.refresh_interval", -1);
+
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Closing an index is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder));
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
new file mode 100644
index 0000000000..ec06057578
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope= Scope.TEST, numDataNodes =2)
+public class CloseIndexDisableCloseAllTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // Combined multiple tests into one, because cluster scope is test.
+ // The cluster scope is test b/c we can't clear cluster settings.
+ public void testCloseAllRequiresName() {
+ Settings clusterSettings = Settings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings));
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ // Close all explicitly
+ try {
+ client().admin().indices().prepareClose("_all").execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*").execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("test*").execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*", "-test1").execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ // Close all wildcard
+ try {
+ client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test2", "test3");
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
+ private void checkIndexState(IndexMetaData.State state, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(state));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
new file mode 100644
index 0000000000..4ba9722775
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class OpenCloseIndexTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleCloseOpen() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleCloseMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testSimpleOpenMissingIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen("test1").execute().actionGet();
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testCloseOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareClose("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testCloseOneMissingIndexIgnoreMissing() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void testOpenOneMissingIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ client.admin().indices().prepareOpen("test1", "test2").execute().actionGet();
+ }
+
+ @Test
+ public void testOpenOneMissingIndexIgnoreMissing() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1", "test2")
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse1 = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse1.isAcknowledged(), equalTo(true));
+ CloseIndexResponse closeIndexResponse2 = client.admin().indices().prepareClose("test2").execute().actionGet();
+ assertThat(closeIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("test3");
+
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ OpenIndexResponse openIndexResponse2 = client.admin().indices().prepareOpen("test2").execute().actionGet();
+ assertThat(openIndexResponse2.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "a");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+ assertIndexIsOpened("a");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "a");
+ }
+
+ @Test
+ public void testCloseOpenAll() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("_all").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("_all").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test
+ public void testCloseOpenAllWildcard() {
+ Client client = client();
+ createIndex("test1", "test2", "test3");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("*").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2", "test3");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("*").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2", "test3");
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testCloseNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareClose(null).execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNoIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen().execute().actionGet();
+ }
+
+ @Test(expected = ActionRequestValidationException.class)
+ public void testOpenNullIndex() {
+ Client client = client();
+ client.admin().indices().prepareOpen(null).execute().actionGet();
+ }
+
+ @Test
+ public void testOpenAlreadyOpenedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ //no problem if we try to open an index that's already in open state
+ OpenIndexResponse openIndexResponse1 = client.admin().indices().prepareOpen("test1").execute().actionGet();
+ assertThat(openIndexResponse1.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseAlreadyClosedIndex() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ //closing the index
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ //no problem if we try to close an index that's already in close state
+ closeIndexResponse = client.admin().indices().prepareClose("test1").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+ }
+
+ @Test
+ public void testSimpleCloseOpenAlias() {
+ Client client = client();
+ createIndex("test1");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse = client.admin().indices().prepareAliases().addAlias("test1", "test1-alias").execute().actionGet();
+ assertThat(aliasesResponse.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test1-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test1-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1");
+ }
+
+ @Test
+ public void testCloseOpenAliasMultipleIndices() {
+ Client client = client();
+ createIndex("test1", "test2");
+ ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+
+ IndicesAliasesResponse aliasesResponse1 = client.admin().indices().prepareAliases().addAlias("test1", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse1.isAcknowledged(), equalTo(true));
+ IndicesAliasesResponse aliasesResponse2 = client.admin().indices().prepareAliases().addAlias("test2", "test-alias").execute().actionGet();
+ assertThat(aliasesResponse2.isAcknowledged(), equalTo(true));
+
+ CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test-alias").execute().actionGet();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsClosed("test1", "test2");
+
+ OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test-alias").execute().actionGet();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+ assertIndexIsOpened("test1", "test2");
+ }
+
+ private void assertIndexIsOpened(String... indices) {
+ checkIndexState(IndexMetaData.State.OPEN, indices);
+ }
+
+ private void assertIndexIsClosed(String... indices) {
+ checkIndexState(IndexMetaData.State.CLOSE, indices);
+ }
+
+ private void checkIndexState(IndexMetaData.State expectedState, String... indices) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ for (String index : indices) {
+ IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().indices().get(index);
+ assertThat(indexMetaData, notNullValue());
+ assertThat(indexMetaData.getState(), equalTo(expectedState));
+ }
+ }
+
+ @Test
+ public void testOpenCloseWithDocs() throws IOException, ExecutionException, InterruptedException {
+ String mapping = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type").
+ startObject("properties").
+ startObject("test")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject().
+ endObject().
+ endObject()
+ .endObject().string();
+
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type", mapping));
+ ensureGreen();
+ int docs = between(10, 100);
+ IndexRequestBuilder[] builder = new IndexRequestBuilder[docs];
+ for (int i = 0; i < docs ; i++) {
+ builder[i] = client().prepareIndex("test", "type", "" + i).setSource("test", "init");
+ }
+ indexRandom(true, builder);
+ if (randomBoolean()) {
+ client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).setForce(true).execute().get();
+ }
+ client().admin().indices().prepareClose("test").execute().get();
+
+ // check the index still contains the records that we indexed
+ client().admin().indices().prepareOpen("test").execute().get();
+ ensureGreen();
+ SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, docs);
+ }
+
+ @Test
+ public void testOpenCloseIndexWithBlocks() {
+ createIndex("test");
+ ensureGreen("test");
+
+ int docs = between(10, 100);
+ for (int i = 0; i < docs ; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet();
+ }
+
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+
+ // Closing an index is not blocked
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
+ assertAcked(closeIndexResponse);
+ assertIndexIsClosed("test");
+
+ // Opening an index is not blocked
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").execute().actionGet();
+ assertAcked(openIndexResponse);
+ assertIndexIsOpened("test");
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ // Closing an index is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareClose("test"));
+ assertIndexIsOpened("test");
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet();
+ assertAcked(closeIndexResponse);
+ assertIndexIsClosed("test");
+
+ // Opening an index is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ enableIndexBlock("test", blockSetting);
+ assertBlocked(client().admin().indices().prepareOpen("test"));
+ assertIndexIsClosed("test");
+ } finally {
+ disableIndexBlock("test", blockSetting);
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java
new file mode 100644
index 0000000000..956c9d6561
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java
@@ -0,0 +1,369 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0)
+public class RareClusterStateTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ @Test
+ public void testUnassignedShardAndEmptyNodesInRoutingTable() throws Exception {
+ internalCluster().startNode();
+ createIndex("a");
+ ensureSearchable("a");
+ ClusterState current = clusterService().state();
+ GatewayAllocator allocator = internalCluster().getInstance(GatewayAllocator.class);
+
+ AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[0]);
+ RoutingNodes routingNodes = new RoutingNodes(
+ ClusterState.builder(current)
+ .routingTable(RoutingTable.builder(current.routingTable()).remove("a").addAsRecovery(current.metaData().index("a")))
+ .nodes(DiscoveryNodes.EMPTY_NODES)
+ .build()
+ );
+ ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());
+
+ RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), clusterInfo);
+ allocator.allocateUnassigned(routingAllocation);
+ }
+
+ @Test
+ @TestLogging(value = "cluster.service:TRACE")
+ public void testDeleteCreateInOneBulk() throws Exception {
+ internalCluster().startNodesAsync(2, Settings.builder()
+ .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen")
+ .build()).get();
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
+ prepareCreate("test").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).addMapping("type").get();
+ ensureGreen("test");
+
+ // now that the cluster is stable, remove publishing timeout
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0")));
+
+ Set<String> nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
+ nodes.remove(internalCluster().getMasterName());
+
+ // block none master node.
+ BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(nodes.iterator().next(), getRandom());
+ internalCluster().setDisruptionScheme(disruption);
+ logger.info("--> indexing a doc");
+ index("test", "type", "1");
+ refresh();
+ disruption.startDisrupting();
+ logger.info("--> delete index and recreate it");
+ assertFalse(client().admin().indices().prepareDelete("test").setTimeout("200ms").get().isAcknowledged());
+ assertFalse(prepareCreate("test").setTimeout("200ms").setSettings(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, true).get().isAcknowledged());
+ logger.info("--> letting cluster proceed");
+ disruption.stopDisrupting();
+ ensureGreen(TimeValue.timeValueMinutes(30), "test");
+ assertHitCount(client().prepareSearch("test").get(), 0);
+ }
+
+ public void testDelayedMappingPropagationOnPrimary() throws Exception {
+ // Here we want to test that things go well if there is a first request
+ // that adds mappings but before mappings are propagated to all nodes
+ // another index request introduces the same mapping. The master node
+ // will reply immediately since it did not change the cluster state
+ // but the change might not be on the node that performed the indexing
+ // operation yet
+
+ Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build();
+ final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get();
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
+
+ final String master = internalCluster().getMasterName();
+ assertThat(nodeNames, hasItem(master));
+ String otherNode = null;
+ for (String node : nodeNames) {
+ if (node.equals(master) == false) {
+ otherNode = node;
+ break;
+ }
+ }
+ assertNotNull(otherNode);
+
+ // Don't allocate the shard on the master node
+ assertAcked(prepareCreate("index").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.routing.allocation.exclude._name", master)).get());
+ ensureGreen();
+
+ // Check routing tables
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ assertEquals(master, state.nodes().masterNode().name());
+ List<ShardRouting> shards = state.routingTable().allShards("index");
+ assertThat(shards, hasSize(1));
+ for (ShardRouting shard : shards) {
+ if (shard.primary()) {
+ // primary must not be on the master node
+ assertFalse(state.nodes().masterNodeId().equals(shard.currentNodeId()));
+ } else {
+ fail(); // only primaries
+ }
+ }
+
+ // Block cluster state processing where our shard is
+ BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, getRandom());
+ internalCluster().setDisruptionScheme(disruption);
+ disruption.startDisrupting();
+
+ // Add a new mapping...
+ final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
+ client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener<PutMappingResponse>() {
+ @Override
+ public void onResponse(PutMappingResponse response) {
+ putMappingResponse.set(response);
+ }
+ @Override
+ public void onFailure(Throwable e) {
+ putMappingResponse.set(e);
+ }
+ });
+ // ...and wait for mappings to be available on master
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ImmutableOpenMap<String, MappingMetaData> indexMappings = client().admin().indices().prepareGetMappings("index").get().getMappings().get("index");
+ assertNotNull(indexMappings);
+ MappingMetaData typeMappings = indexMappings.get("type");
+ assertNotNull(typeMappings);
+ Object properties;
+ try {
+ properties = typeMappings.getSourceAsMap().get("properties");
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+ assertNotNull(properties);
+ Object fieldMapping = ((Map<String, Object>) properties).get("field");
+ assertNotNull(fieldMapping);
+ }
+ });
+
+ final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
+ client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ docIndexResponse.set(response);
+ }
+ @Override
+ public void onFailure(Throwable e) {
+ docIndexResponse.set(e);
+ }
+ });
+
+ // Wait a bit to make sure that the reason why we did not get a response
+ // is that cluster state processing is blocked and not just that it takes
+ // time to process the indexing request
+ Thread.sleep(100);
+ assertThat(putMappingResponse.get(), equalTo(null));
+ assertThat(docIndexResponse.get(), equalTo(null));
+
+ // Now make sure the indexing request finishes successfully
+ disruption.stopDisrupting();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
+ PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
+ assertTrue(resp.isAcknowledged());
+ assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
+ IndexResponse docResp = (IndexResponse) docIndexResponse.get();
+ assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()),
+ 1, docResp.getShardInfo().getTotal());
+ }
+ });
+ }
+
+ public void testDelayedMappingPropagationOnReplica() throws Exception {
+ // This is essentially the same thing as testDelayedMappingPropagationOnPrimary
+ // but for replicas
+ // Here we want to test that everything goes well if the mappings that
+ // are needed for a document are not available on the replica at the
+ // time of indexing it
+ final List<String> nodeNames = internalCluster().startNodesAsync(2).get();
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
+
+ final String master = internalCluster().getMasterName();
+ assertThat(nodeNames, hasItem(master));
+ String otherNode = null;
+ for (String node : nodeNames) {
+ if (node.equals(master) == false) {
+ otherNode = node;
+ break;
+ }
+ }
+ assertNotNull(otherNode);
+
+ // Force allocation of the primary on the master node by first only allocating on the master
+ // and then allowing all nodes so that the replica gets allocated on the other node
+ assertAcked(prepareCreate("index").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("index.routing.allocation.include._name", master)).get());
+ assertAcked(client().admin().indices().prepareUpdateSettings("index").setSettings(Settings.builder()
+ .put("index.routing.allocation.include._name", "")).get());
+ ensureGreen();
+
+ // Check routing tables
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ assertEquals(master, state.nodes().masterNode().name());
+ List<ShardRouting> shards = state.routingTable().allShards("index");
+ assertThat(shards, hasSize(2));
+ for (ShardRouting shard : shards) {
+ if (shard.primary()) {
+ // primary must be on the master
+ assertEquals(state.nodes().masterNodeId(), shard.currentNodeId());
+ } else {
+ assertTrue(shard.active());
+ }
+ }
+
+ // Block cluster state processing on the replica
+ BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, getRandom());
+ internalCluster().setDisruptionScheme(disruption);
+ disruption.startDisrupting();
+ final AtomicReference<Object> putMappingResponse = new AtomicReference<>();
+ client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener<PutMappingResponse>() {
+ @Override
+ public void onResponse(PutMappingResponse response) {
+ putMappingResponse.set(response);
+ }
+ @Override
+ public void onFailure(Throwable e) {
+ putMappingResponse.set(e);
+ }
+ });
+ // Wait for mappings to be available on master
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master);
+ final IndexService indexService = indicesService.indexServiceSafe("index");
+ assertNotNull(indexService);
+ final MapperService mapperService = indexService.mapperService();
+ DocumentMapper mapper = mapperService.documentMapper("type");
+ assertNotNull(mapper);
+ assertNotNull(mapper.mappers().getMapper("field"));
+ }
+ });
+
+ final AtomicReference<Object> docIndexResponse = new AtomicReference<>();
+ client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ docIndexResponse.set(response);
+ }
+ @Override
+ public void onFailure(Throwable e) {
+ docIndexResponse.set(e);
+ }
+ });
+
+ // Wait for document to be indexed on primary
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertTrue(client().prepareGet("index", "type", "1").setPreference("_primary").get().isExists());
+ }
+ });
+
+ // The mappings have not been propagated to the replica yet, as a consequence the document could not be indexed
+ // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled
+ // and not just because it takes time to replicate the indexing request to the replica
+ Thread.sleep(100);
+ assertThat(putMappingResponse.get(), equalTo(null));
+ assertThat(docIndexResponse.get(), equalTo(null));
+
+ // Now make sure the indexing request finishes successfully
+ disruption.stopDisrupting();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class));
+ PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get();
+ assertTrue(resp.isAcknowledged());
+ assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class));
+ IndexResponse docResp = (IndexResponse) docIndexResponse.get();
+ assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()),
+ 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded
+ }
+ });
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
new file mode 100644
index 0000000000..9c1a78f2f0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.state;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsException;
+import org.elasticsearch.indices.IndexClosedException;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class SimpleIndexStateTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(SimpleIndexStateTests.class);
+
+ @Test
+ public void testSimpleOpenClose() {
+ logger.info("--> creating test index");
+ createIndex("test");
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ NumShards numShards = getNumShards("test");
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+
+ logger.info("--> closing test index...");
+ CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").get();
+ assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+
+ logger.info("--> trying to index into a closed index ...");
+ try {
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ fail();
+ } catch (IndexClosedException e) {
+ // all is well
+ }
+
+ logger.info("--> opening index...");
+ OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").get();
+ assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
+ @Test
+ public void testFastCloseAfterCreateDoesNotClose() {
+ logger.info("--> creating test index that cannot be allocated");
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "no_such_node").build()).get();
+
+ ClusterHealthResponse health = client().admin().cluster().prepareHealth("test").setWaitForNodes(">=2").get();
+ assertThat(health.isTimedOut(), equalTo(false));
+ assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));
+
+ try {
+ client().admin().indices().prepareClose("test").get();
+ fail("Exception should have been thrown");
+ } catch(IndexPrimaryShardNotAllocatedException e) {
+ // expected
+ }
+
+ logger.info("--> updating test index settings to allow allocation");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.settingsBuilder()
+ .put("index.routing.allocation.include.tag", "").build()).get();
+
+ logger.info("--> waiting for green status");
+ ensureGreen();
+
+ NumShards numShards = getNumShards("test");
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
+ assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
+ assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
+
+ logger.info("--> indexing a simple document");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ }
+
+ @Test
+ public void testConsistencyAfterIndexCreationFailure() {
+
+ logger.info("--> deleting test index....");
+ try {
+ client().admin().indices().prepareDelete("test").get();
+ } catch (IndexMissingException ex) {
+ // Ignore
+ }
+
+ logger.info("--> creating test index with invalid settings ");
+ try {
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", "bad")).get();
+ fail();
+ } catch (SettingsException ex) {
+ // Expected
+ }
+
+ logger.info("--> creating test index with valid settings ");
+ CreateIndexResponse response = client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("number_of_shards", 1)).get();
+ assertThat(response.isAcknowledged(), equalTo(true));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java
new file mode 100644
index 0000000000..0ef2bf02fd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java
@@ -0,0 +1,1049 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.stats;
+
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.CommonStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag;
+import org.elasticsearch.action.admin.indices.stats.IndexStats;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.filter.FilterCacheModule;
+import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
+import org.elasticsearch.index.cache.filter.FilterCacheStats;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
+import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, randomDynamicTemplates = false)
+@SuppressCodecs("*") // requires custom completion format
+public class IndexStatsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ .put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms")
+ .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
+ .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
+ .build();
+ }
+
+ @Test
+ public void testFieldDataStats() {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1", "field2", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2", "field2", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort to load it to field data...
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field", SortOrder.ASC).execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+
+ // sort to load it to field data...
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+ client().prepareSearch().addSort("field2", SortOrder.ASC).execute().actionGet();
+
+ // now check the per field stats
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.FieldData, true).fieldDataFields("*")).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()[1].getIndices().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getFields().get("field") + nodesStats.getNodes()[1].getIndices().getFieldData().getFields().get("field"), lessThan(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes()));
+
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).setFieldDataFields("*").execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFieldData().getFields().get("field"), lessThan(indicesStats.getTotal().getFieldData().getMemorySizeInBytes()));
+
+ client().admin().indices().prepareClearCache().setFieldDataCache(true).execute().actionGet();
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ indicesStats = client().admin().indices().prepareStats("test").clear().setFieldData(true).execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ }
+
+ @Test
+ public void testClearAllCaches() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(Settings.settingsBuilder().put("index.number_of_replicas", 0).put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+ client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ client().prepareIndex("test", "type", "1").setSource("field", "value1").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "value2").execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // sort to load it to field data and filter to load filter cache
+ client().prepareSearch()
+ .setPostFilter(QueryBuilders.termQuery("field", "value1"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ client().prepareSearch()
+ .setPostFilter(QueryBuilders.termQuery("field", "value2"))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().execute().actionGet();
+ Thread.sleep(100); // Make sure the filter cache entries have been removed...
+ nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true)
+ .execute().actionGet();
+ assertThat(nodesStats.getNodes()[0].getIndices().getFieldData().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+
+ indicesStats = client().admin().indices().prepareStats("test")
+ .clear().setFieldData(true).setFilterCache(true)
+ .execute().actionGet();
+ assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+
+ @Test
+ public void testQueryCache() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("idx").setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get());
+ ensureGreen();
+
+ // index docs until we have at least one doc on each shard, otherwise, our tests will not work
+ // since refresh will not refresh anything on a shard that has 0 docs and its search response gets cached
+ int pageDocs = randomIntBetween(2, 100);
+ int numDocs = 0;
+ int counter = 0;
+ while (true) {
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[pageDocs];
+ for (int i = 0; i < pageDocs; ++i) {
+ builders[i] = client().prepareIndex("idx", "type", Integer.toString(counter++)).setSource(jsonBuilder()
+ .startObject()
+ .field("common", "field")
+ .field("str_value", "s" + i)
+ .endObject());
+ }
+ indexRandom(true, builders);
+ numDocs += pageDocs;
+
+ boolean allHaveDocs = true;
+ for (ShardStats stats : client().admin().indices().prepareStats("idx").setDocs(true).get().getShards()) {
+ if (stats.getStats().getDocs().getCount() == 0) {
+ allHaveDocs = false;
+ break;
+ }
+ }
+
+ if (allHaveDocs) {
+ break;
+ }
+ }
+
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), equalTo(0l));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), equalTo(0l));
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+ }
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), greaterThan(0l));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), greaterThan(0l));
+
+ // index the data again...
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ builders[i] = client().prepareIndex("idx", "type", Integer.toString(i)).setSource(jsonBuilder()
+ .startObject()
+ .field("common", "field")
+ .field("str_value", "s" + i)
+ .endObject());
+ }
+ indexRandom(true, builders);
+ refresh();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+ }
+ });
+
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+ }
+
+ client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+
+ // test explicit request parameter
+
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(false).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ // set the index level setting to false, and see that the reverse works
+
+ client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
+ assertAcked(client().admin().indices().prepareUpdateSettings("idx").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false)));
+
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+
+ assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+ }
+
+
+ @Test
+ public void nonThrottleStats() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+ .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "10000")
+ ));
+ ensureGreen();
+ long termUpto = 0;
+ IndicesStatsResponse stats;
+ // Provoke slowish merging by making many unique terms:
+ for(int i=0; i<100; i++) {
+ StringBuilder sb = new StringBuilder();
+ for(int j=0; j<100; j++) {
+ sb.append(' ');
+ sb.append(termUpto++);
+ sb.append(" some random text that keeps repeating over and over again hambone");
+ }
+ client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+ }
+ refresh();
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ //nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0l));
+ }
+
+ // Verifies that indexing throttling DOES engage when the merge scheduler is
+ // starved (MAX_MERGE_COUNT=1, one merge thread): keeps indexing unique terms
+ // until the primaries report a non-zero throttle time, failing after 5 minutes.
+ @Test
+ public void throttleStats() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge")
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2")
+ .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, "2")
+ .put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, "1")
+ .put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, "1")
+ .put("index.merge.policy.type", "tiered")
+
+ ));
+ ensureGreen();
+ long termUpto = 0;
+ IndicesStatsResponse stats;
+ // make sure we see throttling kicking in:
+ boolean done = false;
+ long start = System.currentTimeMillis();
+ while (!done) {
+ for(int i=0; i<100; i++) {
+ // Provoke slowish merging by making many unique terms:
+ StringBuilder sb = new StringBuilder();
+ for(int j=0; j<100; j++) {
+ sb.append(' ');
+ sb.append(termUpto++);
+ }
+ client().prepareIndex("test", "type", ""+termUpto).setSource("field" + (i%10), sb.toString()).get();
+ // Refresh every other document to create many small segments and force merges.
+ if (i % 2 == 0) {
+ refresh();
+ }
+ }
+ refresh();
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ //nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).get();
+ done = stats.getPrimaries().getIndexing().getTotal().getThrottleTimeInMillis() > 0;
+ if (System.currentTimeMillis() - start > 300*1000) { //Wait 5 minutes for throttling to kick in
+ fail("index throttling didn't kick in after 5 minutes of intense merging");
+ }
+ }
+
+ // Optimize & flush and wait; else we sometimes get a "Delete Index failed - not acked"
+ // when ElasticsearchIntegrationTest.after tries to remove indices created by the test:
+ logger.info("test: now optimize");
+ client().admin().indices().prepareOptimize("test").get();
+ flush();
+ logger.info("test: test done");
+ }
+
+ // Smoke test of the indices stats API: doc counts for primaries vs. totals,
+ // indexing/get counters, stats flags (clear + selective enabling), and per-type
+ // indexing stats across two small indices.
+ @Test
+ public void simpleStats() throws Exception {
+ createIndex("test1", "test2");
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ refresh();
+
+ // Expected write counts scale with the number of data copies (primary + replicas).
+ NumShards test1 = getNumShards("test1");
+ long test1ExpectedWrites = 2 * test1.dataCopies;
+ NumShards test2 = getNumShards("test2");
+ long test2ExpectedWrites = test2.dataCopies;
+ long totalExpectedWrites = test1ExpectedWrites + test2ExpectedWrites;
+
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getPrimaries().getDocs().getCount(), equalTo(3l));
+ assertThat(stats.getTotal().getDocs().getCount(), equalTo(totalExpectedWrites));
+ assertThat(stats.getPrimaries().getIndexing().getTotal().getIndexCount(), equalTo(3l));
+ assertThat(stats.getPrimaries().getIndexing().getTotal().isThrottled(), equalTo(false));
+ assertThat(stats.getPrimaries().getIndexing().getTotal().getThrottleTimeInMillis(), equalTo(0l));
+ assertThat(stats.getTotal().getIndexing().getTotal().getIndexCount(), equalTo(totalExpectedWrites));
+ assertThat(stats.getTotal().getStore(), notNullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test1").getPrimaries().getDocs().getCount(), equalTo(2l));
+ assertThat(stats.getIndex("test1").getTotal().getDocs().getCount(), equalTo(test1ExpectedWrites));
+ assertThat(stats.getIndex("test1").getPrimaries().getStore(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getMerge(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getFlush(), notNullValue());
+ assertThat(stats.getIndex("test1").getPrimaries().getRefresh(), notNullValue());
+
+ assertThat(stats.getIndex("test2").getPrimaries().getDocs().getCount(), equalTo(1l));
+ assertThat(stats.getIndex("test2").getTotal().getDocs().getCount(), equalTo(test2ExpectedWrites));
+
+ // make sure that number of requests in progress is 0
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getIndexing().getTotal().getDeleteCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getFetchCurrent(), equalTo(0l));
+ assertThat(stats.getIndex("test1").getTotal().getSearch().getTotal().getQueryCurrent(), equalTo(0l));
+
+ // check flags: clear() disables everything, then only flush/refresh/merge are re-enabled
+ stats = client().admin().indices().prepareStats().clear()
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue())
+ ;
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getFlush(), notNullValue());
+ assertThat(stats.getTotal().getRefresh(), notNullValue());
+
+ // check types: only requested types appear in the per-type stats map
+ stats = client().admin().indices().prepareStats().setTypes("type1", "type").execute().actionGet();
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type").getIndexCount(), equalTo(1l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type2"), nullValue());
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getIndexCurrent(), equalTo(0l));
+ assertThat(stats.getPrimaries().getIndexing().getTypeStats().get("type1").getDeleteCurrent(), equalTo(0l));
+
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(0l));
+ // check get
+ GetResponse getResponse = client().prepareGet("test1", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(0l));
+
+ // missing get
+ getResponse = client().prepareGet("test1", "type1", "2").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ stats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(stats.getTotal().getGet().getCount(), equalTo(2l));
+ assertThat(stats.getTotal().getGet().getExistsCount(), equalTo(1l));
+ assertThat(stats.getTotal().getGet().getMissingCount(), equalTo(1l));
+
+ // clear all: the trailing clear() wipes the flags set just above, so everything is off
+ stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+ }
+
+ // Verifies that merge stats are reported after forcing merges: many small
+ // flushed segments are created, then optimize to 1 segment must yield a
+ // positive total merge count.
+ @Test
+ public void testMergeStats() {
+ createIndex("test1");
+
+ ensureGreen();
+
+ // clear all — the trailing clear() wipes the flags set above, so all stats are off
+ IndicesStatsResponse stats = client().admin().indices().prepareStats()
+ .setDocs(false)
+ .setStore(false)
+ .setIndexing(false)
+ .setFlush(true)
+ .setRefresh(true)
+ .setMerge(true)
+ .clear() // reset defaults
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getDocs(), nullValue());
+ assertThat(stats.getTotal().getStore(), nullValue());
+ assertThat(stats.getTotal().getIndexing(), nullValue());
+ assertThat(stats.getTotal().getGet(), nullValue());
+ assertThat(stats.getTotal().getSearch(), nullValue());
+
+ // Flush after every pair of docs to produce many segments worth merging.
+ for (int i = 0; i < 20; i++) {
+ client().prepareIndex("test1", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
+ stats = client().admin().indices().prepareStats()
+ .setMerge(true)
+ .execute().actionGet();
+
+ assertThat(stats.getTotal().getMerge(), notNullValue());
+ assertThat(stats.getTotal().getMerge().getTotal(), greaterThan(0l));
+ }
+
+ // Verifies segment stats: IndexWriter/version-map memory is positive while
+ // documents are unflushed, and after flush + optimize the segment count equals
+ // one segment per shard copy.
+ @Test
+ public void testSegmentsStats() {
+ assertAcked(prepareCreate("test1", 2, settingsBuilder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
+ ensureGreen();
+
+ NumShards test1 = getNumShards("test1");
+
+ for (int i = 0; i < 100; i++) {
+ index("test1", "type1", Integer.toString(i), "field", "value");
+ index("test1", "type2", Integer.toString(i), "field", "value");
+ }
+
+ // Before flushing, buffered docs must show up as indexing-buffer/version-map memory.
+ IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();
+ assertThat(stats.getTotal().getSegments().getIndexWriterMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().getSegments().getIndexWriterMaxMemoryInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().getSegments().getVersionMapMemoryInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareFlush().get();
+ client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
+ stats = client().admin().indices().prepareStats().setSegments(true).get();
+
+ assertThat(stats.getTotal().getSegments(), notNullValue());
+ assertThat(stats.getTotal().getSegments().getCount(), equalTo((long) test1.totalNumShards));
+ assumeTrue("test doesn't work with 4.6.0", org.elasticsearch.Version.CURRENT.luceneVersion != Version.LUCENE_4_6_0);
+ assertThat(stats.getTotal().getSegments().getMemoryInBytes(), greaterThan(0l));
+ }
+
+ // Exhaustively exercises every CommonStatsFlags.Flag via the set()/isSet()
+ // helpers below: all-off, all-on, then a random subset — each time asserting
+ // the response contains exactly the requested stats sections.
+ @Test
+ public void testAllFlags() throws Exception {
+ // rely on 1 replica for this tests
+ createIndex("test1");
+ createIndex("test2");
+
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ // All flags off: no stats section may be present.
+ for (Flag flag : values) {
+ set(flag, builder, false);
+ }
+
+ IndicesStatsResponse stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ // All flags on: every stats section must be present.
+ for (Flag flag : values) {
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : values) {
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+ // Random subset: exactly the chosen flags are present, their complement absent.
+ Random random = getRandom();
+ EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
+ for (Flag flag : values) {
+ if (random.nextBoolean()) {
+ flags.add(flag);
+ }
+ }
+
+
+ for (Flag flag : values) {
+ set(flag, builder, false); // clear all
+ }
+
+ for (Flag flag : flags) { // set the flags
+ set(flag, builder, true);
+ }
+ stats = builder.execute().actionGet();
+ for (Flag flag : flags) { // check the flags
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(true));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(true));
+ }
+
+ for (Flag flag : EnumSet.complementOf(flags)) { // check the complement
+ assertThat(isSet(flag, stats.getPrimaries()), equalTo(false));
+ assertThat(isSet(flag, stats.getTotal()), equalTo(false));
+ }
+
+ }
+
+ // Round-trips CommonStatsFlags through its wire serialization: writes the flags
+ // to a BytesStreamOutput, reads them back, and checks every flag survives —
+ // once with a near-full set, once with a random set.
+ @Test
+ public void testEncodeDecodeCommonStats() throws IOException {
+ CommonStatsFlags flags = new CommonStatsFlags();
+ Flag[] values = CommonStatsFlags.Flag.values();
+ // Default-constructed flags have everything enabled.
+ assertThat(flags.anySet(), equalTo(true));
+
+ for (Flag flag : values) {
+ flags.set(flag, false);
+ }
+ assertThat(flags.anySet(), equalTo(false));
+ for (Flag flag : values) {
+ flags.set(flag, true);
+ }
+ assertThat(flags.anySet(), equalTo(true));
+ // Turning one random flag off must still leave anySet() true.
+ Random random = getRandom();
+ flags.set(values[random.nextInt(values.length)], false);
+ assertThat(flags.anySet(), equalTo(true));
+
+ {
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(StreamInput.wrap(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+
+ {
+ // Second round-trip with a fully random flag assignment.
+ for (Flag flag : values) {
+ flags.set(flag, random.nextBoolean());
+ }
+ BytesStreamOutput out = new BytesStreamOutput();
+ flags.writeTo(out);
+ out.close();
+ BytesReference bytes = out.bytes();
+ CommonStatsFlags readStats = CommonStatsFlags.readCommonStatsFlags(StreamInput.wrap(bytes));
+ for (Flag flag : values) {
+ assertThat(flags.isSet(flag), equalTo(readStats.isSet(flag)));
+ }
+ }
+ }
+
+ // Pins the ordinal of every Flag value: the ordinals are written to the wire,
+ // so reordering (or inserting into) the enum would silently break cross-version
+ // compatibility. New flags must only ever be appended.
+ @Test
+ public void testFlagOrdinalOrder() {
+ Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
+ Flag.FilterCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments,
+ Flag.Translog, Flag.Suggest, Flag.QueryCache, Flag.Recovery};
+
+ // Length check also fails the test when a new flag is added without updating this list.
+ assertThat(flags.length, equalTo(Flag.values().length));
+ for (int i = 0; i < flags.length; i++) {
+ assertThat("ordinal has changed - this breaks the wire protocol. Only append to new values", i, equalTo(flags[i].ordinal()));
+ }
+ }
+
+ // Verifies index-name resolution of the stats request: no indices, "_all" and
+ // "*" cover every shard, explicit names and wildcards select the matching
+ // subset of shards.
+ @Test
+ public void testMultiIndex() throws Exception {
+
+ createIndex("test1");
+ createIndex("test2");
+
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test1", "type2", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ client().prepareIndex("test2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+ refresh();
+
+ int numShards1 = getNumShards("test1").totalNumShards;
+ int numShards2 = getNumShards("test2").totalNumShards;
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2));
+
+ stats = builder.setIndices("_all").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2));
+
+ // NOTE(review): this "_all" check duplicates the one directly above — likely copy/paste.
+ stats = builder.setIndices("_all").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2));
+
+ stats = builder.setIndices("*").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2));
+
+ stats = builder.setIndices("test1").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards1));
+
+ stats = builder.setIndices("test1", "test2").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards1 + numShards2));
+
+ stats = builder.setIndices("*2").execute().actionGet();
+ assertThat(stats.getTotalShards(), equalTo(numShards2));
+
+ }
+
+ // Verifies the fielddata_fields parameter: without it no per-field breakdown is
+ // returned; with explicit names or wildcards only the matching fields appear,
+ // each with a positive memory size (fielddata is loaded by the sort below).
+ @Test
+ public void testFieldDataFieldsParam() throws Exception {
+
+ createIndex("test1");
+
+ ensureGreen();
+
+ client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}").execute().actionGet();
+ client().prepareIndex("test1", "baz", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}").execute().actionGet();
+ refresh();
+
+ // Sorting on both fields forces fielddata to be loaded for them.
+ client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet();
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields(), is(nullValue()));
+
+ stats = builder.setFieldDataFields("bar").execute().actionGet();
+ assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(false));
+
+ stats = builder.setFieldDataFields("bar", "baz").execute().actionGet();
+ assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0l));
+
+ stats = builder.setFieldDataFields("*").execute().actionGet();
+ assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("baz"), greaterThan(0l));
+
+ stats = builder.setFieldDataFields("*r").execute().actionGet();
+ assertThat(stats.getTotal().fieldData.getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("bar"), is(true));
+ assertThat(stats.getTotal().fieldData.getFields().get("bar"), greaterThan(0l));
+ assertThat(stats.getTotal().fieldData.getFields().containsKey("baz"), is(false));
+
+ }
+
+ // Verifies the completion_fields parameter: the mapping defines completion
+ // sub-fields bar.completion / baz.completion; without the parameter no per-field
+ // map is returned, with names or wildcards only the matching fields appear.
+ @Test
+ public void testCompletionFieldsParam() throws Exception {
+
+ assertAcked(prepareCreate("test1")
+ .addMapping(
+ "bar",
+ "{ \"properties\": { \"bar\": { \"type\": \"string\", \"fields\": { \"completion\": { \"type\": \"completion\" }}},\"baz\": { \"type\": \"string\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}"));
+ ensureGreen();
+
+ client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}").execute().actionGet();
+ client().prepareIndex("test1", "baz", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}").execute().actionGet();
+ refresh();
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields(), is(nullValue()));
+
+ stats = builder.setCompletionFields("bar.completion").execute().actionGet();
+ assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(false));
+
+ stats = builder.setCompletionFields("bar.completion", "baz.completion").execute().actionGet();
+ assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0l));
+
+ stats = builder.setCompletionFields("*").execute().actionGet();
+ assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("baz.completion"), greaterThan(0l));
+
+ stats = builder.setCompletionFields("*r*").execute().actionGet();
+ assertThat(stats.getTotal().completion.getSizeInBytes(), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("bar.completion"), is(true));
+ assertThat(stats.getTotal().completion.getFields().get("bar.completion"), greaterThan(0l));
+ assertThat(stats.getTotal().completion.getFields().containsKey("baz.completion"), is(false));
+
+ }
+
+ // Verifies the groups parameter of search stats: a search tagged with stats
+ // groups "bar"/"baz" populates group stats, which are only returned for the
+ // group names (or wildcards) requested.
+ @Test
+ public void testGroupsParam() throws Exception {
+
+ createIndex("test1");
+
+ ensureGreen();
+
+ client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("foo", "bar").execute().actionGet();
+ refresh();
+
+ // setStats() tags this search so it is counted under the "bar" and "baz" groups.
+ client().prepareSearch("_all").setStats("bar", "baz").execute().actionGet();
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ assertThat(stats.getTotal().search.getTotal().getQueryCount(), greaterThan(0l));
+ assertThat(stats.getTotal().search.getGroupStats(), is(nullValue()));
+
+ stats = builder.setGroups("bar").execute().actionGet();
+ assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0l));
+ assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false));
+
+ stats = builder.setGroups("bar", "baz").execute().actionGet();
+ assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0l));
+ assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0l));
+
+ stats = builder.setGroups("*").execute().actionGet();
+ assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0l));
+ assertThat(stats.getTotal().search.getGroupStats().get("baz").getQueryCount(), greaterThan(0l));
+
+ stats = builder.setGroups("*r").execute().actionGet();
+ assertThat(stats.getTotal().search.getGroupStats().get("bar").getQueryCount(), greaterThan(0l));
+ assertThat(stats.getTotal().search.getGroupStats().containsKey("baz"), is(false));
+
+ }
+
+ // Verifies the types parameter of indexing stats: per-type stats are absent by
+ // default and, when requested by name or wildcard, only the matching mapping
+ // types appear in the type-stats map.
+ @Test
+ public void testTypesParam() throws Exception {
+
+ createIndex("test1");
+ createIndex("test2");
+
+ ensureGreen();
+
+ client().prepareIndex("test1", "bar", Integer.toString(1)).setSource("foo", "bar").execute().actionGet();
+ client().prepareIndex("test2", "baz", Integer.toString(1)).setSource("foo", "bar").execute().actionGet();
+ refresh();
+
+ IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
+ IndicesStatsResponse stats = builder.execute().actionGet();
+
+ assertThat(stats.getTotal().indexing.getTotal().getIndexCount(), greaterThan(0l));
+ assertThat(stats.getTotal().indexing.getTypeStats(), is(nullValue()));
+
+ stats = builder.setTypes("bar").execute().actionGet();
+ assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0l));
+ assertThat(stats.getTotal().indexing.getTypeStats().containsKey("baz"), is(false));
+
+ stats = builder.setTypes("bar", "baz").execute().actionGet();
+ assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0l));
+ assertThat(stats.getTotal().indexing.getTypeStats().get("baz").getIndexCount(), greaterThan(0l));
+
+ stats = builder.setTypes("*").execute().actionGet();
+ assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0l));
+ assertThat(stats.getTotal().indexing.getTypeStats().get("baz").getIndexCount(), greaterThan(0l));
+
+ stats = builder.setTypes("*r").execute().actionGet();
+ assertThat(stats.getTotal().indexing.getTypeStats().get("bar").getIndexCount(), greaterThan(0l));
+ assertThat(stats.getTotal().indexing.getTypeStats().containsKey("baz"), is(false));
+
+ }
+
+ // Maps a CommonStatsFlags.Flag to the corresponding setter on the stats request
+ // builder. The default arm fails the test so a newly added Flag cannot be
+ // silently missed here (mirrors isSet() below).
+ private static void set(Flag flag, IndicesStatsRequestBuilder builder, boolean set) {
+ switch (flag) {
+ case Docs:
+ builder.setDocs(set);
+ break;
+ case FieldData:
+ builder.setFieldData(set);
+ break;
+ case FilterCache:
+ builder.setFilterCache(set);
+ break;
+ case Flush:
+ builder.setFlush(set);
+ break;
+ case Get:
+ builder.setGet(set);
+ break;
+ case Indexing:
+ builder.setIndexing(set);
+ break;
+ case Merge:
+ builder.setMerge(set);
+ break;
+ case Refresh:
+ builder.setRefresh(set);
+ break;
+ case Search:
+ builder.setSearch(set);
+ break;
+ case Store:
+ builder.setStore(set);
+ break;
+ case Warmer:
+ builder.setWarmer(set);
+ break;
+ case Percolate:
+ builder.setPercolate(set);
+ break;
+ case Completion:
+ builder.setCompletion(set);
+ break;
+ case Segments:
+ builder.setSegments(set);
+ break;
+ case Translog:
+ builder.setTranslog(set);
+ break;
+ case Suggest:
+ builder.setSuggest(set);
+ break;
+ case QueryCache:
+ builder.setQueryCache(set);
+ break;
+ case Recovery:
+ builder.setRecovery(set);
+ break;
+ default:
+ fail("new flag? " + flag);
+ break;
+ }
+ }
+
+ // Returns whether the stats section corresponding to the given flag is present
+ // (non-null) in the response. The default arm fails the test so a newly added
+ // Flag cannot be silently missed here (mirrors set() above).
+ private static boolean isSet(Flag flag, CommonStats response) {
+ switch (flag) {
+ case Docs:
+ return response.getDocs() != null;
+ case FieldData:
+ return response.getFieldData() != null;
+ case FilterCache:
+ return response.getFilterCache() != null;
+ case Flush:
+ return response.getFlush() != null;
+ case Get:
+ return response.getGet() != null;
+ case Indexing:
+ return response.getIndexing() != null;
+ case Merge:
+ return response.getMerge() != null;
+ case Refresh:
+ return response.getRefresh() != null;
+ case Search:
+ return response.getSearch() != null;
+ case Store:
+ return response.getStore() != null;
+ case Warmer:
+ return response.getWarmer() != null;
+ case Percolate:
+ return response.getPercolate() != null;
+ case Completion:
+ return response.getCompletion() != null;
+ case Segments:
+ return response.getSegments() != null;
+ case Translog:
+ return response.getTranslog() != null;
+ case Suggest:
+ return response.getSuggest() != null;
+ case QueryCache:
+ return response.getQueryCache() != null;
+ case Recovery:
+ // note: getter name differs from the flag name
+ return response.getRecoveryStats() != null;
+ default:
+ fail("new flag? " + flag);
+ return false;
+ }
+ }
+
+ // Field-by-field equality assertion for FilterCacheStats (the class does not
+ // override equals()).
+ private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) {
+ assertEquals(stats1.getCacheCount(), stats2.getCacheCount());
+ assertEquals(stats1.getCacheSize(), stats2.getCacheSize());
+ assertEquals(stats1.getEvictions(), stats2.getEvictions());
+ assertEquals(stats1.getHitCount(), stats2.getHitCount());
+ // BUG FIX: the original compared stats2 against itself here, so a memory-size
+ // mismatch could never be detected.
+ assertEquals(stats1.getMemorySizeInBytes(), stats2.getMemorySizeInBytes());
+ assertEquals(stats1.getMissCount(), stats2.getMissCount());
+ assertEquals(stats1.getTotalCount(), stats2.getTotalCount());
+ }
+
+ // Asserts that the response's total filter-cache stats equal both the sum over
+ // all per-index totals and the sum over all per-shard stats.
+ private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) {
+ assertAllSuccessful(response);
+ FilterCacheStats total = response.getTotal().filterCache;
+ FilterCacheStats indexTotal = new FilterCacheStats();
+ FilterCacheStats shardTotal = new FilterCacheStats();
+ for (IndexStats indexStats : response.getIndices().values()) {
+ indexTotal.add(indexStats.getTotal().filterCache);
+ }
+ // BUG FIX: getShards() spans the whole response, but the original iterated it
+ // inside the per-index loop, adding every shard once per index and over-counting
+ // shardTotal whenever the response covers more than one index.
+ for (ShardStats shardStats : response.getShards()) {
+ shardTotal.add(shardStats.getStats().filterCache);
+ }
+ assertEquals(total, indexTotal);
+ assertEquals(total, shardTotal);
+ }
+
+ // End-to-end filter-cache stats lifecycle: empty cache -> miss on first cached
+ // query -> hit on repeat -> evictions after all docs are deleted and segments
+ // drop out -> repopulation -> explicit clear-cache resets size and memory to 0.
+ public void testFilterCacheStats() throws Exception {
+ assertAcked(prepareCreate("index").setSettings("number_of_replicas", 0).get());
+ indexRandom(true,
+ client().prepareIndex("index", "type", "1").setSource("foo", "bar"),
+ client().prepareIndex("index", "type", "2").setSource("foo", "baz"));
+ ensureGreen();
+
+ IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertEquals(0, response.getTotal().filterCache.getCacheSize());
+
+ // First execution of the cached (constant-score) query: a miss that populates the cache.
+ SearchResponse r;
+ assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
+ response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L));
+ assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L));
+ assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
+
+ // Same query again: now served from the cache, so the hit count rises.
+ assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
+ response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L));
+ assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
+
+ // Deleting all docs and refreshing drops the segments, evicting their cache entries.
+ assertTrue(client().prepareDelete("index", "type", "1").get().isFound());
+ assertTrue(client().prepareDelete("index", "type", "2").get().isFound());
+ refresh();
+ response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L));
+ assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L));
+
+ // Re-index and query again to repopulate the cache.
+ indexRandom(true,
+ client().prepareIndex("index", "type", "1").setSource("foo", "bar"),
+ client().prepareIndex("index", "type", "2").setSource("foo", "baz"));
+ assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
+
+ response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L));
+
+ // Explicit clear-cache: size and memory go back to zero; counters are cumulative and stay positive.
+ assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get());
+ response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
+ assertCumulativeFilterCacheStats(response);
+ assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
+ assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L));
+ assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java
new file mode 100644
index 0000000000..12bad2364a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java
@@ -0,0 +1,302 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.*;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "")
+ // by default this timeout is 1 sec in tests (30 sec in production), but the disruption
+ // we add here (between 1 and 2 sec) can cause each of the shard deletion requests to time out.
+ // to prevent this we set the timeout to something high, i.e. the production default
+ .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS))
+ .build();
+ }
+
+ @Override
+ protected void ensureClusterStateConsistency() throws IOException {
+ // testShardActiveElseWhere might change the state of a non-master node
+ // so we cannot check state consistency of this cluster
+ }
+
+ @Test
+ public void indexCleanup() throws Exception {
+ final String masterNode = internalCluster().startNode(Settings.builder().put("node.data", false));
+ final String node_1 = internalCluster().startNode(Settings.builder().put("node.master", false));
+ final String node_2 = internalCluster().startNode(Settings.builder().put("node.master", false));
+ logger.info("--> creating index [test] with one shard and one replica");
+ assertAcked(prepareCreate("test").setSettings(
+ Settings.builder().put(indexSettings())
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
+ );
+ ensureGreen("test");
+
+ logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
+ assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
+
+ logger.info("--> starting node server3");
+ final String node_3 = internalCluster().startNode(Settings.builder().put("node.master", false));
+ logger.info("--> running cluster_health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
+ .setWaitForNodes("4")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(false));
+ assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false));
+
+ logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish");
+ if (randomBoolean()) { // sometimes add cluster-state delay to trigger observers in IndicesStore.ShardActiveRequestHandler
+ final SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(node_3, getRandom(), 0, 0, 1000, 2000);
+ internalCluster().setDisruptionScheme(disruption);
+ disruption.startDisrupting();
+ }
+ internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_3)).get();
+ clusterHealth = client().admin().cluster().prepareHealth()
+ .setWaitForNodes("4")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false));
+ assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false));
+ assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
+ assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true));
+
+ }
+
+ @Test
+ public void shardsCleanup() throws Exception {
+ final String node_1 = internalCluster().startNode();
+ final String node_2 = internalCluster().startNode();
+ logger.info("--> creating index [test] with one shard and one replica");
+ assertAcked(prepareCreate("test").setSettings(
+ Settings.builder().put(indexSettings())
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
+ );
+ ensureGreen("test");
+
+ logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
+ assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
+
+ logger.info("--> starting node server3");
+ String node_3 = internalCluster().startNode();
+ logger.info("--> running cluster_health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
+ .setWaitForNodes("3")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+
+ logger.info("--> making sure that shard is not allocated on server3");
+ assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
+
+ Path server2Shard = shardDirectory(node_2, "test", 0);
+ logger.info("--> stopping node " + node_2);
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));
+
+ logger.info("--> running cluster_health");
+ clusterHealth = client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes("2")
+ .setWaitForRelocatingShards(0)
+ .get();
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ assertThat(Files.exists(server2Shard), equalTo(true));
+
+ logger.info("--> making sure that shard and its replica exist on server1, server2 and server3");
+ assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
+ assertThat(Files.exists(server2Shard), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
+
+ logger.info("--> starting node node_4");
+ final String node_4 = internalCluster().startNode();
+
+ logger.info("--> running cluster_health");
+ ensureGreen();
+
+ logger.info("--> making sure that shard and its replica are allocated on server1 and server3 but not on server2");
+ assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
+ assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
+ assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false));
+ }
+
+ @Test @Slow
+ public void testShardActiveElseWhere() throws Exception {
+ boolean node1IsMasterEligible = randomBoolean();
+ boolean node2IsMasterEligible = !node1IsMasterEligible || randomBoolean();
+ Future<String> node_1_future = internalCluster().startNodeAsync(Settings.builder().put("node.master", node1IsMasterEligible).build());
+ Future<String> node_2_future = internalCluster().startNodeAsync(Settings.builder().put("node.master", node2IsMasterEligible).build());
+ final String node_1 = node_1_future.get();
+ final String node_2 = node_2_future.get();
+ final String node_1_id = internalCluster().getInstance(DiscoveryService.class, node_1).localNode().getId();
+ final String node_2_id = internalCluster().getInstance(DiscoveryService.class, node_2).localNode().getId();
+
+ logger.debug("node {} (node_1) is {}master eligible", node_1, node1IsMasterEligible ? "" : "not ");
+ logger.debug("node {} (node_2) is {}master eligible", node_2, node2IsMasterEligible ? "" : "not ");
+ logger.debug("node {} became master", internalCluster().getMasterName());
+ final int numShards = scaledRandomIntBetween(2, 20);
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards))
+ );
+ ensureGreen("test");
+
+ waitNoPendingTasksOnAll();
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+
+ RoutingNode routingNode = stateResponse.getState().routingNodes().node(node_2_id);
+ final int[] node2Shards = new int[routingNode.numberOfOwningShards()];
+ int i = 0;
+ for (MutableShardRouting mutableShardRouting : routingNode) {
+ node2Shards[i] = mutableShardRouting.shardId().id();
+ i++;
+ }
+ logger.info("Node 2 has shards: {}", Arrays.toString(node2Shards));
+ final long shardVersions[] = new long[numShards];
+ final int shardIds[] = new int[numShards];
+ i = 0;
+ for (ShardRouting shardRouting : stateResponse.getState().getRoutingTable().allShards("test")) {
+ shardVersions[i] = shardRouting.version();
+ shardIds[i] = shardRouting.getId();
+ i++;
+ }
+ // disable relocations when we do this, to make sure the shards are not relocated from node2
+ // due to rebalancing, and delete its content
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get();
+ internalCluster().getInstance(ClusterService.class, node_2).submitStateUpdateTask("test", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder("test");
+ for (int i = 0; i < numShards; i++) {
+ indexRoutingTableBuilder.addIndexShard(
+ new IndexShardRoutingTable.Builder(new ShardId("test", i), false)
+ .addShard(new ImmutableShardRouting("test", i, node_1_id, true, ShardRoutingState.STARTED, shardVersions[shardIds[i]]))
+ .build()
+ );
+ }
+ return ClusterState.builder(currentState)
+ .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build())
+ .build();
+ }
+
+ public boolean runOnlyOnMaster() {
+ return false;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ }
+ });
+ waitNoPendingTasksOnAll();
+ logger.info("Checking if shards aren't removed");
+ for (int shard : node2Shards) {
+ assertTrue(waitForShardDeletion(node_2, "test", shard));
+ }
+ }
+
+ private Path indexDirectory(String server, String index) {
+ NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
+ final Path[] paths = env.indexPaths(new Index(index));
+ assert paths.length == 1;
+ return paths[0];
+ }
+
+ private Path shardDirectory(String server, String index, int shard) {
+ NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
+ final Path[] paths = env.availableShardPaths(new ShardId(index, shard));
+ assert paths.length == 1;
+ return paths[0];
+ }
+
+ private boolean waitForShardDeletion(final String server, final String index, final int shard) throws InterruptedException {
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ return !Files.exists(shardDirectory(server, index, shard));
+ }
+ });
+ return Files.exists(shardDirectory(server, index, shard));
+ }
+
+ private boolean waitForIndexDeletion(final String server, final String index) throws InterruptedException {
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ return !Files.exists(indexDirectory(server, index));
+ }
+ });
+ return Files.exists(indexDirectory(server, index));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
new file mode 100644
index 0000000000..eadee06e2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.store;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.ImmutableShardRouting;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.Version.CURRENT;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.is;
+
+/**
+ */
+public class IndicesStoreTests extends ElasticsearchTestCase {
+
+ private final static ShardRoutingState[] NOT_STARTED_STATES;
+
+ static {
+ Set<ShardRoutingState> set = new HashSet<>();
+ set.addAll(Arrays.asList(ShardRoutingState.values()));
+ set.remove(ShardRoutingState.STARTED);
+ NOT_STARTED_STATES = set.toArray(new ShardRoutingState[set.size()]);
+ }
+
+ private IndicesStore indicesStore;
+ private DiscoveryNode localNode;
+
+ @Before
+ public void before() {
+ localNode = new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT);
+ indicesStore = new IndicesStore();
+ }
+
+ @Test
+ public void testShardCanBeDeleted_noShardRouting() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+
+ assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+ @Test
+ public void testShardCanBeDeleted_noShardStarted() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+
+ for (int i = 0; i < numShards; i++) {
+ int unStartedShard = randomInt(numReplicas);
+ for (int j=0; j <= numReplicas; j++) {
+ ShardRoutingState state;
+ if (j == unStartedShard) {
+ state = randomFrom(NOT_STARTED_STATES);
+ } else {
+ state = randomFrom(ShardRoutingState.values());
+ }
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", null, j == 0, state, 0));
+ }
+ }
+ assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+ @Test
+ public void testShardCanBeDeleted_shardExistsLocally() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), Version.CURRENT)));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+ int localShardId = randomInt(numShards - 1);
+ for (int i = 0; i < numShards; i++) {
+ String nodeId = i == localShardId ? localNode.getId() : randomBoolean() ? "abc" : "xyz";
+ String relocationNodeId = randomBoolean() ? null : randomBoolean() ? localNode.getId() : "xyz";
+ routingTable.addShard(new ImmutableShardRouting("test", i, nodeId, relocationNodeId, true, ShardRoutingState.STARTED, 0));
+ for (int j = 0; j < numReplicas; j++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, nodeId, relocationNodeId, false, ShardRoutingState.STARTED, 0));
+ }
+ }
+
+ // Shard exists locally, can't delete shard
+ assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+ @Test
+ public void testShardCanBeDeleted_nodeNotInList() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+ for (int i = 0; i < numShards; i++) {
+ String relocatingNodeId = randomBoolean() ? null : "def";
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", relocatingNodeId, true, ShardRoutingState.STARTED, 0));
+ for (int j = 0; j < numReplicas; j++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", relocatingNodeId, false, ShardRoutingState.STARTED, 0));
+ }
+ }
+
+ // null node -> false
+ assertFalse(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+ @Test
+ public void testShardCanBeDeleted_nodeVersion() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ // Most of the times don't test bwc and use current version
+ final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id()).put(localNode).put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), nodeVersion)));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+ for (int i = 0; i < numShards; i++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", null, true, ShardRoutingState.STARTED, 0));
+ for (int j = 0; j < numReplicas; j++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", null, false, ShardRoutingState.STARTED, 0));
+ }
+ }
+
+ // shard exist on other node (abc)
+ assertTrue(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+ @Test
+ public void testShardCanBeDeleted_relocatingNode() throws Exception {
+ int numShards = randomIntBetween(1, 7);
+ int numReplicas = randomInt(2);
+
+ ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test"));
+ clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas)));
+ final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random());
+
+ clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.id())
+ .put(localNode)
+ .put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), Version.CURRENT))
+ .put(new DiscoveryNode("def", new LocalTransportAddress("def"), nodeVersion) // <-- only set relocating, since we're testing that in this test
+ ));
+ IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", 1), false);
+ for (int i = 0; i < numShards; i++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", "def", true, ShardRoutingState.STARTED, 0));
+ for (int j = 0; j < numReplicas; j++) {
+ routingTable.addShard(new ImmutableShardRouting("test", i, "xyz", "def", false, ShardRoutingState.STARTED, 0));
+ }
+ }
+
+ // shard exist on other node (abc and def)
+ assertTrue(indicesStore.shardCanBeDeleted(clusterState.build(), routingTable.build()));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java
new file mode 100644
index 0000000000..e8fddf2c1a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.template;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.hasSize;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class IndexTemplateBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testIndexTemplatesWithBlocks() throws IOException {
+ // creates a simple index template
+ client().admin().indices().preparePutTemplate("template_blocks")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ try {
+ setClusterReadOnly(true);
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_blocks").execute().actionGet();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+
+ assertBlocked(client().admin().indices().preparePutTemplate("template_blocks_2")
+ .setTemplate("block*")
+ .setOrder(0)
+ .addAlias(new Alias("alias_1")));
+
+ assertBlocked(client().admin().indices().prepareDeleteTemplate("template_blocks"));
+
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringTests.java b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringTests.java
new file mode 100644
index 0000000000..d09761c5a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/IndexTemplateFilteringTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.template;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ClusterScope(scope = Scope.SUITE)
+public class IndexTemplateFilteringTests extends ElasticsearchIntegrationTest{
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", TestPlugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testTemplateFiltering() throws Exception {
+ client().admin().indices().preparePutTemplate("template1")
+ .setTemplate("test*")
+ .addMapping("type1", "field1", "type=string").get();
+
+ client().admin().indices().preparePutTemplate("template2")
+ .setTemplate("test*")
+ .addMapping("type2", "field2", "type=string").get();
+
+ client().admin().indices().preparePutTemplate("template3")
+ .setTemplate("no_match")
+ .addMapping("type3", "field3", "type=string").get();
+
+ assertAcked(prepareCreate("test").putHeader("header_test", "header_value"));
+
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings("test").get();
+ assertThat(response, notNullValue());
+ ImmutableOpenMap<String, MappingMetaData> metadata = response.getMappings().get("test");
+ assertThat(metadata.size(), is(1));
+ assertThat(metadata.get("type2"), notNullValue());
+ }
+
+
+ public static class TestFilter implements IndexTemplateFilter {
+ @Override
+ public boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template) {
+ //make sure that no_match template is filtered out before the custom filters as it doesn't match the index name
+ return (template.name().equals("template2") || template.name().equals("no_match")) && request.originalMessage().getHeader("header_test").equals("header_value");
+ }
+ }
+
+ public static class TestPlugin extends AbstractPlugin {
+ @Override
+ public String name() {
+ return "test-plugin";
+ }
+
+ @Override
+ public String description() {
+ return "";
+ }
+
+ public void onModule(ClusterModule module) {
+ module.registerIndexTemplateFilter(TestFilter.class);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
new file mode 100644
index 0000000000..131e8ad73d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java
@@ -0,0 +1,675 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.template;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.AliasMetaData;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
+import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleIndexTemplateTests() throws Exception {
+ // clean all templates setup by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check get all templates on an empty index.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setSettings(indexSettings())
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setSettings(indexSettings())
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ // test create param
+ assertThrows(client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setSettings(indexSettings())
+ .setCreate(true)
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ , IndexTemplateAlreadyExistsException.class
+ );
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(2));
+
+
+ // index something into test_index, will match on both templates
+ client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ ensureGreen();
+ SearchResponse searchResponse = client().prepareSearch("test_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2")); // this will still be loaded because of the source feature
+
+ client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
+
+ ensureGreen();
+ // now only match on one template (template_1)
+ searchResponse = client().prepareSearch("text_index")
+ .setQuery(termQuery("field1", "value1"))
+ .addField("field1").addField("field2")
+ .execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2"));
+ }
+
+ @Test
+ public void testDeleteIndexTemplate() throws Exception {
+ final int existingTemplates = admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size();
+ logger.info("--> put template_1 and template_2");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("test*")
+ .setOrder(1)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> explicitly delete template_1");
+ admin().indices().prepareDeleteTemplate("template_1").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(1 + existingTemplates));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_2"), equalTo(true));
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().containsKey("template_1"), equalTo(false));
+
+
+ logger.info("--> put template_1 back");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> delete template*");
+ admin().indices().prepareDeleteTemplate("template*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(existingTemplates));
+
+ logger.info("--> delete * with no templates, make sure we don't get a failure");
+ admin().indices().prepareDeleteTemplate("*").execute().actionGet();
+ assertThat(admin().cluster().prepareState().execute().actionGet().getState().metaData().templates().size(), equalTo(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_1");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(1));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0), is(notNullValue()));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getTemplate(), is("te*"));
+ assertThat(getTemplate1Response.getIndexTemplates().get(0).getOrder(), is(0));
+
+ logger.info("--> get non-existing-template");
+ GetIndexTemplatesResponse getTemplate2Response = client().admin().indices().prepareGetTemplates("non-existing-template").execute().actionGet();
+ assertThat(getTemplate2Response.getIndexTemplates(), hasSize(0));
+ }
+
+ @Test
+ public void testThatGetIndexTemplatesWithSimpleRegexWorks() throws Exception {
+ logger.info("--> put template_1");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template_2");
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> put template3");
+ client().admin().indices().preparePutTemplate("template3")
+ .setTemplate("te*")
+ .setOrder(0)
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ logger.info("--> get template template_*");
+ GetIndexTemplatesResponse getTemplate1Response = client().admin().indices().prepareGetTemplates("template_*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ List<String> templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+
+ logger.info("--> get all templates");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template*").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(3));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(2).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2", "template3"));
+
+ logger.info("--> get templates template_1 and template_2");
+ getTemplate1Response = client().admin().indices().prepareGetTemplates("template_1", "template_2").execute().actionGet();
+ assertThat(getTemplate1Response.getIndexTemplates(), hasSize(2));
+
+ templateNames = Lists.newArrayList();
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(0).name());
+ templateNames.add(getTemplate1Response.getIndexTemplates().get(1).name());
+ assertThat(templateNames, containsInAnyOrder("template_1", "template_2"));
+ }
+
+ @Test
+ public void testThatInvalidGetIndexTemplatesFails() throws Exception {
+ logger.info("--> get template null");
+ testExpectActionRequestValidationException(null);
+
+ logger.info("--> get template empty");
+ testExpectActionRequestValidationException("");
+
+ logger.info("--> get template 'a', '', 'c'");
+ testExpectActionRequestValidationException("a", "", "c");
+
+ logger.info("--> get template 'a', null, 'c'");
+ testExpectActionRequestValidationException("a", null, "c");
+ }
+
+ private void testExpectActionRequestValidationException(String... names) {
+ assertThrows(client().admin().indices().prepareGetTemplates(names),
+ ActionRequestValidationException.class,
+ "get template with " + Arrays.toString(names));
+ }
+
+ @Test
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/8802")
+ public void testBrokenMapping() throws Exception {
+ // clean all templates setup by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check get all templates on an empty index.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addMapping("type1", "abcde")
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getMappings().get("type1").string(), equalTo("abcde"));
+
+ try {
+ createIndex("test");
+ fail("create index should have failed due to broken index templates mapping");
+ } catch(ElasticsearchParseException e) {
+ //everything fine
+ }
+ }
+
+ @Test
+ public void testInvalidSettings() throws Exception {
+ // clean all templates setup by the framework.
+ client().admin().indices().prepareDeleteTemplate("*").get();
+
+ // check get all templates on an empty index.
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), empty());
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setSettings(Settings.builder().put("does_not_exist", "test"))
+ .get();
+
+ response = client().admin().indices().prepareGetTemplates().get();
+ assertThat(response.getIndexTemplates(), hasSize(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().getAsMap().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getSettings().get("index.does_not_exist"), equalTo("test"));
+
+ createIndex("test");
+
+ //the wrong setting has no effect but does get stored among the index settings
+ GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
+ assertThat(getSettingsResponse.getIndexToSettings().get("test").getAsMap().get("index.does_not_exist"), equalTo("test"));
+ }
+
+ public void testIndexTemplateWithAliases() throws Exception {
+
+ client().admin().indices().preparePutTemplate("template_with_aliases")
+ .setTemplate("te*")
+ .addMapping("type1", "{\"type1\" : {\"properties\" : {\"value\" : {\"type\" : \"string\"}}}}")
+ .addAlias(new Alias("simple_alias"))
+ .addAlias(new Alias("templated_alias-{index}"))
+ .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}"))
+ .addAlias(new Alias("complex_filtered_alias")
+ .filter(QueryBuilders.termsQuery("_type", "typeX", "typeY", "typeZ").execution("bool")))
+ .get();
+
+ assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ"));
+ ensureGreen();
+
+ client().prepareIndex("test_index", "type1", "1").setSource("field", "A value").get();
+ client().prepareIndex("test_index", "type2", "2").setSource("field", "B value").get();
+ client().prepareIndex("test_index", "typeX", "3").setSource("field", "C value").get();
+ client().prepareIndex("test_index", "typeY", "4").setSource("field", "D value").get();
+ client().prepareIndex("test_index", "typeZ", "5").setSource("field", "E value").get();
+
+ GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get();
+ assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
+ assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(4));
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test_index").get();
+ assertHitCount(searchResponse, 5l);
+
+ searchResponse = client().prepareSearch("simple_alias").get();
+ assertHitCount(searchResponse, 5l);
+
+ searchResponse = client().prepareSearch("templated_alias-test_index").get();
+ assertHitCount(searchResponse, 5l);
+
+ searchResponse = client().prepareSearch("filtered_alias").get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type2"));
+
+ // Search the complex filter alias
+ searchResponse = client().prepareSearch("complex_filtered_alias").get();
+ assertHitCount(searchResponse, 3l);
+
+ Set<String> types = Sets.newHashSet();
+ for (SearchHit searchHit : searchResponse.getHits().getHits()) {
+ types.add(searchHit.getType());
+ }
+ assertThat(types.size(), equalTo(3));
+ assertThat(types, containsInAnyOrder("typeX", "typeY", "typeZ"));
+ }
+
+ @Test
+ public void testIndexTemplateWithAliasesInSource() {
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"*\",\n" +
+ " \"aliases\" : {\n" +
+ " \"my_alias\" : {\n" +
+ " \"filter\" : {\n" +
+ " \"type\" : {\n" +
+ " \"value\" : \"type2\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}").get();
+
+
+ assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2"));
+ ensureGreen();
+
+ GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get();
+ assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
+ assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1));
+
+ client().prepareIndex("test_index", "type1", "1").setSource("field", "value1").get();
+ client().prepareIndex("test_index", "type2", "2").setSource("field", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test_index").get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("my_alias").get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type2"));
+ }
+
+ @Test
+ public void testIndexTemplateWithAliasesSource() {
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .setAliases(
+ " {\n" +
+ " \"alias1\" : {},\n" +
+ " \"alias2\" : {\n" +
+ " \"filter\" : {\n" +
+ " \"type\" : {\n" +
+ " \"value\" : \"type2\"\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ " \"alias3\" : { \"routing\" : \"1\" }" +
+ " }\n").get();
+
+ assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2"));
+ ensureGreen();
+
+ GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get();
+ assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
+ assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(3));
+
+ client().prepareIndex("test_index", "type1", "1").setSource("field", "value1").get();
+ client().prepareIndex("test_index", "type2", "2").setSource("field", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test_index").get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("alias1").get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("alias2").get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type2"));
+ }
+
+ @Test
+ public void testDuplicateAlias() throws Exception {
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias("my_alias").filter(termQuery("field", "value1")))
+ .addAlias(new Alias("my_alias").filter(termQuery("field", "value2")))
+ .get();
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_1").get();
+ assertThat(response.getIndexTemplates().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getAliases().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getAliases().get("my_alias").filter().string(), containsString("\"value1\""));
+ }
+
+ @Test
+ public void testAliasInvalidFilterValidJson() throws Exception {
+
+ //invalid filter but valid json: put index template works fine, fails during index creation
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias("invalid_alias").filter("{ \"invalid\": {} }")).get();
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_1").get();
+ assertThat(response.getIndexTemplates().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getAliases().size(), equalTo(1));
+ assertThat(response.getIndexTemplates().get(0).getAliases().get("invalid_alias").filter().string(), equalTo("{\"invalid\":{}}"));
+
+ try {
+ createIndex("test");
+ fail("index creation should have failed due to invalid alias filter in matching index template");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]"));
+ assertThat(e.getCause(), instanceOf(QueryParsingException.class));
+ assertThat(e.getCause().getMessage(), equalTo("No query registered for [invalid]"));
+ }
+ }
+
+ @Test
+ public void testAliasInvalidFilterInvalidJson() throws Exception {
+
+ //invalid json: put index template fails
+ PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias("invalid_alias").filter("abcde"));
+
+ try {
+ putIndexTemplateRequestBuilder.get();
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]"));
+ }
+
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_1").get();
+ assertThat(response.getIndexTemplates().size(), equalTo(0));
+ }
+
+ @Test
+ public void testAliasNameExistingIndex() throws Exception {
+
+ createIndex("index");
+
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias("index")).get();
+
+ try {
+ createIndex("test");
+            fail("index creation should have failed due to alias with existing index name in matching index template");
+ } catch(InvalidAliasNameException e) {
+ assertThat(e.getMessage(), equalTo("Invalid alias name [index], an index exists with the same name as the alias"));
+ }
+ }
+
+ @Test
+ public void testAliasEmptyName() throws Exception {
+ PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias(" ").indexRouting("1,2,3"));
+
+ try {
+ putIndexTemplateRequestBuilder.get();
+ fail("put template should have failed due to alias with empty name");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("alias name is required"));
+ }
+ }
+
+ @Test
+ public void testAliasWithMultipleIndexRoutings() throws Exception {
+ PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("te*")
+ .addAlias(new Alias("alias").indexRouting("1,2,3"));
+
+ try {
+ putIndexTemplateRequestBuilder.get();
+ fail("put template should have failed due to alias with multiple index routings");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("alias [alias] has several index routing values associated with it"));
+ }
+ }
+
+ @Test
+ public void testMultipleAliasesPrecedence() throws Exception {
+ client().admin().indices().preparePutTemplate("template1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addAlias(new Alias("alias1"))
+ .addAlias(new Alias("{index}-alias"))
+ .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test")))
+ .addAlias(new Alias("alias4")).get();
+
+ client().admin().indices().preparePutTemplate("template2")
+ .setTemplate("te*")
+ .setOrder(1)
+ .addAlias(new Alias("alias1").routing("test"))
+ .addAlias(new Alias("alias3")).get();
+
+
+ assertAcked(prepareCreate("test").addAlias(new Alias("test-alias").searchRouting("test-routing")));
+
+ ensureGreen();
+
+ GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().addIndices("test").get();
+ assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(4));
+
+ for (AliasMetaData aliasMetaData : getAliasesResponse.getAliases().get("test")) {
+ assertThat(aliasMetaData.alias(), anyOf(equalTo("alias1"), equalTo("test-alias"), equalTo("alias3"), equalTo("alias4")));
+ if ("alias1".equals(aliasMetaData.alias())) {
+ assertThat(aliasMetaData.indexRouting(), equalTo("test"));
+ assertThat(aliasMetaData.searchRouting(), equalTo("test"));
+ } else if ("alias3".equals(aliasMetaData.alias())) {
+ assertThat(aliasMetaData.filter(), nullValue());
+ } else if ("test-alias".equals(aliasMetaData.alias())) {
+ assertThat(aliasMetaData.indexRouting(), nullValue());
+ assertThat(aliasMetaData.searchRouting(), equalTo("test-routing"));
+ }
+ }
+ }
+
+ @Test
+ public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exception {
+ // Indexing into a should succeed, because the field mapping for field 'field' is defined in the test mapping.
+ client().admin().indices().preparePutTemplate("template1")
+ .setTemplate("a*")
+ .setOrder(0)
+ .addMapping("test", "field", "type=string")
+ .addAlias(new Alias("alias1").filter(termQuery("field", "value"))).get();
+ // Indexing into b should succeed, because the field mapping for field 'field' is defined in the _default_ mapping and the test type exists.
+ client().admin().indices().preparePutTemplate("template2")
+ .setTemplate("b*")
+ .setOrder(0)
+ .addMapping("_default_", "field", "type=string")
+ .addMapping("test")
+ .addAlias(new Alias("alias2").filter(termQuery("field", "value"))).get();
+ // Indexing into c should succeed, because the field mapping for field 'field' is defined in the _default_ mapping.
+ client().admin().indices().preparePutTemplate("template3")
+ .setTemplate("c*")
+ .setOrder(0)
+ .addMapping("_default_", "field", "type=string")
+ .addAlias(new Alias("alias3").filter(termQuery("field", "value"))).get();
+        // Indexing into d index should fail, since there is no field with name 'field' in the mapping
+ client().admin().indices().preparePutTemplate("template4")
+ .setTemplate("d*")
+ .setOrder(0)
+ .addAlias(new Alias("alias4").filter(termQuery("field", "value"))).get();
+
+ client().prepareIndex("a1", "test", "test").setSource("{}").get();
+ BulkResponse response = client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}")).get();
+ assertThat(response.hasFailures(), is(false));
+ assertThat(response.getItems()[0].isFailed(), equalTo(false));
+ assertThat(response.getItems()[0].getIndex(), equalTo("a2"));
+ assertThat(response.getItems()[0].getType(), equalTo("test"));
+ assertThat(response.getItems()[0].getId(), equalTo("test"));
+ assertThat(response.getItems()[0].getVersion(), equalTo(1l));
+
+ client().prepareIndex("b1", "test", "test").setSource("{}").get();
+ response = client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}")).get();
+ assertThat(response.hasFailures(), is(false));
+ assertThat(response.getItems()[0].isFailed(), equalTo(false));
+ assertThat(response.getItems()[0].getIndex(), equalTo("b2"));
+ assertThat(response.getItems()[0].getType(), equalTo("test"));
+ assertThat(response.getItems()[0].getId(), equalTo("test"));
+ assertThat(response.getItems()[0].getVersion(), equalTo(1l));
+
+ client().prepareIndex("c1", "test", "test").setSource("{}").get();
+ response = client().prepareBulk().add(new IndexRequest("c2", "test", "test").source("{}")).get();
+ assertThat(response.hasFailures(), is(false));
+ assertThat(response.getItems()[0].isFailed(), equalTo(false));
+ assertThat(response.getItems()[0].getIndex(), equalTo("c2"));
+ assertThat(response.getItems()[0].getType(), equalTo("test"));
+ assertThat(response.getItems()[0].getId(), equalTo("test"));
+ assertThat(response.getItems()[0].getVersion(), equalTo(1l));
+
+ try {
+ client().prepareIndex("d1", "test", "test").setSource("{}").get();
+ fail();
+ } catch (Exception e) {
+ assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IllegalArgumentException.class));
+ assertThat(e.getMessage(), containsString("failed to parse filter for alias [alias4]"));
+ }
+ response = client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get();
+ assertThat(response.hasFailures(), is(true));
+ assertThat(response.getItems()[0].isFailed(), equalTo(true));
+ assertThat(response.getItems()[0].getFailureMessage(), containsString("failed to parse filter for alias [alias4]"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template0.json b/core/src/test/java/org/elasticsearch/indices/template/template0.json
new file mode 100644
index 0000000000..af055fd01f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template0.json
@@ -0,0 +1,11 @@
+{
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template1.json b/core/src/test/java/org/elasticsearch/indices/template/template1.json
new file mode 100644
index 0000000000..030fb547bb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template1.json
@@ -0,0 +1,11 @@
+{
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template2.json b/core/src/test/java/org/elasticsearch/indices/template/template2.json
new file mode 100644
index 0000000000..d4a376cf2b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template2.json
@@ -0,0 +1,13 @@
+{
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template3.json b/core/src/test/java/org/elasticsearch/indices/template/template3.json
new file mode 100644
index 0000000000..7231a6ef43
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template3.json
@@ -0,0 +1,13 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "index.number_of_shards": 10,
+ "index.number_of_replicas": 0
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template4.json b/core/src/test/java/org/elasticsearch/indices/template/template4.json
new file mode 100644
index 0000000000..52dcaa62c0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template4.json
@@ -0,0 +1,13 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/template/template5.json b/core/src/test/java/org/elasticsearch/indices/template/template5.json
new file mode 100644
index 0000000000..803bec0ff0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/template/template5.json
@@ -0,0 +1,15 @@
+{
+ "mytemplate" : {
+ "template" : "foo*",
+ "order" : 10,
+ "settings" : {
+ "index" : {
+ "number_of_shards": 10,
+ "number_of_replicas": 0
+ }
+ },
+ "aliases" : {
+ "{index}-alias" : {}
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerTests.java b/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerTests.java
new file mode 100644
index 0000000000..b63a8273c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/warmer/GatewayIndicesWarmerTests.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.InternalTestCluster.RestartCallback;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ClusterScope(numDataNodes =0, scope= Scope.TEST)
+public class GatewayIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(GatewayIndicesWarmerTests.class);
+
+ @Test
+ public void testStatePersistence() throws Exception {
+
+ logger.info("--> starting 1 nodes");
+ internalCluster().startNode();
+
+ logger.info("--> putting two templates");
+ createIndex("test");
+
+ ensureYellow();
+
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value1"))));
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "value2"))));
+
+ logger.info("--> put template with warmer");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"xxx\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+
+ logger.info("--> verify warmers are registered in cluster state");
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(2));
+
+ IndexWarmersMetaData templateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(templateWarmers, Matchers.notNullValue());
+ assertThat(templateWarmers.entries().size(), equalTo(1));
+
+ logger.info("--> restarting the node");
+ internalCluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return Settings.EMPTY;
+ }
+ });
+
+ ensureYellow();
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+
+ logger.info("--> verify warmers in template are recovered");
+ IndexWarmersMetaData recoveredTemplateWarmers = clusterState.metaData().templates().get("template_1").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredTemplateWarmers.entries().size(), equalTo(templateWarmers.entries().size()));
+ for (int i = 0; i < templateWarmers.entries().size(); i++) {
+ assertThat(recoveredTemplateWarmers.entries().get(i).name(), equalTo(templateWarmers.entries().get(i).name()));
+ assertThat(recoveredTemplateWarmers.entries().get(i).source(), equalTo(templateWarmers.entries().get(i).source()));
+ }
+
+
+ logger.info("--> delete warmer warmer_1");
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("warmer_1").execute().actionGet();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> verify warmers (delete) are registered in cluster state");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ logger.info("--> restarting the node");
+ internalCluster().fullRestart(new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return Settings.EMPTY;
+ }
+ });
+
+ ensureYellow();
+
+ logger.info("--> verify warmers are recovered");
+ clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ recoveredWarmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(recoveredWarmersMetaData.entries().size(), equalTo(warmersMetaData.entries().size()));
+ for (int i = 0; i < warmersMetaData.entries().size(); i++) {
+ assertThat(recoveredWarmersMetaData.entries().get(i).name(), equalTo(warmersMetaData.entries().get(i).name()));
+ assertThat(recoveredWarmersMetaData.entries().get(i).source(), equalTo(warmersMetaData.entries().get(i).source()));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java
new file mode 100644
index 0000000000..a2735d6134
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class IndicesWarmerBlocksTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testPutWarmerWithBlocks() {
+ createIndex("test-blocks");
+ ensureGreen("test-blocks");
+
+ // Index reads are blocked, the warmer can't be registered
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
+ assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
+ .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK);
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_READ);
+ }
+
+ // Index writes are blocked, the warmer can be registered
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_acked")
+ .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE);
+ }
+
+ // Index metadata changes are blocked, the warmer can't be registered
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
+ .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK);
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ }
+
+ // Index metadata changes are blocked, the warmer can't be registered
+ try {
+ enableIndexBlock("test-blocks", SETTING_READ_ONLY);
+ assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
+ .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK);
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_READ_ONLY);
+ }
+
+ // Adding a new warmer is not possible when the cluster is read-only
+ try {
+ setClusterReadOnly(true);
+ assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked")
+ .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK);
+ } finally {
+ setClusterReadOnly(false);
+ }
+ }
+
+ @Test
+ public void testGetWarmerWithBlocks() {
+ createIndex("test-blocks");
+ ensureGreen("test-blocks");
+
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
+ .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
+ try {
+ enableIndexBlock("test-blocks", blockSetting);
+ GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get();
+ assertThat(response.warmers().size(), equalTo(1));
+
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test-blocks"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.iterator().next().name(), equalTo("warmer_block"));
+ } finally {
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ try {
+ enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK);
+ } finally {
+ disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA);
+ }
+ }
+
+ @Test
+ public void testDeleteWarmerWithBlocks() {
+ createIndex("test-blocks");
+ ensureGreen("test-blocks");
+
+ // Request is not blocked
+ for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
+ try {
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
+ .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
+
+ enableIndexBlock("test-blocks", blockSetting);
+ assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
+ } finally {
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+
+ // Request is blocked
+ for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+ try {
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_block")
+ .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())));
+
+ enableIndexBlock("test-blocks", blockSetting);
+ assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block"));
+ } finally {
+ disableIndexBlock("test-blocks", blockSetting);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
new file mode 100644
index 0000000000..adf1fbf2a0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.indices.warmer;
+
+import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse;
+import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse;
+import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.mapper.MappedFieldType.Loading;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.warmer.IndexWarmerMissingException;
+import org.elasticsearch.search.warmer.IndexWarmersMetaData;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleWarmerTests() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.termQuery("field", "value1")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+ putWarmerResponse = client().admin().indices().preparePutWarmer("warmer_2")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a2").setQuery(QueryBuilders.termQuery("field", "value2")))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("tes*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_*")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(2));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(1).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_1")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_1"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a*").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").size(), equalTo(1));
+ assertThat(getWarmersResponse.getWarmers().get("test").get(0).name(), equalTo("warmer_2"));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addTypes("a1").addWarmers("warmer_2")
+ .execute().actionGet();
+ assertThat(getWarmersResponse.getWarmers().size(), equalTo(0));
+ }
+
+ @Test
+ public void templateWarmer() {
+ client().admin().indices().preparePutTemplate("template_1")
+ .setSource("{\n" +
+ " \"template\" : \"*\",\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}")
+ .execute().actionGet();
+
+ createIndex("test");
+ ensureGreen();
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
+ @Test
+ public void createIndexWarmer() {
+ assertAcked(prepareCreate("test")
+ .setSource("{\n" +
+ " \"warmers\" : {\n" +
+ " \"warmer_1\" : {\n" +
+ " \"types\" : [],\n" +
+ " \"source\" : {\n" +
+ " \"query\" : {\n" +
+ " \"match_all\" : {}\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}"));
+
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ IndexWarmersMetaData warmersMetaData = clusterState.metaData().index("test").custom(IndexWarmersMetaData.TYPE);
+ assertThat(warmersMetaData, Matchers.notNullValue());
+ assertThat(warmersMetaData.entries().size(), equalTo(1));
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ }
+
+ @Test
+ public void deleteNonExistentIndexWarmerTest() {
+ createIndex("test");
+ try {
+ client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("foo").execute().actionGet();
+ fail("warmer foo should not exist");
+ } catch (IndexWarmerMissingException ex) {
+ assertThat(ex.names()[0], equalTo("foo"));
+ }
+ }
+
+ @Test
+ public void deleteIndexWarmerTest() {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .get();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(1));
+ ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next();
+ assertThat(entry.key, equalTo("test"));
+ assertThat(entry.value.size(), equalTo(1));
+ assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
+
+ DeleteWarmerResponse deleteWarmerResponse = client().admin().indices().prepareDeleteWarmer().setIndices("test").setNames("custom_warmer").get();
+ assertThat(deleteWarmerResponse.isAcknowledged(), equalTo(true));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get();
+ assertThat(getWarmersResponse.warmers().size(), equalTo(0));
+ }
+
+ @Test // issue 3246
+ public void ensureThatIndexWarmersCanBeChangedOnRuntime() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ client().prepareIndex("test", "test", "1").setSource("foo", "bar").setRefresh(true).execute().actionGet();
+
+ logger.info("--> Disabling warmers execution");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.warmer.enabled", false)).execute().actionGet();
+
+ long warmerRunsAfterDisabling = getWarmerRuns();
+ assertThat(warmerRunsAfterDisabling, greaterThanOrEqualTo(1L));
+
+ client().prepareIndex("test", "test", "2").setSource("foo2", "bar2").setRefresh(true).execute().actionGet();
+
+ assertThat(getWarmerRuns(), equalTo(warmerRunsAfterDisabling));
+ }
+
+ @Test
+ public void gettingAllWarmersUsingAllAndWildcardsShouldWork() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ PutWarmerResponse putWarmerResponse = client().admin().indices().preparePutWarmer("custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(putWarmerResponse.isAcknowledged(), equalTo(true));
+
+ PutWarmerResponse anotherPutWarmerResponse = client().admin().indices().preparePutWarmer("second_custom_warmer")
+ .setSearchRequest(client().prepareSearch("test").setTypes("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(anotherPutWarmerResponse.isAcknowledged(), equalTo(true));
+
+ GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("*").addWarmers("*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("_all").addWarmers("_all").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("t*").addWarmers("c*").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+
+ getWarmersResponse = client().admin().indices().prepareGetWarmers("test").addWarmers("custom_warmer", "second_custom_warmer").get();
+ assertThat(getWarmersResponse.warmers().size(), is(1));
+ }
+
+ private long getWarmerRuns() {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setWarmer(true).execute().actionGet();
+ return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total();
+ }
+
+ private long getSegmentsMemoryUsage(String idx) {
+ IndicesSegmentResponse response = client().admin().indices().segments(Requests.indicesSegmentsRequest(idx)).actionGet();
+ IndexSegments indicesSegments = response.getIndices().get(idx);
+ long total = 0;
+ for (IndexShardSegments indexShardSegments : indicesSegments) {
+ for (ShardSegments shardSegments : indexShardSegments) {
+ for (Segment segment : shardSegments) {
+ logger.debug("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getIndex());
+ total += segment.memoryInBytes;
+ }
+ }
+ }
+ return total;
+ }
+
+ private enum LoadingMethod {
+ LAZY {
+ @Override
+ CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) {
+ return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE));
+ }
+ },
+ EAGER {
+ @Override
+ CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) {
+ return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.EAGER_VALUE));
+ }
+
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ },
+ EAGER_PER_FIELD {
+ @Override
+ CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception {
+ return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).addMapping(type, JsonXContent.contentBuilder()
+ .startObject()
+ .startObject(type)
+ .startObject("properties")
+ .startObject(fieldName)
+ .field("type", "string")
+ .startObject("norms")
+ .field("loading", Loading.EAGER_VALUE)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ );
+ }
+
+ @Override
+ boolean isLazy() {
+ return false;
+ }
+ };
+ private static Settings SINGLE_SHARD_NO_REPLICA = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
+
+ abstract CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception;
+
+ boolean isLazy() {
+ return true;
+ }
+ }
+
+ // NOTE: we have to ensure we defeat compression strategies of the default codec...
+ public void testEagerLoading() throws Exception {
+ for (LoadingMethod method : LoadingMethod.values()) {
+ logger.debug("METHOD " + method);
+ String indexName = method.name().toLowerCase(Locale.ROOT);
+ assertAcked(method.createIndex(indexName, "t", "foo"));
+ // index a doc with 1 token, and one with 3 tokens so we dont get CONST compressed (otherwise norms take zero memory usage)
+ client().prepareIndex(indexName, "t", "1").setSource("foo", "bar").execute().actionGet();
+ client().prepareIndex(indexName, "t", "2").setSource("foo", "bar baz foo").setRefresh(true).execute().actionGet();
+ ensureGreen(indexName);
+ long memoryUsage0 = getSegmentsMemoryUsage(indexName);
+ // queries load norms if they were not loaded before
+ client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("foo", "bar")).execute().actionGet();
+ long memoryUsage1 = getSegmentsMemoryUsage(indexName);
+ if (method.isLazy()) {
+ assertThat(memoryUsage1, greaterThan(memoryUsage0));
+ } else {
+ assertThat(memoryUsage1, equalTo(memoryUsage0));
+ }
+ }
+ }
+
+ public void testQueryCacheOnWarmer() {
+ createIndex("test");
+ ensureGreen();
+
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false)));
+ logger.info("register warmer with no query cache, validate no cache is used");
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
+ .get());
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("register warmer with query cache, validate caching happened");
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()).setQueryCache(true))
+ .get());
+
+ // index again, to make sure it gets refreshed
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+
+ client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
+ assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("enable default query caching on the index level, and test that no flag on warmer still caches");
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true)));
+
+ assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
+ .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
+ .get());
+
+ // index again, to make sure it gets refreshed
+ client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java b/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
new file mode 100644
index 0000000000..3ad69b71bd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/mget/SimpleMgetTests.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.mget;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleMgetTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder().startObject().field("foo", "bar").endObject()).setRefresh(true).execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("test", "test", "1"))
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(2));
+
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("test"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+
+ assertThat(mgetResponse.getResponses()[1].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+
+ mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item("nonExistingIndex", "test", "1"))
+ .execute().actionGet();
+ assertThat(mgetResponse.getResponses().length, is(1));
+ assertThat(mgetResponse.getResponses()[0].getIndex(), is("nonExistingIndex"));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[0].getFailure().getMessage(), is("[nonExistingIndex] missing"));
+
+ }
+
+ @Test
+ public void testThatParentPerDocumentIsSupported() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("test", jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("_parent")
+ .field("type", "foo")
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureYellow();
+
+ client().prepareIndex("test", "test", "1").setParent("4").setRefresh(true)
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1").parent("4"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "test", "1"))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(true));
+ assertThat(mgetResponse.getResponses()[1].getResponse(), nullValue());
+ assertThat(mgetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[test]/[1]"));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testThatSourceFilteringIsSupported() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureYellow();
+ BytesReference sourceBytesRef = jsonBuilder().startObject()
+ .field("field", "1", "2")
+ .startObject("included").field("field", "should be seen").field("hidden_field", "should not be seen").endObject()
+ .field("excluded", "should not be seen")
+ .endObject().bytes();
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource(sourceBytesRef).get();
+ }
+
+ MultiGetRequestBuilder request = client().prepareMultiGet();
+ for (int i = 0; i < 100; i++) {
+ if (i % 2 == 0) {
+ request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext("included", "*.hidden_field")));
+ } else {
+ request.add(new MultiGetRequest.Item(indexOrAlias(), "type", Integer.toString(i)).fetchSourceContext(new FetchSourceContext(false)));
+ }
+ }
+
+ MultiGetResponse response = request.get();
+
+ assertThat(response.getResponses().length, equalTo(100));
+ for (int i = 0; i < 100; i++) {
+ MultiGetItemResponse responseItem = response.getResponses()[i];
+ assertThat(responseItem.getIndex(), equalTo("test"));
+ if (i % 2 == 0) {
+ Map<String, Object> source = responseItem.getResponse().getSourceAsMap();
+ assertThat(source.size(), equalTo(1));
+ assertThat(source, hasKey("included"));
+ assertThat(((Map<String, Object>) source.get("included")).size(), equalTo(1));
+ assertThat(((Map<String, Object>) source.get("included")), hasKey("field"));
+ } else {
+ assertThat(responseItem.getResponse().getSourceAsBytes(), nullValue());
+ }
+ }
+ }
+
+ @Test
+ public void testThatRoutingPerDocumentIsSupported() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .setSettings(Settings.builder()
+ .put(indexSettings())
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS))));
+ ensureYellow();
+
+ final String id = routingKeyForShard("test", "test", 0);
+ final String routingOtherShard = routingKeyForShard("test", "test", 1);
+
+ client().prepareIndex("test", "test", id).setRefresh(true).setRouting(routingOtherShard)
+ .setSource(jsonBuilder().startObject().field("foo", "bar").endObject())
+ .execute().actionGet();
+
+ MultiGetResponse mgetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "test", id).routing(routingOtherShard))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "test", id))
+ .execute().actionGet();
+
+ assertThat(mgetResponse.getResponses().length, is(2));
+ assertThat(mgetResponse.getResponses()[0].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[0].getResponse().isExists(), is(true));
+ assertThat(mgetResponse.getResponses()[0].getResponse().getIndex(), is("test"));
+
+ assertThat(mgetResponse.getResponses()[1].isFailed(), is(false));
+ assertThat(mgetResponse.getResponses()[1].getResponse().isExists(), is(false));
+ assertThat(mgetResponse.getResponses()[1].getResponse().getIndex(), is("test"));
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/monitor/SigarTests.java b/core/src/test/java/org/elasticsearch/monitor/SigarTests.java
new file mode 100644
index 0000000000..f582c2fdc1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/monitor/SigarTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.monitor;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hyperic.sigar.Sigar;
+
+public class SigarTests extends ElasticsearchTestCase {
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ assumeTrue("we can only ensure sigar is working when running from maven",
+ Boolean.parseBoolean(System.getProperty("tests.maven")));
+ }
+
+ public void testSigarLoads() throws Exception {
+ Sigar.load();
+ }
+
+ public void testSigarWorks() throws Exception {
+ Sigar sigar = new Sigar();
+ assertNotNull(sigar.getCpu());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/core/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
new file mode 100644
index 0000000000..86a698e07d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java
@@ -0,0 +1,1415 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nested;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.startsWith;
+
+public class SimpleNestedTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleNested() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "nested1", "type=nested").addMapping("type2", "nested1", "type=nested"));
+ ensureGreen();
+
+ // check on no data, see it works
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (as see that we filter nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").get();
+ assertThat(getResponse.isExists(), equalTo(true));
+ assertThat(getResponse.getSourceAsBytes(), notNullValue());
+
+ // check the numDocs
+ assertDocumentCount("test", 3);
+
+ // check that _all is working on nested docs
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_all", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ // search for something that matches the nested doc, and see that we don't find the nested doc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("n_field1", "n_value1_1")).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ // now, do a nested query
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // add another doc, one that would match if it was not nested...
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // flush, so we fetch it from the index (as see that we filter nested docs)
+ flush();
+ assertDocumentCount("test", 6);
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // filter
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check with type prefix
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.n_field1", "n_value1_1")).must(termQuery("nested1.n_field2", "n_value2_1")))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // check delete, so all is gone...
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "2").execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+
+ // flush, so we fetch it from the index (as see that we filter nested docs)
+ flush();
+ assertDocumentCount("test", 3);
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setTypes("type1", "type2").setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/10661")
+ public void simpleNestedMatchQueries() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .startObject("field1")
+ .field("type", "long")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", builder));
+ ensureGreen();
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ int numDocs = randomIntBetween(2, 35);
+ requests.add(client().prepareIndex("test", "type1", "0").setSource(jsonBuilder().startObject()
+ .field("field1", 0)
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()));
+ requests.add(client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_8")
+ .field("n_field2", "n_value2_5")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_3")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .endArray()
+ .endObject()));
+
+ for (int i = 2; i < numDocs; i++) {
+ requests.add(client().prepareIndex("test", "type1", String.valueOf(i)).setSource(jsonBuilder().startObject()
+ .field("field1", i)
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_8")
+ .field("n_field2", "n_value2_5")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()));
+ }
+
+ indexRandom(true, requests);
+ waitForRelocation(ClusterHealthStatus.GREEN);
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(nestedQuery("nested1", boolQuery()
+ .should(termQuery("nested1.n_field1", "n_value1_1").queryName("test1"))
+ .should(termQuery("nested1.n_field1", "n_value1_3").queryName("test2"))
+ .should(termQuery("nested1.n_field2", "n_value2_2").queryName("test3"))
+ ))
+ .setSize(numDocs)
+ .addSort("field1", SortOrder.ASC)
+ .get();
+ assertNoFailures(searchResponse);
+ assertAllSuccessful(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("0"));
+ assertThat(searchResponse.getHits().getAt(0).matchedQueries(), arrayWithSize(2));
+ assertThat(searchResponse.getHits().getAt(0).matchedQueries(), arrayContainingInAnyOrder("test1", "test3"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).matchedQueries(), arrayWithSize(1));
+ assertThat(searchResponse.getHits().getAt(1).matchedQueries(), arrayContaining("test2"));
+
+ for (int i = 2; i < numDocs; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(String.valueOf(i)));
+ assertThat(searchResponse.getHits().getAt(i).matchedQueries(), arrayWithSize(1));
+ assertThat(searchResponse.getHits().getAt(i).matchedQueries(), arrayContaining("test3"));
+ }
+ }
+
+ @Test
+ public void multiNested() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested").startObject("properties")
+ .startObject("nested2").field("type", "nested").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("field", "value")
+ .startArray("nested1")
+ .startObject().field("field1", "1").startArray("nested2").startObject().field("field2", "2").endObject().startObject().field("field2", "3").endObject().endArray().endObject()
+ .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ // flush, so we fetch it from the index (as see that we filter nested docs)
+ flush();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(true));
+ waitForRelocation(ClusterHealthStatus.GREEN);
+ // check the numDocs
+ assertDocumentCount("test", 7);
+
+ // do some multi nested queries
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ termQuery("nested1.field1", "1"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1.nested2",
+ termQuery("nested1.nested2.field2", "2"))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "3"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "4"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "1")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "5"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test").setQuery(nestedQuery("nested1",
+ boolQuery().must(termQuery("nested1.field1", "4")).must(nestedQuery("nested1.nested2", termQuery("nested1.nested2.field2", "2"))))).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ // When IncludeNestedDocsQuery is wrapped in a FilteredQuery then a in-finite loop occurs b/c of a bug in IncludeNestedDocsQuery#advance()
+ // This IncludeNestedDocsQuery also needs to be aware of the filter from alias
+ public void testDeleteNestedDocsWithAlias() throws Exception {
+
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder().put(indexSettings()).put("index.referesh_interval", -1).build())
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1")
+ .field("type", "string")
+ .endObject()
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ client().admin().indices().prepareAliases()
+ .addAlias("test", "alias1", QueryBuilders.termQuery("field1", "value1")).execute().actionGet();
+
+ ensureGreen();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", "value2")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1_1")
+ .field("n_field2", "n_value2_1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1_2")
+ .field("n_field2", "n_value2_2")
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+ assertDocumentCount("test", 6);
+ }
+
+ @Test
+ public void testExplain() throws Exception {
+
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startArray("nested1")
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .startObject()
+ .field("n_field1", "n_value1")
+ .endObject()
+ .endArray()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1")).scoreMode("total"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ Explanation explanation = searchResponse.getHits().hits()[0].explanation();
+ assertThat(explanation.getValue(), equalTo(2f));
+ assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on child doc range from 0 to 1\n"));
+ // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2)
+// assertThat(explanation.getDetails().length, equalTo(2));
+// assertThat(explanation.getDetails()[0].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[0].getDescription(), equalTo("Child[0]"));
+// assertThat(explanation.getDetails()[1].getValue(), equalTo(1f));
+// assertThat(explanation.getDetails()[1].getDescription(), equalTo("Child[1]"));
+ }
+
+ @Test
+ public void testSimpleNestedSorting() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "long")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value + 1"), "number").setNestedPath("nested1")
+ .order(SortOrder.DESC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("6.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("3.0"));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value + 1"), "number").setNestedPath("nested1")
+ .sortMode("sum").order(SortOrder.DESC)).execute().actionGet();
+
+ // B/c of sum it is actually +2
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("11.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("9.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("5.0"));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value"), "number")
+ .setNestedFilter(rangeQuery("nested1.field1").from(1).to(3)).setNestedPath("nested1").sortMode("avg")
+ .order(SortOrder.DESC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo(Double.toString(Double.MAX_VALUE)));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("1.5"));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value"), "string").setNestedPath("nested1")
+ .order(SortOrder.DESC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value"), "string").setNestedPath("nested1")
+ .order(SortOrder.ASC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ try {
+ client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(
+ SortBuilders.scriptSort(new Script("_fields['nested1.field1'].value"), "string").setNestedPath("nested1")
+ .sortMode("sum").order(SortOrder.ASC)).execute().actionGet();
+ Assert.fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("type [string] doesn't support mode [SUM]"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testSimpleNestedSortingOldScriptAPI() throws Exception {
+ // Same coverage as the Script-object based test above, but driving the
+ // deprecated String-based SortBuilders.scriptSort(String, String) API.
+ // Index "test" has a nested field "nested1" with a stored long "field1";
+ // refresh_interval is disabled so only the explicit refresh() below makes
+ // the docs visible.
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put(indexSettings()).put("index.refresh_interval", -1)).addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("nested1").field("type", "nested")
+ .startObject("properties").startObject("field1").field("type", "long").field("store", "yes").endObject()
+ .endObject().endObject().endObject().endObject().endObject()));
+ ensureGreen();
+
+ // Nested "nested1.field1" values per doc — doc 1: {5, 4}, doc 2: {1, 2}, doc 3: {3, 4}.
+ client().prepareIndex("test", "type1", "1")
+ .setSource(
+ jsonBuilder().startObject().field("field1", 1).startArray("nested1").startObject().field("field1", 5).endObject()
+ .startObject().field("field1", 4).endObject().endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(
+ jsonBuilder().startObject().field("field1", 2).startArray("nested1").startObject().field("field1", 1).endObject()
+ .startObject().field("field1", 2).endObject().endArray().endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(
+ jsonBuilder().startObject().field("field1", 3).startArray("nested1").startObject().field("field1", 3).endObject()
+ .startObject().field("field1", 4).endObject().endArray().endObject()).execute().actionGet();
+ refresh();
+
+ // Plain field sort ASC: ordered by each doc's minimum nested value (1, 3, 4).
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.ASC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ // Plain field sort DESC: ordered by each doc's maximum nested value (5, 4, 2).
+ searchResponse = client().prepareSearch("test").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").order(SortOrder.DESC)).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ // Numeric script sort DESC: script adds 1 to each value, so sort keys are max+1 as doubles.
+ searchResponse = client()
+ .prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("6.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("3.0"));
+
+ // Sum mode over the two nested values, each incremented by the script.
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value + 1", "number").setNestedPath("nested1").sortMode("sum").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ // B/c of sum it is actually +2
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("11.0"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("9.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("5.0"));
+
+ // Avg mode restricted by a nested filter on [1, 3]; doc 1 ({5, 4}) matches no nested
+ // docs, so per the assertion below its DESC sort key degenerates to Double.MAX_VALUE.
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "number")
+ .setNestedFilter(rangeQuery("nested1.field1").from(1).to(3))
+ .setNestedPath("nested1").sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo(Double.toString(Double.MAX_VALUE)));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3.0"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("1.5"));
+
+ // String-typed script sort: values compare lexicographically; DESC then ASC.
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("4"));
+
+ // SUM is a numeric mode, so combining it with a string-typed script sort must fail.
+ try {
+ client().prepareSearch("test")
+ .setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.scriptSort("_fields['nested1.field1'].value", "string")
+ .setNestedPath("nested1").sortMode("sum").order(SortOrder.ASC))
+ .execute().actionGet();
+ Assert.fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("type [string] doesn't support mode [SUM]"));
+ }
+ }
+
+ @Test
+ public void testSimpleNestedSorting_withNestedFilterMissing() throws Exception {
+ // Verifies the missing(...) fallback of nested field sorting: a doc whose
+ // nested docs are ALL excluded by the nested filter sorts with the configured
+ // missing value (10) instead of a real field value.
+ // FIX: the setting key was misspelled "index.referesh_interval", so the
+ // intended refresh_interval=-1 was never applied (compare the correctly
+ // spelled key used by the sibling tests in this class). The test still
+ // controls visibility via the explicit refresh() calls below.
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1))
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field1")
+ .field("type", "long")
+ .endObject()
+ .startObject("field2")
+ .field("type", "boolean")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ // Docs 1 and 2 have field2=true on every nested doc, so they match the
+ // termQuery("nested1.field2", true) nested filter used below.
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", 1)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 5)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("field1", 2)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 1)
+ .field("field2", true)
+ .endObject()
+ .startObject()
+ .field("field1", 2)
+ .field("field2", true)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ // Doc with missing nested docs if nested filter is used
+ // NOTE(review): the intermediate refresh() below appears intended to land
+ // doc 3 in a separate segment from docs 1 and 2 — confirm before removing.
+ refresh();
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("field1", 3)
+ .startArray("nested1")
+ .startObject()
+ .field("field1", 3)
+ .field("field2", false)
+ .endObject()
+ .startObject()
+ .field("field1", 4)
+ .field("field2", false)
+ .endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+ refresh();
+
+ // ASC: doc 2 -> min matching value 1, doc 1 -> min matching value 4,
+ // doc 3 -> no nested doc matches the filter, so it gets missing value 10.
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("type1")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termQuery("nested1.field2", true)).missing(10).order(SortOrder.ASC));
+
+ // Randomly exercise the scroll code path as well; results must be identical.
+ if (randomBoolean()) {
+ searchRequestBuilder.setScroll("10m");
+ }
+
+ SearchResponse searchResponse = searchRequestBuilder.get();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("4"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("10"));
+
+ // DESC: the missing value 10 now sorts doc 3 first; docs 1 and 2 follow by
+ // their max matching values (5, 2).
+ searchRequestBuilder = client().prepareSearch("test").setTypes("type1").setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("nested1.field1").setNestedFilter(termQuery("nested1.field2", true)).missing(10).order(SortOrder.DESC));
+
+ if (randomBoolean()) {
+ searchRequestBuilder.setScroll("10m");
+ }
+
+ searchResponse = searchRequestBuilder.get();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(searchResponse.getHits().hits()[0].sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("1"));
+ assertThat(searchResponse.getHits().hits()[1].sortValues()[0].toString(), equalTo("5"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(searchResponse.getHits().hits()[2].sortValues()[0].toString(), equalTo("2"));
+ // Release any scroll context the randomized branch may have opened.
+ client().prepareClearScroll().addScrollId("_all").get();
+ }
+
+ @Test
+ public void testSortNestedWithNestedFilter() throws Exception {
+ // Exercises sorting on a doubly-nested field ("parent" nested inside the doc,
+ // "child" nested inside "parent") with and without nested filters, with
+ // automatic nested-path detection, and with sum/avg sort modes.
+ // NOTE(review): the "filter" boolean written on parent/child below is not in
+ // this mapping — it relies on dynamic mapping; confirm that is intentional.
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("grand_parent_values").field("type", "long").endObject()
+ .startObject("parent").field("type", "nested")
+ .startObject("properties")
+ .startObject("parent_values").field("type", "long").endObject()
+ .startObject("child").field("type", "nested")
+ .startObject("properties")
+ .startObject("child_values").field("type", "long").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+
+ // Three docs; the "sum: N" comments give the total of all child_values per doc.
+ // sum: 11
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 1l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 1l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 1l)
+ .startObject("child_obj")
+ .field("value", 1l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 6l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("filter", true)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -1l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 5l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ // sum: 7
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 2l)
+ .startObject("parent")
+ .field("filter", false)
+ .field("parent_values", 2l)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 2l)
+ .startObject("child_obj")
+ .field("value", 2l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 4l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", true)
+ .startObject("child")
+ .field("child_values", -2l)
+ .field("filter", false)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 3l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ // sum: 2
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .field("grand_parent_values", 3l)
+ .startObject("parent")
+ .field("parent_values", 3l)
+ .field("filter", false)
+ .startObject("child")
+ .field("filter", true)
+ .field("child_values", 3l)
+ .startObject("child_obj")
+ .field("value", 3l)
+ .endObject()
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .startObject("parent")
+ .field("parent_values", 4l)
+ .field("filter", true)
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", -3l)
+ .endObject()
+ .startObject("child")
+ .field("filter", false)
+ .field("child_values", 1l)
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+ refresh();
+
+ // Without nested filter
+ // ASC over all child docs: each doc sorts by its minimum child_values (-1, -2, -3).
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+ // With nested filter
+ // Only the single child with filter=true per doc contributes (values 1, 2, 3).
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Nested path should be automatically detected, expect same results as above search request
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort on the parent-level field while the nested path/filter target the parent level.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.parent_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(QueryBuilders.termQuery("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Filter on the parent level while sorting on the child level: only the hit
+ // count is asserted; see the TODO below for the stronger (disabled) checks.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(QueryBuilders.termQuery("parent.filter", false))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ // TODO: If we expose ToChildBlockJoinQuery we can filter sort values based on a higher level nested objects
+// assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+// assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("-3"));
+// assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+// assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("-2"));
+// assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+// assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("-1"));
+
+ // Check if closest nested type is resolved
+ // ("child_obj.value" is an object field inside "child"; the path must resolve to "parent.child").
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_obj.value")
+ .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true))
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: sum
+ // Sort keys are the per-doc child_values totals: 11, 7, 2 (see indexing comments above).
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("11"));
+
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("sum")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("11"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("7"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("2"));
+
+ // Sort mode: sum with filter
+ // Only the filter=true child counts, so the "sum" is just that single value (1, 2, 3).
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true))
+ .sortMode("sum")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ // Sort mode: avg
+ // Truncated long averages of 4 child values per doc: doc 3 -> 2/4, doc 2 -> 7/4, doc 1 -> 11/4.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .sortMode("avg")
+ .order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("1"));
+
+ // Sort mode: avg with filter
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.fieldSort("parent.child.child_values")
+ .setNestedPath("parent.child")
+ .setNestedFilter(QueryBuilders.termQuery("parent.child.filter", true))
+ .sortMode("avg")
+ .order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(3));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[0].sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[1].sortValues()[0].toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].sortValues()[0].toString(), equalTo("3"));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/9305
+ public void testNestedSortingWithNestedFilterAsFilter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", jsonBuilder().startObject().startObject("properties")
+ .startObject("officelocation").field("type", "string").endObject()
+ .startObject("users")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("first").field("type", "string").endObject()
+ .startObject("last").field("type", "string").endObject()
+ .startObject("workstations")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("stationid").field("type", "string").endObject()
+ .startObject("phoneid").field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()));
+
+ client().prepareIndex("test", "type", "1").setSource(jsonBuilder().startObject()
+ .field("officelocation", "gendale")
+ .startArray("users")
+ .startObject()
+ .field("first", "fname1")
+ .field("last", "lname1")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s1")
+ .field("phoneid", "p1")
+ .endObject()
+ .startObject()
+ .field("stationid", "s2")
+ .field("phoneid", "p2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("first", "fname2")
+ .field("last", "lname2")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s3")
+ .field("phoneid", "p3")
+ .endObject()
+ .startObject()
+ .field("stationid", "s4")
+ .field("phoneid", "p4")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("first", "fname3")
+ .field("last", "lname3")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s5")
+ .field("phoneid", "p5")
+ .endObject()
+ .startObject()
+ .field("stationid", "s6")
+ .field("phoneid", "p6")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject()).get();
+
+ client().prepareIndex("test", "type", "2").setSource(jsonBuilder().startObject()
+ .field("officelocation", "gendale")
+ .startArray("users")
+ .startObject()
+ .field("first", "fname4")
+ .field("last", "lname4")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s1")
+ .field("phoneid", "p1")
+ .endObject()
+ .startObject()
+ .field("stationid", "s2")
+ .field("phoneid", "p2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("first", "fname5")
+ .field("last", "lname5")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s3")
+ .field("phoneid", "p3")
+ .endObject()
+ .startObject()
+ .field("stationid", "s4")
+ .field("phoneid", "p4")
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("first", "fname1")
+ .field("last", "lname1")
+ .startArray("workstations")
+ .startObject()
+ .field("stationid", "s5")
+ .field("phoneid", "p5")
+ .endObject()
+ .startObject()
+ .field("stationid", "s6")
+ .field("phoneid", "p6")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject()).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .addSort(SortBuilders.fieldSort("users.first")
+ .order(SortOrder.ASC))
+ .addSort(SortBuilders.fieldSort("users.first")
+ .order(SortOrder.ASC)
+ .setNestedPath("users")
+ .setNestedFilter(nestedQuery("users.workstations", termQuery("users.workstations.stationid", "s5"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("fname1"));
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[1].toString(), equalTo("fname1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("fname1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[1].toString(), equalTo("fname3"));
+ }
+
+ @Test
+ public void testCheckFixedBitSetCache() throws Exception {
+ boolean loadFixedBitSeLazily = randomBoolean();
+ Settings.Builder settingsBuilder = Settings.builder().put(indexSettings())
+ .put("index.refresh_interval", -1);
+ if (loadFixedBitSeLazily) {
+ settingsBuilder.put("index.load_fixed_bitset_filters_eagerly", false);
+ }
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder)
+ .addMapping("type")
+ );
+
+ client().prepareIndex("test", "type", "0").setSource("field", "value").get();
+ client().prepareIndex("test", "type", "1").setSource("field", "value").get();
+ refresh();
+ ensureSearchable("test");
+
+ // No nested mapping yet, there shouldn't be anything in the fixed bit set cache
+ ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
+ assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+
+ // Now add nested mapping
+ assertAcked(
+ client().admin().indices().preparePutMapping("test").setType("type").setSource("array1", "type=nested")
+ );
+
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startArray("array1").startObject().field("field1", "value1").endObject().endArray()
+ .endObject();
+ // index simple data
+ client().prepareIndex("test", "type", "2").setSource(builder).get();
+ client().prepareIndex("test", "type", "3").setSource(builder).get();
+ client().prepareIndex("test", "type", "4").setSource(builder).get();
+ client().prepareIndex("test", "type", "5").setSource(builder).get();
+ client().prepareIndex("test", "type", "6").setSource(builder).get();
+ refresh();
+ ensureSearchable("test");
+
+ if (loadFixedBitSeLazily) {
+ clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
+ assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+
+            // the fixed bitsets are only loaded lazily, once a nested query actually runs
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(nestedQuery("array1", termQuery("array1.field1", "value1")))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(5l));
+ }
+ clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
+ assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), greaterThan(0l));
+
+ assertAcked(client().admin().indices().prepareDelete("test"));
+ clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
+ assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+ }
+
+    /** Asserts that the primaries of {@code index} contain exactly {@code numdocs} documents.
+     */
+ private void assertDocumentCount(String index, long numdocs) {
+ IndicesStatsResponse stats = admin().indices().prepareStats(index).clear().setDocs(true).get();
+ assertNoFailures(stats);
+ assertThat(stats.getIndex(index).getPrimaries().docs.getCount(), is(numdocs));
+
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/network/DirectBufferNetworkTests.java b/core/src/test/java/org/elasticsearch/network/DirectBufferNetworkTests.java
new file mode 100644
index 0000000000..979bc8b454
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/network/DirectBufferNetworkTests.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.network;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.lang.reflect.Field;
+import java.nio.ByteBuffer;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+/** Tests that large docs and large API requests do not leave oversized direct ByteBuffers cached.
+ */
+public class DirectBufferNetworkTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(Node.HTTP_ENABLED, true)
+ .put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ /**
+ * This test validates that using large data sets (large docs + large API requests) don't
+ * cause a large direct byte buffer to be allocated internally in the sun.nio buffer cache.
+ * <p/>
+ * See {@link org.elasticsearch.common.netty.NettyUtils#DEFAULT_GATHERING} for more info.
+ */
+ @Test
+ public void verifySaneDirectBufferAllocations() throws Exception {
+ createIndex("test");
+
+ int estimatedBytesSize = scaledRandomIntBetween(ByteSizeValue.parseBytesSizeValue("1.1mb", "estimatedBytesSize").bytesAsInt(),
+ ByteSizeValue.parseBytesSizeValue("1.5mb", "estimatedBytesSize").bytesAsInt());
+ byte[] data = new byte[estimatedBytesSize];
+ getRandom().nextBytes(data);
+
+ ByteArrayOutputStream docOut = new ByteArrayOutputStream();
+ // we use smile to automatically use the binary mapping
+ XContentBuilder doc = XContentFactory.smileBuilder(docOut).startObject().startObject("doc").field("value", data).endObject();
+ doc.close();
+ byte[] docBytes = docOut.toByteArray();
+
+ int numDocs = randomIntBetween(2, 5);
+ logger.info("indexing [{}] docs, each with size [{}]", numDocs, estimatedBytesSize);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ builders[i] = client().prepareIndex("test", "type").setSource(docBytes);
+ }
+ indexRandom(true, builders);
+ logger.info("done indexing");
+
+ logger.info("executing random client search for all docs");
+ assertHitCount(client().prepareSearch("test").setFrom(0).setSize(numDocs).get(), numDocs);
+ logger.info("executing transport client search for all docs");
+ assertHitCount(internalCluster().transportClient().prepareSearch("test").setFrom(0).setSize(numDocs).get(), numDocs);
+
+ logger.info("executing HTTP search for all docs");
+ // simulate large HTTP call as well
+ httpClient().method("GET").path("/test/_search").addParam("size", Integer.toString(numDocs)).execute();
+
+ logger.info("validating large direct buffer not allocated");
+ validateNoLargeDirectBufferAllocated();
+ }
+
+ /**
+ * Validates that all the thread local allocated ByteBuffer in sun.nio under the Util$BufferCache
+ * are not greater than 1mb.
+ */
+ private void validateNoLargeDirectBufferAllocated() throws Exception {
+ // Make the fields in the Thread class that store ThreadLocals
+ // accessible
+ Field threadLocalsField = Thread.class.getDeclaredField("threadLocals");
+ threadLocalsField.setAccessible(true);
+        // Make the underlying array of ThreadLocal.ThreadLocalMap.Entry objects
+ // accessible
+ Class<?> tlmClass = Class.forName("java.lang.ThreadLocal$ThreadLocalMap");
+ Field tableField = tlmClass.getDeclaredField("table");
+ tableField.setAccessible(true);
+
+ for (Thread thread : Thread.getAllStackTraces().keySet()) {
+ if (thread == null) {
+ continue;
+ }
+ Object threadLocalMap = threadLocalsField.get(thread);
+ if (threadLocalMap == null) {
+ continue;
+ }
+ Object[] table = (Object[]) tableField.get(threadLocalMap);
+ if (table == null) {
+ continue;
+ }
+ for (Object entry : table) {
+ if (entry == null) {
+ continue;
+ }
+ Field valueField = entry.getClass().getDeclaredField("value");
+ valueField.setAccessible(true);
+ Object value = valueField.get(entry);
+ if (value == null) {
+ continue;
+ }
+ if (!value.getClass().getName().equals("sun.nio.ch.Util$BufferCache")) {
+ continue;
+ }
+ Field buffersField = value.getClass().getDeclaredField("buffers");
+ buffersField.setAccessible(true);
+ Object[] buffers = (Object[]) buffersField.get(value);
+ for (Object buffer : buffers) {
+ if (buffer == null) {
+ continue;
+ }
+ assertThat(((ByteBuffer) buffer).capacity(), Matchers.lessThan(1 * 1024 * 1024));
+ }
+ }
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
new file mode 100644
index 0000000000..82b6a2a601
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/node/internal/InternalSettingsPreparerTests.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node.internal;
+
+import org.elasticsearch.common.cli.CliToolTestCase;
+import org.elasticsearch.common.cli.Terminal;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class InternalSettingsPreparerTests extends ElasticsearchTestCase {
+
+ @Before
+ public void setupSystemProperties() {
+ System.setProperty("es.node.zone", "foo");
+ }
+
+ @After
+ public void cleanupSystemProperties() {
+ System.clearProperty("es.node.zone");
+ }
+
+ @Test
+ public void testIgnoreSystemProperties() {
+ Settings settings = settingsBuilder()
+ .put("node.zone", "bar")
+ .put("path.home", createTempDir().toString())
+ .build();
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settings, true);
+ // Should use setting from the system property
+ assertThat(tuple.v1().get("node.zone"), equalTo("foo"));
+
+ settings = settingsBuilder()
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put("node.zone", "bar")
+ .put("path.home", createTempDir().toString())
+ .build();
+ tuple = InternalSettingsPreparer.prepareSettings(settings, true);
+        // Should use the explicit setting, since system properties are ignored
+ assertThat(tuple.v1().get("node.zone"), equalTo("bar"));
+ }
+
+ @Test
+ public void testAlternateConfigFileSuffixes() {
+ // test that we can read config files with .yaml, .json, and .properties suffixes
+ Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder()
+ .put("config.ignore_system_properties", true)
+ .put("path.home", createTempDir().toString())
+ .build(), true);
+
+ assertThat(tuple.v1().get("yaml.config.exists"), equalTo("true"));
+ assertThat(tuple.v1().get("json.config.exists"), equalTo("true"));
+ assertThat(tuple.v1().get("properties.config.exists"), equalTo("true"));
+ }
+
+ @Test
+ public void testReplacePromptPlaceholders() {
+ final List<String> replacedSecretProperties = new ArrayList<>();
+ final List<String> replacedTextProperties = new ArrayList<>();
+ final Terminal terminal = new CliToolTestCase.MockTerminal() {
+ @Override
+ public char[] readSecret(String message, Object... args) {
+ for (Object arg : args) {
+ replacedSecretProperties.add((String) arg);
+ }
+ return "replaced".toCharArray();
+ }
+
+ @Override
+ public String readText(String message, Object... args) {
+ for (Object arg : args) {
+ replacedTextProperties.add((String) arg);
+ }
+ return "text";
+ }
+ };
+
+ Settings.Builder builder = settingsBuilder()
+ .put("password.replace", InternalSettingsPreparer.SECRET_PROMPT_VALUE)
+ .put("dont.replace", "prompt:secret")
+ .put("dont.replace2", "_prompt:secret_")
+ .put("dont.replace3", "_prompt:text__")
+ .put("dont.replace4", "__prompt:text_")
+ .put("dont.replace5", "prompt:secret__")
+ .put("replace_me", InternalSettingsPreparer.TEXT_PROMPT_VALUE);
+ Settings settings = builder.build();
+ settings = InternalSettingsPreparer.replacePromptPlaceholders(settings, terminal);
+
+ assertThat(replacedSecretProperties.size(), is(1));
+ assertThat(replacedTextProperties.size(), is(1));
+ assertThat(settings.get("password.replace"), equalTo("replaced"));
+ assertThat(settings.get("replace_me"), equalTo("text"));
+
+ // verify other values unchanged
+ assertThat(settings.get("dont.replace"), equalTo("prompt:secret"));
+ assertThat(settings.get("dont.replace2"), equalTo("_prompt:secret_"));
+ assertThat(settings.get("dont.replace3"), equalTo("_prompt:text__"));
+ assertThat(settings.get("dont.replace4"), equalTo("__prompt:text_"));
+ assertThat(settings.get("dont.replace5"), equalTo("prompt:secret__"));
+ }
+
+ @Test
+ public void testReplaceSecretPromptPlaceholderWithNullTerminal() {
+ Settings.Builder builder = settingsBuilder()
+ .put("replace_me1", InternalSettingsPreparer.SECRET_PROMPT_VALUE);
+ try {
+ InternalSettingsPreparer.replacePromptPlaceholders(builder.build(), null);
+ fail("an exception should have been thrown since no terminal was provided!");
+ } catch (UnsupportedOperationException e) {
+ assertThat(e.getMessage(), containsString("with value [" + InternalSettingsPreparer.SECRET_PROMPT_VALUE + "]"));
+ }
+ }
+
+ @Test
+ public void testReplaceTextPromptPlaceholderWithNullTerminal() {
+ Settings.Builder builder = settingsBuilder()
+ .put("replace_me1", InternalSettingsPreparer.TEXT_PROMPT_VALUE);
+ try {
+ InternalSettingsPreparer.replacePromptPlaceholders(builder.build(), null);
+ fail("an exception should have been thrown since no terminal was provided!");
+ } catch (UnsupportedOperationException e) {
+ assertThat(e.getMessage(), containsString("with value [" + InternalSettingsPreparer.TEXT_PROMPT_VALUE + "]"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
new file mode 100644
index 0000000000..7b94b5ef11
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.nodesinfo.plugin.dummy1.TestPlugin;
+import org.elasticsearch.nodesinfo.plugin.dummy2.TestNoVersionPlugin;
+import org.elasticsearch.plugins.PluginTestCase;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.List;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.client.Requests.nodesInfoRequest;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for the Nodes Info API, including reporting of site and JVM plugins.
+ */
+@ClusterScope(scope= Scope.TEST, numDataNodes =0)
+public class SimpleNodesInfoTests extends PluginTestCase {
+
+ static final class Fields {
+ static final String SITE_PLUGIN = "dummy";
+ static final String SITE_PLUGIN_DESCRIPTION = "This is a description for a dummy test site plugin.";
+ static final String SITE_PLUGIN_VERSION = "0.0.7-BOND-SITE";
+ }
+
+
+ @Test
+ public void testNodesInfos() throws Exception {
+ List<String> nodesIds = internalCluster().startNodesAsync(2).get();
+ final String node_1 = nodesIds.get(0);
+ final String node_2 = nodesIds.get(1);
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().localNodeId();
+ String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().localNodeId();
+ logger.info("--> started nodes: " + server1NodeId + " and " + server2NodeId);
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest()).actionGet();
+ assertThat(response.getNodes().length, is(2));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server1NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+
+ response = client().admin().cluster().nodesInfo(nodesInfoRequest(server2NodeId)).actionGet();
+ assertThat(response.getNodes().length, is(1));
+ assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
+ }
+
+ /**
+ * Use case is to start 4 nodes:
+ * <ul>
+ * <li>1 : no plugin</li>
+ * <li>2 : one site plugin (with a es-plugin.properties file)</li>
+ * <li>3 : one java plugin</li>
+ * <li>4 : one site plugin and 2 java plugins (included the previous one)</li>
+ * </ul>
+ * We test here that NodeInfo API with plugin option give us the right results.
+ * @throws URISyntaxException
+ */
+ @Test
+ public void testNodeInfoPlugin() throws URISyntaxException {
+ // We start four nodes
+ // The first has no plugin
+ String server1NodeId = startNodeWithPlugins(1);
+        // The second has one site plugin with an es-plugin.properties file (description and version)
+ String server2NodeId = startNodeWithPlugins(2);
+ // The third has one java plugin
+ String server3NodeId = startNodeWithPlugins(3,TestPlugin.class.getName());
+ // The fourth has one java plugin and one site plugin
+ String server4NodeId = startNodeWithPlugins(4,TestNoVersionPlugin.class.getName());
+
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("4")).actionGet();
+ logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
+ logger.info("--> full json answer, status " + response.toString());
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, server1NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, server2NodeId,
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN), // Site Plugin
+ Lists.newArrayList(Fields.SITE_PLUGIN_DESCRIPTION),
+ Lists.newArrayList(Fields.SITE_PLUGIN_VERSION));
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, server3NodeId,
+ Lists.newArrayList(TestPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No site Plugin
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, server4NodeId,
+ Lists.newArrayList(TestNoVersionPlugin.Fields.NAME), // JVM Plugin
+ Lists.newArrayList(TestNoVersionPlugin.Fields.DESCRIPTION),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
+ Lists.newArrayList(Fields.SITE_PLUGIN, TestNoVersionPlugin.Fields.NAME),// Site Plugin
+ Lists.newArrayList(PluginInfo.DESCRIPTION_NOT_AVAILABLE),
+ Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE));
+ }
+
+ public String startNodeWithPlugins(int nodeId, String ... pluginClassNames) throws URISyntaxException {
+ return startNodeWithPlugins(Settings.EMPTY, "/org/elasticsearch/nodesinfo/node" + Integer.toString(nodeId) + "/", pluginClassNames);
+ }
+
+
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java b/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
new file mode 100644
index 0000000000..274e5e51a0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy1/TestPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy1;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java b/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
new file mode 100644
index 0000000000..58b5ee007f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/nodesinfo/plugin/dummy2/TestNoVersionPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nodesinfo.plugin.dummy2;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class TestNoVersionPlugin extends AbstractPlugin {
+
+ static final public class Fields {
+ static public final String NAME = "test-no-version-plugin";
+ static public final String DESCRIPTION = NAME + " description";
+ }
+
+ @Override
+ public String name() {
+ return Fields.NAME;
+ }
+
+ @Override
+ public String description() {
+ return Fields.DESCRIPTION;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
new file mode 100644
index 0000000000..2390e3b9f8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.operateAllIndices;
+
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/** Verifies that wildcard/_all destructive operations are rejected when {@code action.destructive_requires_name} is set.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+    // Combined into a single test method for speed, since the cluster scope is TEST
+ // The cluster scope is test b/c we can't clear cluster settings.
+ public void testDestructiveOperations() throws Exception {
+ Settings settings = Settings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareDelete("1index").get());
+
+ try {
+ // should fail since index1 is the only index.
+ client().admin().indices().prepareDelete("i*").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ try {
+ client().admin().indices().prepareDelete("_all").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ settings = Settings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareDelete("_all").get());
+ assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false));
+
+ // end delete index:
+ // close index:
+ settings = Settings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, true)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+
+ assertAcked(client().admin().indices().prepareCreate("index1").get());
+ assertAcked(client().admin().indices().prepareCreate("1index").get());
+ ensureYellow();// wait for primaries to be allocated
+ // Should succeed, since no wildcards
+ assertAcked(client().admin().indices().prepareClose("1index").get());
+
+ try {
+ client().admin().indices().prepareClose("_all").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ try {
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ try {
+ client().admin().indices().prepareClose("*").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ try {
+ assertAcked(client().admin().indices().prepareOpen("*").get());
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+
+ settings = Settings.builder()
+ .put(DestructiveOperations.REQUIRES_NAME, false)
+ .build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().indices().prepareClose("_all").get());
+ assertAcked(client().admin().indices().prepareOpen("_all").get());
+
+ // end close index:
+ client().admin().indices().prepareDelete("_all").get();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledTest.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledTest.java
new file mode 100644
index 0000000000..fa8762861e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.options.detailederrors;
+
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.http.netty.NettyHttpServerTransport;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.rest.client.http.HttpDeleteWithEntity;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Tests that when disabling detailed errors, a request with the error_trace parameter returns an HTTP 400
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1)
+public class DetailedErrorsDisabledTest extends ElasticsearchIntegrationTest {
+
+ // Build our cluster settings
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(Node.HTTP_ENABLED, true)
+ .put(NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED, false)
+ .build();
+ }
+
+ @Test
+ public void testThatErrorTraceParamReturns400() throws Exception {
+ // Make the HTTP request
+ HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault())
+ .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class))
+ .addParam("error_trace", "true")
+ .method(HttpDeleteWithEntity.METHOD_NAME)
+ .execute();
+
+ assertThat(response.getHeaders().get("Content-Type"), is("application/json"));
+ assertThat(response.getBody(), is("{\"error\":\"error traces in responses are disabled.\"}"));
+ assertThat(response.getStatusCode(), is(400));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledTest.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledTest.java
new file mode 100644
index 0000000000..979e362eae
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.options.detailederrors;
+
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.rest.client.http.HttpDeleteWithEntity;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * Tests that by default the error_trace parameter can be used to show stack traces
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1)
+public class DetailedErrorsEnabledTest extends ElasticsearchIntegrationTest {
+
+ // Build our cluster settings
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(Node.HTTP_ENABLED, true)
+ .build();
+ }
+
+ @Test
+ public void testThatErrorTraceWorksByDefault() throws Exception {
+ // Make the HTTP request
+ HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault())
+ .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class))
+ .path("/")
+ .addParam("error_trace", "true")
+ .method(HttpDeleteWithEntity.METHOD_NAME)
+ .execute();
+
+ assertThat(response.getHeaders().get("Content-Type"), containsString("application/json"));
+ assertThat(response.getBody(), containsString("\"error_trace\":{\"message\":\"Validation Failed"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
new file mode 100644
index 0000000000..82e61defa1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/ConcurrentPercolatorTests.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+
+/**
+ *
+ */
+public class ConcurrentPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleConcurrentPercolator() throws Exception {
+ // We need to index a document / define mapping, otherwise field1 doesn't get recognized as a number field.
+ // If we don't do this, then 'test2' percolate query gets parsed as a TermQuery and not a RangeQuery.
+ // The percolate api doesn't parse the doc if no queries have been registered, so it can't lazily create a mapping
+ assertAcked(prepareCreate("index").addMapping("type", "field1", "type=long", "field2", "type=string")); // random # shards better has a mapping!
+ ensureGreen();
+
+ final BytesReference onlyField1 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .endObject().endObject().bytes();
+ final BytesReference onlyField2 = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+ final BytesReference bothFields = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject().bytes();
+
+ client().prepareIndex("index", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("index", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+ refresh(); // make sure it's refreshed
+
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicInteger counts = new AtomicInteger(0);
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>();
+ Thread[] threads = new Thread[scaledRandomIntBetween(2, 5)];
+ final int numberOfPercolations = scaledRandomIntBetween(1000, 10000);
+
+ for (int i = 0; i < threads.length; i++) {
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ start.await();
+ while (!stop.get()) {
+ int count = counts.incrementAndGet();
+ if ((count > numberOfPercolations)) {
+ stop.set(true);
+ }
+ PercolateResponse percolate;
+ if (count % 3 == 0) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(bothFields)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContainingInAnyOrder("test1", "test2"));
+ } else if (count % 3 == 1) {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test1"));
+ } else {
+ percolate = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1)
+ .execute().actionGet();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "index"), arrayContaining("test2"));
+ }
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ } catch (Throwable e) {
+ exceptionHolder.set(e);
+ Thread.currentThread().interrupt();
+ }
+ }
+ };
+ threads[i] = new Thread(r);
+ threads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : threads) {
+ thread.join();
+ }
+
+ Throwable assertionError = exceptionHolder.get();
+ if (assertionError != null) {
+ assertionError.printStackTrace();
+ }
+ assertThat(assertionError + " should be null", assertionError, nullValue());
+ }
+
+ @Test
+ public void testConcurrentAddingAndPercolating() throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type", "field1", "type=string", "field2", "type=string"));
+ ensureGreen();
+ final int numIndexThreads = scaledRandomIntBetween(1, 3);
+ final int numPercolateThreads = scaledRandomIntBetween(2, 6);
+ final int numPercolatorOperationsPerThread = scaledRandomIntBetween(100, 1000);
+
+ final Set<Throwable> exceptionsHolder = ConcurrentCollections.newConcurrentSet();
+ final CountDownLatch start = new CountDownLatch(1);
+ final AtomicInteger runningPercolateThreads = new AtomicInteger(numPercolateThreads);
+ final AtomicInteger type1 = new AtomicInteger();
+ final AtomicInteger type2 = new AtomicInteger();
+ final AtomicInteger type3 = new AtomicInteger();
+
+ final AtomicInteger idGen = new AtomicInteger();
+
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ for (int i = 0; i < numIndexThreads; i++) {
+ final Random rand = new Random(getRandom().nextLong());
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ XContentBuilder onlyField2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field2", "value")).endObject();
+ XContentBuilder field1And2 = XContentFactory.jsonBuilder().startObject()
+ .field("query", boolQuery().must(termQuery("field1", "value")).must(termQuery("field2", "value"))).endObject();
+
+ start.await();
+ while (runningPercolateThreads.get() > 0) {
+ Thread.sleep(100);
+ int x = rand.nextInt(3);
+ String id = Integer.toString(idGen.incrementAndGet());
+ IndexResponse response;
+ switch (x) {
+ case 0:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField1)
+ .execute().actionGet();
+ type1.incrementAndGet();
+ break;
+ case 1:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(onlyField2)
+ .execute().actionGet();
+ type2.incrementAndGet();
+ break;
+ case 2:
+ response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(field1And2)
+ .execute().actionGet();
+ type3.incrementAndGet();
+ break;
+ default:
+ throw new IllegalStateException("Illegal x=" + x);
+ }
+ assertThat(response.getId(), equalTo(id));
+ assertThat(response.getVersion(), equalTo(1l));
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ Thread[] percolateThreads = new Thread[numPercolateThreads];
+ for (int i = 0; i < numPercolateThreads; i++) {
+ final Random rand = new Random(getRandom().nextLong());
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder onlyField1Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ XContentBuilder onlyField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field2", "value")
+ .endObject().endObject();
+ XContentBuilder field1AndField2Doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .field("field2", "value")
+ .endObject().endObject();
+ start.await();
+ for (int counter = 0; counter < numPercolatorOperationsPerThread; counter++) {
+ int x = rand.nextInt(3);
+ int atLeastExpected;
+ PercolateResponse response;
+ switch (x) {
+ case 0:
+ atLeastExpected = type1.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField1Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 1:
+ atLeastExpected = type2.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(onlyField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ case 2:
+ atLeastExpected = type3.get();
+ response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(field1AndField2Doc).execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, greaterThanOrEqualTo(atLeastExpected));
+ break;
+ }
+ }
+ } catch (Throwable t) {
+ exceptionsHolder.add(t);
+ logger.error("Error in percolate thread...", t);
+ } finally {
+ runningPercolateThreads.decrementAndGet();
+ }
+ }
+ };
+ percolateThreads[i] = new Thread(r);
+ percolateThreads[i].start();
+ }
+
+ start.countDown();
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ for (Thread thread : percolateThreads) {
+ thread.join();
+ }
+
+ for (Throwable t : exceptionsHolder) {
+ logger.error("Unexpected exception {}", t.getMessage(), t);
+ }
+ assertThat(exceptionsHolder.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConcurrentAddingAndRemovingWhilePercolating() throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type", "field1", "type=string"));
+ ensureGreen();
+ final int numIndexThreads = scaledRandomIntBetween(1, 3);
+ final int numberPercolateOperation = scaledRandomIntBetween(10, 100);
+
+ final AtomicReference<Throwable> exceptionHolder = new AtomicReference<>(null);
+ final AtomicInteger idGen = new AtomicInteger(0);
+ final Set<String> liveIds = ConcurrentCollections.newConcurrentSet();
+ final AtomicBoolean run = new AtomicBoolean(true);
+ Thread[] indexThreads = new Thread[numIndexThreads];
+ final Semaphore semaphore = new Semaphore(numIndexThreads, true);
+ for (int i = 0; i < indexThreads.length; i++) {
+ final Random rand = new Random(getRandom().nextLong());
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value")).endObject();
+ outer:
+ while (run.get()) {
+ semaphore.acquire();
+ try {
+ if (!liveIds.isEmpty() && rand.nextInt(100) < 19) {
+ String id;
+ do {
+ if (liveIds.isEmpty()) {
+ continue outer;
+ }
+ id = Integer.toString(randomInt(idGen.get()));
+ } while (!liveIds.remove(id));
+
+ DeleteResponse response = client().prepareDelete("index", PercolatorService.TYPE_NAME, id)
+ .execute().actionGet();
+ assertThat(response.getId(), equalTo(id));
+ assertThat("doc[" + id + "] should have been deleted, but isn't", response.isFound(), equalTo(true));
+ } else {
+ String id = Integer.toString(idGen.getAndIncrement());
+ IndexResponse response = client().prepareIndex("index", PercolatorService.TYPE_NAME, id)
+ .setSource(doc)
+ .execute().actionGet();
+ liveIds.add(id);
+ assertThat(response.isCreated(), equalTo(true)); // We only add new docs
+ assertThat(response.getId(), equalTo(id));
+ }
+ } finally {
+ semaphore.release();
+ }
+ }
+ } catch (InterruptedException iex) {
+ logger.error("indexing thread was interrupted...", iex);
+ run.set(false);
+ } catch (Throwable t) {
+ run.set(false);
+ exceptionHolder.set(t);
+ logger.error("Error in indexing thread...", t);
+ }
+ }
+ };
+ indexThreads[i] = new Thread(r);
+ indexThreads[i].start();
+ }
+
+ XContentBuilder percolateDoc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value")
+ .endObject().endObject();
+ for (int counter = 0; counter < numberPercolateOperation; counter++) {
+ Thread.sleep(5);
+ semaphore.acquire(numIndexThreads);
+ try {
+ if (!run.get()) {
+ break;
+ }
+ int atLeastExpected = liveIds.size();
+ PercolateResponse response = client().preparePercolate().setIndices("index").setDocumentType("type")
+ .setSource(percolateDoc).execute().actionGet();
+ assertThat(response.getShardFailures(), emptyArray());
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getMatches().length, equalTo(atLeastExpected));
+ } finally {
+ semaphore.release(numIndexThreads);
+ }
+ }
+ run.set(false);
+ for (Thread thread : indexThreads) {
+ thread.join();
+ }
+ assertThat("exceptionHolder should have been empty, but holds: " + exceptionHolder.toString(), exceptionHolder.get(), nullValue());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
new file mode 100644
index 0000000000..46982cb500
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/MultiPercolatorTests.java
@@ -0,0 +1,387 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class MultiPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testBasics() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string"));
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ MultiPercolateResponse response = client().prepareMultiPercolate()
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
+ .add(client().preparePercolate() // non existing doc, so error element
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5")))
+ .execute().actionGet();
+
+ MultiPercolateResponse.Item item = response.getItems()[0];
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(item.errorMessage(), nullValue());
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ item = response.getItems()[1];
+ assertThat(item.errorMessage(), nullValue());
+
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ item = response.getItems()[2];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 4l);
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ item = response.getItems()[3];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 1l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
+
+ item = response.getItems()[4];
+ assertThat(item.getResponse(), nullValue());
+ assertThat(item.errorMessage(), notNullValue());
+ assertThat(item.errorMessage(), containsString("document missing"));
+ }
+
+ @Test
+ public void testWithRouting() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string"));
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setRouting("a")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setRouting("a")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setRouting("a")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setRouting("a")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ MultiPercolateResponse response = client().prepareMultiPercolate()
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setRouting("a")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setRouting("a")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setRouting("a")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject())))
+ .add(client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setRouting("a")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject())))
+ .add(client().preparePercolate() // non existing doc, so error element
+ .setIndices("test").setDocumentType("type")
+ .setRouting("a")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5")))
+ .execute().actionGet();
+
+ MultiPercolateResponse.Item item = response.getItems()[0];
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(item.errorMessage(), nullValue());
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ item = response.getItems()[1];
+ assertThat(item.errorMessage(), nullValue());
+
+ assertMatchCount(item.response(), 2l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ item = response.getItems()[2];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 4l);
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ item = response.getItems()[3];
+ assertThat(item.errorMessage(), nullValue());
+ assertMatchCount(item.response(), 1l);
+ assertThat(item.getResponse().getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(item.getResponse().getMatches(), "test"), arrayContaining("4"));
+
+ item = response.getItems()[4];
+ assertThat(item.getResponse(), nullValue());
+ assertThat(item.errorMessage(), notNullValue());
+ assertThat(item.errorMessage(), containsString("document missing"));
+ }
+
+    @Test
+    // Get-request based multi-percolate: each item fetches a stored document by id
+    // and percolates it, instead of embedding the document in the request.
+    public void testExistingDocsOnly() throws Exception {
+        createIndex("test");
+
+        int numQueries = randomIntBetween(50, 100);
+        logger.info("--> register a queries");
+        for (int i = 0; i < numQueries; i++) {
+            // Every registered query is match-all, so a successful percolation
+            // of any document must match all numQueries queries.
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+                    .execute().actionGet();
+        }
+
+        // NOTE(review): the source object is never closed with endObject(), unlike
+        // every other jsonBuilder() usage in this file — confirm this is tolerated.
+        client().prepareIndex("test", "type", "1")
+                .setSource(jsonBuilder().startObject().field("field", "a"))
+                .execute().actionGet();
+
+        MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+        int numPercolateRequest = randomIntBetween(50, 100);
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+                            .setIndices("test").setDocumentType("type"));
+        }
+
+        // All items reference the existing doc id "1": none may fail, and each
+        // must match every registered query.
+        MultiPercolateResponse response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest));
+        for (MultiPercolateResponse.Item item : response) {
+            assertThat(item.isFailure(), equalTo(false));
+            assertMatchCount(item.response(), numQueries);
+            assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+        }
+
+        // Non existing doc
+        builder = client().prepareMultiPercolate();
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+                            .setIndices("test").setDocumentType("type"));
+        }
+
+        // id "2" was never indexed: every item must fail with "document missing"
+        // and carry no percolate response at all.
+        response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest));
+        for (MultiPercolateResponse.Item item : response) {
+            assertThat(item.isFailure(), equalTo(true));
+            assertThat(item.errorMessage(), containsString("document missing"));
+            assertThat(item.getResponse(), nullValue());
+        }
+
+        // One existing doc
+        builder = client().prepareMultiPercolate();
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+                            .setIndices("test").setDocumentType("type"));
+        }
+        builder.add(
+                client().preparePercolate()
+                        .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+                        .setIndices("test").setDocumentType("type"));
+
+        // Only the appended last item (existing id "1") is asserted here.
+        // NOTE(review): the preceding numPercolateRequest items are expected to
+        // fail as above but their failure is not re-asserted — consider adding it.
+        response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+        assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+        assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+        assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+    }
+
+    @Test
+    // Inline-document multi-percolate: each item embeds the document to percolate.
+    // Also exercises how malformed item sources surface as shard-level failures.
+    public void testWithDocsOnly() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        NumShards test = getNumShards("test");
+
+        int numQueries = randomIntBetween(50, 100);
+        logger.info("--> register a queries");
+        for (int i = 0; i < numQueries; i++) {
+            // All queries are match-all, so every valid percolation matches them all.
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+                    .execute().actionGet();
+        }
+
+        MultiPercolateRequestBuilder builder = client().prepareMultiPercolate();
+        int numPercolateRequest = randomIntBetween(50, 100);
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setIndices("test").setDocumentType("type")
+                            .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+        }
+
+        MultiPercolateResponse response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest));
+        for (MultiPercolateResponse.Item item : response) {
+            assertThat(item.isFailure(), equalTo(false));
+            assertMatchCount(item.response(), numQueries);
+            assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+        }
+
+        // All illegal json
+        builder = client().prepareMultiPercolate();
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setIndices("test").setDocumentType("type")
+                            .setSource("illegal json"));
+        }
+
+        // Malformed source is detected per shard, not per item: the item itself is
+        // NOT a failure, but every primary shard reports a parse failure (HTTP 500).
+        response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest));
+        for (MultiPercolateResponse.Item item : response) {
+            assertThat(item.isFailure(), equalTo(false));
+            assertThat(item.getResponse().getSuccessfulShards(), equalTo(0));
+            assertThat(item.getResponse().getShardFailures().length, equalTo(test.numPrimaries));
+            for (ShardOperationFailedException shardFailure : item.getResponse().getShardFailures()) {
+                assertThat(shardFailure.reason(), containsString("Failed to derive xcontent from"));
+                assertThat(shardFailure.status().getStatus(), equalTo(500));
+            }
+        }
+
+        // one valid request
+        builder = client().prepareMultiPercolate();
+        for (int i = 0; i < numPercolateRequest; i++) {
+            builder.add(
+                    client().preparePercolate()
+                            .setIndices("test").setDocumentType("type")
+                            .setSource("illegal json"));
+        }
+        builder.add(
+                client().preparePercolate()
+                        .setIndices("test").setDocumentType("type")
+                        .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "a").endObject())));
+
+        // The single valid item (appended last) must still succeed despite all
+        // other items in the same multi-request being malformed.
+        response = builder.execute().actionGet();
+        assertThat(response.items().length, equalTo(numPercolateRequest + 1));
+        assertThat(response.items()[numPercolateRequest].isFailure(), equalTo(false));
+        assertMatchCount(response.items()[numPercolateRequest].response(), numQueries);
+        assertThat(response.items()[numPercolateRequest].getResponse().getMatches().length, equalTo(numQueries));
+    }
+
+
+    @Test
+    // Multi-percolate against an index with a nested mapping: the first item uses a
+    // non-matching nested doc, the second a matching one (expects query "Q" to hit).
+    public void testNestedMultiPercolation() throws IOException {
+        initNestedIndexAndPercolation();
+        MultiPercolateRequestBuilder mpercolate= client().prepareMultiPercolate();
+        mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company"));
+        mpercolate.add(client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company"));
+        MultiPercolateResponse response = mpercolate.get();
+        assertEquals(response.getItems()[0].getResponse().getMatches().length, 0);
+        assertEquals(response.getItems()[1].getResponse().getMatches().length, 1);
+        assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q");
+    }
+
+    // Creates index "nestedindex" (type "company" with a nested "employee" object)
+    // and registers one percolator query "Q" matching employee.name "virginia potts".
+    void initNestedIndexAndPercolation() throws IOException {
+        XContentBuilder mapping = XContentFactory.jsonBuilder();
+        mapping.startObject().startObject("properties").startObject("companyname").field("type", "string").endObject()
+                .startObject("employee").field("type", "nested").startObject("properties")
+                .startObject("name").field("type", "string").endObject().endObject().endObject().endObject()
+                .endObject();
+
+        assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping));
+        ensureGreen("nestedindex");
+
+        // Nested query with AND operator: both terms must appear in one employee.name.
+        client().prepareIndex("nestedindex", PercolatorService.TYPE_NAME, "Q").setSource(jsonBuilder().startObject()
+                .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(MatchQueryBuilder.Operator.AND)).scoreMode("avg")).endObject()).get();
+
+        refresh();
+
+    }
+
+    // Document that matches query "Q": one nested employee is exactly "virginia potts".
+    XContentBuilder getMatchingNestedDoc() throws IOException {
+        XContentBuilder doc = XContentFactory.jsonBuilder();
+        doc.startObject().field("companyname", "stark").startArray("employee")
+                .startObject().field("name", "virginia potts").endObject()
+                .startObject().field("name", "tony stark").endObject()
+                .endArray().endObject();
+        return doc;
+    }
+
+    // Document that must NOT match "Q": the name tokens are split across two nested
+    // employees ("virginia stark" / "tony potts"), so the AND match fails per object.
+    XContentBuilder getNotMatchingNestedDoc() throws IOException {
+        XContentBuilder doc = XContentFactory.jsonBuilder();
+        doc.startObject().field("companyname", "notstark").startArray("employee")
+                .startObject().field("name", "virginia stark").endObject()
+                .startObject().field("name", "tony potts").endObject()
+                .endArray().endObject();
+        return doc;
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java
new file mode 100644
index 0000000000..4c2cfcf487
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorBackwardsCompatibilityTests.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.index.percolator.PercolatorException;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.hamcrest.Matchers.instanceOf;
+
+/**
+ */
+// Verifies that percolator field-resolution strictness depends on the index
+// creation version: pre-1.4.0 indices accept queries on unmapped fields,
+// newly created indices reject them.
+public class PercolatorBackwardsCompatibilityTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testPercolatorUpgrading() throws Exception {
+        // Simulates an index created on an node before 1.4.0 where the field resolution isn't strict.
+        assertAcked(prepareCreate("test")
+                .setSettings(settings(Version.V_1_3_0).put(indexSettings())));
+        ensureGreen();
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            // "field1" is not mapped; the lenient (pre-1.4.0) index must accept it.
+            docs[i] = client().prepareIndex("test", PercolatorService.TYPE_NAME)
+                    .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject());
+        }
+        indexRandom(true, docs);
+        PercolateResponse response = client().preparePercolate().setIndices("test").setDocumentType("type")
+                .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("field1", "value"))
+                .get();
+        assertMatchCount(response, (long) numDocs);
+
+        // After upgrade indices, indices created before the upgrade allow that queries refer to fields not available in mapping
+        client().prepareIndex("test", PercolatorService.TYPE_NAME)
+                .setSource(jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject()).get();
+
+        // However on new indices, the field resolution is strict, no queries with unmapped fields are allowed
+        createIndex("test2");
+        try {
+            client().prepareIndex("test2", PercolatorService.TYPE_NAME)
+                    .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject()).get();
+            fail();
+        } catch (PercolatorException e) {
+            // NOTE(review): printStackTrace() pollutes test output — prefer the test
+            // logger, or drop it since the assertion below covers the failure cause.
+            e.printStackTrace();
+            assertThat(e.getRootCause(), instanceOf(QueryParsingException.class));
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
new file mode 100644
index 0000000000..e14dbf60da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.percolate.PercolateRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+// Integration of aggregations (incl. pipeline aggs) with the percolate API:
+// only checks that aggregations run and report sane results on percolation,
+// not the aggregation framework itself.
+public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    // Just test the integration with facets and aggregations, not the facet and aggregation functionality!
+    public void testFacetsAndAggregations() throws Exception {
+        assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string", "field2", "type=string"));
+        ensureGreen();
+
+        // Register numQueries percolator queries spread over numUniqueQueries distinct
+        // field1 values; expectedCount[v] records how many queries share value v.
+        int numQueries = scaledRandomIntBetween(250, 500);
+        int numUniqueQueries = between(1, numQueries / 2);
+        String[] values = new String[numUniqueQueries];
+        for (int i = 0; i < values.length; i++) {
+            values[i] = "value" + i;
+        }
+        int[] expectedCount = new int[numUniqueQueries];
+
+        logger.info("--> registering {} queries", numQueries);
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            expectedCount[i % numUniqueQueries]++;
+            QueryBuilder queryBuilder = matchQuery("field1", value);
+            // "field2" is metadata on the query document; the terms agg buckets on it.
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute()
+                    .actionGet();
+        }
+        client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                    .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+            SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+            percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode));
+
+            // Randomize request shape (scoring vs. sorting, count-only) to cover
+            // the different percolate execution paths with aggs attached.
+            if (randomBoolean()) {
+                percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+            }
+            if (randomBoolean()) {
+                percolateRequestBuilder.setScore(true);
+            } else {
+                percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+            }
+
+            boolean countOnly = randomBoolean();
+            if (countOnly) {
+                percolateRequestBuilder.setOnlyCount(countOnly);
+            }
+
+            PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+            assertMatchCount(response, expectedCount[i % numUniqueQueries]);
+            if (!countOnly) {
+                assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries]));
+            }
+
+            // Exactly one terms agg "a" with a single bucket "b" whose doc count
+            // equals the number of matched queries (all carry field2 == "b").
+            List<Aggregation> aggregations = response.getAggregations().asList();
+            assertThat(aggregations.size(), equalTo(1));
+            assertThat(aggregations.get(0).getName(), equalTo("a"));
+            List<Terms.Bucket> buckets = new ArrayList<>(((Terms) aggregations.get(0)).getBuckets());
+            assertThat(buckets.size(), equalTo(1));
+            assertThat(buckets.get(0).getKeyAsString(), equalTo("b"));
+            assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length]));
+        }
+    }
+
+    @Test
+    // Just test the integration with facets and aggregations, not the facet and aggregation functionality!
+    public void testAggregationsAndPipelineAggregations() throws Exception {
+        assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string", "field2", "type=string"));
+        ensureGreen();
+
+        // Same setup as testFacetsAndAggregations, plus a max_bucket pipeline agg
+        // over the terms agg's bucket counts.
+        int numQueries = scaledRandomIntBetween(250, 500);
+        int numUniqueQueries = between(1, numQueries / 2);
+        String[] values = new String[numUniqueQueries];
+        for (int i = 0; i < values.length; i++) {
+            values[i] = "value" + i;
+        }
+        int[] expectedCount = new int[numUniqueQueries];
+
+        logger.info("--> registering {} queries", numQueries);
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            expectedCount[i % numUniqueQueries]++;
+            QueryBuilder queryBuilder = matchQuery("field1", value);
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute()
+                    .actionGet();
+        }
+        client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                    .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+            SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+            percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode));
+
+            if (randomBoolean()) {
+                percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+            }
+            if (randomBoolean()) {
+                percolateRequestBuilder.setScore(true);
+            } else {
+                percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+            }
+
+            boolean countOnly = randomBoolean();
+            if (countOnly) {
+                percolateRequestBuilder.setOnlyCount(countOnly);
+            }
+
+            // "max_a" selects the maximum _count across the buckets of "a".
+            percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket("max_a").setBucketsPaths("a>_count"));
+
+            PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+            assertMatchCount(response, expectedCount[i % numUniqueQueries]);
+            if (!countOnly) {
+                assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries]));
+            }
+
+            // Two aggs come back: the terms agg "a" and the pipeline result "max_a".
+            Aggregations aggregations = response.getAggregations();
+            assertThat(aggregations.asList().size(), equalTo(2));
+            Terms terms = aggregations.get("a");
+            assertThat(terms, notNullValue());
+            assertThat(terms.getName(), equalTo("a"));
+            List<Terms.Bucket> buckets = new ArrayList<>(terms.getBuckets());
+            assertThat(buckets.size(), equalTo(1));
+            assertThat(buckets.get(0).getKeyAsString(), equalTo("b"));
+            assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length]));
+
+            // With a single bucket "b", max_a must equal that bucket's count.
+            InternalBucketMetricValue maxA = aggregations.get("max_a");
+            assertThat(maxA, notNullValue());
+            assertThat(maxA.getName(), equalTo("max_a"));
+            assertThat(maxA.value(), equalTo((double) expectedCount[i % values.length]));
+            assertThat(maxA.keys(), equalTo(new String[] { "b" }));
+        }
+    }
+
+    @Test
+    // Smoke test: a significant_terms agg on a percolate request must not fail,
+    // even with no registered queries and an unmapped field.
+    public void testSignificantAggs() throws Exception {
+        client().admin().indices().prepareCreate("test").execute().actionGet();
+        ensureGreen();
+        PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject()))
+                .addAggregation(AggregationBuilders.significantTerms("a").field("field2"));
+        PercolateResponse response = percolateRequestBuilder.get();
+        assertNoFailures(response);
+    }
+
+    @Test
+    public void testSingleShardAggregations() throws Exception {
+        // NOTE(review): the settings key is the literal string "SETTING_NUMBER_OF_SHARDS",
+        // not the value of IndexMetaData.SETTING_NUMBER_OF_SHARDS ("index.number_of_shards").
+        // As written this likely does NOT force a single shard — confirm and fix.
+        assertAcked(prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).put("SETTING_NUMBER_OF_SHARDS", 1))
+                .addMapping("type", "field1", "type=string", "field2", "type=string"));
+        ensureGreen();
+
+        int numQueries = scaledRandomIntBetween(250, 500);
+
+        logger.info("--> registering {} queries", numQueries);
+        for (int i = 0; i < numQueries; i++) {
+            // All queries match "value0"; field2 alternates so the terms agg gets
+            // two candidate buckets ("a" and "b") with "a" the more frequent one.
+            String value = "value0";
+            QueryBuilder queryBuilder = matchQuery("field1", value);
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? "b" : "a").endObject())
+                    .execute()
+                    .actionGet();
+        }
+        client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+        for (int i = 0; i < numQueries; i++) {
+            String value = "value0";
+            PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                    .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+            // size(1) with shardSize(2) and term-order: only the top term survives.
+            SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+            percolateRequestBuilder.addAggregation(AggregationBuilders.terms("terms").field("field2").collectMode(aggCollectionMode)
+                    .order(Order.term(true)).shardSize(2).size(1));
+
+            if (randomBoolean()) {
+                percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+            }
+            if (randomBoolean()) {
+                percolateRequestBuilder.setScore(true);
+            } else {
+                percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+            }
+
+            boolean countOnly = randomBoolean();
+            if (countOnly) {
+                percolateRequestBuilder.setOnlyCount(countOnly);
+            }
+
+            percolateRequestBuilder.addAggregation(PipelineAggregatorBuilders.maxBucket("max_terms").setBucketsPaths("terms>_count"));
+
+            PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+            assertMatchCount(response, numQueries);
+            if (!countOnly) {
+                assertThat(response.getMatches(), arrayWithSize(numQueries));
+            }
+
+            Aggregations aggregations = response.getAggregations();
+            assertThat(aggregations.asList().size(), equalTo(2));
+            Terms terms = aggregations.get("terms");
+            assertThat(terms, notNullValue());
+            assertThat(terms.getName(), equalTo("terms"));
+            List<Terms.Bucket> buckets = new ArrayList<>(terms.getBuckets());
+            assertThat(buckets.size(), equalTo(1));
+            assertThat(buckets.get(0).getKeyAsString(), equalTo("a"));
+
+            // The pipeline max must point at the surviving (most frequent) key "a".
+            InternalBucketMetricValue maxA = aggregations.get("max_terms");
+            assertThat(maxA, notNullValue());
+            assertThat(maxA.getName(), equalTo("max_terms"));
+            assertThat(maxA.keys(), equalTo(new String[] { "a" }));
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
new file mode 100644
index 0000000000..07ef51f32c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/PercolatorTests.java
@@ -0,0 +1,2040 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.percolate.PercolateSourceBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.percolator.PercolatorException;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.index.query.functionscore.factor.FactorBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.common.settings.Settings.builder;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class PercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimple1() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field1", "value").execute().actionGet();
+ waitForConcreteMappingsOnAll("test", "type", "field1");
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search dummy doc, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test", "test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ logger.info("--> Percolate non existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
+ }
+ }
+
+ @Test
+ public void testSimple2() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=long,doc_values=true"));
+ ensureGreen();
+
+ // introduce the doc
+ XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
+
+ PercolateResponse response = client().preparePercolate().setSource(doc)
+ .setIndices("test").setDocumentType("type1")
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), emptyArray());
+ waitForConcreteMappingsOnAll("test", "type1", "field1", "field2");
+
+ // add first query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test1")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field2", "value")).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+
+ // add second query...
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "test2")
+ .setSource(XContentFactory.jsonBuilder().startObject().field("query", termQuery("field1", 1)).endObject())
+ .execute().actionGet();
+
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc)
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("test1", "test2"));
+
+
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "test2").execute().actionGet();
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(doc).execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("test1"));
+ }
+
+ @Test
+ public void testPercolateQueriesWithRouting() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .setRouting(Integer.toString(i % 2))
+ .execute().actionGet();
+ }
+
+ logger.info("--> Percolate doc with no routing");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 100l);
+ assertThat(response.getMatches(), arrayWithSize(100));
+
+ logger.info("--> Percolate doc with routing=0");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("0")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+
+ logger.info("--> Percolate doc with routing=1");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .setRouting("1")
+ .execute().actionGet();
+ assertMatchCount(response, 50l);
+ assertThat(response.getMatches(), arrayWithSize(50));
+ }
+
+ @Test
+ public void percolateOnRecreatedIndex() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("my-queries-index", "test", "1").setSource("field1", "value1").execute().actionGet();
+ waitForConcreteMappingsOnAll("my-queries-index", "test", "field1");
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku1")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ cluster().wipeIndices("test");
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("my-queries-index", "test", "1").setSource("field1", "value1").execute().actionGet();
+ waitForConcreteMappingsOnAll("my-queries-index", "test", "field1");
+ logger.info("--> register a query");
+ client().prepareIndex("my-queries-index", PercolatorService.TYPE_NAME, "kuku2")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+ }
+
+ @Test
+ // see #2814
+ public void percolateCustomAnalyzer() throws Exception {
+ Builder builder = builder();
+ builder.put("index.analysis.analyzer.lwhitespacecomma.tokenizer", "whitespacecomma");
+ builder.putArray("index.analysis.analyzer.lwhitespacecomma.filter", "lowercase");
+ builder.put("index.analysis.tokenizer.whitespacecomma.type", "pattern");
+ builder.put("index.analysis.tokenizer.whitespacecomma.pattern", "(,|\\s+)");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .startObject("properties")
+ .startObject("filingcategory").field("type", "string").field("analyzer", "lwhitespacecomma").endObject()
+ .endObject()
+ .endObject().endObject();
+
+ assertAcked(prepareCreate("test").setSettings(builder).addMapping("doc", mapping));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("source", "productizer")
+ .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.queryStringQuery("filingcategory:s")))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("filingcategory", "s").endObject()
+ .field("query", termQuery("source", "productizer"))
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ }
+
+ @Test
+ public void createIndexAndThenRegisterPercolator() throws Exception {
+ prepareCreate("test")
+ .addMapping("type1", "field1", "type=string")
+ .get();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .execute().actionGet();
+
+ refresh();
+ CountResponse countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertThat(countResponse.getCount(), equalTo(1l));
+
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setPreference("_local")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+
+ logger.info("--> delete the index");
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ logger.info("--> make sure percolated queries for it have been deleted as well");
+ countResponse = client().prepareCount()
+ .setQuery(matchAllQuery()).setTypes(PercolatorService.TYPE_NAME)
+ .execute().actionGet();
+ assertHitCount(countResponse, 0l);
+ }
+
+ @Test
+ public void multiplePercolators() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=string"));
+ ensureGreen();
+
+ logger.info("--> register a query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> register a query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+
+ }
+
+ @Test
+ public void dynamicAddingRemovingQueries() throws Exception {
+ assertAcked(
+ prepareCreate("test")
+ .addMapping("type1", "field1", "type=string")
+ );
+ ensureGreen();
+
+ logger.info("--> register a query 1");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value1").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+
+ logger.info("--> register a query 2");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "bubu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "green")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value2").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("bubu"));
+
+ logger.info("--> register a query 3");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "susu")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "red")
+ .field("query", termQuery("field1", "value2"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateSourceBuilder sourceBuilder = new PercolateSourceBuilder()
+ .setDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value2").endObject()))
+ .setQueryBuilder(termQuery("color", "red"));
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(sourceBuilder)
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("susu"));
+
+ logger.info("--> deleting query 1");
+ client().prepareDelete("test", PercolatorService.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").startObject("type1")
+ .field("field1", "value1")
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+ }
+
+ @Test
+ public void percolateWithSizeField() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_size").field("enabled", true).endObject()
+ .startObject("properties").startObject("field1").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ logger.info("--> percolate a document");
+ PercolateResponse percolate = client().preparePercolate().setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(percolate.getMatches(), "test"), arrayContaining("kuku"));
+ }
+
+ @Test
+ public void testPercolateStatistics() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> First percolate request");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ NumShards numShards = getNumShards("test");
+
+ IndicesStatsResponse indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries));
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); // number of data copies (primaries + replicas)
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ NodesStatsResponse nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ long percolateCount = 0;
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo((long) numShards.numPrimaries));
+
+ logger.info("--> Second percolate request");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+
+ indicesResponse = client().admin().indices().prepareStats().setPercolate(true).execute().actionGet();
+ assertThat(indicesResponse.getTotal().getPercolate().getCount(), equalTo((long) numShards.numPrimaries * 2));
+ assertThat(indicesResponse.getTotal().getPercolate().getCurrent(), equalTo(0l));
+ assertThat(indicesResponse.getTotal().getPercolate().getNumQueries(), equalTo((long)numShards.dataCopies)); //number of copies
+ assertThat(indicesResponse.getTotal().getPercolate().getMemorySizeInBytes(), equalTo(-1l));
+
+ percolateCount = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ }
+ assertThat(percolateCount, equalTo((long) numShards.numPrimaries *2));
+
+ // We might be faster than 1 ms, so run up to 1000 times until we have spent 1ms or more on percolating
+ boolean moreThanOneMs = false;
+ int counter = 3; // We have already run twice.
+ do {
+ indicesResponse = client().admin().indices().prepareStats("test").execute().actionGet();
+ if (indicesResponse.getTotal().getPercolate().getTimeInMillis() > 0) {
+ moreThanOneMs = true;
+ break;
+ }
+
+ logger.info("--> {}th percolate request", counter);
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
+ .execute().actionGet();
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("1"));
+ } while (++counter <= 1000);
+ assertTrue("Something is off, we should have spent at least 1ms on percolating...", moreThanOneMs);
+
+ long percolateSumTime = 0;
+ nodesResponse = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats nodeStats : nodesResponse) {
+ percolateCount += nodeStats.getIndices().getPercolate().getCount();
+ percolateSumTime += nodeStats.getIndices().getPercolate().getTimeInMillis();
+ }
+ assertThat(percolateSumTime, greaterThan(0l));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+
+ logger.info("--> Search normals docs, percolate queries must not be included");
+ SearchResponse searchResponse = client().prepareSearch("test").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(4L));
+ assertThat(searchResponse.getHits().getAt(0).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(1).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(2).type(), equalTo("type"));
+ assertThat(searchResponse.getHits().getAt(3).type(), equalTo("type"));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs_routing() throws Exception {
+ // Same matching scenario as percolating existing docs, but every doc is
+ // indexed with an explicit routing value. The percolate get request must
+ // pass the identical routing, otherwise the doc cannot be resolved.
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").setRouting("4").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").setRouting("3").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").setRouting("2").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").setRouting("1").execute().actionGet();
+
+ // Queries: 1 matches "b", 2 matches "c", 3 matches "b" AND "c",
+ // 4 matches everything.
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // Each get request repeats the routing used at index time.
+ logger.info("--> Percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("1").routing("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "4"));
+
+ logger.info("--> Percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").routing("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("3").routing("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), arrayWithSize(4));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4"));
+
+ logger.info("--> Percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("4").routing("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContaining("4"));
+ }
+
+ @Test
+ public void testPercolatingExistingDocs_versionCheck() throws Exception {
+ // Verifies that a version constraint on the percolate get request is
+ // honored: a stale version raises VersionConflictEngineException, and
+ // the same version succeeds once the doc has been reindexed to reach it.
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ logger.info("--> registering queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // Doc 2 was indexed once, so its current version is 1.
+ logger.info("--> Percolate existing doc with id 2 and version 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(1l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ fail("Error should have been thrown");
+ } catch (VersionConflictEngineException e) {
+ // expected: the stored doc is still at version 1
+ }
+
+ // Reindexing bumps doc 2 to version 2, so the same request now succeeds.
+ logger.info("--> Index doc with id for the second time");
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+
+ logger.info("--> Percolate existing doc with id 2 and version 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setGetRequest(Requests.getRequest("test").type("type").id("2").version(2l))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("2", "4"));
+ }
+
+ @Test
+ public void testPercolateMultipleIndicesAndAliases() throws Exception {
+ createIndex("test1", "test2");
+ ensureGreen();
+
+ logger.info("--> registering queries");
+ for (int i = 1; i <= 10; i++) {
+ String index = i % 2 == 0 ? "test1" : "test2";
+ client().prepareIndex(index, PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ }
+
+ logger.info("--> Percolate doc to index test1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test2");
+ response = client().preparePercolate()
+ .setIndices("test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Percolate doc to index test1 and test2");
+ response = client().preparePercolate()
+ .setIndices("test1", "test2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+
+ logger.info("--> Percolate doc to index test2 and test3, with ignore missing");
+ response = client().preparePercolate()
+ .setIndices("test1", "test3").setDocumentType("type")
+ .setIndicesOptions(IndicesOptions.lenientExpandOpen())
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+
+ logger.info("--> Adding aliases");
+ IndicesAliasesResponse aliasesResponse = client().admin().indices().prepareAliases()
+ .addAlias("test1", "my-alias1")
+ .addAlias("test2", "my-alias1")
+ .addAlias("test2", "my-alias2")
+ .setTimeout(TimeValue.timeValueHours(10))
+ .execute().actionGet();
+ assertTrue(aliasesResponse.isAcknowledged());
+
+ logger.info("--> Percolate doc to my-alias1");
+ response = client().preparePercolate()
+ .setIndices("my-alias1").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 10l);
+ assertThat(response.getMatches(), arrayWithSize(10));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), anyOf(equalTo("test1"), equalTo("test2")));
+ }
+
+ logger.info("--> Percolate doc to my-alias2");
+ response = client().preparePercolate()
+ .setIndices("my-alias2").setDocumentType("type")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", "value").endObject().endObject())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getIndex().string(), equalTo("test2"));
+ }
+ }
+
+ @Test
+ public void testPercolateWithAliasFilter() throws Exception {
+ // Alias filters must restrict which percolator queries are considered:
+ // alias "a" only sees query 1 (a=a), alias "b" only query 2 (a=b), and
+ // alias "c" matches no stored query at all.
+ assertAcked(prepareCreate("my-index")
+ .addMapping(PercolatorService.TYPE_NAME, "a", "type=string,index=not_analyzed")
+ .addAlias(new Alias("a").filter(QueryBuilders.termQuery("a", "a")))
+ .addAlias(new Alias("b").filter(QueryBuilders.termQuery("a", "b")))
+ .addAlias(new Alias("c").filter(QueryBuilders.termQuery("a", "c")))
+ );
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "a").endObject())
+ .get();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("a", "b").endObject())
+ .get();
+ refresh();
+
+ // Specifying only the document to percolate and no filter, sorting or aggs, the queries are retrieved from
+ // memory directly. Otherwise we need to retrieve those queries from lucene to be able to execute filters,
+ // aggregations and sorting on top of them. So this tests a different code execution path.
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("a")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(1l));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("1"));
+
+ response = client().preparePercolate()
+ .setIndices("b")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(1l));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("2"));
+
+
+ response = client().preparePercolate()
+ .setIndices("c")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(0l));
+
+ // Testing that the alias filter and the filter specified while percolating are both taken into account.
+ // Adding a percolate query forces the non-realtime (lucene-backed) path.
+ response = client().preparePercolate()
+ .setIndices("a")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .setPercolateQuery(QueryBuilders.matchAllQuery())
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(1l));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("1"));
+
+ response = client().preparePercolate()
+ .setIndices("b")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .setPercolateQuery(QueryBuilders.matchAllQuery())
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(1l));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("2"));
+
+
+ response = client().preparePercolate()
+ .setIndices("c")
+ .setDocumentType("my-type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("{}"))
+ .setPercolateQuery(QueryBuilders.matchAllQuery())
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getCount(), equalTo(0l));
+ }
+
+ @Test
+ public void testCountPercolation() throws Exception {
+ // Count-only percolation: setOnlyCount(true) must return the match count
+ // while leaving getMatches() null. Docs are supplied inline in three
+ // content types (json, yaml, smile) to cover the parsers.
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Add dummy doc");
+ client().prepareIndex("test", "type", "1").setSource("field1", "value").execute().actionGet();
+ waitForConcreteMappingsOnAll("test", "type", "field1");
+
+ // Queries: 1 matches "b", 2 matches "c", 3 matches "b" AND "c",
+ // 4 matches everything.
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "b").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(yamlBuilder().startObject().field("field1", "c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=b c");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(smileBuilder().startObject().field("field1", "b c").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate doc with field1=d");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "d").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+
+ // A count percolate of a non-existent stored doc must still fail loudly.
+ logger.info("--> Count percolate non existing doc");
+ try {
+ client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("5"))
+ .execute().actionGet();
+ fail("Exception should have been thrown");
+ } catch (DocumentMissingException e) {
+ // expected: doc id 5 was never indexed
+ }
+ }
+
+ @Test
+ public void testCountPercolatingExistingDocs() throws Exception {
+ // Count-only percolation of already-indexed docs (via get request):
+ // counts must match the full percolation results while getMatches()
+ // stays null.
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> Adding docs");
+ client().prepareIndex("test", "type", "1").setSource("field1", "b").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field1", "c").execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field1", "b c").execute().actionGet();
+ client().prepareIndex("test", "type", "4").setSource("field1", "d").execute().actionGet();
+
+ // Queries: 1 matches "b", 2 matches "c", 3 matches "b" AND "c",
+ // 4 matches everything.
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "b")).field("a", "b").endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "c")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", boolQuery()
+ .must(matchQuery("field1", "b"))
+ .must(matchQuery("field1", "c"))
+ ).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Count percolate existing doc with id 1");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 2");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("2"))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 3");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("3"))
+ .execute().actionGet();
+ assertMatchCount(response, 4l);
+ assertThat(response.getMatches(), nullValue());
+
+ logger.info("--> Count percolate existing doc with id 4");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type").setOnlyCount(true)
+ .setGetRequest(Requests.getRequest("test").type("type").id("4"))
+ .execute().actionGet();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), nullValue());
+ }
+
+ @Test
+ public void testPercolateSizingWithQueryAndFilter() throws Exception {
+ // Verifies that setSize() caps the number of returned matches, with and
+ // without an additional percolate query, in both count-only and full mode.
+ // Create the index the test actually uses; the original created "test"
+ // and silently relied on auto-creation of "my-index", so ensureGreen()
+ // waited on the wrong index.
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ int numLevels = randomIntBetween(1, 25);
+ long numQueriesPerLevel = randomIntBetween(10, 250);
+ long totalQueries = numLevels * numQueriesPerLevel;
+ logger.info("--> register " + totalQueries + " queries");
+ for (int level = 1; level <= numLevels; level++) {
+ for (int query = 1; query <= numQueriesPerLevel; query++) {
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, level + "-" + query)
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", level).endObject())
+ .execute().actionGet();
+ }
+ }
+
+ // No size limit: all queries must match.
+ boolean onlyCount = randomBoolean();
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) totalQueries));
+ }
+
+ // Size smaller than the total: count stays total, matches are capped.
+ int size = randomIntBetween(0, (int) totalQueries - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setSize(size)
+ .execute().actionGet();
+ assertMatchCount(response, totalQueries);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+
+ // The query / filter capabilities are NOT in realtime
+ client().admin().indices().prepareRefresh("my-index").execute().actionGet();
+
+ // Restricting to one random level must yield exactly that level's queries.
+ int runs = randomIntBetween(3, 16);
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo((int) numQueriesPerLevel));
+ }
+ }
+
+ // Percolate query combined with a size cap.
+ for (int i = 0; i < runs; i++) {
+ onlyCount = randomBoolean();
+ size = randomIntBetween(0, (int) numQueriesPerLevel - 1);
+ response = client().preparePercolate()
+ .setIndices("my-index").setDocumentType("my-type")
+ .setOnlyCount(onlyCount)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(termQuery("level", 1 + randomInt(numLevels - 1)))
+ .execute().actionGet();
+ assertMatchCount(response, numQueriesPerLevel);
+ if (!onlyCount) {
+ assertThat(response.getMatches().length, equalTo(size));
+ }
+ }
+ }
+
+ @Test
+ public void testPercolateScoreAndSorting() throws Exception {
+ // Scoring and sorting of percolator matches: query id doubles as the
+ // script-computed score ("level" field), so id and score can be checked
+ // against each other; a control map tracks the ids per "field1" value.
+ createIndex("my-index");
+ ensureGreen();
+
+ // Add a dummy doc, that should never interfere with percolate operations.
+ client().prepareIndex("my-index", "my-type", "1").setSource("field", "value").execute().actionGet();
+
+ // controlMap: field1 value -> ordered set of query ids carrying that value.
+ Map<Integer, NavigableSet<Integer>> controlMap = new HashMap<>();
+ long numQueries = randomIntBetween(100, 250);
+ logger.info("--> register " + numQueries + " queries");
+ for (int i = 0; i < numQueries; i++) {
+ int value = randomInt(10);
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", i).field("field1", value).endObject())
+ .execute().actionGet();
+ if (!controlMap.containsKey(value)) {
+ controlMap.put(value, new TreeSet<Integer>());
+ }
+ controlMap.get(value).add(i);
+ }
+ List<Integer> usedValues = new ArrayList<>(controlMap.keySet());
+ refresh();
+
+ // Only retrieve the score: each match's score must equal its id,
+ // because the script returns doc['level'] and level == id.
+ int runs = randomInt(27);
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 50);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+ for (int j = 0; j < response.getMatches().length; j++) {
+ String id = response.getMatches()[j].getId().string();
+ assertThat(Integer.valueOf(id), equalTo((int) response.getMatches()[j].getScore()));
+ }
+ }
+
+ // Sort the queries by the score: matches must come back in strictly
+ // descending id order, starting at the highest id.
+ for (int i = 0; i < runs; i++) {
+ int size = randomIntBetween(1, 10);
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertMatchCount(response, numQueries);
+ assertThat(response.getMatches().length, equalTo(size));
+
+ int expectedId = (int) (numQueries - 1);
+ for (PercolateResponse.Match match : response) {
+ assertThat(match.getId().string(), equalTo(Integer.toString(expectedId)));
+ assertThat(match.getScore(), equalTo((float) expectedId));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ expectedId--;
+ }
+ }
+
+
+ // Restrict to a random field1 value and compare the sorted matches
+ // against the control set for that value, highest id first.
+ for (int i = 0; i < runs; i++) {
+ int value = usedValues.get(randomInt(usedValues.size() - 1));
+ NavigableSet<Integer> levels = controlMap.get(value);
+ int size = randomIntBetween(1, levels.size());
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(size)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(
+ QueryBuilders.functionScoreQuery(matchQuery("field1", value), scriptFunction(new Script("doc['level'].value")))
+ .boostMode(
+ CombineFunction.REPLACE))
+ .execute().actionGet();
+
+ assertMatchCount(response, levels.size());
+ assertThat(response.getMatches().length, equalTo(Math.min(levels.size(), size)));
+ Iterator<Integer> levelIterator = levels.descendingIterator();
+ for (PercolateResponse.Match match : response) {
+ int controlLevel = levelIterator.next();
+ assertThat(match.getId().string(), equalTo(Integer.toString(controlLevel)));
+ assertThat(match.getScore(), equalTo((float) controlLevel));
+ assertThat(match.getIndex().string(), equalTo("my-index"));
+ }
+ }
+ }
+
+ @Test
+ public void testPercolateSortingWithNoSize() throws Exception {
+ // Sorting by score requires an explicit size; omitting it must fail every
+ // shard with a BAD_REQUEST rather than return results.
+ createIndex("my-index");
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .execute().actionGet();
+ refresh();
+
+ // With a size: matches come back score-sorted (level 2 before level 1).
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches()[0].getId().string(), equalTo("2"));
+ assertThat(response.getMatches()[0].getScore(), equalTo(2f));
+ assertThat(response.getMatches()[1].getId().string(), equalTo("1"));
+ assertThat(response.getMatches()[1].getScore(), equalTo(1f));
+
+ // Without a size: every shard must reject the request.
+ response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertThat(response.getCount(), equalTo(0l));
+ assertThat(response.getShardFailures().length, greaterThan(0));
+ for (ShardOperationFailedException failure : response.getShardFailures()) {
+ assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(failure.reason(), containsString("Can't sort if size isn't specified"));
+ }
+ }
+
+ @Test
+ public void testPercolateSorting_unsupportedField() throws Exception {
+ // Percolation only supports sorting by _score descending; sorting on an
+ // arbitrary field ("level") must fail on every primary with BAD_REQUEST.
+ client().admin().indices().prepareCreate("my-index")
+ .addMapping("my-type", "field", "type=string")
+ .addMapping(PercolatorService.TYPE_NAME, "level", "type=integer", "query", "type=object,enabled=false")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .get();
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 2).endObject())
+ .get();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .addSort(SortBuilders.fieldSort("level"))
+ .get();
+
+ // One failure per primary shard, all rejecting the field sort.
+ assertThat(response.getShardFailures().length, equalTo(getNumShards("my-index").numPrimaries));
+ assertThat(response.getShardFailures()[0].status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(response.getShardFailures()[0].reason(), containsString("Only _score desc is supported"));
+ }
+
+ @Test
+ public void testPercolateOnEmptyIndex() throws Exception {
+ // A sorted percolate request against an index with no registered queries
+ // must succeed with zero matches rather than fail.
+ client().admin().indices().prepareCreate("my-index").execute().actionGet();
+ ensureGreen();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ @Test
+ public void testPercolateNotEmptyIndexButNoRefresh() throws Exception {
+ // With a percolate query the non-realtime (lucene-backed) path is used,
+ // so a registered query that has not been refreshed into a searcher must
+ // not be visible: expect zero matches.
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(settingsBuilder().put("index.refresh_interval", -1))
+ .execute().actionGet();
+ ensureGreen();
+
+ // Indexed but never refreshed (auto refresh is disabled above).
+ client().prepareIndex("my-index", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).field("level", 1).endObject())
+ .execute().actionGet();
+
+ PercolateResponse response = client().preparePercolate().setIndices("my-index").setDocumentType("my-type")
+ .setSortByScore(true)
+ .setSize(2)
+ .setPercolateDoc(docBuilder().setDoc("field", "value"))
+ .setPercolateQuery(QueryBuilders.functionScoreQuery(matchAllQuery(), scriptFunction(new Script("doc['level'].value"))))
+ .execute().actionGet();
+ assertMatchCount(response, 0l);
+ }
+
+ @Test
+ public void testPercolatorWithHighlighting() throws Exception {
+ StringBuilder fieldMapping = new StringBuilder("type=string")
+ .append(",store=").append(randomBoolean());
+ if (randomBoolean()) {
+ fieldMapping.append(",term_vector=with_positions_offsets");
+ } else if (randomBoolean()) {
+ fieldMapping.append(",index_options=offsets");
+ }
+ assertAcked(prepareCreate("test").addMapping("type", "field1", fieldMapping.toString()));
+
+ logger.info("--> register a queries");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "brown fox")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "lazy dog")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "3")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "jumps")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "dog")).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "5")
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "fox")).endObject())
+ .execute().actionGet();
+
+ logger.info("--> Percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ PercolateResponse.Match[] matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ // Anything with percolate query isn't realtime
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ logger.info("--> Query percolate doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(matchAllQuery())
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Query percolate with score for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setScore(true)
+ .execute().actionGet();
+ assertNoFailures(response);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject()))
+ .setHighlightBuilder(new HighlightBuilder().field("field1").highlightQuery(QueryBuilders.matchQuery("field1", "jumps")))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+
+ // Highlighting an existing doc
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field1", "The quick brown fox jumps over the lazy dog").endObject())
+ .get();
+
+ logger.info("--> Top percolate for doc with field1=The quick brown fox jumps over the lazy dog");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setSize(5)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setHighlightBuilder(new HighlightBuilder().field("field1"))
+ .setPercolateQuery(functionScoreQuery(matchAllQuery()).add(new FactorBuilder().boostFactor(5.5f)))
+ .setSortByScore(true)
+ .execute().actionGet();
+ assertMatchCount(response, 5l);
+ assertThat(response.getMatches(), arrayWithSize(5));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2", "3", "4", "5"));
+
+ matches = response.getMatches();
+ Arrays.sort(matches, new Comparator<PercolateResponse.Match>() {
+ @Override
+ public int compare(PercolateResponse.Match a, PercolateResponse.Match b) {
+ return a.getId().compareTo(b.getId());
+ }
+ });
+
+ assertThat(matches[0].getScore(), equalTo(5.5f));
+ assertThat(matches[0].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick <em>brown</em> <em>fox</em> jumps over the lazy dog"));
+ assertThat(matches[1].getScore(), equalTo(5.5f));
+ assertThat(matches[1].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the <em>lazy</em> <em>dog</em>"));
+ assertThat(matches[2].getScore(), equalTo(5.5f));
+ assertThat(matches[2].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox <em>jumps</em> over the lazy dog"));
+ assertThat(matches[3].getScore(), equalTo(5.5f));
+ assertThat(matches[3].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown fox jumps over the lazy <em>dog</em>"));
+ assertThat(matches[4].getScore(), equalTo(5.5f));
+ assertThat(matches[4].getHighlightFields().get("field1").fragments()[0].string(), equalTo("The quick brown <em>fox</em> jumps over the lazy dog"));
+ }
+
+ public static String[] convertFromTextArray(PercolateResponse.Match[] matches, String index) {
+ if (matches.length == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ String[] strings = new String[matches.length];
+ for (int i = 0; i < matches.length; i++) {
+ assertEquals(index, matches[i].getIndex().string());
+ strings[i] = matches[i].getId().string();
+ }
+ return strings;
+ }
+
+ @Test
+ public void percolateNonMatchingConstantScoreQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("doc", "message", "type=string"));
+ ensureGreen();
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject()
+ .field("query", QueryBuilders.constantScoreQuery(QueryBuilders.andQuery(
+ QueryBuilders.queryStringQuery("root"),
+ QueryBuilders.termQuery("message", "tree"))))
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setSource(jsonBuilder().startObject()
+ .startObject("doc").field("message", "A new bonsai tree ").endObject()
+ .endObject())
+ .execute().actionGet();
+ assertNoFailures(percolate);
+ assertMatchCount(percolate, 0l);
+ }
+
+ @Test
+ public void testNestedPercolation() throws IOException {
+ initNestedIndexAndPercolation();
+ PercolateResponse response = client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getNotMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company").get();
+ assertEquals(response.getMatches().length, 0);
+ response = client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(getMatchingNestedDoc())).setIndices("nestedindex").setDocumentType("company").get();
+ assertEquals(response.getMatches().length, 1);
+ assertEquals(response.getMatches()[0].getId().string(), "Q");
+ }
+
+ @Test
+ public void makeSureNonNestedDocumentDoesNotTriggerAssertion() throws IOException {
+ initNestedIndexAndPercolation();
+ XContentBuilder doc = jsonBuilder();
+ doc.startObject();
+ doc.field("some_unnested_field", "value");
+ PercolateResponse response = client().preparePercolate().setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(doc)).setIndices("nestedindex").setDocumentType("company").get();
+ assertNoFailures(response);
+ }
+
+ @Test
+ public void testNestedPercolationOnExistingDoc() throws IOException {
+ initNestedIndexAndPercolation();
+ client().prepareIndex("nestedindex", "company", "notmatching").setSource(getNotMatchingNestedDoc()).get();
+ client().prepareIndex("nestedindex", "company", "matching").setSource(getMatchingNestedDoc()).get();
+ refresh();
+ PercolateResponse response = client().preparePercolate().setGetRequest(Requests.getRequest("nestedindex").type("company").id("notmatching")).setDocumentType("company").setIndices("nestedindex").get();
+ assertEquals(response.getMatches().length, 0);
+ response = client().preparePercolate().setGetRequest(Requests.getRequest("nestedindex").type("company").id("matching")).setDocumentType("company").setIndices("nestedindex").get();
+ assertEquals(response.getMatches().length, 1);
+ assertEquals(response.getMatches()[0].getId().string(), "Q");
+ }
+
+ @Test
+ public void testPercolationWithDynamicTemplates() throws Exception {
+ assertAcked(prepareCreate("idx").addMapping("type", jsonBuilder().startObject().startObject("type")
+ .field("dynamic", false)
+ .startObject("properties")
+ .startObject("custom")
+ .field("dynamic", true)
+ .field("type", "object")
+ .field("include_in_all", false)
+ .endObject()
+ .endObject()
+ .startArray("dynamic_templates")
+ .startObject()
+ .startObject("custom_fields")
+ .field("path_match", "custom.*")
+ .startObject("mapping")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().endObject()));
+ ensureGreen("idx");
+
+ try {
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("color:red")).endObject())
+ .get();
+ fail();
+ } catch (PercolatorException e) {
+
+ }
+
+ PercolateResponse percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), arrayWithSize(0));
+ waitForConcreteMappingsOnAll("idx", "type", "custom.color");
+
+ // The previous percolate request introduced the custom.color field, so now we register the query again
+ // and the field name `color` will be resolved to `custom.color` field in mapping via smart field mapping resolving.
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("custom.color:red")).endObject())
+ .get();
+ client().prepareIndex("idx", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", QueryBuilders.queryStringQuery("custom.color:blue")).field("type", "type").endObject())
+ .get();
+
+ // The second request will yield a match, since the query during the proper field during parsing.
+ percolateResponse = client().preparePercolate().setDocumentType("type")
+ .setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc(jsonBuilder().startObject().startObject("custom").field("color", "blue").endObject().endObject()))
+ .get();
+
+ assertMatchCount(percolateResponse, 1l);
+ assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("2"));
+ }
+
+ @Test
+ public void testUpdateMappingDynamicallyWhilePercolating() throws Exception {
+ createIndex("test");
+ ensureSearchable();
+
+ // percolation source
+ XContentBuilder percolateDocumentSource = XContentFactory.jsonBuilder().startObject().startObject("doc")
+ .field("field1", 1)
+ .field("field2", "value")
+ .endObject().endObject();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(percolateDocumentSource).execute().actionGet();
+ assertAllSuccessful(response);
+ assertMatchCount(response, 0l);
+ assertThat(response.getMatches(), arrayWithSize(0));
+
+ waitForMappingOnMaster("test", "type1");
+
+ GetMappingsResponse mappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ assertThat(mappingsResponse.getMappings().get("test"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1"), notNullValue());
+ assertThat(mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().isEmpty(), is(false));
+ Map<String, Object> properties = (Map<String, Object>) mappingsResponse.getMappings().get("test").get("type1").getSourceAsMap().get("properties");
+ assertThat(((Map<String, String>) properties.get("field1")).get("type"), equalTo("long"));
+ assertThat(((Map<String, String>) properties.get("field2")).get("type"), equalTo("string"));
+ }
+
+ @Test
+ public void testDontReportDeletedPercolatorDocs() throws Exception {
+ client().admin().indices().prepareCreate("test").execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .get();
+ refresh();
+
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field", "value").endObject()))
+ .setPercolateQuery(QueryBuilders.matchAllQuery())
+ .get();
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1"));
+ }
+
+ @Test
+ public void testAddQueryWithNoMapping() throws Exception {
+ client().admin().indices().prepareCreate("test").get();
+ ensureGreen();
+
+ try {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME)
+ .setSource(jsonBuilder().startObject().field("query", termQuery("field1", "value")).endObject())
+ .get();
+ fail();
+ } catch (PercolatorException e) {
+ assertThat(e.getRootCause(), instanceOf(QueryParsingException.class));
+ }
+
+ try {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME)
+ .setSource(jsonBuilder().startObject().field("query", rangeQuery("field1").from(0).to(1)).endObject())
+ .get();
+ fail();
+ } catch (PercolatorException e) {
+ assertThat(e.getRootCause(), instanceOf(QueryParsingException.class));
+ }
+ }
+
+ @Test
+ public void testPercolatorQueryWithNowRange() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("my-type", "timestamp", "type=date")
+ .get();
+ ensureGreen();
+
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "1")
+ .setSource(jsonBuilder().startObject().field("query", rangeQuery("timestamp").from("now-1d").to("now")).endObject())
+ .get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "2")
+ .setSource(jsonBuilder().startObject().field("query", constantScoreQuery(rangeQuery("timestamp").from("now-1d").to("now"))).endObject())
+ .get();
+
+ logger.info("--> Percolate doc with field1=b");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("my-type")
+ .setPercolateDoc(docBuilder().setDoc("timestamp", System.currentTimeMillis()))
+ .get();
+ assertMatchCount(response, 2l);
+ assertThat(response.getMatches(), arrayWithSize(2));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("1", "2"));
+ }
+
+ void initNestedIndexAndPercolation() throws IOException {
+ XContentBuilder mapping = XContentFactory.jsonBuilder();
+ mapping.startObject().startObject("properties").startObject("companyname").field("type", "string").endObject()
+ .startObject("employee").field("type", "nested").startObject("properties")
+ .startObject("name").field("type", "string").endObject().endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(client().admin().indices().prepareCreate("nestedindex").addMapping("company", mapping));
+ ensureGreen("nestedindex");
+
+ client().prepareIndex("nestedindex", PercolatorService.TYPE_NAME, "Q").setSource(jsonBuilder().startObject()
+ .field("query", QueryBuilders.nestedQuery("employee", QueryBuilders.matchQuery("employee.name", "virginia potts").operator(MatchQueryBuilder.Operator.AND)).scoreMode("avg")).endObject()).get();
+
+ refresh();
+
+ }
+
+ XContentBuilder getMatchingNestedDoc() throws IOException {
+ XContentBuilder doc = XContentFactory.jsonBuilder();
+ doc.startObject().field("companyname", "stark").startArray("employee")
+ .startObject().field("name", "virginia potts").endObject()
+ .startObject().field("name", "tony stark").endObject()
+ .endArray().endObject();
+ return doc;
+ }
+
+ XContentBuilder getNotMatchingNestedDoc() throws IOException {
+ XContentBuilder doc = XContentFactory.jsonBuilder();
+ doc.startObject().field("companyname", "notstark").startArray("employee")
+ .startObject().field("name", "virginia stark").endObject()
+ .startObject().field("name", "tony potts").endObject()
+ .endArray().endObject();
+ return doc;
+ }
+
+ // issue
+ @Test
+ public void testNestedDocFilter() throws IOException {
+ String mapping = "{\n" +
+ " \"doc\": {\n" +
+ " \"properties\": {\n" +
+ " \"name\": {\"type\":\"string\"},\n" +
+ " \"persons\": {\n" +
+ " \"type\": \"nested\"\n," +
+ " \"properties\" : {\"foo\" : {\"type\" : \"string\"}}" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }";
+ String doc = "{\n" +
+ " \"name\": \"obama\",\n" +
+ " \"persons\": [\n" +
+ " {\n" +
+ " \"foo\": \"bar\"\n" +
+ " }\n" +
+ " ]\n" +
+ " }";
+ String q1 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must\": {\n" +
+ " \"match\": {\n" +
+ " \"name\": \"obama\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ String q2 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must_not\": {\n" +
+ " \"match\": {\n" +
+ " \"name\": \"obama\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ String q3 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must\": {\n" +
+ " \"match\": {\n" +
+ " \"persons.foo\": \"bar\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ String q4 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must_not\": {\n" +
+ " \"match\": {\n" +
+ " \"persons.foo\": \"bar\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ String q5 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must\": {\n" +
+ " \"nested\": {\n" +
+ " \"path\": \"persons\",\n" +
+ " \"query\": {\n" +
+ " \"match\": {\n" +
+ " \"persons.foo\": \"bar\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ String q6 = "{\n" +
+ " \"query\": {\n" +
+ " \"bool\": {\n" +
+ " \"must_not\": {\n" +
+ " \"nested\": {\n" +
+ " \"path\": \"persons\",\n" +
+ " \"query\": {\n" +
+ " \"match\": {\n" +
+ " \"persons.foo\": \"bar\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ "\"text\":\"foo\""+
+ "}";
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", mapping));
+ ensureGreen("test");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q1).setId("q1").get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q2).setId("q2").get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q3).setId("q3").get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q4).setId("q4").get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q5).setId("q5").get();
+ client().prepareIndex("test", PercolatorService.TYPE_NAME).setSource(q6).setId("q6").get();
+ refresh();
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("doc")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .get();
+ assertMatchCount(response, 3l);
+ Set<String> expectedIds = new HashSet<>();
+ expectedIds.add("q1");
+ expectedIds.add("q4");
+ expectedIds.add("q5");
+ for (PercolateResponse.Match match : response.getMatches()) {
+ assertTrue(expectedIds.remove(match.getId().string()));
+ }
+ assertTrue(expectedIds.isEmpty());
+ response = client().preparePercolate().setOnlyCount(true)
+ .setIndices("test").setDocumentType("doc")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .get();
+ assertMatchCount(response, 3l);
+ response = client().preparePercolate().setScore(randomBoolean()).setSortByScore(randomBoolean()).setOnlyCount(randomBoolean()).setSize(10).setPercolateQuery(QueryBuilders.termQuery("text", "foo"))
+ .setIndices("test").setDocumentType("doc")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .get();
+ assertMatchCount(response, 3l);
+ }
+
+ @Test
+ public void testMapUnmappedFieldAsString() throws IOException{
+ // If index.percolator.map_unmapped_fields_as_string is set to true, unmapped field is mapped as an analyzed string.
+ Settings.Builder settings = Settings.settingsBuilder()
+ .put(indexSettings())
+ .put("index.percolator.map_unmapped_fields_as_string", true);
+ assertAcked(prepareCreate("test")
+ .setSettings(settings));
+ client().prepareIndex("test", PercolatorService.TYPE_NAME)
+ .setSource(jsonBuilder().startObject().field("query", matchQuery("field1", "value")).endObject()).get();
+ logger.info("--> Percolate doc with field1=value");
+ PercolateResponse response1 = client().preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject()))
+ .execute().actionGet();
+ assertMatchCount(response1, 1l);
+ assertThat(response1.getMatches(), arrayWithSize(1));
+
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
new file mode 100644
index 0000000000..7679ff6724
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/RecoveryPercolatorTests.java
@@ -0,0 +1,419 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder;
+import org.elasticsearch.action.percolate.MultiPercolateResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0)
+public class RecoveryPercolatorTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ return 1;
+ }
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator1() throws Exception {
+ internalCluster().startNode();
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=string").addMapping(PercolatorService.TYPE_NAME, "color", "type=string"));
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .get();
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .get();
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ internalCluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ensureYellow();
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .get();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ public void testRestartNodePercolator2() throws Exception {
+ internalCluster().startNode();
+ assertAcked(prepareCreate("test").addMapping("type1", "field1", "type=string").addMapping(PercolatorService.TYPE_NAME, "color", "type=string"));
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .get();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getCount(), equalTo(1l));
+
+ PercolateResponse percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .get();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+
+ internalCluster().rollingRestart();
+
+ logger.info("Running Cluster Health (wait for the shards to startup)");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ CountResponse countResponse = client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get();
+ assertHitCount(countResponse, 1l);
+
+ DeleteIndexResponse actionGet = client().admin().indices().prepareDelete("test").get();
+ assertThat(actionGet.isAcknowledged(), equalTo(true));
+ client().admin().indices().prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", 1)).get();
+ clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getCount(), equalTo(0l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .get();
+ assertMatchCount(percolate, 0l);
+ assertThat(percolate.getMatches(), emptyArray());
+
+ logger.info("--> register a query");
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, "kuku")
+ .setSource(jsonBuilder().startObject()
+ .field("color", "blue")
+ .field("query", termQuery("field1", "value1"))
+ .endObject())
+ .setRefresh(true)
+ .get();
+
+ assertThat(client().prepareCount().setTypes(PercolatorService.TYPE_NAME).setQuery(matchAllQuery()).get().getCount(), equalTo(1l));
+
+ percolate = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc")
+ .field("field1", "value1")
+ .endObject().endObject())
+ .get();
+ assertMatchCount(percolate, 1l);
+ assertThat(percolate.getMatches(), arrayWithSize(1));
+ }
+
+ @Test
+ @Slow
+ public void testLoadingPercolateQueriesDuringCloseAndOpen() throws Exception {
+ internalCluster().startNode();
+ internalCluster().startNode();
+
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)));
+ ensureGreen();
+
+ logger.info("--> Add dummy docs");
+ client().prepareIndex("test", "type1", "1").setSource("field1", 0).get();
+ client().prepareIndex("test", "type2", "1").setSource("field1", "0").get();
+ waitForConcreteMappingsOnAll("test", "type1", "field1");
+
+ logger.info("--> register a queries");
+ for (int i = 1; i <= 100; i++) {
+ client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject()
+ .field("query", rangeQuery("field1").from(0).to(i))
+ // The type must be set now, because two fields with the same name exist in different types.
+ // Setting the type to `type1`, makes sure that the range query gets parsed to a Lucene NumericRangeQuery.
+ .field("type", "type1")
+ .endObject())
+ .get();
+ }
+ waitForConcreteMappingsOnAll("test", PercolatorService.TYPE_NAME);
+
+ logger.info("--> Percolate doc with field1=95");
+ PercolateResponse response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 95).endObject().endObject())
+ .get();
+ assertMatchCount(response, 6l);
+ assertThat(response.getMatches(), arrayWithSize(6));
+ assertThat(convertFromTextArray(response.getMatches(), "test"), arrayContainingInAnyOrder("95", "96", "97", "98", "99", "100"));
+
+ logger.info("--> Close and open index to trigger percolate queries loading...");
+ assertAcked(client().admin().indices().prepareClose("test"));
+ assertAcked(client().admin().indices().prepareOpen("test"));
+ ensureGreen();
+
+ logger.info("--> Percolate doc with field1=100");
+ response = client().preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder().startObject().startObject("doc").field("field1", 100).endObject().endObject()).get();
+
+ assertMatchCount(response, 1l);
+ assertThat(response.getMatches(), arrayWithSize(1));
+ assertThat(response.getMatches()[0].getId().string(), equalTo("100"));
+ }
+
+ @Test
+ @Slow
+ public void testSinglePercolator_recovery() throws Exception {
+ percolatorRecovery(false);
+ }
+
+ @Test
+ @Slow
+ public void testMultiPercolator_recovery() throws Exception {
+ percolatorRecovery(true);
+ }
+
+    // 3 nodes, 2 primaries + 2 replicas per primary, so each node should have a copy of the data.
+ // We only start and stop nodes 2 and 3, so all requests should succeed and never be partial.
+ private void percolatorRecovery(final boolean multiPercolate) throws Exception {
+ internalCluster().startNode(settingsBuilder().put("node.stay", true));
+ internalCluster().startNode(settingsBuilder().put("node.stay", false));
+ internalCluster().startNode(settingsBuilder().put("node.stay", false));
+ ensureGreen();
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 2)
+ )
+ .get();
+ ensureGreen();
+
+ final Client client = internalCluster().client(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return input.getAsBoolean("node.stay", true);
+ }
+ });
+ final int numQueries = randomIntBetween(50, 100);
+ logger.info("--> register a queries");
+ for (int i = 0; i < numQueries; i++) {
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .get();
+ }
+
+ client.prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("field", "a"))
+ .get();
+
+ final AtomicBoolean run = new AtomicBoolean(true);
+ final CountDownLatch done = new CountDownLatch(1);
+ final AtomicReference<Throwable> error = new AtomicReference<>();
+ Runnable r = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ XContentBuilder doc = jsonBuilder().startObject().field("field", "a").endObject();
+ while (run.get()) {
+ NodesInfoResponse nodesInfoResponse = client.admin().cluster().prepareNodesInfo()
+ .get();
+ String node2Id = null;
+ String node3Id = null;
+ for (NodeInfo nodeInfo : nodesInfoResponse) {
+ if ("node2".equals(nodeInfo.getNode().getName())) {
+ node2Id = nodeInfo.getNode().id();
+ } else if ("node3".equals(nodeInfo.getNode().getName())) {
+ node3Id = nodeInfo.getNode().id();
+ }
+ }
+
+ String preference;
+ if (node2Id == null && node3Id == null) {
+ preference = "_local";
+ } else if (node2Id == null || node3Id == null) {
+ if (node2Id != null) {
+ preference = "_prefer_node:" + node2Id;
+ } else {
+ preference = "_prefer_node:" + node3Id;
+ }
+ } else {
+ preference = "_prefer_node:" + (randomBoolean() ? node2Id : node3Id);
+ }
+
+ if (multiPercolate) {
+ MultiPercolateRequestBuilder builder = client
+ .prepareMultiPercolate();
+ int numPercolateRequest = randomIntBetween(50, 100);
+
+ for (int i = 0; i < numPercolateRequest; i++) {
+ if (randomBoolean()) {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ );
+ } else {
+ builder.add(
+ client.preparePercolate()
+ .setPreference(preference)
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc)));
+ }
+ }
+
+ MultiPercolateResponse response = builder.get();
+ assertThat(response.items().length, equalTo(numPercolateRequest));
+ for (MultiPercolateResponse.Item item : response) {
+ assertThat(item.isFailure(), equalTo(false));
+ assertNoFailures(item.getResponse());
+ assertThat(item.getResponse().getSuccessfulShards(), equalTo(item.getResponse().getTotalShards()));
+ assertThat(item.getResponse().getCount(), equalTo((long) numQueries));
+ assertThat(item.getResponse().getMatches().length, equalTo(numQueries));
+ }
+ } else {
+ PercolateResponse response;
+ if (randomBoolean()) {
+ response = client.preparePercolate()
+ .setIndices("test").setDocumentType("type")
+ .setPercolateDoc(docBuilder().setDoc(doc))
+ .setPreference(preference)
+ .get();
+ } else {
+ response = client.preparePercolate()
+ .setGetRequest(Requests.getRequest("test").type("type").id("1"))
+ .setIndices("test").setDocumentType("type")
+ .setPreference(preference)
+ .get();
+ }
+ assertNoFailures(response);
+ assertThat(response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ assertThat(response.getCount(), equalTo((long) numQueries));
+ assertThat(response.getMatches().length, equalTo(numQueries));
+ }
+ }
+ } catch (Throwable t) {
+ logger.info("Error in percolate thread...", t);
+ run.set(false);
+ error.set(t);
+ } finally {
+ done.countDown();
+ }
+ }
+ };
+ new Thread(r).start();
+
+ Predicate<Settings> nodePredicate = new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings input) {
+ return !input.getAsBoolean("node.stay", false);
+ }
+ };
+ try {
+ // 1 index, 2 primaries, 2 replicas per primary
+ for (int i = 0; i < 4; i++) {
+ internalCluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .get();
+ assertThat(error.get(), nullValue());
+ internalCluster().stopRandomNode(nodePredicate);
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(2) // 1 node, so 2 shards (2 primaries, 0 replicas)
+ .get();
+ assertThat(error.get(), nullValue());
+ internalCluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+ .setWaitForYellowStatus()
+ .setWaitForActiveShards(4) // 2 nodes, so 4 shards (2 primaries, 2 replicas)
+ .get();
+ assertThat(error.get(), nullValue());
+ internalCluster().startNode();
+ client.admin().cluster().prepareHealth("test")
+ .setWaitForEvents(Priority.LANGUID)
+ .setTimeout(TimeValue.timeValueMinutes(2))
+                    .setWaitForGreenStatus() // We confirm the shard settings, so green instead of yellow
+ .setWaitForActiveShards(6) // 3 nodes, so 6 shards (2 primaries, 4 replicas)
+ .get();
+ assertThat(error.get(), nullValue());
+ }
+ } finally {
+ run.set(false);
+ }
+ done.await();
+ assertThat(error.get(), nullValue());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
new file mode 100644
index 0000000000..dff850d9bf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/percolator/TTLPercolatorTests.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import com.google.common.base.Predicate;
+
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.AlreadyExpiredException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.percolator.PercolatorTests.convertFromTextArray;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class TTLPercolatorTests extends ElasticsearchIntegrationTest {
+
+ private static final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected void beforeIndexDeletion() {
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL, TimeUnit.MILLISECONDS)
+ .build();
+ }
+
+ @Test
+ public void testPercolatingWithTimeToLive() throws Exception {
+ final Client client = client();
+ ensureGreen();
+
+ String percolatorMapping = XContentFactory.jsonBuilder().startObject().startObject(PercolatorService.TYPE_NAME)
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .startObject("_timestamp").field("enabled", true).endObject()
+ .startObject("properties").startObject("field1").field("type", "string").endObject().endObject()
+ .endObject().endObject().string();
+
+ client.admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 2))
+ .addMapping(PercolatorService.TYPE_NAME, percolatorMapping)
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+
+ final NumShards test = getNumShards("test");
+
+ long ttl = 1500;
+ long now = System.currentTimeMillis();
+ client.prepareIndex("test", PercolatorService.TYPE_NAME, "kuku").setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setRefresh(true).setTTL(ttl).execute().actionGet();
+
+ IndicesStatsResponse response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ assertThat(response.getIndices().get("test").getTotal().getIndexing().getTotal().getIndexCount(), equalTo((long)test.dataCopies));
+
+ PercolateResponse percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertNoFailures(percolateResponse);
+ if (percolateResponse.getMatches().length == 0) {
+            // OK, ttl + purgeInterval has passed (slow machine or many other tests were running at the same time)
+ GetResponse getResponse = client.prepareGet("test", PercolatorService.TYPE_NAME, "kuku").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ response = client.admin().indices().prepareStats("test")
+ .clear().setIndexing(true)
+ .execute().actionGet();
+ long currentDeleteCount = response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount();
+ assertThat(currentDeleteCount, equalTo((long)test.dataCopies));
+ return;
+ }
+
+ assertThat(convertFromTextArray(percolateResponse.getMatches(), "test"), arrayContaining("kuku"));
+ long timeSpent = System.currentTimeMillis() - now;
+ long waitTime = ttl + PURGE_INTERVAL - timeSpent;
+ if (waitTime >= 0) {
+ Thread.sleep(waitTime); // Doesn't make sense to check the deleteCount before ttl has expired
+ }
+
+ // See comment in SimpleTTLTests
+ logger.info("Checking if the ttl purger has run");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ IndicesStatsResponse indicesStatsResponse = client.admin().indices().prepareStats("test").clear().setIndexing(true).get();
+ // TTL deletes one doc, but it is indexed in the primary shard and replica shards
+ return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() == test.dataCopies;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+
+ percolateResponse = client.preparePercolate()
+ .setIndices("test").setDocumentType("type1")
+ .setSource(jsonBuilder()
+ .startObject()
+ .startObject("doc")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ ).execute().actionGet();
+ assertMatchCount(percolateResponse, 0l);
+ assertThat(percolateResponse.getMatches(), emptyArray());
+ }
+
+
+ @Test
+ public void testEnsureTTLDoesNotCreateIndex() throws IOException, InterruptedException {
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 60, TimeUnit.SECONDS) // 60 sec
+ .build()).get();
+
+ String typeMapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", 1))
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+ ensureGreen();
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder()
+ .put("indices.ttl.interval", 1, TimeUnit.SECONDS)
+ .build()).get();
+
+ for (int i = 0; i < 100; i++) {
+ logger.debug("index doc {} ", i);
+ try {
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("term")
+ .field("field1", "value1")
+ .endObject()
+ .endObject()
+ .endObject()
+ ).setTTL(randomIntBetween(1, 500)).execute().actionGet();
+ } catch (MapperParsingException e) {
+ logger.info("failed indexing {}", i, e);
+                // If we are unlucky the TTL is so small that we see the expiry date is already in the past when
+                // we parse the doc; ignore those...
+ assertThat(e.getCause(), Matchers.instanceOf(AlreadyExpiredException.class));
+ }
+
+ }
+ refresh();
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
+ logger.debug("delete count [{}]", indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount());
+ // TTL deletes one doc, but it is indexed in the primary shard and replica shards
+ return indicesStatsResponse.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() != 0;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ internalCluster().wipeIndices("test");
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", typeMapping)
+ .execute().actionGet();
+
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleTests.java b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleTests.java
new file mode 100644
index 0000000000..832d04c5d0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 2)
+public class PluggableTransportModuleTests extends ElasticsearchIntegrationTest {
+
+ public static final AtomicInteger SENT_REQUEST_COUNTER = new AtomicInteger(0);
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", CountingSentRequestsPlugin.class.getName())
+ .build();
+ }
+
+ @Override
+ protected Settings transportClientSettings() {
+ return settingsBuilder()
+ .put("plugin.types", CountingSentRequestsPlugin.class.getName())
+ .put(super.transportClientSettings())
+ .build();
+ }
+
+ @Test
+ public void testThatPluginFunctionalityIsLoadedWithoutConfiguration() throws Exception {
+ for (Transport transport : internalCluster().getInstances(Transport.class)) {
+ assertThat(transport, instanceOf(CountingAssertingLocalTransport.class));
+ }
+
+ int countBeforeRequest = SENT_REQUEST_COUNTER.get();
+ internalCluster().clientNodeClient().admin().cluster().prepareHealth().get();
+ int countAfterRequest = SENT_REQUEST_COUNTER.get();
+ assertThat("Expected send request counter to be greather than zero", countAfterRequest, is(greaterThan(countBeforeRequest)));
+ }
+
+ public static class CountingSentRequestsPlugin extends AbstractPlugin {
+ @Override
+ public String name() {
+ return "counting-pipelines-plugin";
+ }
+
+ @Override
+ public String description() {
+ return "counting-pipelines-plugin";
+ }
+
+ public void onModule(TransportModule transportModule) {
+ transportModule.setTransport(CountingAssertingLocalTransport.class, this.name());
+ }
+ }
+
+ public static final class CountingAssertingLocalTransport extends AssertingLocalTransport {
+
+ @Inject
+ public CountingAssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version) {
+ super(settings, threadPool, version);
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ SENT_REQUEST_COUNTER.incrementAndGet();
+ super.sendRequest(node, requestId, action, request, options);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginLuceneCheckerTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginLuceneCheckerTests.java
new file mode 100644
index 0000000000..1a44b1192e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginLuceneCheckerTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.net.URISyntaxException;
+import java.util.Collections;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ *
+ */
+@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes=0, transportClientRatio = 0)
+public class PluginLuceneCheckerTests extends PluginTestCase {
+
+ /**
+ * We check that no Lucene version checking is done
+ * when we set `"plugins.check_lucene":false`
+ */
+ @Test
+ public void testDisableLuceneVersionCheckingPlugin() throws URISyntaxException {
+ String serverNodeId = startNodeWithPlugins(
+ settingsBuilder().put(PluginsService.PLUGINS_CHECK_LUCENE_KEY, false)
+ .put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
+ .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true).build(),
+ "/org/elasticsearch/plugins/lucene/");
+ logger.info("--> server {} started" + serverNodeId);
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
+ logger.info("--> full json answer, status " + response.toString());
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, serverNodeId,
+ Lists.newArrayList("old-lucene"), Lists.newArrayList("old"), Lists.newArrayList("1.0.0"), // JVM Plugin
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
+ }
+
+ /**
+ * We check that with an old plugin (built on an old Lucene version)
+ * plugin is not loaded
+ * We check that with a recent plugin (built on current Lucene version)
+ * plugin is loaded
+ * We check that with a too recent plugin (built on an unknown Lucene version)
+ * plugin is not loaded
+ */
+ @Test
+ public void testEnableLuceneVersionCheckingPlugin() throws URISyntaxException {
+ String serverNodeId = startNodeWithPlugins(
+ settingsBuilder().put(PluginsService.PLUGINS_CHECK_LUCENE_KEY, true)
+ .put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
+ .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true).build(),
+ "/org/elasticsearch/plugins/lucene/");
+ logger.info("--> server {} started" + serverNodeId);
+
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
+ logger.info("--> full json answer, status " + response.toString());
+
+ ElasticsearchAssertions.assertNodeContainsPlugins(response, serverNodeId,
+ Lists.newArrayList("current-lucene"), Lists.newArrayList("current"), Lists.newArrayList("2.0.0"), // JVM Plugin
+ Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
new file mode 100644
index 0000000000..5d9c3ba493
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
@@ -0,0 +1,526 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import com.google.common.base.Predicate;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.junit.annotations.Network;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.PosixFileAttributeView;
+import java.nio.file.attribute.PosixFileAttributes;
+import java.nio.file.attribute.PosixFilePermission;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.io.FileSystemUtilsTests.assertFileContent;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDirectoryExists;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.Matchers.arrayWithSize;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0)
+@LuceneTestCase.SuppressFileSystems("*") // TODO: clean up this test to allow extra files
+// TODO: jimfs is really broken here (throws wrong exception from detection method).
+// if its in your classpath, then do not use plugins!!!!!!
+public class PluginManagerTests extends ElasticsearchIntegrationTest {
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testDownloadAndExtract_NullName_ThrowsException() throws IOException {
+ pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).downloadAndExtract(null);
+ }
+
+ @Test
+ public void testLocalPluginInstallSingleFolder() throws Exception {
+ //When we have only a folder in top-level (no files either) we remove that folder while extracting
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ downloadAndExtract(pluginName, initialSettings, getPluginUrlForResource("plugin_single_folder.zip"));
+
+ internalCluster().startNode(initialSettings.v1());
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginInstallWithBinAndConfig() throws Exception {
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ Environment env = initialSettings.v2();
+ Path binDir = env.homeFile().resolve("bin");
+ if (!Files.exists(binDir)) {
+ Files.createDirectories(binDir);
+ }
+ Path pluginBinDir = binDir.resolve(pluginName);
+ Path configDir = env.configFile();
+ if (!Files.exists(configDir)) {
+ Files.createDirectories(configDir);
+ }
+ Path pluginConfigDir =configDir.resolve(pluginName);
+ try {
+
+ PluginManager pluginManager = pluginManager(getPluginUrlForResource("plugin_with_bin_and_config.zip"), initialSettings);
+
+ pluginManager.downloadAndExtract(pluginName);
+
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+
+ assertThat(plugins, arrayWithSize(1));
+ assertDirectoryExists(pluginBinDir);
+ assertDirectoryExists(pluginConfigDir);
+ Path toolFile = pluginBinDir.resolve("tool");
+ assertFileExists(toolFile);
+
+ // check that the file is marked executable, without actually checking that we can execute it.
+ PosixFileAttributeView view = Files.getFileAttributeView(toolFile, PosixFileAttributeView.class);
+ // the view might be null, on e.g. windows, there is nothing to check there!
+ if (view != null) {
+ PosixFileAttributes attributes = view.readAttributes();
+ assertTrue("unexpected permissions: " + attributes.permissions(),
+ attributes.permissions().contains(PosixFilePermission.OWNER_EXECUTE));
+ }
+ } finally {
+ // we need to clean up the copied dirs
+ IOUtils.rm(pluginBinDir, pluginConfigDir);
+ }
+ }
+
+ /**
+ * Test for #7890
+ */
+ @Test
+ public void testLocalPluginInstallWithBinAndConfigInAlreadyExistingConfigDir_7890() throws Exception {
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ Environment env = initialSettings.v2();
+
+ Path configDir = env.configFile();
+ if (!Files.exists(configDir)) {
+ Files.createDirectories(configDir);
+ }
+ Path pluginConfigDir = configDir.resolve(pluginName);
+
+ try {
+ PluginManager pluginManager = pluginManager(getPluginUrlForResource("plugin_with_config_v1.zip"), initialSettings);
+ pluginManager.downloadAndExtract(pluginName);
+
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, arrayWithSize(1));
+
+ /*
+ First time, our plugin contains:
+ - config/test.txt (version1)
+ */
+ assertFileContent(pluginConfigDir, "test.txt", "version1\n");
+
+ // We now remove the plugin
+ pluginManager.removePlugin(pluginName);
+ // We should still have test.txt
+ assertFileContent(pluginConfigDir, "test.txt", "version1\n");
+
+ // Installing a new plugin version
+ /*
+ Second time, our plugin contains:
+ - config/test.txt (version2)
+ - config/dir/testdir.txt (version1)
+ - config/dir/subdir/testsubdir.txt (version1)
+ */
+ pluginManager = pluginManager(getPluginUrlForResource("plugin_with_config_v2.zip"), initialSettings);
+ pluginManager.downloadAndExtract(pluginName);
+
+ assertFileContent(pluginConfigDir, "test.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "test.txt.new", "version2\n");
+ assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1\n");
+
+ // Removing
+ pluginManager.removePlugin(pluginName);
+ assertFileContent(pluginConfigDir, "test.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "test.txt.new", "version2\n");
+ assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1\n");
+
+ // Installing a new plugin version
+ /*
+ Third time, our plugin contains:
+ - config/test.txt (version3)
+ - config/test2.txt (version1)
+ - config/dir/testdir.txt (version2)
+ - config/dir/testdir2.txt (version1)
+ - config/dir/subdir/testsubdir.txt (version2)
+ */
+ pluginManager = pluginManager(getPluginUrlForResource("plugin_with_config_v3.zip"), initialSettings);
+ pluginManager.downloadAndExtract(pluginName);
+
+ assertFileContent(pluginConfigDir, "test.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "test2.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "test.txt.new", "version3\n");
+ assertFileContent(pluginConfigDir, "dir/testdir.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "dir/testdir.txt.new", "version2\n");
+ assertFileContent(pluginConfigDir, "dir/testdir2.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt", "version1\n");
+ assertFileContent(pluginConfigDir, "dir/subdir/testsubdir.txt.new", "version2\n");
+ } finally {
+ // we need to clean up the copied dirs
+ IOUtils.rm(pluginConfigDir);
+ }
+ }
+
+ // For #7152
+ @Test
+ public void testLocalPluginInstallWithBinOnly_7152() throws Exception {
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ Environment env = initialSettings.v2();
+ Path binDir = env.homeFile().resolve("bin");
+ if (!Files.exists(binDir)) {
+ Files.createDirectories(binDir);
+ }
+ Path pluginBinDir = binDir.resolve(pluginName);
+ try {
+ PluginManager pluginManager = pluginManager(getPluginUrlForResource("plugin_with_bin_only.zip"), initialSettings);
+ pluginManager.downloadAndExtract(pluginName);
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins.length, is(1));
+ assertDirectoryExists(pluginBinDir);
+ } finally {
+ // we need to clean up the copied dirs
+ IOUtils.rm(pluginBinDir);
+ }
+ }
+
+ @Test
+ public void testLocalPluginInstallSiteFolder() throws Exception {
+ //When we have only a folder in top-level (no files either) but it's called _site, we make it work
+ //we can either remove the folder while extracting and then re-add it manually or just leave it as it is
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ downloadAndExtract(pluginName, initialSettings, getPluginUrlForResource("plugin_folder_site.zip"));
+
+ internalCluster().startNode(initialSettings.v1());
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginWithoutFolders() throws Exception {
+ //When we don't have folders at all in the top-level, but only files, we don't modify anything
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ downloadAndExtract(pluginName, initialSettings, getPluginUrlForResource("plugin_without_folders.zip"));
+
+ internalCluster().startNode(initialSettings.v1());
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test
+ public void testLocalPluginFolderAndFile() throws Exception {
+ //When we have a single top-level folder but also files in the top-level, we don't modify anything
+ String pluginName = "plugin-test";
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ downloadAndExtract(pluginName, initialSettings, getPluginUrlForResource("plugin_folder_file.zip"));
+
+ internalCluster().startNode(initialSettings.v1());
+
+ assertPluginLoaded(pluginName);
+ assertPluginAvailable(pluginName);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testSitePluginWithSourceThrows() throws Exception {
+ String pluginName = "plugin-with-source";
+ downloadAndExtract(pluginName, buildInitialSettings(), getPluginUrlForResource("plugin_with_sourcefiles.zip"));
+ }
+
+ private PluginManager pluginManager(String pluginUrl) throws IOException {
+ return pluginManager(pluginUrl, buildInitialSettings());
+ }
+
+ private Tuple<Settings, Environment> buildInitialSettings() throws IOException {
+ Settings settings = Settings.settingsBuilder()
+ .put("discovery.zen.ping.multicast.enabled", false)
+ .put("http.enabled", true)
+ .put("path.home", createTempDir()).build();
+ return InternalSettingsPreparer.prepareSettings(settings, false);
+ }
+
+    /**
+     * Builds a plugin manager instance which waits only 30 seconds before
+     * raising an ElasticsearchTimeoutException.
+     */
+ private PluginManager pluginManager(String pluginUrl, Tuple<Settings, Environment> initialSettings) throws IOException {
+ if (!Files.exists(initialSettings.v2().pluginsFile())) {
+ Files.createDirectories(initialSettings.v2().pluginsFile());
+ }
+ return new PluginManager(initialSettings.v2(), pluginUrl, PluginManager.OutputMode.VERBOSE, TimeValue.timeValueSeconds(30));
+ }
+
+ private void downloadAndExtract(String pluginName, Tuple<Settings, Environment> initialSettings, String pluginUrl) throws IOException {
+ pluginManager(pluginUrl, initialSettings).downloadAndExtract(pluginName);
+ }
+
+ private void assertPluginLoaded(String pluginName) {
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).get();
+ assertThat(nodesInfoResponse.getNodes().length, equalTo(1));
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos(), notNullValue());
+ assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().size(), not(0));
+
+ boolean pluginFound = false;
+
+ for (PluginInfo pluginInfo : nodesInfoResponse.getNodes()[0].getPlugins().getInfos()) {
+ if (pluginInfo.getName().equals(pluginName)) {
+ pluginFound = true;
+ break;
+ }
+ }
+
+ assertThat(pluginFound, is(true));
+ }
+
+ private void assertPluginAvailable(String pluginName) throws InterruptedException, IOException {
+ final HttpRequestBuilder httpRequestBuilder = httpClient();
+
+ //checking that the http connector is working properly
+ // We will try it for some seconds as it could happen that the REST interface is not yet fully started
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object obj) {
+ try {
+ HttpResponse response = httpRequestBuilder.method("GET").path("/").execute();
+ if (response.getStatusCode() != RestStatus.OK.getStatus()) {
+ // We want to trace what's going on here before failing the test
+ logger.info("--> error caught [{}], headers [{}]", response.getStatusCode(), response.getHeaders());
+ logger.info("--> cluster state [{}]", internalCluster().clusterService().state());
+ return false;
+ }
+ return true;
+ } catch (IOException e) {
+ throw new ElasticsearchException("HTTP problem", e);
+ }
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+
+
+ //checking now that the plugin is available
+ HttpResponse response = httpClient().method("GET").path("/_plugin/" + pluginName + "/").execute();
+ assertThat(response, notNullValue());
+ assertThat(response.getReasonPhrase(), response.getStatusCode(), equalTo(RestStatus.OK.getStatus()));
+ }
+
+ @Test
+ public void testListInstalledEmpty() throws IOException {
+ Path[] plugins = pluginManager(null).getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ }
+
+ @Test(expected = IOException.class)
+ public void testInstallPluginNull() throws IOException {
+ pluginManager(null).downloadAndExtract("plugin-test");
+ }
+
+
+ @Test
+ public void testInstallPlugin() throws IOException {
+ PluginManager pluginManager = pluginManager(getPluginUrlForResource("plugin_with_classfile.zip"));
+
+ pluginManager.downloadAndExtract("plugin-classfile");
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+ }
+
+ @Test
+ public void testInstallSitePlugin() throws IOException {
+ Tuple<Settings, Environment> initialSettings = buildInitialSettings();
+ PluginManager pluginManager = pluginManager(getPluginUrlForResource("plugin_without_folders.zip"), initialSettings);
+
+ pluginManager.downloadAndExtract("plugin-site");
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We want to check that Plugin Manager moves content to _site
+ assertFileExists(initialSettings.v2().pluginsFile().resolve("plugin-site/_site"));
+ }
+
+
+ private void singlePluginInstallAndRemove(String pluginShortName, String pluginCoordinates) throws IOException {
+ logger.info("--> trying to download and install [{}]", pluginShortName);
+ PluginManager pluginManager = pluginManager(pluginCoordinates);
+ try {
+ pluginManager.downloadAndExtract(pluginShortName);
+ Path[] plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(1));
+
+ // We remove it
+ pluginManager.removePlugin(pluginShortName);
+ plugins = pluginManager.getListInstalledPlugins();
+ assertThat(plugins, notNullValue());
+ assertThat(plugins.length, is(0));
+ } catch (IOException e) {
+ logger.warn("--> IOException raised while downloading plugin [{}]. Skipping test.", e, pluginShortName);
+ } catch (ElasticsearchTimeoutException e) {
+ logger.warn("--> timeout exception raised while downloading plugin [{}]. Skipping test.", pluginShortName);
+ }
+ }
+
+    /**
+     * These tests are ignored by default as they require internet access.
+     * To activate the test, use -Dtests.network=true
+     * We test the regular form: username/reponame/version
+     * It should be found in the download.elasticsearch.org service
+     */
+ @Test
+ @Network
+ public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
+ assumeTrue("download.elastic.co is accessible", isDownloadServiceWorking("download.elastic.co", 80, "/elasticsearch/ci-test.txt"));
+ singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/2.4.0", null);
+ }
+
+    /**
+     * These tests are ignored by default as they require internet access.
+     * To activate the test, use -Dtests.network=true
+     * We test the regular form: groupId/artifactId/version
+     * It should be found in the Maven Central service
+     */
+ @Test
+ @Network
+ public void testInstallPluginWithMavenCentral() throws IOException {
+ assumeTrue("search.maven.org is accessible", isDownloadServiceWorking("search.maven.org", 80, "/"));
+ assumeTrue("repo1.maven.org is accessible", isDownloadServiceWorking("repo1.maven.org", 443, "/maven2/org/elasticsearch/elasticsearch-transport-thrift/2.4.0/elasticsearch-transport-thrift-2.4.0.pom"));
+ singlePluginInstallAndRemove("org.elasticsearch/elasticsearch-transport-thrift/2.4.0", null);
+ }
+
+    /**
+     * These tests are ignored by default as they require internet access.
+     * To activate the test, use -Dtests.network=true
+     * We test site plugins from GitHub: userName/repoName
+     * It should be found on GitHub
+     */
+ @Test
+ @Network
+ public void testInstallPluginWithGithub() throws IOException {
+ assumeTrue("github.com is accessible", isDownloadServiceWorking("github.com", 443, "/"));
+ singlePluginInstallAndRemove("elasticsearch/kibana", null);
+ }
+
+ private boolean isDownloadServiceWorking(String host, int port, String resource) {
+ try {
+ String protocol = port == 443 ? "https" : "http";
+ HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault()).protocol(protocol).host(host).port(port).path(resource).execute();
+ if (response.getStatusCode() != 200) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", host, resource);
+ return false;
+ }
+ return true;
+ } catch (Throwable t) {
+ logger.warn("[{}{}] download service is not working. Disabling current test.", host, resource);
+ }
+ return false;
+ }
+
+ @Test
+ public void testRemovePlugin() throws Exception {
+ // We want to remove plugin with plugin short name
+ singlePluginInstallAndRemove("plugintest", getPluginUrlForResource("plugin_without_folders.zip"));
+
+ // We want to remove plugin with groupid/artifactid/version form
+ singlePluginInstallAndRemove("groupid/plugintest/1.0.0", getPluginUrlForResource("plugin_without_folders.zip"));
+
+ // We want to remove plugin with groupid/artifactid form
+ singlePluginInstallAndRemove("groupid/plugintest", getPluginUrlForResource("plugin_without_folders.zip"));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testRemovePlugin_NullName_ThrowsException() throws IOException {
+ pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).removePlugin(null);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testRemovePluginWithURLForm() throws Exception {
+ PluginManager pluginManager = pluginManager(null);
+ pluginManager.removePlugin("file://whatever");
+ }
+
+ @Test
+ public void testForbiddenPluginName_ThrowsException() throws IOException {
+ runTestWithForbiddenName(null);
+ runTestWithForbiddenName("");
+ runTestWithForbiddenName("elasticsearch");
+ runTestWithForbiddenName("elasticsearch.bat");
+ runTestWithForbiddenName("elasticsearch.in.sh");
+ runTestWithForbiddenName("plugin");
+ runTestWithForbiddenName("plugin.bat");
+ runTestWithForbiddenName("service.bat");
+ runTestWithForbiddenName("ELASTICSEARCH");
+ runTestWithForbiddenName("ELASTICSEARCH.IN.SH");
+ }
+
+ private void runTestWithForbiddenName(String name) throws IOException {
+ try {
+ pluginManager(null).removePlugin(name);
+ fail("this plugin name [" + name +
+ "] should not be allowed");
+ } catch (IllegalArgumentException e) {
+ // We expect that error
+ }
+ }
+
+
+ /**
+ * Retrieve a URL string that represents the resource with the given {@code resourceName}.
+ * @param resourceName The resource name relative to {@link PluginManagerTests}.
+ * @return Never {@code null}.
+ * @throws NullPointerException if {@code resourceName} does not point to a valid resource.
+ */
+ private String getPluginUrlForResource(String resourceName) {
+ URI uri = URI.create(PluginManagerTests.class.getResource(resourceName).toString());
+
+ return "file://" + uri.getPath();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java
new file mode 100644
index 0000000000..3dfb7a04c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginManagerUnitTests.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.io.Files;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class PluginManagerUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatConfigDirectoryCanBeOutsideOfElasticsearchHomeDirectory() throws IOException {
+ String pluginName = randomAsciiOfLength(10);
+ Path homeFolder = createTempDir();
+ Path genericConfigFolder = createTempDir();
+
+ Settings settings = settingsBuilder()
+ .put("path.conf", genericConfigFolder)
+ .put("path.home", homeFolder)
+ .build();
+ Environment environment = new Environment(settings);
+
+ PluginManager.PluginHandle pluginHandle = new PluginManager.PluginHandle(pluginName, "version", "user", "repo");
+ String configDirPath = Files.simplifyPath(pluginHandle.configDir(environment).normalize().toString());
+ String expectedDirPath = Files.simplifyPath(genericConfigFolder.resolve(pluginName).normalize().toString());
+
+ assertThat(configDirPath, is(expectedDirPath));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginServiceTests.java
new file mode 100644
index 0000000000..18f826033c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginServiceTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.loading.classpath.InClassPathPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.net.URISyntaxException;
+import java.nio.file.Path;
+import java.nio.file.PathMatcher;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.endsWith;
+import static org.hamcrest.Matchers.instanceOf;
+
+@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes=0, numClientNodes = 1, transportClientRatio = 0)
+public class PluginServiceTests extends PluginTestCase {
+
+ @Test
+ public void testPluginLoadingFromClassName() throws URISyntaxException {
+ Settings settings = settingsBuilder()
+ // Defines a plugin in classpath
+ .put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
+ .put(PluginsService.ES_PLUGIN_PROPERTIES_FILE_KEY, "es-plugin-test.properties")
+ // Defines a plugin in settings
+ .put("plugin.types", InSettingsPlugin.class.getName())
+ .build();
+
+ startNodeWithPlugins(settings, "/org/elasticsearch/plugins/loading/");
+
+ Plugin plugin = getPlugin("in-settings-plugin");
+ assertNotNull("InSettingsPlugin (defined below in this class) must be loaded", plugin);
+ assertThat(plugin, instanceOf(InSettingsPlugin.class));
+
+ plugin = getPlugin("in-classpath-plugin");
+ assertNotNull("InClassPathPlugin (defined in package ) must be loaded", plugin);
+ assertThat(plugin, instanceOf(InClassPathPlugin.class));
+
+ plugin = getPlugin("in-jar-plugin");
+ assertNotNull("InJarPlugin (packaged as a JAR file in a plugins directory) must be loaded", plugin);
+ assertThat(plugin.getClass().getName(), endsWith("InJarPlugin"));
+
+ plugin = getPlugin("in-zip-plugin");
+ assertNotNull("InZipPlugin (packaged as a Zipped file in a plugins directory) must be loaded", plugin);
+ assertThat(plugin.getClass().getName(), endsWith("InZipPlugin"));
+ }
+
+ @Test
+ public void testHasLibExtension() {
+ PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(PluginsService.PLUGIN_LIB_PATTERN);
+
+ Path p = PathUtils.get("path", "to", "plugin.jar");
+ assertTrue(matcher.matches(p));
+
+ p = PathUtils.get("path", "to", "plugin.zip");
+ assertTrue(matcher.matches(p));
+
+ p = PathUtils.get("path", "to", "plugin.tar.gz");
+ assertFalse(matcher.matches(p));
+
+ p = PathUtils.get("path", "to", "plugin");
+ assertFalse(matcher.matches(p));
+ }
+
+ private Plugin getPlugin(String pluginName) {
+ assertNotNull("cannot check plugin existence with a null plugin's name", pluginName);
+ PluginsService pluginsService = internalCluster().getInstance(PluginsService.class);
+ ImmutableList<Tuple<PluginInfo, Plugin>> plugins = pluginsService.plugins();
+
+ if ((plugins != null) && (!plugins.isEmpty())) {
+ for (Tuple<PluginInfo, Plugin> plugin:plugins) {
+ if (pluginName.equals(plugin.v1().getName())) {
+ return plugin.v2();
+ }
+ }
+ }
+ return null;
+ }
+
+ static class InSettingsPlugin extends AbstractPlugin {
+
+ private final Settings settings;
+
+ public InSettingsPlugin(Settings settings) {
+ this.settings = settings;
+ }
+
+ @Override
+ public String name() {
+ return "in-settings-plugin";
+ }
+
+ @Override
+ public String description() {
+ return "A plugin defined in settings";
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java b/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java
new file mode 100644
index 0000000000..dd3cb164aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/PluginTestCase.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Ignore;
+
+import java.net.URISyntaxException;
+import java.net.URL;
+
+import static org.elasticsearch.client.Requests.clusterHealthRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * Base class that lets you start a node with plugins.
+ */
+@Ignore
+public abstract class PluginTestCase extends ElasticsearchIntegrationTest {
+
+ public String startNodeWithPlugins(Settings nodeSettings, String pluginDir, String ... pluginClassNames) throws URISyntaxException {
+ URL resource = getClass().getResource(pluginDir);
+ Settings.Builder settings = settingsBuilder();
+ settings.put(nodeSettings);
+ if (resource != null) {
+ settings.put("path.plugins", getDataPath(pluginDir).toAbsolutePath());
+ }
+
+ if (pluginClassNames.length > 0) {
+ settings.putArray("plugin.types", pluginClassNames);
+ }
+
+ String nodeName = internalCluster().startNode(settings);
+
+ // We wait for a Green status
+ client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus()).actionGet();
+
+ return internalCluster().getInstance(ClusterService.class, nodeName).state().nodes().localNodeId();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginTests.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginTests.java
new file mode 100644
index 0000000000..96673555e0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginTests.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.responseheader.TestResponseHeaderPlugin;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Test a rest action that sets special response headers
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class ResponseHeaderPluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", TestResponseHeaderPlugin.class.getName())
+ .put("force.http.enabled", true)
+ .build();
+ }
+
+ @Test
+ public void testThatSettingHeadersWorks() throws Exception {
+ ensureGreen();
+ HttpResponse response = httpClient().method("GET").path("/_protected").execute();
+ assertThat(response, hasStatus(UNAUTHORIZED));
+ assertThat(response.getHeaders().get("Secret"), equalTo("required"));
+
+ HttpResponse authResponse = httpClient().method("GET").path("/_protected").addHeader("Secret", "password").execute();
+ assertThat(authResponse, hasStatus(OK));
+ assertThat(authResponse.getHeaders().get("Secret"), equalTo("granted"));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/plugins/SitePluginRelativePathConfigTests.java b/core/src/test/java/org/elasticsearch/plugins/SitePluginRelativePathConfigTests.java
new file mode 100644
index 0000000000..d350e43c0f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/SitePluginRelativePathConfigTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import java.nio.file.Path;
+
+import static org.apache.lucene.util.Constants.WINDOWS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus;
+
+@ClusterScope(scope = SUITE, numDataNodes = 1)
+public class SitePluginRelativePathConfigTests extends ElasticsearchIntegrationTest {
+
+ private final Path root = PathUtils.get(".").toAbsolutePath().getRoot();
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ String cwdToRoot = getRelativePath(PathUtils.get(".").toAbsolutePath());
+ Path pluginDir = PathUtils.get(cwdToRoot, relativizeToRootIfNecessary(getDataPath("/org/elasticsearch/plugins")).toString());
+
+ Path tempDir = createTempDir();
+ boolean useRelativeInMiddleOfPath = randomBoolean();
+ if (useRelativeInMiddleOfPath) {
+ pluginDir = PathUtils.get(tempDir.toString(), getRelativePath(tempDir), pluginDir.toString());
+ }
+
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.plugins", pluginDir)
+ .put("force.http.enabled", true)
+ .build();
+ }
+
+ @Test
+ public void testThatRelativePathsDontAffectPlugins() throws Exception {
+ HttpResponse response = httpClient().method("GET").path("/_plugin/dummy/").execute();
+ assertThat(response, hasStatus(OK));
+ }
+
+ private Path relativizeToRootIfNecessary(Path path) {
+ if (WINDOWS) {
+ return root.relativize(path);
+ }
+ return path;
+ }
+
+ private String getRelativePath(Path path) {
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < path.getNameCount(); i++) {
+ sb.append("..");
+ sb.append(path.getFileSystem().getSeparator());
+ }
+
+ return sb.toString();
+ }
+
+ public HttpRequestBuilder httpClient() {
+ CloseableHttpClient httpClient = HttpClients.createDefault();
+ return new HttpRequestBuilder(httpClient).httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/SitePluginTests.java b/core/src/test/java/org/elasticsearch/plugins/SitePluginTests.java
new file mode 100644
index 0000000000..35ae9844fc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/SitePluginTests.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins;
+
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.rest.RestStatus.*;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus;
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ * We want to test site plugins
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class SitePluginTests extends ElasticsearchIntegrationTest {
+
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ Path pluginDir = getDataPath("/org/elasticsearch/plugins");
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.plugins", pluginDir.toAbsolutePath())
+ .put("force.http.enabled", true)
+ .build();
+ }
+
+ public HttpRequestBuilder httpClient() {
+ RequestConfig.Builder builder = RequestConfig.custom().setRedirectsEnabled(false);
+ CloseableHttpClient httpClient = HttpClients.custom().setDefaultRequestConfig(builder.build()).build();
+ return new HttpRequestBuilder(httpClient).httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class));
+ }
+
+ @Test
+ public void testRedirectSitePlugin() throws Exception {
+ // We use an HTTP Client to test redirection
+ HttpResponse response = httpClient().method("GET").path("/_plugin/dummy").execute();
+ assertThat(response, hasStatus(MOVED_PERMANENTLY));
+ assertThat(response.getBody(), containsString("/_plugin/dummy/"));
+
+ // We test the real URL
+ response = httpClient().method("GET").path("/_plugin/dummy/").execute();
+ assertThat(response, hasStatus(OK));
+ assertThat(response.getBody(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test direct access to an existing file (index.html)
+ */
+ @Test
+ public void testAnyPage() throws Exception {
+ HttpResponse response = httpClient().path("/_plugin/dummy/index.html").execute();
+ assertThat(response, hasStatus(OK));
+ assertThat(response.getBody(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test normalizing of path
+ */
+ @Test
+ public void testThatPathsAreNormalized() throws Exception {
+ // more info: https://www.owasp.org/index.php/Path_Traversal
+ List<String> notFoundUris = new ArrayList<>();
+ notFoundUris.add("/_plugin/dummy/../../../../../log4j.properties");
+ notFoundUris.add("/_plugin/dummy/../../../../../%00log4j.properties");
+ notFoundUris.add("/_plugin/dummy/..%c0%af..%c0%af..%c0%af..%c0%af..%c0%aflog4j.properties");
+ notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html");
+ notFoundUris.add("/_plugin/dummy/%2e%2e/%2e%2e/%2e%2e/%2e%2e/index.html");
+ notFoundUris.add("/_plugin/dummy/%2e%2e%2f%2e%2e%2f%2e%2e%2f%2e%2e%2findex.html");
+ notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html");
+ notFoundUris.add("/_plugin/dummy/..\\..\\..\\..\\..\\log4j.properties");
+
+ for (String uri : notFoundUris) {
+ HttpResponse response = httpClient().path(uri).execute();
+ String message = String.format(Locale.ROOT, "URI [%s] expected to be not found", uri);
+ assertThat(message, response, hasStatus(NOT_FOUND));
+ }
+
+ // using relative path inside of the plugin should work
+ HttpResponse response = httpClient().path("/_plugin/dummy/dir1/../dir1/../index.html").execute();
+ assertThat(response, hasStatus(OK));
+ assertThat(response.getBody(), containsString("<title>Dummy Site Plugin</title>"));
+ }
+
+ /**
+ * Test case for #4845: https://github.com/elasticsearch/elasticsearch/issues/4845
+ * Serving _site plugins do not pick up on index.html for sub directories
+ */
+ @Test
+ public void testWelcomePageInSubDirs() throws Exception {
+ HttpResponse response = httpClient().path("/_plugin/subdir/dir/").execute();
+ assertThat(response, hasStatus(OK));
+ assertThat(response.getBody(), containsString("<title>Dummy Site Plugin (subdir)</title>"));
+
+ response = httpClient().path("/_plugin/subdir/dir_without_index/").execute();
+ assertThat(response, hasStatus(FORBIDDEN));
+
+ response = httpClient().path("/_plugin/subdir/dir_without_index/page.html").execute();
+ assertThat(response, hasStatus(OK));
+ assertThat(response.getBody(), containsString("<title>Dummy Site Plugin (page)</title>"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java b/core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java
new file mode 100644
index 0000000000..cf4959e62d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/loading/classpath/InClassPathPlugin.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins.loading.classpath;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class InClassPathPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "in-classpath-plugin";
+ }
+
+ @Override
+ public String description() {
+ return "A plugin defined in class path";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties b/core/src/test/java/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties
new file mode 100644
index 0000000000..f57bea58cf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/loading/classpath/es-plugin-test.properties
@@ -0,0 +1,19 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+plugin=org.elasticsearch.plugins.loading.classpath.InClassPathPlugin \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/current/CurrentLucenePlugin.java b/core/src/test/java/org/elasticsearch/plugins/lucene/current/CurrentLucenePlugin.java
new file mode 100644
index 0000000000..0b2aeb1f4b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/current/CurrentLucenePlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins.lucene.current;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class CurrentLucenePlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "current-lucene";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "current";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/current/es-plugin-test.properties b/core/src/test/java/org/elasticsearch/plugins/lucene/current/es-plugin-test.properties
new file mode 100644
index 0000000000..eca345b0a2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/current/es-plugin-test.properties
@@ -0,0 +1,21 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+plugin=org.elasticsearch.plugins.lucene.current.CurrentLucenePlugin
+version=2.0.0
+lucene=${lucene.version}
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/newer/NewerLucenePlugin.java b/core/src/test/java/org/elasticsearch/plugins/lucene/newer/NewerLucenePlugin.java
new file mode 100644
index 0000000000..a948eece80
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/newer/NewerLucenePlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins.lucene.newer;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class NewerLucenePlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "newer-lucene";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "newer";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/newer/es-plugin-test.properties b/core/src/test/java/org/elasticsearch/plugins/lucene/newer/es-plugin-test.properties
new file mode 100644
index 0000000000..4ddbca8130
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/newer/es-plugin-test.properties
@@ -0,0 +1,21 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+plugin=org.elasticsearch.plugins.lucene.newer.NewerLucenePlugin
+version=3.0.0
+lucene=99.0.0
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/old/OldLucenePlugin.java b/core/src/test/java/org/elasticsearch/plugins/lucene/old/OldLucenePlugin.java
new file mode 100644
index 0000000000..8f85418c0b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/old/OldLucenePlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins.lucene.old;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+public class OldLucenePlugin extends AbstractPlugin {
+ /**
+ * The name of the plugin.
+ */
+ @Override
+ public String name() {
+ return "old-lucene";
+ }
+
+ /**
+ * The description of the plugin.
+ */
+ @Override
+ public String description() {
+ return "old";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/lucene/old/es-plugin-test.properties b/core/src/test/java/org/elasticsearch/plugins/lucene/old/es-plugin-test.properties
new file mode 100644
index 0000000000..c99f9b1dc6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/lucene/old/es-plugin-test.properties
@@ -0,0 +1,21 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+plugin=org.elasticsearch.plugins.lucene.old.OldLucenePlugin
+version=1.0.0
+lucene=3.0.0
diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java
new file mode 100644
index 0000000000..48b00b2375
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins.responseheader;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.rest.RestModule;
+
+public class TestResponseHeaderPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-header";
+ }
+
+ @Override
+ public String description() {
+ return "test-plugin-custom-header-desc";
+ }
+
+ public void onModule(RestModule restModule) {
+ restModule.addRestAction(TestResponseHeaderRestAction.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java
new file mode 100644
index 0000000000..83fa3e2129
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderRestAction.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugins.responseheader;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.*;
+
+public class TestResponseHeaderRestAction extends BaseRestHandler {
+
+ @Inject
+ public TestResponseHeaderRestAction(Settings settings, RestController controller, Client client) {
+ super(settings, controller, client);
+ controller.registerHandler(RestRequest.Method.GET, "/_protected", this);
+ }
+
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel, Client client) {
+ if ("password".equals(request.header("Secret"))) {
+ RestResponse response = new BytesRestResponse(RestStatus.OK, "Access granted");
+ response.addHeader("Secret", "granted");
+ channel.sendResponse(response);
+ } else {
+ RestResponse response = new BytesRestResponse(RestStatus.UNAUTHORIZED, "Access denied");
+ response.addHeader("Secret", "required");
+ channel.sendResponse(response);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
new file mode 100644
index 0000000000..dd859b46fb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/FullRollingRestartTests.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.zen.ZenDiscovery;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0)
+public class FullRollingRestartTests extends ElasticsearchIntegrationTest {
+
+ protected void assertTimeout(ClusterHealthRequestBuilder requestBuilder) {
+ ClusterHealthResponse clusterHealth = requestBuilder.get();
+ if (clusterHealth.isTimedOut()) {
+ logger.info("cluster health request timed out:\n{}", clusterHealth);
+ fail("cluster health request timed out");
+ }
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ @Slow
+ public void testFullRollingRestart() throws Exception {
+ Settings settings = Settings.builder().put(ZenDiscovery.SETTING_JOIN_TIMEOUT, "30s").build();
+ internalCluster().startNode(settings);
+ createIndex("test");
+
+ final String healthTimeout = "1m";
+
+ for (int i = 0; i < 1000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
+ flush();
+ for (int i = 1000; i < 2000; i++) {
+ client().prepareIndex("test", "type1", Long.toString(i))
+ .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map()).execute().actionGet();
+ }
+
+ logger.info("--> now start adding nodes");
+ internalCluster().startNodesAsync(2, settings).get();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+ logger.info("--> add two more nodes");
+ internalCluster().startNodesAsync(2, settings).get();
+
+ // We now have 5 nodes
+ setMinimumMasterNodes(3);
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("5"));
+
+ logger.info("--> refreshing and checking data");
+ refresh();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+
+ // now start shutting nodes down
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("4"));
+
+ // going down to 3 nodes. note that the min_master_node may not be in effect when we shutdown the 4th
+ // node, but that's OK as it is set to 3 before.
+ setMinimumMasterNodes(2);
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("3"));
+
+ logger.info("--> stopped two nodes, verifying data");
+ refresh();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+
+ // closing the 3rd node
+ internalCluster().stopRandomDataNode();
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForGreenStatus().setWaitForRelocatingShards(0).setWaitForNodes("2"));
+
+ // closing the 2nd node
+ setMinimumMasterNodes(1);
+ internalCluster().stopRandomDataNode();
+
+ // make sure the cluster state is green, and all has been recovered
+ assertTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout(healthTimeout).setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForNodes("1"));
+
+ logger.info("--> one node left, verifying data");
+ refresh();
+ for (int i = 0; i < 10; i++) {
+ assertHitCount(client().prepareCount().setQuery(matchAllQuery()).get(), 2000l);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java
new file mode 100644
index 0000000000..37bf22aa47
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java
@@ -0,0 +1,181 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.recovery;
+
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.recovery.*;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+public class RecoveriesCollectionTests extends ElasticsearchSingleNodeTest {
+
+ final static RecoveryTarget.RecoveryListener listener = new RecoveryTarget.RecoveryListener() {
+ @Override
+ public void onRecoveryDone(RecoveryState state) {
+
+ }
+
+ @Override
+ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
+
+ }
+ };
+
+ @Test
+ public void testLastAccessTimeUpdate() throws Exception {
+ createIndex();
+ final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));
+ final long recoveryId = startRecovery(collection);
+ try (RecoveriesCollection.StatusRef status = collection.getStatus(recoveryId)) {
+ final long lastSeenTime = status.status().lastAccessTime();
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ try (RecoveriesCollection.StatusRef currentStatus = collection.getStatus(recoveryId)) {
+ assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.status().lastAccessTime()));
+ }
+ }
+ });
+ } finally {
+ collection.cancelRecovery(recoveryId, "life");
+ }
+ }
+
+ @Test
+ public void testRecoveryTimeout() throws InterruptedException {
+ createIndex();
+ final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));
+ final AtomicBoolean failed = new AtomicBoolean();
+ final CountDownLatch latch = new CountDownLatch(1);
+ final long recoveryId = startRecovery(collection, new RecoveryTarget.RecoveryListener() {
+ @Override
+ public void onRecoveryDone(RecoveryState state) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
+ failed.set(true);
+ latch.countDown();
+ }
+ }, TimeValue.timeValueMillis(100));
+ try {
+ latch.await(30, TimeUnit.SECONDS);
+ assertTrue("recovery failed to timeout", failed.get());
+ } finally {
+ collection.cancelRecovery(recoveryId, "meh");
+ }
+
+ }
+
+ @Test
+ public void testRecoveryCancellationNoPredicate() throws Exception {
+ createIndex();
+ final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));
+ final long recoveryId = startRecovery(collection);
+ final long recoveryId2 = startRecovery(collection);
+ try (RecoveriesCollection.StatusRef statusRef = collection.getStatus(recoveryId)) {
+ ShardId shardId = statusRef.status().shardId();
+ assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test"));
+ assertThat("all recoveries should be cancelled", collection.size(), equalTo(0));
+ } finally {
+ collection.cancelRecovery(recoveryId, "meh");
+ collection.cancelRecovery(recoveryId2, "meh");
+ }
+ }
+
+ @Test
+ public void testRecoveryCancellationPredicate() throws Exception {
+ createIndex();
+ final RecoveriesCollection collection = new RecoveriesCollection(logger, getInstanceFromNode(ThreadPool.class));
+ final long recoveryId = startRecovery(collection);
+ final long recoveryId2 = startRecovery(collection);
+ final ArrayList<AutoCloseable> toClose = new ArrayList<>();
+ try {
+ RecoveriesCollection.StatusRef statusRef = collection.getStatus(recoveryId);
+ toClose.add(statusRef);
+ ShardId shardId = statusRef.status().shardId();
+ assertFalse("should not have cancelled recoveries", collection.cancelRecoveriesForShard(shardId, "test", Predicates.<RecoveryStatus>alwaysFalse()));
+ final Predicate<RecoveryStatus> shouldCancel = new Predicate<RecoveryStatus>() {
+ @Override
+ public boolean apply(RecoveryStatus status) {
+ return status.recoveryId() == recoveryId;
+ }
+ };
+ assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test", shouldCancel));
+            assertThat("we should still have one recovery", collection.size(), equalTo(1));
+ statusRef = collection.getStatus(recoveryId);
+ toClose.add(statusRef);
+ assertNull("recovery should have been deleted", statusRef);
+ statusRef = collection.getStatus(recoveryId2);
+ toClose.add(statusRef);
+ assertNotNull("recovery should NOT have been deleted", statusRef);
+
+ } finally {
+ // TODO: do we want a lucene IOUtils version of this?
+ for (AutoCloseable closeable : toClose) {
+ if (closeable != null) {
+ closeable.close();
+ }
+ }
+ collection.cancelRecovery(recoveryId, "meh");
+ collection.cancelRecovery(recoveryId2, "meh");
+ }
+ }
+
+ protected void createIndex() {
+ createIndex("test",
+ Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .build());
+ ensureGreen();
+ }
+
+
+ long startRecovery(RecoveriesCollection collection) {
+ return startRecovery(collection, listener, TimeValue.timeValueMinutes(60));
+ }
+
+ long startRecovery(RecoveriesCollection collection, RecoveryTarget.RecoveryListener listener, TimeValue timeValue) {
+ IndicesService indexServices = getInstanceFromNode(IndicesService.class);
+ IndexShard indexShard = indexServices.indexServiceSafe("test").shard(0);
+ final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT);
+ return collection.startRecovery(indexShard, sourceNode, listener, timeValue);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTest.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTest.java
new file mode 100644
index 0000000000..f1610f5b19
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.concurrent.TimeUnit;
+
+public class RecoverySettingsTest extends ElasticsearchSingleNodeTest {
+
+ @Override
+ protected boolean resetNodeAfterTest() {
+ return true;
+ }
+
+ @Test
+ public void testAllSettingsAreDynamicallyUpdatable() {
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.fileChunkSize().bytesAsInt());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, randomIntBetween(1, 200), new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.translogOps());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.translogSize().bytesAsInt());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.concurrentStreamPool().getMaximumPoolSize());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, randomIntBetween(1, 200), new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.concurrentSmallFileStreamPool().getMaximumPoolSize());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, 0, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(null, recoverySettings.rateLimiter());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.retryDelayStateSync().millis());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.retryDelayNetwork().millis());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.activityTimeout().millis());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.internalActionTimeout().millis());
+ }
+ });
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis());
+ }
+ });
+
+ innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS, false, new Validator() {
+ @Override
+ public void validate(RecoverySettings recoverySettings, boolean expectedValue) {
+ assertEquals(expectedValue, recoverySettings.compress());
+ }
+ });
+ }
+
+ private static class Validator {
+ public void validate(RecoverySettings recoverySettings, int expectedValue) {
+ }
+
+ public void validate(RecoverySettings recoverySettings, boolean expectedValue) {
+ }
+ }
+
+ private void innerTestSettings(String key, int newValue, Validator validator) {
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue)).get();
+ validator.validate(getInstanceFromNode(RecoverySettings.class), newValue);
+ }
+
+ private void innerTestSettings(String key, int newValue, TimeUnit timeUnit, Validator validator) {
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue, timeUnit)).get();
+ validator.validate(getInstanceFromNode(RecoverySettings.class), newValue);
+ }
+
+ private void innerTestSettings(String key, int newValue, ByteSizeUnit byteSizeUnit, Validator validator) {
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue, byteSizeUnit)).get();
+ validator.validate(getInstanceFromNode(RecoverySettings.class), newValue);
+ }
+
+ private void innerTestSettings(String key, boolean newValue, Validator validator) {
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(key, newValue)).get();
+ validator.validate(getInstanceFromNode(RecoverySettings.class), newValue);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
new file mode 100644
index 0000000000..cccfe8024b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadTests.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.test.BackgroundIndexer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RecoveryWhileUnderLoadTests extends ElasticsearchIntegrationTest {
+
+ private final ESLogger logger = Loggers.getLogger(RecoveryWhileUnderLoadTests.class);
+
+ @Test
+ @Slow
+ public void recoverWhileUnderLoadAllocateReplicasTest() throws Exception {
+ logger.info("--> creating test index ...");
+ int numberOfShards = numberOfShards();
+ assertAcked(prepareCreate("test", 1, settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Translog.Durabilty.ASYNC)));
+
+ final int totalNumDocs = scaledRandomIntBetween(200, 10000);
+ int waitFor = totalNumDocs / 10;
+ int extraDocs = waitFor;
+ try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type", client(), extraDocs)) {
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ extraDocs = totalNumDocs / 10;
+ waitFor += extraDocs;
+ indexer.continueIndexing(extraDocs);
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ extraDocs = totalNumDocs - waitFor;
+ indexer.continueIndexing(extraDocs);
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ // now start another node, while we index
+ allowNodes("test", 2);
+
+ logger.info("--> waiting for GREEN health status ...");
+ // make sure the cluster state is green, and all has been recovered
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
+
+ logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+ waitForDocs(totalNumDocs, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", totalNumDocs);
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ indexer.stop();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ }
+ }
+
+ @Test
+ @Slow
+ public void recoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() throws Exception {
+ logger.info("--> creating test index ...");
+ int numberOfShards = numberOfShards();
+ assertAcked(prepareCreate("test", 1, settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Translog.Durabilty.ASYNC)));
+
+ final int totalNumDocs = scaledRandomIntBetween(200, 10000);
+ int waitFor = totalNumDocs / 10;
+ int extraDocs = waitFor;
+ try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type", client(), extraDocs)) {
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ extraDocs = totalNumDocs / 10;
+ waitFor += extraDocs;
+ indexer.continueIndexing(extraDocs);
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ extraDocs = totalNumDocs - waitFor;
+ indexer.continueIndexing(extraDocs);
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus());
+
+
+ logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+ waitForDocs(totalNumDocs, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", totalNumDocs);
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ indexer.stop();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ }
+ }
+
+ @Test
+ @Slow
+ public void recoverWhileUnderLoadWithReducedAllowedNodes() throws Exception {
+ logger.info("--> creating test index ...");
+ int numberOfShards = numberOfShards();
+ assertAcked(prepareCreate("test", 2, settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).put(SETTING_NUMBER_OF_REPLICAS, 1).put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Translog.Durabilty.ASYNC)));
+
+ final int totalNumDocs = scaledRandomIntBetween(200, 10000);
+ int waitFor = totalNumDocs / 10;
+ int extraDocs = waitFor;
+ try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type", client(), extraDocs)) {
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ extraDocs = totalNumDocs / 10;
+ waitFor += extraDocs;
+ indexer.continueIndexing(extraDocs);
+ logger.info("--> flushing the index ....");
+ // now flush, just to make sure we have some data in the index, not just translog
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ logger.info("--> waiting for {} docs to be indexed ...", waitFor);
+ waitForDocs(waitFor, indexer);
+ indexer.assertNoFailures();
+ logger.info("--> {} docs indexed", waitFor);
+
+ // now start more nodes, while we index
+ extraDocs = totalNumDocs - waitFor;
+ indexer.continueIndexing(extraDocs);
+ logger.info("--> allow 4 nodes for index [test] ...");
+ allowNodes("test", 4);
+
+ logger.info("--> waiting for GREEN health status ...");
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForGreenStatus().setWaitForRelocatingShards(0));
+
+ logger.info("--> waiting for {} docs to be indexed ...", totalNumDocs);
+ waitForDocs(totalNumDocs, indexer);
+ indexer.assertNoFailures();
+
+ logger.info("--> {} docs indexed", totalNumDocs);
+ // now, shutdown nodes
+ logger.info("--> allow 3 nodes for index [test] ...");
+ allowNodes("test", 3);
+ logger.info("--> waiting for relocations ...");
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+
+ logger.info("--> allow 2 nodes for index [test] ...");
+ allowNodes("test", 2);
+ logger.info("--> waiting for relocations ...");
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+
+ logger.info("--> allow 1 nodes for index [test] ...");
+ allowNodes("test", 1);
+ logger.info("--> waiting for relocations ...");
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ indexer.stop();
+ logger.info("--> indexing threads stopped");
+
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("5m").setWaitForRelocatingShards(0));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numberOfShards, indexer.totalIndexedDocs(), 10);
+ }
+ }
+
+ @Test
+ @Slow
+ public void recoverWhileRelocating() throws Exception {
+ final int numShards = between(2, 10);
+ final int numReplicas = 0;
+ logger.info("--> creating test index ...");
+ int allowNodes = 2;
+ assertAcked(prepareCreate("test", 3, settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, numShards).put(SETTING_NUMBER_OF_REPLICAS, numReplicas).put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, Translog.Durabilty.ASYNC)));
+
+ final int numDocs = scaledRandomIntBetween(200, 20000);
+
+ try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type", client(), numDocs)) {
+
+ for (int i = 0; i < numDocs; i += scaledRandomIntBetween(100, Math.min(1000, numDocs))) {
+ indexer.assertNoFailures();
+ logger.info("--> waiting for {} docs to be indexed ...", i);
+ waitForDocs(i, indexer);
+ logger.info("--> {} docs indexed", i);
+ allowNodes = 2 / allowNodes;
+ allowNodes("test", allowNodes);
+ logger.info("--> waiting for GREEN health status ...");
+ ensureGreen(TimeValue.timeValueMinutes(5));
+ }
+
+ logger.info("--> marking and waiting for indexing threads to stop ...");
+ indexer.stop();
+
+ logger.info("--> indexing threads stopped");
+ logger.info("--> bump up number of replicas to 1 and allow all nodes to hold the index");
+ allowNodes("test", 3);
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("number_of_replicas", 1)).get());
+ ensureGreen(TimeValue.timeValueMinutes(5));
+
+ logger.info("--> refreshing the index");
+ refreshAndAssert();
+ logger.info("--> verifying indexed content");
+ iterateAssertCount(numShards, indexer.totalIndexedDocs(), 10);
+ }
+ }
+
+ private void iterateAssertCount(final int numberOfShards, final long numberOfDocs, final int iterations) throws Exception {
+ SearchResponse[] iterationResults = new SearchResponse[iterations];
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
+ logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse);
+ iterationResults[i] = searchResponse;
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+
+ if (error) {
+ //Printing out shards and their doc count
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().get();
+ for (ShardStats shardStats : indicesStatsResponse.getShards()) {
+ DocsStats docsStats = shardStats.getStats().docs;
+ logger.info("shard [{}] - count {}, primary {}", shardStats.getShardId(), docsStats.getCount(), shardStats.getShardRouting().primary());
+ }
+
+ //if there was an error we try to wait and see if at some point it'll get fixed
+ logger.info("--> trying to wait");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ boolean error = false;
+ for (int i = 0; i < iterations; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
+ if (searchResponse.getHits().totalHits() != numberOfDocs) {
+ error = true;
+ }
+ }
+ return !error;
+ }
+ }, 5, TimeUnit.MINUTES), equalTo(true));
+ }
+
+ //lets now make the test fail if it was supposed to fail
+ for (int i = 0; i < iterations; i++) {
+ assertHitCount(iterationResults[i], numberOfDocs);
+ }
+ }
+
+ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iteration, SearchResponse searchResponse) {
+ logger.info("iteration [{}] - successful shards: {} (expected {})", iteration, searchResponse.getSuccessfulShards(), numberOfShards);
+ logger.info("iteration [{}] - failed shards: {} (expected 0)", iteration, searchResponse.getFailedShards());
+ if (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0) {
+ logger.info("iteration [{}] - shard failures: {}", iteration, Arrays.toString(searchResponse.getShardFailures()));
+ }
+ logger.info("iteration [{}] - returned documents: {} (expected {})", iteration, searchResponse.getHits().totalHits(), numberOfDocs);
+ }
+
+ private void refreshAndAssert() throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().get();
+ assertAllSuccessful(actionGet);
+ }
+ }, 5, TimeUnit.MINUTES);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationTests.java b/core/src/test/java/org/elasticsearch/recovery/RelocationTests.java
new file mode 100644
index 0000000000..4f61be0b27
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/RelocationTests.java
@@ -0,0 +1,550 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.procedures.IntProcedure;
+import com.google.common.base.Predicate;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.apache.lucene.index.IndexFileNames;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.BackgroundIndexer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+@TestLogging("indices.recovery:TRACE,index.shard.service:TRACE")
+public class RelocationTests extends ElasticsearchIntegrationTest {
+ private final TimeValue ACCEPTABLE_RELOCATION_TIME = new TimeValue(5, TimeUnit.MINUTES);
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName()).build();
+ }
+
+
+ @Test
+ public void testSimpleRelocationNoIndexing() {
+ logger.info("--> starting [node1] ...");
+ final String node_1 = internalCluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ )
+ .execute().actionGet();
+
+ logger.info("--> index 10 docs");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+ logger.info("--> flush so we have an actual index");
+ client().admin().indices().prepareFlush().execute().actionGet();
+ logger.info("--> index more docs so we have something in the translog");
+ for (int i = 10; i < 20; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
+ }
+
+ logger.info("--> verifying count");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20l));
+
+ logger.info("--> start another node");
+ final String node_2 = internalCluster().startNode();
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> relocate the shard from node1 to node2");
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2))
+ .execute().actionGet();
+
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+
+ logger.info("--> verifying count again...");
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount("test").execute().actionGet().getCount(), equalTo(20l));
+ }
+
+ @Test
+ @Slow
+ public void testRelocationWhileIndexingRandom() throws Exception {
+ int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4);
+ int numberOfReplicas = randomBoolean() ? 0 : 1;
+ int numberOfNodes = numberOfReplicas == 0 ? 2 : 3;
+
+ logger.info("testRelocationWhileIndexingRandom(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes);
+
+ String[] nodes = new String[numberOfNodes];
+ logger.info("--> starting [node1] ...");
+ nodes[0] = internalCluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", numberOfReplicas)
+ ).execute().actionGet();
+
+
+ for (int i = 1; i < numberOfNodes; i++) {
+ logger.info("--> starting [node{}] ...", i + 1);
+ nodes[i] = internalCluster().startNode();
+ if (i != numberOfNodes - 1) {
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes(Integer.toString(i + 1)).setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ }
+ }
+
+ int numDocs = scaledRandomIntBetween(200, 2500);
+ try (BackgroundIndexer indexer = new BackgroundIndexer("test", "type1", client(), numDocs)) {
+ logger.info("--> waiting for {} docs to be indexed ...", numDocs);
+ waitForDocs(numDocs, indexer);
+ logger.info("--> {} docs indexed", numDocs);
+
+ logger.info("--> starting relocations...");
+ int nodeShiftBased = numberOfReplicas; // if we have replicas shift those
+ for (int i = 0; i < numberOfRelocations; i++) {
+ int fromNode = (i % 2);
+ int toNode = fromNode == 0 ? 1 : 0;
+ fromNode += nodeShiftBased;
+ toNode += nodeShiftBased;
+ numDocs = scaledRandomIntBetween(200, 1000);
+ logger.debug("--> Allow indexer to index [{}] documents", numDocs);
+ indexer.continueIndexing(numDocs);
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .get();
+ if (rarely()) {
+ logger.debug("--> flushing");
+ client().admin().indices().prepareFlush().get();
+ }
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
+ assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
+ indexer.pauseIndexing();
+ logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
+ }
+ logger.info("--> done relocations");
+ logger.info("--> waiting for indexing threads to stop ...");
+ indexer.stop();
+ logger.info("--> indexing threads stopped");
+
+ logger.info("--> refreshing the index");
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ logger.info("--> searching the index");
+ boolean ranOnce = false;
+ for (int i = 0; i < 10; i++) {
+ try {
+ logger.info("--> START search test round {}", i + 1);
+ SearchHits hits = client().prepareSearch("test").setQuery(matchAllQuery()).setSize((int) indexer.totalIndexedDocs()).setNoFields().execute().actionGet().getHits();
+ ranOnce = true;
+ if (hits.totalHits() != indexer.totalIndexedDocs()) {
+ int[] hitIds = new int[(int) indexer.totalIndexedDocs()];
+ for (int hit = 0; hit < indexer.totalIndexedDocs(); hit++) {
+ hitIds[hit] = hit + 1;
+ }
+ IntHashSet set = IntHashSet.from(hitIds);
+ for (SearchHit hit : hits.hits()) {
+ int id = Integer.parseInt(hit.id());
+ if (!set.remove(id)) {
+ logger.error("Extra id [{}]", id);
+ }
+ }
+ set.forEach(new IntProcedure() {
+
+ @Override
+ public void apply(int value) {
+ logger.error("Missing id [{}]", value);
+ }
+
+ });
+ }
+ assertThat(hits.totalHits(), equalTo(indexer.totalIndexedDocs()));
+ logger.info("--> DONE search test round {}", i + 1);
+ } catch (SearchPhaseExecutionException ex) {
+ // TODO: the first run fails with this failure, waiting for relocating nodes set to 0 is not enough?
+ logger.warn("Got exception while searching.", ex);
+ }
+ }
+ if (!ranOnce) {
+ fail();
+ }
+ }
+ }
+
+ @Test
+ @Slow
+ public void testRelocationWhileRefreshing() throws Exception {
+ int numberOfRelocations = scaledRandomIntBetween(1, rarely() ? 10 : 4);
+ int numberOfReplicas = randomBoolean() ? 0 : 1;
+ int numberOfNodes = numberOfReplicas == 0 ? 2 : 3;
+
+        logger.info("testRelocationWhileRefreshing(numRelocations={}, numberOfReplicas={}, numberOfNodes={})", numberOfRelocations, numberOfReplicas, numberOfNodes);
+
+ String[] nodes = new String[numberOfNodes];
+ logger.info("--> starting [node_0] ...");
+ nodes[0] = internalCluster().startNode();
+
+ logger.info("--> creating test index ...");
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", numberOfReplicas)
+                        .put("index.refresh_interval", -1) // we want to control refreshes
+ ).execute().actionGet();
+
+ // make sure the first shard is started.
+ ensureYellow();
+
+ for (int i = 1; i < numberOfNodes; i++) {
+ logger.info("--> starting [node_{}] ...", i);
+ nodes[i] = internalCluster().startNode();
+ if (i != numberOfNodes - 1) {
+ ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes(Integer.toString(i + 1)).setWaitForGreenStatus().execute().actionGet();
+ assertThat(healthResponse.isTimedOut(), equalTo(false));
+ }
+ }
+
+ final Semaphore postRecoveryShards = new Semaphore(0);
+
+ for (IndicesLifecycle indicesLifecycle : internalCluster().getInstances(IndicesLifecycle.class)) {
+ indicesLifecycle.addListener(new IndicesLifecycle.Listener() {
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
+ if (currentState == IndexShardState.POST_RECOVERY) {
+ postRecoveryShards.release();
+ }
+ }
+ });
+ }
+
+
+ logger.info("--> starting relocations...");
+ int nodeShiftBased = numberOfReplicas; // if we have replicas shift those
+ for (int i = 0; i < numberOfRelocations; i++) {
+ int fromNode = (i % 2);
+ int toNode = fromNode == 0 ? 1 : 0;
+ fromNode += nodeShiftBased;
+ toNode += nodeShiftBased;
+
+ List<IndexRequestBuilder> builders1 = new ArrayList<>();
+ for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) {
+ builders1.add(client().prepareIndex("test", "type").setSource("{}"));
+ }
+
+ List<IndexRequestBuilder> builders2 = new ArrayList<>();
+ for (int numDocs = randomIntBetween(10, 30); numDocs > 0; numDocs--) {
+ builders2.add(client().prepareIndex("test", "type").setSource("{}"));
+ }
+
+ logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
+
+
+ client().admin().cluster().prepareReroute()
+ .add(new MoveAllocationCommand(new ShardId("test", 0), nodes[fromNode], nodes[toNode]))
+ .get();
+
+
+ logger.debug("--> index [{}] documents", builders1.size());
+ indexRandom(false, true, builders1);
+ // wait for shard to reach post recovery
+ postRecoveryShards.acquire(1);
+
+ logger.debug("--> index [{}] documents", builders2.size());
+ indexRandom(true, true, builders2);
+
+ // verify cluster was finished.
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get().isTimedOut());
+ logger.info("--> DONE relocate the shard from {} to {}", fromNode, toNode);
+
+ logger.debug("--> verifying all searches return the same number of docs");
+ long expectedCount = -1;
+ for (Client client : clients()) {
+ SearchResponse response = client.prepareSearch("test").setPreference("_local").setSize(0).get();
+ assertNoFailures(response);
+ if (expectedCount < 0) {
+ expectedCount = response.getHits().totalHits();
+ } else {
+ assertEquals(expectedCount, response.getHits().totalHits());
+ }
+ }
+
+ }
+ }
+
+ @Test
+ public void testMoveShardsWhileRelocation() throws Exception {
+ final String indexName = "test";
+
+ ListenableFuture<String> blueFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "blue").build());
+ ListenableFuture<String> redFuture = internalCluster().startNodeAsync(Settings.builder().put("node.color", "red").build());
+ internalCluster().startNode(Settings.builder().put("node.color", "green").build());
+ final String blueNodeName = blueFuture.get();
+ final String redNodeName = redFuture.get();
+
+ ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
+ assertThat(response.isTimedOut(), is(false));
+
+
+ client().admin().indices().prepareCreate(indexName)
+ .setSettings(
+ Settings.builder()
+ .put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "blue")
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ).get();
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ int numDocs = scaledRandomIntBetween(25, 250);
+ for (int i = 0; i < numDocs; i++) {
+ requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
+ }
+ indexRandom(true, requests);
+ ensureSearchable(indexName);
+
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ String blueNodeId = internalCluster().getInstance(DiscoveryService.class, blueNodeName).localNode().id();
+
+ assertFalse(stateResponse.getState().readOnlyRoutingNodes().node(blueNodeId).isEmpty());
+
+ SearchResponse searchResponse = client().prepareSearch(indexName).get();
+ assertHitCount(searchResponse, numDocs);
+
+ // Slow down recovery in order to make recovery cancellations more likely
+ IndicesStatsResponse statsResponse = client().admin().indices().prepareStats(indexName).get();
+ long chunkSize = statsResponse.getIndex(indexName).getShards()[0].getStats().getStore().size().bytes() / 10;
+ assertTrue(client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder()
+ // one chunk per sec..
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES)
+ .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, chunkSize, ByteSizeUnit.BYTES)
+ )
+ .get().isAcknowledged());
+
+ client().admin().indices().prepareUpdateSettings(indexName).setSettings(
+ Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "red")
+ ).get();
+
+ // Lets wait a bit and then move again to hopefully trigger recovery cancellations.
+ boolean applied = awaitBusy(
+ new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ RecoveryResponse recoveryResponse = internalCluster().client(redNodeName).admin().indices().prepareRecoveries(indexName)
+ .get();
+ return !recoveryResponse.shardResponses().get(indexName).isEmpty();
+ }
+ }
+ );
+ assertTrue(applied);
+ client().admin().indices().prepareUpdateSettings(indexName).setSettings(
+ Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + "color", "green")
+ ).get();
+
+ // Restore the recovery speed to not timeout cluster health call
+ assertTrue(client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder()
+ .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb")
+ .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, "512kb")
+ )
+ .get().isAcknowledged());
+
+ // this also waits for all ongoing recoveries to complete:
+ ensureSearchable(indexName);
+ searchResponse = client().prepareSearch(indexName).get();
+ assertHitCount(searchResponse, numDocs);
+
+ stateResponse = client().admin().cluster().prepareState().get();
+ assertTrue(stateResponse.getState().readOnlyRoutingNodes().node(blueNodeId).isEmpty());
+ }
+
+ @Test
+ @Slow
+ public void testCancellationCleansTempFiles() throws Exception {
+ final String indexName = "test";
+
+ final String p_node = internalCluster().startNode();
+
+ client().admin().indices().prepareCreate(indexName)
+ .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1, IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get();
+
+ internalCluster().startNodesAsync(2).get();
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ int numDocs = scaledRandomIntBetween(25, 250);
+ for (int i = 0; i < numDocs; i++) {
+ requests.add(client().prepareIndex(indexName, "type").setCreate(true).setSource("{}"));
+ }
+ indexRandom(true, requests);
+ assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("3").setWaitForGreenStatus().get().isTimedOut());
+ flush();
+
+ int allowedFailures = randomIntBetween(3, 10);
+ logger.info("--> blocking recoveries from primary (allowed failures: [{}])", allowedFailures);
+ CountDownLatch corruptionCount = new CountDownLatch(allowedFailures);
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class, p_node);
+ MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, p_node);
+ for (DiscoveryNode node : clusterService.state().nodes()) {
+ if (!node.equals(clusterService.localNode())) {
+ mockTransportService.addDelegate(node, new RecoveryCorruption(mockTransportService.original(), corruptionCount));
+ }
+ }
+
+ client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
+
+ corruptionCount.await();
+
+ logger.info("--> stopping replica assignment");
+ assertAcked(client().admin().cluster().prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")));
+
+ logger.info("--> wait for all replica shards to be removed, on all nodes");
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ for (String node : internalCluster().getNodeNames()) {
+ if (node.equals(p_node)) {
+ continue;
+ }
+ ClusterState state = client(node).admin().cluster().prepareState().setLocal(true).get().getState();
+ assertThat(node + " indicates assigned replicas",
+ state.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
+ }
+ }
+ });
+
+ logger.info("--> verifying no temporary recoveries are left");
+ for (String node : internalCluster().getNodeNames()) {
+ NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, node);
+ for (final Path shardLoc : nodeEnvironment.availableShardPaths(new ShardId(indexName, 0))) {
+ if (Files.exists(shardLoc)) {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Files.walkFileTree(shardLoc, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ assertThat("found a temporary recovery file: " + file, file.getFileName().toString(), not(startsWith("recovery.")));
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ } catch (IOException e) {
+ throw new AssertionError("failed to walk file tree starting at [" + shardLoc + "]", e);
+ }
+ }
+ });
+ }
+ }
+ }
+ }
+
+ class RecoveryCorruption extends MockTransportService.DelegateTransport {
+
+ private final CountDownLatch corruptionCount;
+
+ public RecoveryCorruption(Transport transport, CountDownLatch corruptionCount) {
+ super(transport);
+ this.corruptionCount = corruptionCount;
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) {
+ RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request;
+ if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) {
+ // corrupting the segments_N files in order to make sure future recovery re-send files
+ logger.debug("corrupting [{}] to {}. file name: [{}]", action, node, chunkRequest.name());
+ byte[] array = chunkRequest.content().array();
+ array[0] = (byte) ~array[0]; // flip one byte in the content
+ corruptionCount.countDown();
+ }
+ transport.sendRequest(node, requestId, action, request, options);
+ } else {
+ transport.sendRequest(node, requestId, action, request, options);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java b/core/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
new file mode 100644
index 0000000000..f91ce01967
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/SimpleRecoveryTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SimpleRecoveryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return settingsBuilder().put(super.indexSettings()).put(recoverySettings()).build();
+ }
+
+ protected Settings recoverySettings() {
+ return Settings.Builder.EMPTY_SETTINGS;
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testSimpleRecovery() throws Exception {
+ assertAcked(prepareCreate("test", 1).execute().actionGet());
+
+ NumShards numShards = getNumShards("test");
+
+ logger.info("Running Cluster Health");
+ ensureYellow();
+
+ client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
+ FlushResponse flushResponse = client().admin().indices().flush(flushRequest("test")).actionGet();
+ assertThat(flushResponse.getTotalShards(), equalTo(numShards.totalNumShards));
+ assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+ assertThat(flushResponse.getFailedShards(), equalTo(0));
+ client().index(indexRequest("test").type("type1").id("2").source(source("2", "test"))).actionGet();
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ allowNodes("test", 2);
+
+ logger.info("Running Cluster Health");
+ ensureGreen();
+
+ GetResponse getResult;
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(false)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+
+ // now start another one so we move some primaries
+ allowNodes("test", 3);
+ Thread.sleep(200);
+ logger.info("Running Cluster Health");
+ ensureGreen();
+
+ for (int i = 0; i < 5; i++) {
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("1", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ getResult = client().get(getRequest("test").type("type1").id("2").operationThreaded(true)).actionGet();
+ assertThat(getResult.getSourceAsString(), equalTo(source("2", "test")));
+ }
+ }
+
+ private String source(String id, String nameValue) {
+ return "{ type1 : { \"id\" : \"" + id + "\", \"name\" : \"" + nameValue + "\" } }";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java b/core/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
new file mode 100644
index 0000000000..eb2f0af69e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/SmallFileChunkSizeRecoveryTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallFileChunkSizeRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return Settings.settingsBuilder().put("index.shard.recovery.file_chunk_size", "3b").build();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java b/core/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
new file mode 100644
index 0000000000..d565bacfd5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/SmallTranslogOpsRecoveryTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallTranslogOpsRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return Settings.settingsBuilder().put("index.shard.recovery.translog_ops", 1).build();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java b/core/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
new file mode 100644
index 0000000000..500b1c74ca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/SmallTranslogSizeRecoveryTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ *
+ */
+public class SmallTranslogSizeRecoveryTests extends SimpleRecoveryTests {
+
+ @Override
+ protected Settings recoverySettings() {
+ return Settings.settingsBuilder().put("index.shard.recovery.translog_size", "3b").build();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryTests.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryTests.java
new file mode 100644
index 0000000000..02656e9e4e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryTests.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.recovery;
+
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.recovery.RecoveryTarget;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+@ElasticsearchIntegrationTest.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = ElasticsearchIntegrationTest.Scope.TEST)
+@SuppressCodecs("*") // test relies on exact file extensions
+public class TruncatedRecoveryTests extends ElasticsearchIntegrationTest {
+
+ protected Settings nodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
+ .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES));
+ return builder.build();
+ }
+
+ /**
+ * This test tries to truncate some of the larger files in the index to trigger leftovers on the recovery
+ * target. This happens during recovery: when the last chunk of the file is transferred to the replica,
+ * we just throw an exception to make sure the recovery fails and we leave some half-baked files on the target.
+ * Later we allow full recovery to ensure we can still recover and don't run into corruptions.
+ */
+ @Test
+ public void testCancelRecoveryAndResume() throws Exception {
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
+ List<NodeStats> dataNodeStats = new ArrayList<>();
+ for (NodeStats stat : nodeStats.getNodes()) {
+ if (stat.getNode().isDataNode()) {
+ dataNodeStats.add(stat);
+ }
+ }
+ assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
+ Collections.shuffle(dataNodeStats, getRandom());
+ // we use 2 nodes, a lucky one and an unlucky one
+ // the lucky one holds the primary
+ // the unlucky one gets the replica and the truncated leftovers
+ NodeStats primariesNode = dataNodeStats.get(0);
+ NodeStats unluckyNode = dataNodeStats.get(1);
+
+ // create the index and prevent allocation on any other nodes than the lucky one
+ // we have no replicas so far and make sure that we allocate the primary on the lucky node
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string", "the_id", "type=string")
+ .setSettings(settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numberOfShards())
+ .put("index.routing.allocation.include._name", primariesNode.getNode().name()))); // only allocate on the lucky node
+
+ // index some docs and check if they are coming back
+ int numDocs = randomIntBetween(100, 200);
+ List<IndexRequestBuilder> builder = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
+ }
+ indexRandom(true, builder);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
+ }
+ ensureGreen();
+ // ensure we have flushed segments and make them a big one via optimize
+ client().admin().indices().prepareFlush().setForce(true).setWaitIfOngoing(true).get();
+ client().admin().indices().prepareOptimize().setMaxNumSegments(1).setFlush(true).get();
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ final AtomicBoolean truncate = new AtomicBoolean(true);
+ for (NodeStats dataNode : dataNodeStats) {
+ MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().name()));
+ mockTransportService.addDelegate(internalCluster().getInstance(Discovery.class, unluckyNode.getNode().name()).localNode(), new MockTransportService.DelegateTransport(mockTransportService.original()) {
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (action.equals(RecoveryTarget.Actions.FILE_CHUNK)) {
+ RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
+ logger.debug("file chunk [" + req.toString() + "] lastChunk: " + req.lastChunk());
+ if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) {
+ latch.countDown();
+ throw new RuntimeException("Caused some truncated files for fun and profit");
+ }
+ }
+ super.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ logger.info("--> bumping replicas to 1");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put("index.routing.allocation.include._name", // now allow allocation on all nodes
+ primariesNode.getNode().name() + "," + unluckyNode.getNode().name())).get();
+
+ latch.await();
+
+ // at this point we got some truncated leftovers on the replica on the unlucky node
+ // now we are allowing the recovery to allocate again and finish to see if we wipe the truncated files
+ truncate.compareAndSet(true, false);
+ ensureGreen("test");
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java
new file mode 100644
index 0000000000..579408366e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.TestQueryParsingException;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.elasticsearch.transport.RemoteTransportException;
+import org.junit.Test;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class BytesRestResponseTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testWithHeaders() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = randomBoolean() ? new DetailedExceptionRestChannel(request) : new SimpleExceptionRestChannel(request);
+
+ BytesRestResponse response = new BytesRestResponse(channel, new ExceptionWithHeaders());
+ assertThat(response.getHeaders().get("n1"), notNullValue());
+ assertThat(response.getHeaders().get("n1"), contains("v11", "v12"));
+ assertThat(response.getHeaders().get("n2"), notNullValue());
+ assertThat(response.getHeaders().get("n2"), contains("v21", "v22"));
+ }
+
+ @Test
+ public void testSimpleExceptionMessage() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new SimpleExceptionRestChannel(request);
+
+ Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("ElasticsearchException[an error occurred reading data]"));
+ assertThat(text, not(containsString("FileNotFoundException")));
+ assertThat(text, not(containsString("/foo/bar")));
+ assertThat(text, not(containsString("error_trace")));
+ }
+
+ @Test
+ public void testDetailedExceptionMessage() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new DetailedExceptionRestChannel(request);
+
+ Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}"));
+ assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}"));
+ }
+
+ @Test
+ public void testNonElasticsearchExceptionIsNotShownAsSimpleMessage() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new SimpleExceptionRestChannel(request);
+
+ Throwable t = new Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, not(containsString("Throwable[an error occurred reading data]")));
+ assertThat(text, not(containsString("FileNotFoundException[/foo/bar]")));
+ assertThat(text, not(containsString("error_trace")));
+ assertThat(text, containsString("\"error\":\"No ElasticsearchException found\""));
+ }
+
+ @Test
+ public void testErrorTrace() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ request.params().put("error_trace", "true");
+ RestChannel channel = new DetailedExceptionRestChannel(request);
+
+ Throwable t = new Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("\"type\":\"throwable\",\"reason\":\"an error occurred reading data\""));
+ assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}"));
+ assertThat(text, containsString("\"error_trace\":{\"message\":\"an error occurred reading data\""));
+ }
+
+ public void testGuessRootCause() throws IOException {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new DetailedExceptionRestChannel(request);
+ {
+ Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]"));
+ }
+ {
+ Throwable t = new FileNotFoundException("/foo/bar");
+ BytesRestResponse response = new BytesRestResponse(channel, t);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("{\"root_cause\":[{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}]"));
+ }
+ }
+
+ @Test
+ public void testNullThrowable() throws Exception {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new SimpleExceptionRestChannel(request);
+
+ BytesRestResponse response = new BytesRestResponse(channel, null);
+ String text = response.content().toUtf8();
+ assertThat(text, containsString("\"error\":\"unknown\""));
+ assertThat(text, not(containsString("error_trace")));
+ }
+
+ @Test
+ public void testConvert() throws IOException {
+ RestRequest request = new FakeRestRequest();
+ RestChannel channel = new DetailedExceptionRestChannel(request);
+ ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 1));
+ ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+ new SearchShardTarget("node_1", "foo", 2));
+ SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1});
+ BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex));
+ String text = response.content().toUtf8();
+ String expected = "{\"error\":{\"root_cause\":[{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]},\"status\":400}";
+ assertEquals(expected.trim(), text.trim());
+ }
+
+ private static class ExceptionWithHeaders extends ElasticsearchException.WithRestHeaders {
+
+ ExceptionWithHeaders() {
+ super("", header("n1", "v11", "v12"), header("n2", "v21", "v22"));
+ }
+ }
+
+ private static class SimpleExceptionRestChannel extends RestChannel {
+
+ SimpleExceptionRestChannel(RestRequest request) {
+ super(request, false);
+ }
+
+ @Override
+ public void sendResponse(RestResponse response) {
+ }
+ }
+
+ private static class DetailedExceptionRestChannel extends RestChannel {
+
+ DetailedExceptionRestChannel(RestRequest request) {
+ super(request, true);
+ }
+
+ @Override
+ public void sendResponse(RestResponse response) {
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexDefaultTests.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexDefaultTests.java
new file mode 100644
index 0000000000..da5da15922
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexDefaultTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class CorsRegexDefaultTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(Node.HTTP_ENABLED, true)
+ .put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Exception {
+ String corsValue = "http://localhost:9200";
+ HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute();
+
+ assertThat(response.getStatusCode(), is(200));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Credentials")));
+ }
+
+ @Test
+ public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception {
+ HttpResponse response = httpClient().method("GET").path("/").execute();
+
+ assertThat(response.getStatusCode(), is(200));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Credentials")));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexTests.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexTests.java
new file mode 100644
index 0000000000..8e568d847c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.rest;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.Test;
+
+import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN;
+import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS;
+import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class CorsRegexTests extends ElasticsearchIntegrationTest {
+
+ protected static final ESLogger logger = Loggers.getLogger(CorsRegexTests.class);
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(SETTING_CORS_ALLOW_ORIGIN, "/https?:\\/\\/localhost(:[0-9]+)?/")
+ .put(SETTING_CORS_ALLOW_CREDENTIALS, true)
+ .put(SETTING_CORS_ENABLED, true)
+ .put(Node.HTTP_ENABLED, true)
+ .build();
+ }
+
+ @Test
+ public void testThatRegularExpressionWorksOnMatch() throws Exception {
+ String corsValue = "http://localhost:9200";
+ HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute();
+ assertResponseWithOriginheader(response, corsValue);
+
+ corsValue = "https://localhost:9200";
+ response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute();
+ assertResponseWithOriginheader(response, corsValue);
+ assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Credentials"));
+ assertThat(response.getHeaders().get("Access-Control-Allow-Credentials"), is("true"));
+ }
+
+ @Test
+ public void testThatRegularExpressionReturnsNullOnNonMatch() throws Exception {
+ HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", "http://evil-host:9200").execute();
+ assertResponseWithOriginheader(response, "null");
+ }
+
+ @Test
+ public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception {
+ HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").execute();
+ assertThat(response.getStatusCode(), is(200));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
+ }
+
+ @Test
+ public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception {
+ HttpResponse response = httpClient().method("GET").path("/").execute();
+ assertThat(response.getStatusCode(), is(200));
+ assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin")));
+ }
+
+ @Test
+ public void testThatPreFlightRequestWorksOnMatch() throws Exception {
+ String corsValue = "http://localhost:9200";
+ HttpResponse response = httpClient().method("OPTIONS").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute();
+ assertResponseWithOriginheader(response, corsValue);
+ }
+
+ @Test
+ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception {
+ HttpResponse response = httpClient().method("OPTIONS").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", "http://evil-host:9200").execute();
+ assertResponseWithOriginheader(response, "null");
+ }
+
+ public static void assertResponseWithOriginheader(HttpResponse response, String expectedCorsHeader) {
+ assertThat(response.getStatusCode(), is(200));
+ assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Origin"));
+ assertThat(response.getHeaders().get("Access-Control-Allow-Origin"), is(expectedCorsHeader));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java b/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java
new file mode 100644
index 0000000000..6a110cd5da
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import com.google.common.collect.Maps;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.Matchers.is;
+
+public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testRegisterRelevantHeaders() throws InterruptedException {
+
+ final RestController restController = new RestController(Settings.EMPTY);
+
+ int iterations = randomIntBetween(1, 5);
+
+ Set<String> headers = new HashSet<>();
+ ExecutorService executorService = Executors.newFixedThreadPool(iterations);
+ for (int i = 0; i < iterations; i++) {
+ int headersCount = randomInt(10);
+ final Set<String> newHeaders = new HashSet<>();
+ for (int j = 0; j < headersCount; j++) {
+ String usefulHeader = randomRealisticUnicodeOfLengthBetween(1, 30);
+ newHeaders.add(usefulHeader);
+ }
+ headers.addAll(newHeaders);
+
+ executorService.submit(new Runnable() {
+ @Override
+ public void run() {
+ restController.registerRelevantHeaders(newHeaders.toArray(new String[newHeaders.size()]));
+ }
+ });
+ }
+
+ executorService.shutdown();
+ assertThat(executorService.awaitTermination(1, TimeUnit.SECONDS), equalTo(true));
+ String[] relevantHeaders = restController.relevantHeaders().toArray(new String[restController.relevantHeaders().size()]);
+ assertThat(relevantHeaders.length, equalTo(headers.size()));
+
+ Arrays.sort(relevantHeaders);
+ String[] headersArray = new String[headers.size()];
+ headersArray = headers.toArray(headersArray);
+ Arrays.sort(headersArray);
+ assertThat(relevantHeaders, equalTo(headersArray));
+ }
+
+ @Test
+ public void testCopyHeadersRequest() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ Map<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) {
+
+ SearchRequest searchRequest = Requests.searchRequest();
+ putHeaders(searchRequest, transportHeaders);
+ putContext(searchRequest, transportContext);
+ assertHeaders(searchRequest, transportHeaders);
+ client.search(searchRequest);
+ assertHeaders(searchRequest, expectedHeaders);
+ assertContext(searchRequest, expectedContext);
+
+ GetRequest getRequest = Requests.getRequest("index");
+ putHeaders(getRequest, transportHeaders);
+ putContext(getRequest, transportContext);
+ assertHeaders(getRequest, transportHeaders);
+ client.get(getRequest);
+ assertHeaders(getRequest, expectedHeaders);
+ assertContext(getRequest, expectedContext);
+
+ IndexRequest indexRequest = Requests.indexRequest();
+ putHeaders(indexRequest, transportHeaders);
+ putContext(indexRequest, transportContext);
+ assertHeaders(indexRequest, transportHeaders);
+ client.index(indexRequest);
+ assertHeaders(indexRequest, expectedHeaders);
+ assertContext(indexRequest, expectedContext);
+ }
+ }
+
+ @Test
+ public void testCopyHeadersClusterAdminRequest() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ HashMap<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, expectedContext), usefulRestHeaders)) {
+
+ ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest();
+ putHeaders(clusterHealthRequest, transportHeaders);
+ putContext(clusterHealthRequest, transportContext);
+ assertHeaders(clusterHealthRequest, transportHeaders);
+ client.admin().cluster().health(clusterHealthRequest);
+ assertHeaders(clusterHealthRequest, expectedHeaders);
+ assertContext(clusterHealthRequest, expectedContext);
+
+ ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest();
+ putHeaders(clusterStateRequest, transportHeaders);
+ putContext(clusterStateRequest, transportContext);
+ assertHeaders(clusterStateRequest, transportHeaders);
+ client.admin().cluster().state(clusterStateRequest);
+ assertHeaders(clusterStateRequest, expectedHeaders);
+ assertContext(clusterStateRequest, expectedContext);
+
+ ClusterStatsRequest clusterStatsRequest = Requests.clusterStatsRequest();
+ putHeaders(clusterStatsRequest, transportHeaders);
+ putContext(clusterStatsRequest, transportContext);
+ assertHeaders(clusterStatsRequest, transportHeaders);
+ client.admin().cluster().clusterStats(clusterStatsRequest);
+ assertHeaders(clusterStatsRequest, expectedHeaders);
+ assertContext(clusterStatsRequest, expectedContext);
+ }
+ }
+
+ @Test
+ public void testCopyHeadersIndicesAdminRequest() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ HashMap<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) {
+
+ CreateIndexRequest createIndexRequest = Requests.createIndexRequest("test");
+ putHeaders(createIndexRequest, transportHeaders);
+ putContext(createIndexRequest, transportContext);
+ assertHeaders(createIndexRequest, transportHeaders);
+ client.admin().indices().create(createIndexRequest);
+ assertHeaders(createIndexRequest, expectedHeaders);
+ assertContext(createIndexRequest, expectedContext);
+
+ CloseIndexRequest closeIndexRequest = Requests.closeIndexRequest("test");
+ putHeaders(closeIndexRequest, transportHeaders);
+ putContext(closeIndexRequest, transportContext);
+ assertHeaders(closeIndexRequest, transportHeaders);
+ client.admin().indices().close(closeIndexRequest);
+ assertHeaders(closeIndexRequest, expectedHeaders);
+ assertContext(closeIndexRequest, expectedContext);
+
+ FlushRequest flushRequest = Requests.flushRequest();
+ putHeaders(flushRequest, transportHeaders);
+ putContext(flushRequest, transportContext);
+ assertHeaders(flushRequest, transportHeaders);
+ client.admin().indices().flush(flushRequest);
+ assertHeaders(flushRequest, expectedHeaders);
+ assertContext(flushRequest, expectedContext);
+ }
+ }
+
+ @Test
+ public void testCopyHeadersRequestBuilder() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ HashMap<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) {
+
+ ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{
+ client.prepareIndex("index", "type"),
+ client.prepareGet("index", "type", "id"),
+ client.prepareBulk(),
+ client.prepareDelete(),
+ client.prepareIndex(),
+ client.prepareClearScroll(),
+ client.prepareMultiGet(),
+ };
+
+ for (ActionRequestBuilder requestBuilder : requestBuilders) {
+ putHeaders(requestBuilder.request(), transportHeaders);
+ putContext(requestBuilder.request(), transportContext);
+ assertHeaders(requestBuilder.request(), transportHeaders);
+ requestBuilder.get();
+ assertHeaders(requestBuilder.request(), expectedHeaders);
+ assertContext(requestBuilder.request(), expectedContext);
+ }
+ }
+ }
+
+ @Test
+ public void testCopyHeadersClusterAdminRequestBuilder() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ HashMap<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) {
+
+ ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{
+ client.admin().cluster().prepareNodesInfo(),
+ client.admin().cluster().prepareClusterStats(),
+ client.admin().cluster().prepareState(),
+ client.admin().cluster().prepareCreateSnapshot("repo", "name"),
+ client.admin().cluster().prepareHealth(),
+ client.admin().cluster().prepareReroute()
+ };
+
+ for (ActionRequestBuilder requestBuilder : requestBuilders) {
+ putHeaders(requestBuilder.request(), transportHeaders);
+ putContext(requestBuilder.request(), transportContext);
+ assertHeaders(requestBuilder.request(), transportHeaders);
+ requestBuilder.get();
+ assertHeaders(requestBuilder.request(), expectedHeaders);
+ assertContext(requestBuilder.request(), expectedContext);
+ }
+ }
+ }
+
+ @Test
+ public void testCopyHeadersIndicesAdminRequestBuilder() {
+ Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
+ Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
+ Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
+ usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
+ Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
+ Map<String, String> transportContext = Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();
+
+ HashMap<String, String> expectedHeaders = new HashMap<>();
+ expectedHeaders.putAll(transportHeaders);
+ expectedHeaders.putAll(copiedHeaders);
+
+ Map<String, String> expectedContext = new HashMap<>();
+ expectedContext.putAll(transportContext);
+ expectedContext.putAll(restContext);
+
+ try (Client client = client(new NoOpClient(getTestName()), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders)) {
+
+ ActionRequestBuilder requestBuilders[] = new ActionRequestBuilder[]{
+ client.admin().indices().prepareValidateQuery(),
+ client.admin().indices().prepareCreate("test"),
+ client.admin().indices().prepareAliases(),
+ client.admin().indices().prepareAnalyze("text"),
+ client.admin().indices().prepareDeleteWarmer(),
+ client.admin().indices().prepareTypesExists("type"),
+ client.admin().indices().prepareClose()
+ };
+
+ for (ActionRequestBuilder requestBuilder : requestBuilders) {
+ putHeaders(requestBuilder.request(), transportHeaders);
+ putContext(requestBuilder.request(), transportContext);
+ assertHeaders(requestBuilder.request(), transportHeaders);
+ requestBuilder.get();
+ assertHeaders(requestBuilder.request(), expectedHeaders);
+ assertContext(requestBuilder.request(), expectedContext);
+ }
+ }
+ }
+
+ private static Map<String, String> randomHeaders(int count) {
+ return randomMap(count, "header-");
+ }
+
+ private static Map<String, String> randomContext(int count) {
+ return randomMap(count, "context-");
+ }
+
+ private static Map<String, String> randomMap(int count, String prefix) {
+ Map<String, String> headers = new HashMap<>();
+ for (int i = 0; i < count; i++) {
+ headers.put(prefix + randomInt(30), randomAsciiOfLength(10));
+ }
+ return headers;
+ }
+
+ private static Map<String, String> randomHeadersFrom(Map<String, String> headers) {
+ Map<String, String> newHeaders = new HashMap<>();
+ if (headers.isEmpty()) {
+ return newHeaders;
+ }
+ int i = randomInt(headers.size() - 1);
+ for (Map.Entry<String, String> entry : headers.entrySet()) {
+ if (randomInt(i) == 0) {
+ newHeaders.put(entry.getKey(), entry.getValue());
+ }
+ }
+ return newHeaders;
+ }
+
+ private static Client client(Client noOpClient, RestRequest restRequest, Set<String> usefulRestHeaders) {
+ return new BaseRestHandler.HeadersAndContextCopyClient(noOpClient, restRequest, usefulRestHeaders);
+ }
+
+ private static void putHeaders(ActionRequest<?> request, Map<String, String> headers) {
+ for (Map.Entry<String, String> header : headers.entrySet()) {
+ request.putHeader(header.getKey(), header.getValue());
+ }
+ }
+
+ private static void putContext(ActionRequest<?> request, Map<String, String> context) {
+ for (Map.Entry<String, String> header : context.entrySet()) {
+ request.putInContext(header.getKey(), header.getValue());
+ }
+ }
+
+ private static void assertHeaders(ActionRequest<?> request, Map<String, String> headers) {
+ if (headers.size() == 0) {
+ assertThat(request.getHeaders() == null || request.getHeaders().size() == 0, equalTo(true));
+ } else {
+ assertThat(request.getHeaders(), notNullValue());
+ assertThat(request.getHeaders().size(), equalTo(headers.size()));
+ for (String key : request.getHeaders()) {
+ assertThat(headers.get(key), equalTo(request.getHeader(key)));
+ }
+ }
+ }
+
+ private static void assertContext(ActionRequest<?> request, Map<String, String> context) {
+ if (context.size() == 0) {
+ assertThat(request.isContextEmpty(), is(true));
+ } else {
+ ImmutableOpenMap map = request.getContext();
+ assertThat(map, notNullValue());
+ assertThat(map.size(), equalTo(context.size()));
+ for (Object key : map.keys()) {
+ assertThat(context.get(key), equalTo(request.getFromContext(key)));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/NoOpClient.java b/core/src/test/java/org/elasticsearch/rest/NoOpClient.java
new file mode 100644
index 0000000000..245bdb96a3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/NoOpClient.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.TimeUnit;
+
+public class NoOpClient extends AbstractClient {
+
+ public NoOpClient(String testName) {
+ super(Settings.EMPTY, new ThreadPool(testName), Headers.EMPTY);
+ }
+
+ @Override
+ protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ listener.onResponse(null);
+ }
+
+ @Override
+ public void close() {
+ try {
+ ThreadPool.terminate(threadPool(), 10, TimeUnit.SECONDS);
+ } catch (Throwable t) {
+ throw new ElasticsearchException(t.getMessage(), t);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java
new file mode 100644
index 0000000000..087bc30632
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/RestFilterChainTests.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class RestFilterChainTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testRestFilters() throws InterruptedException {
+
+ RestController restController = new RestController(Settings.EMPTY);
+
+ int numFilters = randomInt(10);
+ Set<Integer> orders = new HashSet<>(numFilters);
+ while (orders.size() < numFilters) {
+ orders.add(randomInt(10));
+ }
+
+ List<RestFilter> filters = new ArrayList<>();
+ for (Integer order : orders) {
+ TestFilter testFilter = new TestFilter(order, randomFrom(Operation.values()));
+ filters.add(testFilter);
+ restController.registerFilter(testFilter);
+ }
+
+ ArrayList<RestFilter> restFiltersByOrder = Lists.newArrayList(filters);
+ Collections.sort(restFiltersByOrder, new Comparator<RestFilter>() {
+ @Override
+ public int compare(RestFilter o1, RestFilter o2) {
+ return Integer.compare(o1.order(), o2.order());
+ }
+ });
+
+ List<RestFilter> expectedRestFilters = Lists.newArrayList();
+ for (RestFilter filter : restFiltersByOrder) {
+ TestFilter testFilter = (TestFilter) filter;
+ expectedRestFilters.add(testFilter);
+ if (!(testFilter.callback == Operation.CONTINUE_PROCESSING) ) {
+ break;
+ }
+ }
+
+ restController.registerHandler(RestRequest.Method.GET, "/", new RestHandler() {
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) throws Exception {
+ channel.sendResponse(new TestResponse());
+ }
+ });
+
+ FakeRestRequest fakeRestRequest = new FakeRestRequest();
+ FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, 1);
+ restController.dispatchRequest(fakeRestRequest, fakeRestChannel);
+ assertThat(fakeRestChannel.await(), equalTo(true));
+
+
+ List<TestFilter> testFiltersByLastExecution = Lists.newArrayList();
+ for (RestFilter restFilter : filters) {
+ testFiltersByLastExecution.add((TestFilter)restFilter);
+ }
+ Collections.sort(testFiltersByLastExecution, new Comparator<TestFilter>() {
+ @Override
+ public int compare(TestFilter o1, TestFilter o2) {
+ return Long.compare(o1.executionToken, o2.executionToken);
+ }
+ });
+
+ ArrayList<TestFilter> finalTestFilters = Lists.newArrayList();
+ for (RestFilter filter : testFiltersByLastExecution) {
+ TestFilter testFilter = (TestFilter) filter;
+ finalTestFilters.add(testFilter);
+ if (!(testFilter.callback == Operation.CONTINUE_PROCESSING) ) {
+ break;
+ }
+ }
+
+ assertThat(finalTestFilters.size(), equalTo(expectedRestFilters.size()));
+
+ for (int i = 0; i < finalTestFilters.size(); i++) {
+ TestFilter testFilter = finalTestFilters.get(i);
+ assertThat(testFilter, equalTo(expectedRestFilters.get(i)));
+ assertThat(testFilter.runs.get(), equalTo(1));
+ }
+ }
+
+ @Test
+ public void testTooManyContinueProcessing() throws InterruptedException {
+
+ final int additionalContinueCount = randomInt(10);
+
+ TestFilter testFilter = new TestFilter(randomInt(), new Callback() {
+ @Override
+ public void execute(final RestRequest request, final RestChannel channel, final RestFilterChain filterChain) throws Exception {
+ for (int i = 0; i <= additionalContinueCount; i++) {
+ filterChain.continueProcessing(request, channel);
+ }
+ }
+ });
+
+ RestController restController = new RestController(Settings.EMPTY);
+ restController.registerFilter(testFilter);
+
+ restController.registerHandler(RestRequest.Method.GET, "/", new RestHandler() {
+ @Override
+ public void handleRequest(RestRequest request, RestChannel channel) throws Exception {
+ channel.sendResponse(new TestResponse());
+ }
+ });
+
+ FakeRestRequest fakeRestRequest = new FakeRestRequest();
+ FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, additionalContinueCount + 1);
+ restController.dispatchRequest(fakeRestRequest, fakeRestChannel);
+ fakeRestChannel.await();
+
+ assertThat(testFilter.runs.get(), equalTo(1));
+
+ assertThat(fakeRestChannel.responses.get(), equalTo(1));
+ assertThat(fakeRestChannel.errors.get(), equalTo(additionalContinueCount));
+ }
+
+ private static class FakeRestChannel extends RestChannel {
+
+ private final CountDownLatch latch;
+ AtomicInteger responses = new AtomicInteger();
+ AtomicInteger errors = new AtomicInteger();
+
+ protected FakeRestChannel(RestRequest request, int responseCount) {
+ super(request, randomBoolean());
+ this.latch = new CountDownLatch(responseCount);
+ }
+
+ @Override
+ public XContentBuilder newBuilder() throws IOException {
+ return super.newBuilder();
+ }
+
+ @Override
+ public XContentBuilder newErrorBuilder() throws IOException {
+ return super.newErrorBuilder();
+ }
+
+ @Override
+ public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException {
+ return super.newBuilder(autoDetectSource, useFiltering);
+ }
+
+ @Override
+ protected BytesStreamOutput newBytesOutput() {
+ return super.newBytesOutput();
+ }
+
+ @Override
+ public RestRequest request() {
+ return super.request();
+ }
+
+ @Override
+ public void sendResponse(RestResponse response) {
+ if (response.status() == RestStatus.OK) {
+ responses.incrementAndGet();
+ } else {
+ errors.incrementAndGet();
+ }
+ latch.countDown();
+ }
+
+ public boolean await() throws InterruptedException {
+ return latch.await(10, TimeUnit.SECONDS);
+ }
+ }
+
+ private static enum Operation implements Callback {
+ CONTINUE_PROCESSING {
+ @Override
+ public void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception {
+ filterChain.continueProcessing(request, channel);
+ }
+ },
+ CHANNEL_RESPONSE {
+ @Override
+ public void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception {
+ channel.sendResponse(new TestResponse());
+ }
+ }
+ }
+
+ private static interface Callback {
+ void execute(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception;
+ }
+
+ private final AtomicInteger counter = new AtomicInteger();
+
+ private class TestFilter extends RestFilter {
+ private final int order;
+ private final Callback callback;
+ AtomicInteger runs = new AtomicInteger();
+ volatile int executionToken = Integer.MAX_VALUE; //the filters that don't run will go last in the sorted list
+
+ TestFilter(int order, Callback callback) {
+ this.order = order;
+ this.callback = callback;
+ }
+
+ @Override
+ public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws Exception {
+ this.runs.incrementAndGet();
+ this.executionToken = counter.incrementAndGet();
+ this.callback.execute(request, channel, filterChain);
+ }
+
+ @Override
+ public int order() {
+ return order;
+ }
+
+ @Override
+ public String toString() {
+ return "[order:" + order + ", executionToken:" + executionToken + "]";
+ }
+ }
+
+ private static class TestResponse extends RestResponse {
+ @Override
+ public String contentType() {
+ return null;
+ }
+
+ @Override
+ public BytesReference content() {
+ return null;
+ }
+
+ @Override
+ public RestStatus status() {
+ return RestStatus.OK;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/core/src/test/java/org/elasticsearch/rest/RestRequestTests.java
new file mode 100644
index 0000000000..1565455b60
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/RestRequestTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class RestRequestTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testContext() throws Exception {
+ int count = randomInt(10);
+ Request request = new Request();
+ for (int i = 0; i < count; i++) {
+ request.putInContext("key" + i, "val" + i);
+ }
+ assertThat(request.isContextEmpty(), is(count == 0));
+ assertThat(request.contextSize(), is(count));
+ ImmutableOpenMap<Object, Object> ctx = request.getContext();
+ for (int i = 0; i < count; i++) {
+ assertThat(request.hasInContext("key" + i), is(true));
+ assertThat((String) request.getFromContext("key" + i), equalTo("val" + i));
+ assertThat((String) ctx.get("key" + i), equalTo("val" + i));
+ }
+ }
+
+ public static class Request extends RestRequest {
+ @Override
+ public Method method() {
+ return null;
+ }
+
+ @Override
+ public String uri() {
+ return null;
+ }
+
+ @Override
+ public String rawPath() {
+ return null;
+ }
+
+ @Override
+ public boolean hasContent() {
+ return false;
+ }
+
+ @Override
+ public BytesReference content() {
+ return null;
+ }
+
+ @Override
+ public String header(String name) {
+ return null;
+ }
+
+ @Override
+ public Iterable<Map.Entry<String, String>> headers() {
+ return null;
+ }
+
+ @Override
+ public boolean hasParam(String key) {
+ return false;
+ }
+
+ @Override
+ public String param(String key) {
+ return null;
+ }
+
+ @Override
+ public Map<String, String> params() {
+ return null;
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ return null;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java
new file mode 100644
index 0000000000..3fd6bf4c5f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeReallyOldIndexTest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.upgrade;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.bwcompat.StaticIndexBackwardCompatibilityTest;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.IndicesService;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+public class UpgradeReallyOldIndexTest extends StaticIndexBackwardCompatibilityTest {
+
+ public void testUpgrade_0_90_6() throws Exception {
+ String indexName = "index-0.90.6";
+
+ loadIndex(indexName);
+ assertMinVersion(indexName, org.apache.lucene.util.Version.parse("4.5.1"));
+ UpgradeTest.assertNotUpgraded(client(), indexName);
+ assertTrue(UpgradeTest.hasAncientSegments(client(), indexName));
+ assertNoFailures(client().admin().indices().prepareUpgrade(indexName).setUpgradeOnlyAncientSegments(true).get());
+
+ assertFalse(UpgradeTest.hasAncientSegments(client(), "index-0.90.6"));
+ // This index has only ancient segments, so it should now be fully upgraded:
+ UpgradeTest.assertUpgraded(client(), indexName);
+ assertEquals(Version.CURRENT.luceneVersion.toString(), client().admin().indices().prepareGetSettings(indexName).get().getSetting(indexName, IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE));
+ assertMinVersion(indexName, Version.CURRENT.luceneVersion);
+ }
+
+ private void assertMinVersion(String index, org.apache.lucene.util.Version version) {
+ for (IndicesService services : internalCluster().getInstances(IndicesService.class)) {
+ IndexService indexService = services.indexService(index);
+ if (indexService != null) {
+ assertEquals(version, indexService.shard(0).minimumCompatibleVersion());
+ }
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java
new file mode 100644
index 0000000000..3b9f8c19b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/upgrade/UpgradeTest.java
@@ -0,0 +1,268 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.admin.indices.upgrade;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.segments.IndexSegments;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.upgrade.get.IndexUpgradeStatus;
+import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.BeforeClass;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) // test scope since we set cluster wide settings
+public class UpgradeTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ @BeforeClass
+ public static void checkUpgradeVersion() {
+ final boolean luceneVersionMatches = (globalCompatibilityVersion().luceneVersion.major == Version.CURRENT.luceneVersion.major
+ && globalCompatibilityVersion().luceneVersion.minor == Version.CURRENT.luceneVersion.minor);
+ assumeFalse("lucene versions must be different to run upgrade test", luceneVersionMatches);
+ }
+
+ @Override
+ protected int minExternalNodes() {
+ return 2;
+ }
+
+ public void testUpgrade() throws Exception {
+ // allow the cluster to rebalance quickly - 2 concurrent rebalance are default we can do higher
+ Settings.Builder builder = Settings.builder();
+ builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 100);
+ client().admin().cluster().prepareUpdateSettings().setPersistentSettings(builder).get();
+
+ int numIndexes = randomIntBetween(2, 4);
+ String[] indexNames = new String[numIndexes];
+ for (int i = 0; i < numIndexes; ++i) {
+ final String indexName = "test" + i;
+ indexNames[i] = indexName;
+
+ Settings settings = Settings.builder()
+ .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern())
+ // don't allow any merges so that we can check segments are upgraded
+ // by the upgrader, and not just regular merging
+ .put("index.merge.policy.segments_per_tier", 1000000f)
+ .put(indexSettings())
+ .build();
+
+ assertAcked(prepareCreate(indexName).setSettings(settings));
+ ensureGreen(indexName);
+ assertAllShardsOnNodes(indexName, backwardsCluster().backwardsNodePattern());
+
+ int numDocs = scaledRandomIntBetween(100, 1000);
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ for (int j = 0; j < numDocs; ++j) {
+ String id = Integer.toString(j);
+ docs.add(client().prepareIndex(indexName, "type1", id).setSource("text", "sometext"));
+ }
+ indexRandom(true, docs);
+ ensureGreen(indexName);
+ if (globalCompatibilityVersion().before(Version.V_1_4_0_Beta1)) {
+ // before 1.4 and the wait_if_ongoing flag, flushes could fail randomly, so we
+ // need to continue to try flushing until all shards succeed
+ assertTrue(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ return flush(indexName).getFailedShards() == 0;
+ }
+ }));
+ } else {
+ assertEquals(0, flush(indexName).getFailedShards());
+ }
+
+ // index more docs that won't be flushed
+ numDocs = scaledRandomIntBetween(100, 1000);
+ docs = new ArrayList<>();
+ for (int j = 0; j < numDocs; ++j) {
+ String id = Integer.toString(j);
+ docs.add(client().prepareIndex(indexName, "type2", id).setSource("text", "someothertext"));
+ }
+ indexRandom(true, docs);
+ ensureGreen(indexName);
+ }
+ logger.debug("--> Upgrading nodes");
+ backwardsCluster().allowOnAllNodes(indexNames);
+ ensureGreen();
+ // disable allocation entirely until all nodes are upgraded
+ builder = Settings.builder();
+ builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE);
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get();
+ backwardsCluster().upgradeAllNodes();
+ builder = Settings.builder();
+ // disable rebalanceing entirely for the time being otherwise we might get relocations / rebalance from nodes with old segments
+ builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE);
+ builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.ALL);
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get();
+ ensureGreen();
+ logger.info("--> Nodes upgrade complete");
+ logSegmentsState();
+
+ assertNotUpgraded(client());
+ final String indexToUpgrade = "test" + randomInt(numIndexes - 1);
+
+ // This test fires up another node running an older version of ES, but because wire protocol changes across major ES versions, it
+ // means we can never generate ancient segments in this test (unless Lucene major version bumps but ES major version does not):
+ assertFalse(hasAncientSegments(client(), indexToUpgrade));
+
+ logger.info("--> Running upgrade on index " + indexToUpgrade);
+ assertNoFailures(client().admin().indices().prepareUpgrade(indexToUpgrade).get());
+ awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ try {
+ return isUpgraded(client(), indexToUpgrade);
+ } catch (Exception e) {
+ throw ExceptionsHelper.convertToRuntime(e);
+ }
+ }
+ });
+ logger.info("--> Single index upgrade complete");
+
+ logger.info("--> Running upgrade on the rest of the indexes");
+ assertNoFailures(client().admin().indices().prepareUpgrade().get());
+ logSegmentsState();
+ logger.info("--> Full upgrade complete");
+ assertUpgraded(client());
+ }
+
+ public static void assertNotUpgraded(Client client, String... index) throws Exception {
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
+ // TODO: it would be better for this to be strictly greater, but sometimes an extra flush
+ // mysteriously happens after the second round of docs are indexed
+ assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
+ status.getTotalBytes() >= status.getToUpgradeBytes());
+ assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
+ }
+ }
+
+ public static void assertNoAncientSegments(Client client, String... index) throws Exception {
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
+ // TODO: it would be better for this to be strictly greater, but sometimes an extra flush
+ // mysteriously happens after the second round of docs are indexed
+ assertTrue("index " + status.getIndex() + " should not have any ancient segments",
+ status.getToUpgradeBytesAncient() == 0);
+ assertTrue("index " + status.getIndex() + " should have recovered some segments from transaction log",
+ status.getTotalBytes() >= status.getToUpgradeBytes());
+ assertTrue("index " + status.getIndex() + " should need upgrading", status.getToUpgradeBytes() != 0);
+ }
+ }
+
+ /** Returns true if there are any ancient segments. */
+ public static boolean hasAncientSegments(Client client, String index) throws Exception {
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ if (status.getToUpgradeBytesAncient() != 0) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /** Returns true if there are any old but not ancient segments. */
+ public static boolean hasOldButNotAncientSegments(Client client, String index) throws Exception {
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ if (status.getToUpgradeBytes() > status.getToUpgradeBytesAncient()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public static void assertUpgraded(Client client, String... index) throws Exception {
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ assertTrue("index " + status.getIndex() + " should not be zero sized", status.getTotalBytes() != 0);
+ assertEquals("index " + status.getIndex() + " should be upgraded",
+ 0, status.getToUpgradeBytes());
+ }
+
+ // double check using the segments api that all segments are actually upgraded
+ IndicesSegmentResponse segsRsp;
+ if (index == null) {
+ segsRsp = client().admin().indices().prepareSegments().execute().actionGet();
+ } else {
+ segsRsp = client().admin().indices().prepareSegments(index).execute().actionGet();
+ }
+ for (IndexSegments indexSegments : segsRsp.getIndices().values()) {
+ for (IndexShardSegments shard : indexSegments) {
+ for (ShardSegments segs : shard.getShards()) {
+ for (Segment seg : segs.getSegments()) {
+ assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(),
+ Version.CURRENT.luceneVersion.major, seg.version.major);
+ assertEquals("Index " + indexSegments.getIndex() + " has unupgraded segment " + seg.toString(),
+ Version.CURRENT.luceneVersion.minor, seg.version.minor);
+ }
+ }
+ }
+ }
+ }
+
+ static boolean isUpgraded(Client client, String index) throws Exception {
+ ESLogger logger = Loggers.getLogger(UpgradeTest.class);
+ int toUpgrade = 0;
+ for (IndexUpgradeStatus status : getUpgradeStatus(client, index)) {
+ logger.info("Index: " + status.getIndex() + ", total: " + status.getTotalBytes() + ", toUpgrade: " + status.getToUpgradeBytes());
+ toUpgrade += status.getToUpgradeBytes();
+ }
+ return toUpgrade == 0;
+ }
+
+ static class UpgradeStatus {
+ public final String indexName;
+ public final int totalBytes;
+ public final int toUpgradeBytes;
+ public final int toUpgradeBytesAncient;
+
+ public UpgradeStatus(String indexName, int totalBytes, int toUpgradeBytes, int toUpgradeBytesAncient) {
+ this.indexName = indexName;
+ this.totalBytes = totalBytes;
+ this.toUpgradeBytes = toUpgradeBytes;
+ this.toUpgradeBytesAncient = toUpgradeBytesAncient;
+ assert toUpgradeBytesAncient <= toUpgradeBytes;
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ static Collection<IndexUpgradeStatus> getUpgradeStatus(Client client, String... indices) throws Exception {
+ UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(indices).get();
+ assertNoFailures(upgradeStatusResponse);
+ return upgradeStatusResponse.getIndices().values();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/action/support/RestTableTest.java b/core/src/test/java/org/elasticsearch/rest/action/support/RestTableTest.java
new file mode 100644
index 0000000000..3faa0ea367
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/action/support/RestTableTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.action.support;
+
+import org.elasticsearch.common.Table;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.rest.action.support.RestTable.buildDisplayHeaders;
+import static org.hamcrest.Matchers.*;
+
+public class RestTableTest extends ElasticsearchTestCase {
+
+ private Table table = new Table();
+ private FakeRestRequest restRequest = new FakeRestRequest();
+
+ @Before
+ public void setup() {
+ table.startHeaders();
+ table.addCell("bulk.foo", "alias:f;desc:foo");
+ table.addCell("bulk.bar", "alias:b;desc:bar");
+ // should be matched as well due to the aliases
+ table.addCell("aliasedBulk", "alias:bulkWhatever;desc:bar");
+ table.addCell("aliasedSecondBulk", "alias:foobar,bulkolicious,bulkotastic;desc:bar");
+ // no match
+ table.addCell("unmatched", "alias:un.matched;desc:bar");
+ // invalid alias
+ table.addCell("invalidAliasesBulk", "alias:,,,;desc:bar");
+ table.endHeaders();
+ }
+
+ @Test
+ public void testThatDisplayHeadersSupportWildcards() throws Exception {
+ restRequest.params().put("h", "bulk*");
+ List<RestTable.DisplayHeader> headers = buildDisplayHeaders(table, restRequest);
+
+ List<String> headerNames = getHeaderNames(headers);
+ assertThat(headerNames, contains("bulk.foo", "bulk.bar", "aliasedBulk", "aliasedSecondBulk"));
+ assertThat(headerNames, not(hasItem("unmatched")));
+ }
+
+ @Test
+ public void testThatDisplayHeadersAreNotAddedTwice() throws Exception {
+ restRequest.params().put("h", "nonexistent,bulk*,bul*");
+ List<RestTable.DisplayHeader> headers = buildDisplayHeaders(table, restRequest);
+
+ List<String> headerNames = getHeaderNames(headers);
+ assertThat(headerNames, contains("bulk.foo", "bulk.bar", "aliasedBulk", "aliasedSecondBulk"));
+ assertThat(headerNames, not(hasItem("unmatched")));
+ }
+
+ private List<String> getHeaderNames(List<RestTable.DisplayHeader> headers) {
+ List<String> headerNames = new ArrayList<>();
+ for (RestTable.DisplayHeader header : headers) {
+ headerNames.add(header.name);
+ }
+
+ return headerNames;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java b/core/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
new file mode 100644
index 0000000000..b3bf486b1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/rest/util/RestUtilsTests.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.rest.util;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.support.RestUtils;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Locale;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class RestUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testDecodeQueryString() {
+ Map<String, String> params = newHashMap();
+
+ String uri = "something?test=value";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("test"), equalTo("value"));
+
+ params.clear();
+ uri = "something?test=value&test1=value1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("test"), equalTo("value"));
+ assertThat(params.get("test1"), equalTo("value1"));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, uri.length(), params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something";
+ RestUtils.decodeQueryString(uri, -1, params);
+ assertThat(params.size(), equalTo(0));
+ }
+
+ @Test
+ public void testDecodeQueryStringEdgeCases() {
+ Map<String, String> params = newHashMap();
+
+ String uri = "something?";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?p=v&&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?&=";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(0));
+
+ params.clear();
+ uri = "something?a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(1));
+ assertThat(params.get("a"), equalTo(""));
+
+ params.clear();
+ uri = "something?p=v&a";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(2));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+
+ params.clear();
+ uri = "something?p=v&a&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(3));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+
+ params.clear();
+ uri = "something?p=v&a&b&p1=v1";
+ RestUtils.decodeQueryString(uri, uri.indexOf('?') + 1, params);
+ assertThat(params.size(), equalTo(4));
+ assertThat(params.get("a"), equalTo(""));
+ assertThat(params.get("b"), equalTo(""));
+ assertThat(params.get("p"), equalTo("v"));
+ assertThat(params.get("p1"), equalTo("v1"));
+ }
+
+ @Test
+ public void testCorsSettingIsARegex() {
+ assertCorsSettingRegex("/foo/", Pattern.compile("foo"));
+ assertCorsSettingRegex("/.*/", Pattern.compile(".*"));
+ assertCorsSettingRegex("/https?:\\/\\/localhost(:[0-9]+)?/", Pattern.compile("https?:\\/\\/localhost(:[0-9]+)?"));
+ assertCorsSettingRegexMatches("/https?:\\/\\/localhost(:[0-9]+)?/", true, "http://localhost:9200", "http://localhost:9215", "https://localhost:9200", "https://localhost");
+ assertCorsSettingRegexMatches("/https?:\\/\\/localhost(:[0-9]+)?/", false, "htt://localhost:9200", "http://localhost:9215/foo", "localhost:9215");
+ assertCorsSettingRegexIsNull("//");
+ assertCorsSettingRegexIsNull("/");
+ assertCorsSettingRegexIsNull("/foo");
+ assertCorsSettingRegexIsNull("foo");
+ assertCorsSettingRegexIsNull("");
+ assertThat(RestUtils.getCorsSettingRegex(Settings.EMPTY), is(nullValue()));
+ }
+
+ private void assertCorsSettingRegexIsNull(String settingsValue) {
+ assertThat(RestUtils.getCorsSettingRegex(settingsBuilder().put("http.cors.allow-origin", settingsValue).build()), is(nullValue()));
+ }
+
+ private void assertCorsSettingRegex(String settingsValue, Pattern pattern) {
+ assertThat(RestUtils.getCorsSettingRegex(settingsBuilder().put("http.cors.allow-origin", settingsValue).build()).toString(), is(pattern.toString()));
+ }
+
+ private void assertCorsSettingRegexMatches(String settingsValue, boolean expectMatch, String ... candidates) {
+ Pattern pattern = RestUtils.getCorsSettingRegex(settingsBuilder().put("http.cors.allow-origin", settingsValue).build());
+ for (String candidate : candidates) {
+ assertThat(String.format(Locale.ROOT, "Expected pattern %s to match against %s: %s", settingsValue, candidate, expectMatch),
+ pattern.matcher(candidate).matches(), is(expectMatch));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/river/RiverTests.java b/core/src/test/java/org/elasticsearch/river/RiverTests.java
new file mode 100644
index 0000000000..1a3e0e7016
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/river/RiverTests.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.river;
+
+import com.google.common.base.Predicate;
+
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetRequestBuilder;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.river.dummy.DummyRiverModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+@AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates")
+public class RiverTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected void beforeIndexDeletion() {
+ }
+
+ @Test
+ public void testRiverStart() throws Exception {
+ startAndCheckRiverIsStarted("dummy-river-test");
+ }
+
+ @Test
+ public void testMultipleRiversStart() throws Exception {
+ int nbRivers = between(2,10);
+ logger.info("--> testing with {} rivers...", nbRivers);
+ Thread[] riverCreators = new Thread[nbRivers];
+ final CountDownLatch latch = new CountDownLatch(nbRivers);
+ final MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet();
+ for (int i = 0; i < nbRivers; i++) {
+ final String riverName = "dummy-river-test-" + i;
+ riverCreators[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ startRiver(riverName);
+ } catch (Throwable t) {
+ logger.warn("failed to register river {}", t, riverName);
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ riverCreators[i].start();
+ multiGetRequestBuilder.add(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status");
+ }
+
+ latch.await();
+
+ logger.info("--> checking that all rivers were created");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object obj) {
+ MultiGetResponse multiGetItemResponse = multiGetRequestBuilder.get();
+ for (MultiGetItemResponse getItemResponse : multiGetItemResponse) {
+ if (getItemResponse.isFailed() || !getItemResponse.getResponse().isExists()) {
+ return false;
+ }
+ }
+ return true;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithDefaultTemplate() throws Exception {
+ logger.info("--> create empty template");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-default-template-test");
+ }
+
+ /**
+ * Test case for https://github.com/elasticsearch/elasticsearch/issues/4577
+ * River does not start when using config/templates files
+ */
+ @Test
+ public void startDummyRiverWithSomeTemplates() throws Exception {
+ logger.info("--> create some templates");
+ client().admin().indices().preparePutTemplate("template_1")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping(MapperService.DEFAULT_MAPPING,
+ JsonXContent.contentBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING)
+ .endObject().endObject())
+ .get();
+ client().admin().indices().preparePutTemplate("template_2")
+ .setTemplate("*")
+ .setOrder(0)
+ .addMapping("atype",
+ JsonXContent.contentBuilder().startObject().startObject("atype")
+ .endObject().endObject())
+ .get();
+
+ startAndCheckRiverIsStarted("dummy-river-template-test");
+ }
+
+ /**
+ * Create a Dummy river then check it has been started. We will fail after 5 seconds.
+ * @param riverName Dummy river needed to be started
+ */
+ private void startAndCheckRiverIsStarted(final String riverName) throws InterruptedException {
+ startRiver(riverName);
+ checkRiverIsStarted(riverName);
+ }
+
+ private void startRiver(final String riverName) {
+ logger.info("--> starting river [{}]", riverName);
+ IndexResponse indexResponse = client().prepareIndex(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_meta")
+ .setSource("type", DummyRiverModule.class.getCanonicalName()).get();
+ assertTrue(indexResponse.isCreated());
+ ensureGreen();
+ }
+
+ private void checkRiverIsStarted(final String riverName) throws InterruptedException {
+ logger.info("--> checking that river [{}] was created", riverName);
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object obj) {
+ GetResponse response = client().prepareGet(RiverIndexName.Conf.DEFAULT_INDEX_NAME, riverName, "_status").get();
+ return response.isExists();
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
new file mode 100644
index 0000000000..9b2b11fdda
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.Set;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static com.google.common.collect.Sets.newHashSet;
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testResolveIndexRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias110").searchRouting("1,0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias12").routing("2")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias"), nullValue());
+
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "test1"), nullValue());
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias10"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias20"), equalTo("0"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting(null, "alias21"), equalTo("1"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("3", "test1"), equalTo("3"));
+ assertThat(clusterService().state().metaData().resolveIndexRouting("0", "alias10"), equalTo("0"));
+ try {
+ clusterService().state().metaData().resolveIndexRouting("1", "alias10");
+ fail("should fail");
+ } catch (IllegalArgumentException e) {
+ // all is well, we can't have two routings, one provided, and one in the alias
+ }
+
+ try {
+ clusterService().state().metaData().resolveIndexRouting(null, "alias0");
+ fail("should fail");
+ } catch (IllegalArgumentException ex) {
+ // Expected
+ }
+ }
+
+
+ @Test
+ public void testResolveSearchRouting() throws Exception {
+ createIndex("test1");
+ createIndex("test2");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias10").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias20").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias21").routing("1")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test1", "alias0").routing("0")).execute().actionGet();
+ client().admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test2", "alias0").routing("0")).execute().actionGet();
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", "alias"), equalTo(newMap("test1", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", "alias10"), equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", "alias10"), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias20", "alias21"}),
+ equalTo(newMap("test2", newSet("0", "1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"test1", "alias10"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting(null, new String[]{"alias10", "test1"}), nullValue());
+
+
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1", new String[]{"alias10", "alias20"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias20"}), nullValue());
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("1", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
+ assertThat(clusterService().state().metaData().resolveSearchRouting("0,1,2", new String[]{"test1", "alias10", "alias21"}),
+ equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1"))));
+ }
+
+ private <T> Set<T> newSet(T... elements) {
+ return newHashSet(elements);
+ }
+
+
+ private <K, V> Map<K, V> newMap(K key, V value) {
+ Map<K, V> r = newHashMap();
+ r.put(key, value);
+ return r;
+ }
+
+ private <K, V> Map<K, V> newMap(K key1, V value1, K key2, V value2) {
+ Map<K, V> r = newHashMap();
+ r.put(key1, value1);
+ r.put(key2, value2);
+ return r;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java b/core/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
new file mode 100644
index 0000000000..ff2a006a04
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
@@ -0,0 +1,356 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class AliasRoutingTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int minimumNumberOfShards() {
+ return 2;
+ }
+
+ @Test
+ public void testAliasCrudRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ assertAcked(admin().indices().prepareAliases().addAliasAction(newAddAliasAction("test", "alias0").routing("0")));
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> verifying get with routing alias, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> updating with id [1] and routing through alias");
+ client().prepareUpdate("alias0", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript(new Script("ctx._source.field = 'value2'", ScriptService.ScriptType.INLINE, null, null))
+ .execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().getSourceAsMap().get("field").toString(), equalTo("value2"));
+ }
+
+
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing alias, should delete");
+ client().prepareDelete("alias0", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testAliasSearchRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias"))
+ .addAliasAction(newAddAliasAction("test", "alias0").routing("0"))
+ .addAliasAction(newAddAliasAction("test", "alias1").routing("1"))
+ .addAliasAction(newAddAliasAction("test", "alias01").searchRouting("0,1")));
+
+ logger.info("--> indexing with id [1], and routing [0] using alias");
+ client().prepareIndex("alias0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> search with no routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1] using alias");
+ client().prepareIndex("alias1", "type1", "2").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+ logger.info("--> search with no routing, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ assertThat(client().prepareSearch("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 0,1 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ assertThat(client().prepareSearch("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with two routing aliases , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias0, alias1 and alias01, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias0", "alias1", "alias01").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with test, alias0 and alias1, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("test", "alias0", "alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ }
+
+ @Test
+ public void testAliasSearchRoutingWithTwoIndices() throws Exception {
+ createIndex("test-a");
+ createIndex("test-b");
+ ensureGreen();
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test-a", "alias-a0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-a1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b0").routing("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-b1").routing("1"))
+ .addAliasAction(newAddAliasAction("test-a", "alias-ab").searchRouting("0"))
+ .addAliasAction(newAddAliasAction("test-b", "alias-ab").searchRouting("1")));
+ ensureGreen(); // wait for events again to make sure we got the aliases on all nodes
+ logger.info("--> indexing with id [1], and routing [0] using alias to test-a");
+ client().prepareIndex("alias-a0", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-a0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> indexing with id [0], and routing [1] using alias to test-b");
+ client().prepareIndex("alias-b1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test-a", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("alias-b1", "type1", "1").execute().actionGet().isExists(), equalTo(true));
+ }
+
+
+ logger.info("--> search with alias-a1,alias-b0, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias-a1", "alias-b0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with alias-ab, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-ab").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with alias-a0,alias-b1 should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias-a0", "alias-b1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/issues/2682
+ Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
+ to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShards.
+ That affected the number of shards that we executed the search on, thus some documents were missing in the search results.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue2682() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+
+ logger.info("--> search all on index_* should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("index_*").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ }
+ }
+
+ /*
+ See https://github.com/elasticsearch/elasticsearch/pull/3268
+ Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
+ to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShardsCount.
+ That could cause returning 1, which led to forcing the QUERY_AND_FETCH mode.
+ As a result, (size * number of hit shards) results were returned and no reduce phase was taking place.
+ */
+ @Test
+ public void testAliasSearchRoutingWithConcreteAndAliasedIndices_issue3268() throws Exception {
+ createIndex("index", "index_2");
+ ensureGreen();
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("index", "index_1").routing("1")));
+
+ logger.info("--> indexing on index_1 which is an alias for index with routing [1]");
+ client().prepareIndex("index_1", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> indexing on index_2 which is a concrete index");
+ client().prepareIndex("index_2", "type2", "2").setSource("field", "value2").setRefresh(true).execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch("index_*").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(1).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+
+ logger.info("--> search all on index_* should find two");
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2L));
+ //Let's make sure that, even though 2 docs are available, only one is returned according to the size we set in the request
+ //Therefore the reduce phase has taken place, which proves that the QUERY_AND_FETCH search type wasn't erroneously forced.
+ assertThat(searchResponse.getHits().getHits().length, equalTo(1));
+ }
+
+ @Test
+ public void testIndexingAliasesOverTime() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ logger.info("--> creating alias with routing [3]");
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("3")));
+
+ logger.info("--> indexing with id [0], and routing [3]");
+ client().prepareIndex("alias", "type1", "0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> creating alias with routing [4]");
+ assertAcked(admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").routing("4")));
+
+ logger.info("--> verifying search with wrong routing should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> creating alias with search routing [3,4] and index routing 4");
+ assertAcked(client().admin().indices().prepareAliases()
+ .addAliasAction(newAddAliasAction("test", "alias").searchRouting("3,4").indexRouting("4")));
+
+ logger.info("--> indexing with id [1], and routing [4]");
+ client().prepareIndex("alias", "type1", "1").setSource("field", "value2").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> verifying get and search with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "0").setRouting("3").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("4").execute().actionGet().isExists(), equalTo(true));
+ assertThat(client().prepareSearch("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount("alias").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
new file mode 100644
index 0000000000..cca1d2125a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.routing;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.RoutingMissingException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
+import org.elasticsearch.action.termvectors.TermVectorsRequest;
+import org.elasticsearch.action.termvectors.TermVectorsResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.*;
+
+public class SimpleRoutingTests extends ElasticsearchIntegrationTest {
+
+ // Routing tests are only meaningful with more than one shard: with a single
+ // shard every routing value hashes to the same shard and a "wrong routing"
+ // lookup could never miss. Force at least two shards for the whole suite.
+ @Override
+ protected int minimumNumberOfShards() {
+ return 2;
+ }
+
+ /**
+ * Index/delete a document with an explicit routing value and verify that
+ * get and delete only reach the document when the same routing is supplied:
+ * calls without routing must miss, calls with routing "0" must hit.
+ * Each verification is repeated 5 times — presumably to round-robin across
+ * shard copies so a stale replica would be caught; TODO confirm intent.
+ */
+ public void testSimpleCrudRouting() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ // A delete without routing goes to the wrong shard, so the document survives.
+ logger.info("--> deleting with no routing, should not delete anything");
+ client().prepareDelete("test", "type1", "1").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with routing, should delete");
+ client().prepareDelete("test", "type1", "1").setRouting("0").setRefresh(true).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+
+ // Re-index after the routed delete to prove the document can come back
+ // under the same id/routing and behaves identically.
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ /**
+ * Search-side routing: a search with no routing sees everything, a search
+ * routed to the wrong value sees nothing, and multi-value / duplicate
+ * routing params ("0,1", "0,1,0") behave like the de-duplicated set.
+ * Each check is repeated 5 times — presumably to round-robin across shard
+ * copies; TODO confirm intent.
+ */
+ public void testSimpleSearchRouting() {
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ logger.info("--> verifying get with no routing, should not find anything");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ // Fixed typo in the log message: "should fine one" -> "should find one".
+ logger.info("--> search with no routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ }
+
+ logger.info("--> search with wrong routing, should not find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(0l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(0l));
+ }
+
+ logger.info("--> search with correct routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> indexing with id [2], and routing [1]");
+ client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+
+ // Fixed typo in the log message: "should fine two" -> "should find two".
+ logger.info("--> search with no routing, should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ logger.info("--> search with 0 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 1 routing, should find one");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(1l));
+ assertThat(client().prepareCount().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(1l));
+ }
+
+ logger.info("--> search with 0,1 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+
+ // Duplicate routing values must not double-count hits.
+ logger.info("--> search with 0,1,0 routings , should find two");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareSearch().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(2l));
+ assertThat(client().prepareCount().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getCount(), equalTo(2l));
+ }
+ }
+
+ /**
+ * With {@code _routing.required=true} in the mapping, single-document
+ * index/get/delete calls that omit routing must be rejected with
+ * {@link RoutingMissingException} (HTTP 400), while a bulk delete without
+ * routing is broadcast to all shards and does delete the document.
+ */
+ public void testRequiredRoutingMapping() throws Exception {
+ client().admin().indices().prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ // NOTE(review): the next log line announces a verification that is never
+ // performed here — the no-routing get is only checked further below.
+ logger.info("--> verifying get with no routing, should fail");
+
+ logger.info("--> indexing with id [1], with no routing, should fail");
+ try {
+ client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ fail("index with missing routing when routing is required should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> deleting with no routing, should fail");
+ try {
+ client().prepareDelete(indexOrAlias(), "type1", "1").setRefresh(true).execute().actionGet();
+ fail("delete with missing routing when routing is required should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
+ }
+
+ // A get without routing is a hard error (400), not a miss; the document
+ // is still reachable with the correct routing.
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail("get with missing routing when routing is required should fail");
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").setRefresh(true).execute().actionGet();
+ // NOTE(review): another dangling log line — no verification follows it.
+ logger.info("--> verifying get with no routing, should not find anything");
+
+ logger.info("--> bulk deleting with no routing, should broadcast the delete since _routing is required");
+ client().prepareBulk().add(Requests.deleteRequest(indexOrAlias()).type("type1").id("1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ // The broadcast bulk delete really removed the document.
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
+ }
+ }
+
+ /**
+ * Routing extracted from a document field via the legacy
+ * {@code _routing.path} mapping (the index is pinned to version 1.4.2 —
+ * presumably because path-based routing was removed in a later version;
+ * TODO confirm). Indexing with an explicit routing that contradicts the
+ * extracted field value must fail with a MapperParsingException.
+ */
+ public void testRequiredRoutingWithPathMapping() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject().startObject("properties")
+ .startObject("routing_field").field("type", "string").field("index", randomBoolean() ? "no" : "not_analyzed").field("doc_values", randomBoolean() ? "yes" : "no").endObject().endObject()
+ .endObject().endObject())
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID)
+ .execute().actionGet();
+ ensureGreen();
+
+ // No setRouting() here: the routing value "0" comes from routing_field.
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+
+ logger.info("--> check failure with different routing");
+ try {
+ client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("1").setSource("field", "value1", "routing_field", "0").setRefresh(true).execute().actionGet();
+ fail();
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(MapperParsingException.class));
+ }
+
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ /**
+ * Same as {@code testRequiredRoutingWithPathMapping} but the document is
+ * indexed through the bulk API: the routing value must still be extracted
+ * from {@code routing_field}, and a get without routing must still fail.
+ */
+ public void testRequiredRoutingWithPathMappingBulk() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID)
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareBulk().add(
+ client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1", "routing_field", "0")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ /**
+ * Required routing with the bulk API and an explicit routing value
+ * (no path mapping): the bulk-indexed document is only reachable with
+ * routing "0"; a get without routing fails with 400.
+ */
+ public void testRequiredRoutingBulk() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).endObject()
+ .endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareBulk().add(
+ client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1")).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ /**
+ * Path-based routing where the routing field holds a numeric value (0,
+ * not "0"): extraction must coerce it so the document is found with
+ * routing "0", while a get without routing still fails with 400.
+ */
+ public void testRequiredRoutingWithPathNumericType() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_routing").field("required", true).field("path", "routing_field").endObject()
+ .endObject().endObject())
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2_ID)
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "1").setSource("field", "value1", "routing_field", 0).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("--> verifying get with no routing, should fail");
+ for (int i = 0; i < 5; i++) {
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").execute().actionGet().isExists();
+ fail();
+ } catch (RoutingMissingException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+ }
+ logger.info("--> verifying get with routing, should find");
+ for (int i = 0; i < 5; i++) {
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+ }
+ }
+
+ /**
+ * Required routing across the remaining single- and multi-document APIs:
+ * get, explain, term vectors, update, mget and mtermvectors. Each call
+ * succeeds with routing "0" and fails (or reports a per-item failure)
+ * with "routing is required for ..." when routing is omitted.
+ * Improvements over the original: bare fail() calls now carry messages,
+ * and the mtermvectors [1] failure assertions match the [0] order/format.
+ */
+ public void testRequiredRoutingMapping_variousAPIs() throws Exception {
+ client().admin().indices().prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
+ .execute().actionGet();
+ ensureGreen();
+
+ logger.info("--> indexing with id [1], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").get();
+ logger.info("--> indexing with id [2], and routing [0]");
+ client().prepareIndex(indexOrAlias(), "type1", "2").setRouting("0").setSource("field", "value2").setRefresh(true).get();
+
+ logger.info("--> verifying get with id [1] with routing [0], should succeed");
+ assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
+
+ logger.info("--> verifying get with id [1], with no routing, should fail");
+ try {
+ client().prepareGet(indexOrAlias(), "type1", "1").get();
+ fail("get without routing should have failed");
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying explain with id [2], with routing [0], should succeed");
+ ExplainResponse explainResponse = client().prepareExplain(indexOrAlias(), "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setRouting("0").get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.isMatch(), equalTo(true));
+
+ logger.info("--> verifying explain with id [2], with no routing, should fail");
+ try {
+ client().prepareExplain(indexOrAlias(), "type1", "2")
+ .setQuery(QueryBuilders.matchAllQuery()).get();
+ fail("explain without routing should have failed");
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
+ }
+
+ logger.info("--> verifying term vector with id [1], with routing [0], should succeed");
+ TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").setRouting("0").get();
+ assertThat(termVectorsResponse.isExists(), equalTo(true));
+ assertThat(termVectorsResponse.getId(), equalTo("1"));
+
+ // term vectors with no routing, should fail
+ try {
+ client().prepareTermVectors(indexOrAlias(), "type1", "1").get();
+ fail("term vectors without routing should have failed");
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ // update with routing bumps the version; without routing it must fail
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setRouting("0")
+ .setDoc("field1", "value1").get();
+ assertThat(updateResponse.getId(), equalTo("1"));
+ assertThat(updateResponse.getVersion(), equalTo(2l));
+
+ try {
+ client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc("field1", "value1").get();
+ fail("update without routing should have failed");
+ } catch (RoutingMissingException e) {
+ assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ }
+
+ logger.info("--> verifying mget with ids [1,2], with routing [0], should succeed");
+ MultiGetResponse multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1").routing("0"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2").routing("0")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiGetResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+
+ // multi-document APIs do not throw: each item carries its own failure
+ logger.info("--> verifying mget with ids [1,2], with no routing, should fail");
+ multiGetResponse = client().prepareMultiGet()
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "1"))
+ .add(new MultiGetRequest.Item(indexOrAlias(), "type1", "2")).get();
+ assertThat(multiGetResponse.getResponses().length, equalTo(2));
+ assertThat(multiGetResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getId(), equalTo("1"));
+ assertThat(multiGetResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ assertThat(multiGetResponse.getResponses()[1].isFailed(), equalTo(true));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getId(), equalTo("2"));
+ assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
+
+ MultiTermVectorsResponse multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").routing("0"))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2").routing("0")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse().isExists(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(false));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse().isExists(), equalTo(true));
+
+ multiTermVectorsResponse = client().prepareMultiTermVectors()
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "1"))
+ .add(new TermVectorsRequest(indexOrAlias(), "type1", "2")).get();
+ assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
+ assertThat(multiTermVectorsResponse.getResponses()[0].getResponse(), nullValue());
+ assertThat(multiTermVectorsResponse.getResponses()[1].getId(), equalTo("2"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].isFailed(), equalTo(true));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
+ assertThat(multiTermVectorsResponse.getResponses()[1].getResponse(), nullValue());
+ }
+
+ // Randomly address the concrete index or its alias — routing must behave
+ // identically through either name.
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java b/core/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java
new file mode 100644
index 0000000000..c38aec157f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.ImmutableSet;
+
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.expression.ExpressionScriptEngineService;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+public class CustomScriptContextTests extends ElasticsearchIntegrationTest {
+
+ private static final ImmutableSet<String> LANG_SET = ImmutableSet.of(GroovyScriptEngineService.NAME, MustacheScriptEngineService.NAME, ExpressionScriptEngineService.NAME);
+
+ private static final String PLUGIN_NAME = "testplugin";
+
+ // Load the test plugin on every node, globally switch off the
+ // "<plugin>_custom_globally_disabled_op" context, and switch off only
+ // inline expression scripts for "<plugin>_custom_exp_disabled_op".
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder().put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", CustomScriptContextPlugin.class.getName())
+ .put("script." + PLUGIN_NAME + "_custom_globally_disabled_op", "off")
+ .put("script.engine.expression.inline." + PLUGIN_NAME + "_custom_exp_disabled_op", "off")
+ .build();
+ }
+
+ /**
+ * The fine-grained script settings from {@code nodeSettings} are honored
+ * per plugin context: the globally disabled op rejects every lang/type
+ * combination, the expression-only disabled op rejects only inline
+ * expression scripts, and everything else still compiles.
+ */
+ @Test
+ public void testCustomScriptContextsSettings() {
+ ScriptService scriptService = internalCluster().getInstance(ScriptService.class);
+ // globally disabled op: rejected for every language and script type
+ for (String lang : LANG_SET) {
+ for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) {
+ try {
+ scriptService.compile(new Script("test", scriptType, lang, null), new ScriptContext.Plugin(PLUGIN_NAME,
+ "custom_globally_disabled_op"));
+ fail("script compilation should have been rejected");
+ } catch(ScriptException e) {
+ assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + lang + "] are disabled"));
+ }
+ }
+ }
+
+ // custom_exp_disabled_op: inline expression scripts specifically rejected
+ try {
+ scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "expression", null), new ScriptContext.Plugin(
+ PLUGIN_NAME, "custom_exp_disabled_op"));
+ fail("script compilation should have been rejected");
+ } catch(ScriptException e) {
+ assertThat(e.getMessage(), containsString("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [expression] are disabled"));
+ }
+
+ // ...but the same expression compiles fine under any standard context
+ CompiledScript compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "expression", null),
+ randomFrom(ScriptContext.Standard.values()));
+ assertThat(compiledScript, notNullValue());
+
+ // ...and other languages (mustache) are unaffected in that same context
+ compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, "mustache", null),
+ new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
+ assertThat(compiledScript, notNullValue());
+
+ // the untouched custom_op context accepts every language
+ for (String lang : LANG_SET) {
+ compiledScript = scriptService.compile(new Script("1", ScriptService.ScriptType.INLINE, lang, null), new ScriptContext.Plugin(
+ PLUGIN_NAME, "custom_op"));
+ assertThat(compiledScript, notNullValue());
+ }
+ }
+
+ /**
+ * Compiling against a plugin context that was never registered via
+ * {@code registerScriptContext} must be rejected with
+ * IllegalArgumentException naming the unknown key ("test_unknown").
+ */
+ @Test
+ public void testCompileNonRegisteredPluginContext() {
+ ScriptService scriptService = internalCluster().getInstance(ScriptService.class);
+ try {
+ scriptService.compile(
+ new Script("test", randomFrom(ScriptService.ScriptType.values()), randomFrom(LANG_SET.toArray(new String[LANG_SET
+ .size()])), null), new ScriptContext.Plugin("test", "unknown"));
+ fail("script compilation should have been rejected");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("script context [test_unknown] not supported"));
+ }
+ }
+
+ /**
+ * Same as above but with an ad-hoc {@link ScriptContext} implementation
+ * (not a ScriptContext.Plugin): an unregistered context key ("test")
+ * must be rejected regardless of how the context object was built.
+ */
+ @Test
+ public void testCompileNonRegisteredScriptContext() {
+ ScriptService scriptService = internalCluster().getInstance(ScriptService.class);
+ try {
+ scriptService.compile(
+ new Script("test", randomFrom(ScriptService.ScriptType.values()), randomFrom(LANG_SET.toArray(new String[LANG_SET
+ .size()])), null), new ScriptContext() {
+ @Override
+ public String getKey() {
+ return "test";
+ }
+ });
+ fail("script compilation should have been rejected");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("script context [test] not supported"));
+ }
+ }
+
+ /**
+ * Test plugin that registers the three custom script contexts the tests
+ * above exercise ("custom_op", "custom_exp_disabled_op",
+ * "custom_globally_disabled_op") by hooking into the ScriptModule.
+ * Loaded on each node via the "plugin.types" setting in nodeSettings().
+ */
+ public static class CustomScriptContextPlugin extends AbstractPlugin {
+ @Override
+ public String name() {
+ return "custom_script_context_plugin";
+ }
+
+ @Override
+ public String description() {
+ return "Custom script context plugin";
+ }
+
+ @Override
+ public void processModule(Module module) {
+ // processModule is called for every module; only act on ScriptModule.
+ if (module instanceof ScriptModule) {
+ ScriptModule scriptModule = (ScriptModule) module;
+ scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"));
+ scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
+ scriptModule.registerScriptContext(new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/GroovyScriptTests.java b/core/src/test/java/org/elasticsearch/script/GroovyScriptTests.java
new file mode 100644
index 0000000000..9ceda548b9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/GroovyScriptTests.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Various tests for Groovy scripting
+ */
+public class GroovyScriptTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testGroovyBigDecimalTransformation() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
+
+ // Test that something that would usually be a BigDecimal is transformed into a Double
+ assertScript("def n = 1.23; assert n instanceof Double;");
+ assertScript("def n = 1.23G; assert n instanceof Double;");
+ assertScript("def n = BigDecimal.ONE; assert n instanceof BigDecimal;");
+ }
+
+ public void assertScript(String script) {
+ SearchResponse resp = client().prepareSearch("test")
+ .setSource("{\"query\": {\"match_all\": {}}," +
+ "\"sort\":{\"_script\": {\"script\": \""+ script +
+ "; 1\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get();
+ assertNoFailures(resp);
+ }
+
+ @Test
+ public void testGroovyExceptionSerialization() throws Exception {
+ List<IndexRequestBuilder> reqs = newArrayList();
+ for (int i = 0; i < randomIntBetween(50, 500); i++) {
+ reqs.add(client().prepareIndex("test", "doc", "" + i).setSource("foo", "bar"));
+ }
+ indexRandom(true, false, reqs);
+ try {
+ client().prepareSearch("test")
+ .setQuery(
+ constantScoreQuery(scriptQuery(new Script("1 == not_found", ScriptType.INLINE, GroovyScriptEngineService.NAME,
+ null)))).get();
+ fail("should have thrown an exception");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString()+ "should not contained NotSerializableTransportException",
+ e.toString().contains("NotSerializableTransportException"), equalTo(false));
+ assertThat(e.toString()+ "should have contained GroovyScriptExecutionException",
+ e.toString().contains("GroovyScriptExecutionException"), equalTo(true));
+ assertThat(e.toString()+ "should have contained not_found",
+ e.toString().contains("No such property: not_found"), equalTo(true));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(constantScoreQuery(scriptQuery(new Script("assert false", ScriptType.INLINE, "groovy", null)))).get();
+ fail("should have thrown an exception");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should not contained NotSerializableTransportException",
+ e.toString().contains("NotSerializableTransportException"), equalTo(false));
+ assertThat(e.toString() + "should have contained GroovyScriptExecutionException",
+ e.toString().contains("GroovyScriptExecutionException"), equalTo(true));
+ assertThat(e.toString()+ "should have contained an assert error",
+ e.toString().contains("PowerAssertionError[assert false"), equalTo(true));
+ }
+ }
+
+ @Test
+ public void testGroovyScriptAccess() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", "quick brow fox jumped over the lazy dog", "bar", 1).get();
+ client().prepareIndex("test", "doc", "2").setSource("foo", "fast jumping spiders", "bar", 2).get();
+ client().prepareIndex("test", "doc", "3").setSource("foo", "dog spiders that can eat a dog", "bar", 3).get();
+ refresh();
+
+ // doc[] access
+ SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(matchAllQuery())
+.add(
+ scriptFunction(new Script("doc['bar'].value", ScriptType.INLINE, "groovy", null)))
+ .boostMode(CombineFunction.REPLACE)).get();
+
+ assertNoFailures(resp);
+ assertOrderedSearchHits(resp, "3", "2", "1");
+ }
+
+ public void testScoreAccess() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", "quick brow fox jumped over the lazy dog", "bar", 1).get();
+ client().prepareIndex("test", "doc", "2").setSource("foo", "fast jumping spiders", "bar", 2).get();
+ client().prepareIndex("test", "doc", "3").setSource("foo", "dog spiders that can eat a dog", "bar", 3).get();
+ refresh();
+
+ // _score can be accessed
+ SearchResponse resp = client().prepareSearch("test").setQuery(functionScoreQuery(matchQuery("foo", "dog"))
+ .add(scriptFunction(new Script("_score", ScriptType.INLINE, "groovy", null)))
+ .boostMode(CombineFunction.REPLACE)).get();
+ assertNoFailures(resp);
+ assertSearchHits(resp, "3", "1");
+
+ // _score is comparable
+ // NOTE: it is important to use 0.0 instead of 0 instead Groovy will do an integer comparison
+ // and if the score if between 0 and 1 it will be considered equal to 0 due to the cast
+ resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("foo", "dog")).add(
+ scriptFunction(new Script("_score > 0.0 ? _score : 0", ScriptType.INLINE, "groovy", null))).boostMode(
+ CombineFunction.REPLACE)).get();
+ assertNoFailures(resp);
+ assertSearchHits(resp, "3", "1");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/GroovySecurityTests.java b/core/src/test/java/org/elasticsearch/script/GroovySecurityTests.java
new file mode 100644
index 0000000000..bee96f43b6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/GroovySecurityTests.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.nio.file.Path;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests for the Groovy security permissions
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
+public class GroovySecurityTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ assumeTrue("test requires security manager to be enabled", System.getSecurityManager() != null);
+ }
+
+ @Test
+ public void testEvilGroovyScripts() throws Exception {
+ int nodes = randomIntBetween(1, 3);
+ Settings nodeSettings = Settings.builder()
+ .put("script.inline", true)
+ .put("script.indexed", true)
+ .build();
+ internalCluster().startNodesAsync(nodes, nodeSettings).get();
+ client().admin().cluster().prepareHealth().setWaitForNodes(nodes + "").get();
+
+ client().prepareIndex("test", "doc", "1").setSource("foo", 5, "bar", "baz").setRefresh(true).get();
+
+ // Plain test
+ assertSuccess("");
+ // numeric field access
+ assertSuccess("def foo = doc['foo'].value; if (foo == null) { return 5; }");
+ // string field access
+ assertSuccess("def bar = doc['bar'].value; if (bar == null) { return 5; }");
+ // List
+ assertSuccess("def list = [doc['foo'].value, 3, 4]; def v = list.get(1); list.add(10)");
+ // Ranges
+ assertSuccess("def range = 1..doc['foo'].value; def v = range.get(0)");
+ // Maps
+ assertSuccess("def v = doc['foo'].value; def m = [:]; m.put(\\\"value\\\", v)");
+ // Times
+ assertSuccess("def t = Instant.now().getMillis()");
+ // GroovyCollections
+ assertSuccess("def n = [1,2,3]; GroovyCollections.max(n)");
+
+ // Fail cases:
+ // AccessControlException[access denied ("java.io.FilePermission" "<<ALL FILES>>" "execute")]
+ assertFailure("pr = Runtime.getRuntime().exec(\\\"touch /tmp/gotcha\\\"); pr.waitFor()");
+
+ // AccessControlException[access denied ("java.lang.RuntimePermission" "accessClassInPackage.sun.reflect")]
+ assertFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\\\"year\\\").setAccessible(true)");
+ assertFailure("d = new DateTime(); d.\\\"${'get' + 'Class'}\\\"()." +
+ "\\\"${'getDeclared' + 'Method'}\\\"(\\\"year\\\").\\\"${'set' + 'Accessible'}\\\"(false)");
+ assertFailure("Class.forName(\\\"org.joda.time.DateTime\\\").getDeclaredMethod(\\\"year\\\").setAccessible(true)");
+
+ // AccessControlException[access denied ("groovy.security.GroovyCodeSourcePermission" "/groovy/shell")]
+ assertFailure("Eval.me('2 + 2')");
+ assertFailure("Eval.x(5, 'x + 2')");
+
+ // AccessControlException[access denied ("java.lang.RuntimePermission" "accessDeclaredMembers")]
+ assertFailure("d = new Date(); java.lang.reflect.Field f = Date.class.getDeclaredField(\\\"fastTime\\\");" +
+ " f.setAccessible(true); f.get(\\\"fastTime\\\")");
+
+ // AccessControlException[access denied ("java.io.FilePermission" "<<ALL FILES>>" "execute")]
+ assertFailure("def methodName = 'ex'; Runtime.\\\"${'get' + 'Runtime'}\\\"().\\\"${methodName}ec\\\"(\\\"touch /tmp/gotcha2\\\")");
+
+ // test a directory we normally have access to, but the groovy script does not.
+ Path dir = createTempDir();
+ // TODO: figure out the necessary escaping for windows paths here :)
+ if (!Constants.WINDOWS) {
+ // access denied ("java.io.FilePermission" ".../tempDir-00N" "read")
+ assertFailure("new File(\\\"" + dir + "\\\").exists()");
+ }
+ }
+
+ private void assertSuccess(String script) {
+ logger.info("--> script: " + script);
+ SearchResponse resp = client().prepareSearch("test")
+ .setSource("{\"query\": {\"match_all\": {}}," +
+ "\"sort\":{\"_script\": {\"script\": \""+ script +
+ "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get();
+ assertNoFailures(resp);
+ assertEquals(1, resp.getHits().getTotalHits());
+ assertThat(resp.getHits().getAt(0).getSortValues(), equalTo(new Object[]{7.0}));
+ }
+
+ private void assertFailure(String script) {
+ logger.info("--> script: " + script);
+ SearchResponse resp = client().prepareSearch("test")
+ .setSource("{\"query\": {\"match_all\": {}}," +
+ "\"sort\":{\"_script\": {\"script\": \""+ script +
+ "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get();
+ assertEquals(0, resp.getHits().getTotalHits());
+ ShardSearchFailure fails[] = resp.getShardFailures();
+ // TODO: GroovyScriptExecutionException needs work
+ for (ShardSearchFailure fail : fails) {
+ assertTrue(fail.getCause().toString().contains("AccessControlException[access denied"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/core/src/test/java/org/elasticsearch/script/IndexLookupTests.java
new file mode 100644
index 0000000000..fda14ca3dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/IndexLookupTests.java
@@ -0,0 +1,1134 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+public class IndexLookupTests extends ElasticsearchIntegrationTest {
+
+ String includeAllFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS | _CACHE";
+ String includeAllWithoutRecordFlag = "_FREQUENCIES | _OFFSETS | _PAYLOADS | _POSITIONS ";
+ private HashMap<String, List<Object>> expectedEndOffsetsArray;
+ private HashMap<String, List<Object>> expectedPayloadsArray;
+ private HashMap<String, List<Object>> expectedPositionsArray;
+ private HashMap<String, List<Object>> emptyArray;
+ private HashMap<String, List<Object>> expectedStartOffsetsArray;
+
+ void initTestData() throws InterruptedException, ExecutionException, IOException {
+ emptyArray = new HashMap<>();
+ List<Object> empty1 = new ArrayList<>();
+ empty1.add(-1);
+ empty1.add(-1);
+ emptyArray.put("1", empty1);
+ List<Object> empty2 = new ArrayList<>();
+ empty2.add(-1);
+ empty2.add(-1);
+ emptyArray.put("2", empty2);
+ List<Object> empty3 = new ArrayList<>();
+ empty3.add(-1);
+ empty3.add(-1);
+ emptyArray.put("3", empty3);
+
+ expectedPositionsArray = new HashMap<>();
+
+ List<Object> pos1 = new ArrayList<>();
+ pos1.add(1);
+ pos1.add(2);
+ expectedPositionsArray.put("1", pos1);
+ List<Object> pos2 = new ArrayList<>();
+ pos2.add(0);
+ pos2.add(1);
+ expectedPositionsArray.put("2", pos2);
+ List<Object> pos3 = new ArrayList<>();
+ pos3.add(0);
+ pos3.add(4);
+ expectedPositionsArray.put("3", pos3);
+
+ expectedPayloadsArray = new HashMap<>();
+ List<Object> pay1 = new ArrayList<>();
+ pay1.add(2);
+ pay1.add(3);
+ expectedPayloadsArray.put("1", pay1);
+ List<Object> pay2 = new ArrayList<>();
+ pay2.add(1);
+ pay2.add(2);
+ expectedPayloadsArray.put("2", pay2);
+ List<Object> pay3 = new ArrayList<>();
+ pay3.add(1);
+ pay3.add(-1);
+ expectedPayloadsArray.put("3", pay3);
+ /*
+ * "a|1 b|2 b|3 c|4 d " "b|1 b|2 c|3 d|4 a " "b|1 c|2 d|3 a|4 b "
+ */
+ expectedStartOffsetsArray = new HashMap<>();
+ List<Object> starts1 = new ArrayList<>();
+ starts1.add(4);
+ starts1.add(8);
+ expectedStartOffsetsArray.put("1", starts1);
+ List<Object> starts2 = new ArrayList<>();
+ starts2.add(0);
+ starts2.add(4);
+ expectedStartOffsetsArray.put("2", starts2);
+ List<Object> starts3 = new ArrayList<>();
+ starts3.add(0);
+ starts3.add(16);
+ expectedStartOffsetsArray.put("3", starts3);
+
+ expectedEndOffsetsArray = new HashMap<>();
+ List<Object> ends1 = new ArrayList<>();
+ ends1.add(7);
+ ends1.add(11);
+ expectedEndOffsetsArray.put("1", ends1);
+ List<Object> ends2 = new ArrayList<>();
+ ends2.add(3);
+ ends2.add(7);
+ expectedEndOffsetsArray.put("2", ends2);
+ List<Object> ends3 = new ArrayList<>();
+ ends3.add(3);
+ ends3.add(17);
+ expectedEndOffsetsArray.put("3", ends3);
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ Settings.settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("int_payload_field", "a|1 b|2 b|3 c|4 d "), client()
+ .prepareIndex("test", "type1", "2").setSource("int_payload_field", "b|1 b|2 c|3 d|4 a "),
+ client().prepareIndex("test", "type1", "3").setSource("int_payload_field", "b|1 c|2 d|3 a|4 b "));
+ ensureGreen();
+ }
+
+ @Test
+ public void testTwoScripts() throws Exception {
+
+ initTestData();
+
+ // check term frequencies for 'a'
+ Script scriptFieldScript = new Script("term = _index['int_payload_field']['c']; term.tf()");
+ scriptFieldScript = new Script("1");
+ Script scoreScript = new Script("term = _index['int_payload_field']['b']; term.tf()");
+ Map<String, Object> expectedResultsField = new HashMap<>();
+ expectedResultsField.put("1", 1);
+ expectedResultsField.put("2", 1);
+ expectedResultsField.put("3", 1);
+ Map<String, Object> expectedResultsScore = new HashMap<>();
+ expectedResultsScore.put("1", 2f);
+ expectedResultsScore.put("2", 2f);
+ expectedResultsScore.put("3", 2f);
+ checkOnlyFunctionScore(scoreScript, expectedResultsScore, 3);
+ checkValueInEachDocWithFunctionScore(scriptFieldScript, expectedResultsField, scoreScript, expectedResultsScore, 3);
+
+ }
+
+ @Test
+ public void testCallWithDifferentFlagsFails() throws Exception {
+
+ initTestData();
+
+ // should throw an exception, we cannot call with different flags twice
+ // if the flags of the second call were not included in the first call.
+ Script script = new Script("term = _index['int_payload_field']['b']; return _index['int_payload_field'].get('b', _POSITIONS).tf();");
+ try {
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(
+ "got: " + e.toString(),
+ e.toString()
+ .indexOf(
+ "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]"),
+ Matchers.greaterThan(-1));
+ }
+
+ // Should not throw an exception this way round
+ script = new Script(
+ "term = _index['int_payload_field'].get('b', _POSITIONS | _FREQUENCIES);return _index['int_payload_field']['b'].tf();");
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ }
+
+ private void checkOnlyFunctionScore(Script scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript))).execute()
+ .actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ @Test
+ public void testDocumentationExample() throws Exception {
+
+ initTestData();
+
+ Script script = new Script("term = _index['float_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos in term) {payloadSum = pos.payloadAsInt(0)}; payloadSum");
+
+ // non existing field: sum should be 0
+ HashMap<String, Object> zeroArray = new HashMap<>();
+ zeroArray.put("1", 0);
+ zeroArray.put("2", 0);
+ zeroArray.put("3", 0);
+ checkValueInEachDoc(script, zeroArray, 3);
+
+ script = new Script("term = _index['int_payload_field'].get('b'," + includeAllFlag
+ + "); payloadSum=0; for (pos in term) {payloadSum = payloadSum + pos.payloadAsInt(0)}; payloadSum");
+
+ // existing field: sums should be as here:
+ zeroArray.put("1", 5);
+ zeroArray.put("2", 3);
+ zeroArray.put("3", 1);
+ checkValueInEachDoc(script, zeroArray, 3);
+ }
+
+ @Test
+ public void testIteratorAndRecording() throws Exception {
+
+ initTestData();
+
+ // call twice with record: should work as expected
+ Script script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // no record and get iterator twice: should fail
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptIterateTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ // no record and get termObject twice and iterate: should fail
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "position");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptions(script);
+ script = createPositionsArrayScriptGetInfoObjectTwice("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptions(script);
+
+ }
+
+ private Script createPositionsArrayScriptGetInfoObjectTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos in term) {array.add(pos." + what + ")}; _index['int_payload_field'].get('" + term + "',"
+ + flags + "); array=[]; for (pos in term) {array.add(pos." + what + ")}";
+ return new Script(script);
+ }
+
+ private Script createPositionsArrayScriptIterateTwice(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags
+ + "); array=[]; for (pos in term) {array.add(pos." + what + ")}; array=[]; for (pos in term) {array.add(pos." + what
+ + ")}; array";
+ return new Script(script);
+ }
+
+ private Script createPositionsArrayScript(String field, String term, String flags, String what) {
+ String script = "term = _index['" + field + "'].get('" + term + "'," + flags
+ + "); array=[]; for (pos in term) {array.add(pos." + what + ")}; array";
+ return new Script(script);
+ }
+
+ private Script createPositionsArrayScriptDefaultGet(String field, String term, String what) {
+ String script = "term = _index['" + field + "']['" + term + "']; array=[]; for (pos in term) {array.add(pos." + what
+ + ")}; array";
+ return new Script(script);
+ }
+
+ @Test
+ public void testFlags() throws Exception {
+
+ initTestData();
+
+ // check default flag
+ Script script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "position");
+ // there should be no positions
+ /* TODO: the following tests fail with the new postings enum apis because of a bogus assert in BlockDocsEnum
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScriptDefaultGet("int_payload_field", "b", "payloadAsInt(-1)");
+ // there should be no payload
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+
+ // check FLAG_FREQUENCIES flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "position");
+ // there should be no positions
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);*/
+
+ // check FLAG_POSITIONS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "position");
+ // there should be positions
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ /* TODO: these tests make a bogus assumption that asking for positions will return only positions
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "startOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "endOffset");
+ // there should be no offsets
+ checkArrayValsInEachDoc(script, emptyArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_POSITIONS", "payloadAsInt(-1)");
+ // there should be no payloads
+ checkArrayValsInEachDoc(script, emptyArray, 3);*/
+
+ // check FLAG_OFFSETS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "position");
+ // there should be positions and s forth ...
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_OFFSETS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check FLAG_PAYLOADS flag
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", "_PAYLOADS", "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags
+ String allFlags = "_POSITIONS | _OFFSETS | _PAYLOADS";
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", allFlags, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ // check all flags without record
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "position");
+ checkArrayValsInEachDoc(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "startOffset");
+ checkArrayValsInEachDoc(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "endOffset");
+ checkArrayValsInEachDoc(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScript("int_payload_field", "b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 3);
+
+ }
+
+ private void checkArrayValsInEachDoc(Script script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, expectedHitSize);
+ int nullCounter = 0;
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues();
+ Object expectedResult = expectedArray.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ if (expectedResult != null) {
+ nullCounter++;
+ }
+ }
+ assertThat(nullCounter, equalTo(expectedArray.size()));
+ }
+
+ @Test
+ public void testAllExceptPosAndOffset() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("float_payload_field").field("type", "string").field("index_options", "offsets").field("term_vector", "no")
+ .field("analyzer", "payload_float").endObject().startObject("string_payload_field").field("type", "string")
+ .field("index_options", "offsets").field("term_vector", "no").field("analyzer", "payload_string").endObject()
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ Settings.settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_string.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_string.filter", "delimited_string")
+ .put("index.analysis.filter.delimited_string.delimiter", "|")
+ .put("index.analysis.filter.delimited_string.encoding", "identity")
+ .put("index.analysis.filter.delimited_string.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter")
+ .put("index.number_of_shards", 1)));
+ ensureYellow();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("float_payload_field", "a|1 b|2 a|3 b "), client()
+ .prepareIndex("test", "type1", "2").setSource("string_payload_field", "a|a b|b a|a b "),
+ client().prepareIndex("test", "type1", "3").setSource("float_payload_field", "a|4 b|5 a|6 b "),
+ client().prepareIndex("test", "type1", "4").setSource("string_payload_field", "a|b b|a a|b b "),
+ client().prepareIndex("test", "type1", "5").setSource("float_payload_field", "c "),
+ client().prepareIndex("test", "type1", "6").setSource("int_payload_field", "c|1"));
+
+ // get the number of all docs
+ Script script = new Script("_index.numDocs()");
+ checkValueInEachDoc(6, script, 6);
+
+ // get the number of docs with field float_payload_field
+ script = new Script("_index['float_payload_field'].docCount()");
+ checkValueInEachDoc(3, script, 6);
+
+ // corner case: what if the field does not exist?
+ script = new Script("_index['non_existent_field'].docCount()");
+ checkValueInEachDoc(0, script, 6);
+
+ // get the number of all tokens in all docs
+ script = new Script("_index['float_payload_field'].sumttf()");
+ checkValueInEachDoc(9, script, 6);
+
+ // corner case: get the number of all tokens in all docs for a
+ // non-existent field
+ script = new Script("_index['non_existent_field'].sumttf()");
+ checkValueInEachDoc(0, script, 6);
+
+ // get the sum of doc freqs in all docs
+ script = new Script("_index['float_payload_field'].sumdf()");
+ checkValueInEachDoc(5, script, 6);
+
+ // get the sum of doc freqs in all docs for non existent field
+ script = new Script("_index['non_existent_field'].sumdf()");
+ checkValueInEachDoc(0, script, 6);
+
+ // check term frequencies for 'a'
+ script = new Script("term = _index['float_payload_field']['a']; if (term != null) {term.tf()}");
+ Map<String, Object> expectedResults = new HashMap<>();
+ expectedResults.put("1", 2);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 2);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for 'c'
+ script = new Script("term = _index['float_payload_field']['c']; if (term != null) {term.df()}");
+ expectedResults.put("1", 1l);
+ expectedResults.put("2", 1l);
+ expectedResults.put("3", 1l);
+ expectedResults.put("4", 1l);
+ expectedResults.put("5", 1l);
+ expectedResults.put("6", 1l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = new Script("term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}");
+ expectedResults.put("1", 0l);
+ expectedResults.put("2", 0l);
+ expectedResults.put("3", 0l);
+ expectedResults.put("4", 0l);
+ expectedResults.put("5", 0l);
+ expectedResults.put("6", 0l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = new Script("term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}");
+ expectedResults.put("1", 0);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 0);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check total term frequencies for 'a'
+ script = new Script("term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}");
+ expectedResults.put("1", 4l);
+ expectedResults.put("2", 4l);
+ expectedResults.put("3", 4l);
+ expectedResults.put("4", 4l);
+ expectedResults.put("5", 4l);
+ expectedResults.put("6", 4l);
+ checkValueInEachDoc(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check float payload for 'b'
+ HashMap<String, List<Object>> expectedPayloadsArray = new HashMap<>();
+ script = createPositionsArrayScript("float_payload_field", "b", includeAllFlag, "payloadAsFloat(-1)");
+ float missingValue = -1;
+ List<Object> payloadsFor1 = new ArrayList<>();
+ payloadsFor1.add(2f);
+ payloadsFor1.add(missingValue);
+ expectedPayloadsArray.put("1", payloadsFor1);
+ List<Object> payloadsFor2 = new ArrayList<>();
+ payloadsFor2.add(5f);
+ payloadsFor2.add(missingValue);
+ expectedPayloadsArray.put("3", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<>());
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("4", new ArrayList<>());
+ expectedPayloadsArray.put("2", new ArrayList<>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check string payload for 'b'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("string_payload_field", "b", includeAllFlag, "payloadAsString()");
+ payloadsFor1.add("b");
+ payloadsFor1.add(null);
+ expectedPayloadsArray.put("2", payloadsFor1);
+ payloadsFor2.add("a");
+ payloadsFor2.add(null);
+ expectedPayloadsArray.put("4", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<>());
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("3", new ArrayList<>());
+ expectedPayloadsArray.put("1", new ArrayList<>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ // check int payload for 'c'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScript("int_payload_field", "c", includeAllFlag, "payloadAsInt(-1)");
+ payloadsFor1 = new ArrayList<>();
+ payloadsFor1.add(1);
+ expectedPayloadsArray.put("6", payloadsFor1);
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("4", new ArrayList<>());
+ expectedPayloadsArray.put("3", new ArrayList<>());
+ expectedPayloadsArray.put("2", new ArrayList<>());
+ expectedPayloadsArray.put("1", new ArrayList<>());
+ checkArrayValsInEachDoc(script, expectedPayloadsArray, 6);
+
+ }
+
+ private void checkExceptions(Script script) {
+ try {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ ShardSearchFailure[] shardFails = sr.getShardFailures();
+ for (ShardSearchFailure fail : shardFails) {
+ assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
+ Matchers.greaterThan(-1));
+ }
+ } catch (SearchPhaseExecutionException ex) {
+ assertThat(
+ "got " + ex.toString(),
+ ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
+ Matchers.greaterThan(-1));
+ }
+ }
+
+ private void checkValueInEachDocWithFunctionScore(Script fieldScript, Map<String, Object> expectedFieldVals, Script scoreScript,
+ Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(scoreScript)))
+ .addScriptField("tvtest", fieldScript).execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedFieldVals.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ private void checkValueInEachDoc(Script script, Map<String, Object> expectedResults, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedResults.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ }
+ }
+
+ private void checkValueInEachDoc(int value, Script script, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ if (result instanceof Integer) {
+ assertThat((Integer)result, equalTo(value));
+ } else if (result instanceof Long) {
+ assertThat(((Long) result).intValue(), equalTo(value));
+ } else {
+ fail();
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testCallWithDifferentFlagsFailsOldScriptAPI() throws Exception {
+
+ initTestData();
+
+ // should throw an exception, we cannot call with different flags twice
+ // if the flags of the second call were not included in the first call.
+ String script = "term = _index['int_payload_field']['b']; return _index['int_payload_field'].get('b', _POSITIONS).tf();";
+ try {
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(
+ "got: " + e.toString(),
+ e.toString()
+ .indexOf(
+ "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]"),
+ Matchers.greaterThan(-1));
+ }
+
+ // Should not throw an exception this way round
+ script = "term = _index['int_payload_field'].get('b', _POSITIONS | _FREQUENCIES);return _index['int_payload_field']['b'].tf();";
+ client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testIteratorAndRecordingOldScriptAPI() throws Exception {
+
+ initTestData();
+
+ // call twice with record: should work as expected
+ String script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllFlag, "position");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllFlag, "startOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllFlag, "endOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 3);
+
+ // no record and get iterator twice: should fail
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "position");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptIterateTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptionsOldScriptAPI(script);
+
+ // no record and get termObject twice and iterate: should fail
+ script = createPositionsArrayScriptGetInfoObjectTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "position");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptGetInfoObjectTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "startOffset");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptGetInfoObjectTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "endOffset");
+ checkExceptionsOldScriptAPI(script);
+ script = createPositionsArrayScriptGetInfoObjectTwiceOldScriptAPI("b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkExceptionsOldScriptAPI(script);
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private String createPositionsArrayScriptGetInfoObjectTwiceOldScriptAPI(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags + "); array=[]; for (pos in term) {array.add(pos."
+ + what + ")}; _index['int_payload_field'].get('" + term + "'," + flags + "); array=[]; for (pos in term) {array.add(pos."
+ + what + ")}";
+ return script;
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private String createPositionsArrayScriptIterateTwiceOldScriptAPI(String term, String flags, String what) {
+ String script = "term = _index['int_payload_field'].get('" + term + "'," + flags + "); array=[]; for (pos in term) {array.add(pos."
+ + what + ")}; array=[]; for (pos in term) {array.add(pos." + what + ")}; array";
+ return script;
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private String createPositionsArrayScriptOldScriptAPI(String field, String term, String flags, String what) {
+ String script = "term = _index['" + field + "'].get('" + term + "'," + flags + "); array=[]; for (pos in term) {array.add(pos."
+ + what + ")}; array";
+ return script;
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private String createPositionsArrayScriptDefaultGetOldScriptAPI(String field, String term, String what) {
+ String script = "term = _index['" + field + "']['" + term + "']; array=[]; for (pos in term) {array.add(pos." + what + ")}; array";
+ return script;
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testFlagsOldScriptAPI() throws Exception {
+
+ initTestData();
+
+ // check default flag
+ String script = createPositionsArrayScriptDefaultGetOldScriptAPI("int_payload_field", "b", "position");
+ // there should be no positions
+ /*
+ * TODO: the following tests fail with the new postings enum apis
+ * because of a bogus assert in BlockDocsEnum
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScriptDefaultGet("int_payload_field", "b",
+ * "startOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScriptDefaultGet("int_payload_field", "b",
+ * "endOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScriptDefaultGet("int_payload_field", "b",
+ * "payloadAsInt(-1)"); // there should be no payload
+ * checkArrayValsInEachDoc(script, emptyArray, 3);
+ *
+ * // check FLAG_FREQUENCIES flag script =
+ * createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES",
+ * "position"); // there should be no positions
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES",
+ * "startOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES",
+ * "endOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScript("int_payload_field", "b", "_FREQUENCIES",
+ * "payloadAsInt(-1)"); // there should be no payloads
+ * checkArrayValsInEachDoc(script, emptyArray, 3);
+ */
+
+ // check FLAG_POSITIONS flag
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_POSITIONS", "position");
+ // there should be positions
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ /*
+ * TODO: these tests make a bogus assumption that asking for positions
+ * will return only positions script =
+ * createPositionsArrayScript("int_payload_field", "b", "_POSITIONS",
+ * "startOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScript("int_payload_field", "b", "_POSITIONS",
+ * "endOffset"); // there should be no offsets
+ * checkArrayValsInEachDoc(script, emptyArray, 3); script =
+ * createPositionsArrayScript("int_payload_field", "b", "_POSITIONS",
+ * "payloadAsInt(-1)"); // there should be no payloads
+ * checkArrayValsInEachDoc(script, emptyArray, 3);
+ */
+
+ // check FLAG_OFFSETS flag
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_OFFSETS", "position");
+ // there should be positions and so forth ...
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_OFFSETS", "startOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_OFFSETS", "endOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_OFFSETS", "payloadAsInt(-1)");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 3);
+
+ // check FLAG_PAYLOADS flag
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_PAYLOADS", "position");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_PAYLOADS", "startOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_PAYLOADS", "endOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", "_PAYLOADS", "payloadAsInt(-1)");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 3);
+
+ // check all flags
+ String allFlags = "_POSITIONS | _OFFSETS | _PAYLOADS";
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", allFlags, "position");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", allFlags, "startOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", allFlags, "endOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", allFlags, "payloadAsInt(-1)");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 3);
+
+ // check all flags without record
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", includeAllWithoutRecordFlag, "position");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPositionsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", includeAllWithoutRecordFlag, "startOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedStartOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", includeAllWithoutRecordFlag, "endOffset");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedEndOffsetsArray, 3);
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "b", includeAllWithoutRecordFlag, "payloadAsInt(-1)");
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 3);
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testAllExceptPosAndOffsetOldSciptAPI() throws Exception {
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("float_payload_field").field("type", "string").field("index_options", "offsets").field("term_vector", "no")
+ .field("analyzer", "payload_float").endObject().startObject("string_payload_field").field("type", "string")
+ .field("index_options", "offsets").field("term_vector", "no").field("analyzer", "payload_string").endObject()
+ .startObject("int_payload_field").field("type", "string").field("index_options", "offsets")
+ .field("analyzer", "payload_int").endObject().endObject().endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping).setSettings(
+ Settings.settingsBuilder().put(indexSettings())
+ .put("index.analysis.analyzer.payload_float.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_float.filter", "delimited_float")
+ .put("index.analysis.filter.delimited_float.delimiter", "|")
+ .put("index.analysis.filter.delimited_float.encoding", "float")
+ .put("index.analysis.filter.delimited_float.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_string.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_string.filter", "delimited_string")
+ .put("index.analysis.filter.delimited_string.delimiter", "|")
+ .put("index.analysis.filter.delimited_string.encoding", "identity")
+ .put("index.analysis.filter.delimited_string.type", "delimited_payload_filter")
+ .put("index.analysis.analyzer.payload_int.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.payload_int.filter", "delimited_int")
+ .put("index.analysis.filter.delimited_int.delimiter", "|")
+ .put("index.analysis.filter.delimited_int.encoding", "int")
+ .put("index.analysis.filter.delimited_int.type", "delimited_payload_filter").put("index.number_of_shards", 1)));
+ ensureYellow();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("float_payload_field", "a|1 b|2 a|3 b "), client()
+ .prepareIndex("test", "type1", "2").setSource("string_payload_field", "a|a b|b a|a b "),
+ client().prepareIndex("test", "type1", "3").setSource("float_payload_field", "a|4 b|5 a|6 b "),
+ client().prepareIndex("test", "type1", "4").setSource("string_payload_field", "a|b b|a a|b b "),
+ client().prepareIndex("test", "type1", "5").setSource("float_payload_field", "c "),
+ client().prepareIndex("test", "type1", "6").setSource("int_payload_field", "c|1"));
+
+ // get the number of all docs
+ String script = "_index.numDocs()";
+ checkValueInEachDocOldScriptAPI(6, script, 6);
+
+ // get the number of docs with field float_payload_field
+ script = "_index['float_payload_field'].docCount()";
+ checkValueInEachDocOldScriptAPI(3, script, 6);
+
+ // corner case: what if the field does not exist?
+ script = "_index['non_existent_field'].docCount()";
+ checkValueInEachDocOldScriptAPI(0, script, 6);
+
+ // get the number of all tokens in all docs
+ script = "_index['float_payload_field'].sumttf()";
+ checkValueInEachDocOldScriptAPI(9, script, 6);
+
+ // corner case: get the number of all tokens in all docs for a
+ // non-existent field
+ script = "_index['non_existent_field'].sumttf()";
+ checkValueInEachDocOldScriptAPI(0, script, 6);
+
+ // get the sum of doc freqs in all docs
+ script = "_index['float_payload_field'].sumdf()";
+ checkValueInEachDocOldScriptAPI(5, script, 6);
+
+ // get the sum of doc freqs in all docs for non existent field
+ script = "_index['non_existent_field'].sumdf()";
+ checkValueInEachDocOldScriptAPI(0, script, 6);
+
+ // check term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.tf()}";
+ Map<String, Object> expectedResults = new HashMap<>();
+ expectedResults.put("1", 2);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 2);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDocOldScriptAPI(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for 'c'
+ script = "term = _index['float_payload_field']['c']; if (term != null) {term.df()}";
+ expectedResults.put("1", 1l);
+ expectedResults.put("2", 1l);
+ expectedResults.put("3", 1l);
+ expectedResults.put("4", 1l);
+ expectedResults.put("5", 1l);
+ expectedResults.put("6", 1l);
+ checkValueInEachDocOldScriptAPI(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = "term = _index['float_payload_field']['non_existent_term']; if (term != null) {term.df()}";
+ expectedResults.put("1", 0l);
+ expectedResults.put("2", 0l);
+ expectedResults.put("3", 0l);
+ expectedResults.put("4", 0l);
+ expectedResults.put("5", 0l);
+ expectedResults.put("6", 0l);
+ checkValueInEachDocOldScriptAPI(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check doc frequencies for term that does not exist
+ script = "term = _index['non_existent_field']['non_existent_term']; if (term != null) {term.tf()}";
+ expectedResults.put("1", 0);
+ expectedResults.put("2", 0);
+ expectedResults.put("3", 0);
+ expectedResults.put("4", 0);
+ expectedResults.put("5", 0);
+ expectedResults.put("6", 0);
+ checkValueInEachDocOldScriptAPI(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check total term frequencies for 'a'
+ script = "term = _index['float_payload_field']['a']; if (term != null) {term.ttf()}";
+ expectedResults.put("1", 4l);
+ expectedResults.put("2", 4l);
+ expectedResults.put("3", 4l);
+ expectedResults.put("4", 4l);
+ expectedResults.put("5", 4l);
+ expectedResults.put("6", 4l);
+ checkValueInEachDocOldScriptAPI(script, expectedResults, 6);
+ expectedResults.clear();
+
+ // check float payload for 'b'
+ HashMap<String, List<Object>> expectedPayloadsArray = new HashMap<>();
+ script = createPositionsArrayScriptOldScriptAPI("float_payload_field", "b", includeAllFlag, "payloadAsFloat(-1)");
+ float missingValue = -1;
+ List<Object> payloadsFor1 = new ArrayList<>();
+ payloadsFor1.add(2f);
+ payloadsFor1.add(missingValue);
+ expectedPayloadsArray.put("1", payloadsFor1);
+ List<Object> payloadsFor2 = new ArrayList<>();
+ payloadsFor2.add(5f);
+ payloadsFor2.add(missingValue);
+ expectedPayloadsArray.put("3", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<>());
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("4", new ArrayList<>());
+ expectedPayloadsArray.put("2", new ArrayList<>());
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 6);
+
+ // check string payload for 'b'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScriptOldScriptAPI("string_payload_field", "b", includeAllFlag, "payloadAsString()");
+ payloadsFor1.add("b");
+ payloadsFor1.add(null);
+ expectedPayloadsArray.put("2", payloadsFor1);
+ payloadsFor2.add("a");
+ payloadsFor2.add(null);
+ expectedPayloadsArray.put("4", payloadsFor2);
+ expectedPayloadsArray.put("6", new ArrayList<>());
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("3", new ArrayList<>());
+ expectedPayloadsArray.put("1", new ArrayList<>());
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 6);
+
+ // check int payload for 'c'
+ expectedPayloadsArray.clear();
+ payloadsFor1.clear();
+ payloadsFor2.clear();
+ script = createPositionsArrayScriptOldScriptAPI("int_payload_field", "c", includeAllFlag, "payloadAsInt(-1)");
+ payloadsFor1 = new ArrayList<>();
+ payloadsFor1.add(1);
+ expectedPayloadsArray.put("6", payloadsFor1);
+ expectedPayloadsArray.put("5", new ArrayList<>());
+ expectedPayloadsArray.put("4", new ArrayList<>());
+ expectedPayloadsArray.put("3", new ArrayList<>());
+ expectedPayloadsArray.put("2", new ArrayList<>());
+ expectedPayloadsArray.put("1", new ArrayList<>());
+ checkArrayValsInEachDocOldScriptAPI(script, expectedPayloadsArray, 6);
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkArrayValsInEachDocOldScriptAPI(Script script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, expectedHitSize);
+ int nullCounter = 0;
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues();
+ Object expectedResult = expectedArray.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ if (expectedResult != null) {
+ nullCounter++;
+ }
+ }
+ assertThat(nullCounter, equalTo(expectedArray.size()));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkExceptionsOldScriptAPI(String script) {
+ try {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertThat(sr.getHits().hits().length, equalTo(0));
+ ShardSearchFailure[] shardFails = sr.getShardFailures();
+ for (ShardSearchFailure fail : shardFails) {
+ assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
+ Matchers.greaterThan(-1));
+ }
+ } catch (SearchPhaseExecutionException ex) {
+ assertThat("got " + ex.toString(),
+ ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
+ Matchers.greaterThan(-1));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkValueInEachDocWithFunctionScoreOldScriptAPI(String fieldScript, Map<String, Object> expectedFieldVals,
+ String scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(new Script(scoreScript))))
+ .addScriptField("tvtest", fieldScript).execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedFieldVals.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkOnlyFunctionScoreOldScriptAPI(String scoreScript, Map<String, Object> expectedScore, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test")
+ .setQuery(QueryBuilders.functionScoreQuery(ScoreFunctionBuilders.scriptFunction(new Script(scoreScript)))).execute()
+ .actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ assertThat("for doc " + hit.getId(), ((Float) expectedScore.get(hit.getId())).doubleValue(),
+ Matchers.closeTo(hit.score(), 1.e-4));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkValueInEachDocOldScriptAPI(String script, Map<String, Object> expectedResults, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ Object expectedResult = expectedResults.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkValueInEachDocOldScriptAPI(int value, String script, int numExpectedDocs) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, numExpectedDocs);
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues().get(0);
+ if (result instanceof Integer) {
+ assertThat((Integer) result, equalTo(value));
+ } else if (result instanceof Long) {
+ assertThat(((Long) result).intValue(), equalTo(value));
+ } else {
+ fail();
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ private void checkArrayValsInEachDocOldScriptAPI(String script, HashMap<String, List<Object>> expectedArray, int expectedHitSize) {
+ SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script)
+ .execute().actionGet();
+ assertHitCount(sr, expectedHitSize);
+ int nullCounter = 0;
+ for (SearchHit hit : sr.getHits().getHits()) {
+ Object result = hit.getFields().get("tvtest").getValues();
+ Object expectedResult = expectedArray.get(hit.getId());
+ assertThat("for doc " + hit.getId(), result, equalTo(expectedResult));
+ if (expectedResult != null) {
+ nullCounter++;
+ }
+ }
+ assertThat(nullCounter, equalTo(expectedArray.size()));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/IndexedScriptTests.java b/core/src/test/java/org/elasticsearch/script/IndexedScriptTests.java
new file mode 100644
index 0000000000..d8d493acb3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/IndexedScriptTests.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.script.expression.ExpressionScriptEngineService;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class IndexedScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
+ builder.put("script.engine.groovy.indexed.update", "off");
+ builder.put("script.engine.groovy.indexed.search", "on");
+ builder.put("script.engine.groovy.indexed.aggs", "on");
+ builder.put("script.engine.groovy.inline.aggs", "off");
+ builder.put("script.engine.expression.indexed.update", "off");
+ builder.put("script.engine.expression.indexed.search", "off");
+ builder.put("script.engine.expression.indexed.aggs", "off");
+ builder.put("script.engine.expression.indexed.mapping", "off");
+ return builder.build();
+ }
+
+ @Test
+ public void testFieldIndexedScript() throws ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "groovy", "script1").setSource("{" +
+ "\"script\":\"2\""+
+ "}").setTimeout(TimeValue.timeValueSeconds(randomIntBetween(2,10))));
+
+ builders.add(client().prepareIndex(ScriptService.SCRIPT_INDEX, "groovy", "script2").setSource("{" +
+ "\"script\":\"factor*2\""+
+ "}"));
+
+ indexRandom(true, builders);
+
+ builders.clear();
+
+ builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"groovy\" }, \"test2\" : { \"script_id\" : \"script2\", \"lang\":\"groovy\", \"params\":{\"factor\":3} }}, size:1}";
+ SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ assertHitCount(searchResponse, 5);
+ assertTrue(searchResponse.getHits().hits().length == 1);
+ SearchHit sh = searchResponse.getHits().getAt(0);
+ assertThat((Integer)sh.field("test1").getValue(), equalTo(2));
+ assertThat((Integer)sh.field("test2").getValue(), equalTo(6));
+ }
+
+ // Relates to #10397
+ @Test
+ public void testUpdateScripts() {
+ createIndex("test_index");
+ ensureGreen("test_index");
+ client().prepareIndex("test_index", "test_type", "1").setSource("{\"foo\":\"bar\"}").get();
+ flush("test_index");
+
+ int iterations = randomIntBetween(2, 11);
+ for (int i = 1; i < iterations; i++) {
+ PutIndexedScriptResponse response =
+ client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "script1", "{\"script\":\"" + i + "\"}").get();
+ assertEquals(i, response.getVersion());
+
+ String query = "{"
+ + " \"query\" : { \"match_all\": {}}, "
+ + " \"script_fields\" : { \"test_field\" : { \"script_id\" : \"script1\", \"lang\":\"groovy\" } } }";
+ SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test_index").setTypes("test_type").get();
+ assertHitCount(searchResponse, 1);
+ SearchHit sh = searchResponse.getHits().getAt(0);
+ assertThat((Integer)sh.field("test_field").getValue(), equalTo(i));
+ }
+ }
+
+ @Test
+ public void testDisabledUpdateIndexedScriptsOnly() {
+ if (randomBoolean()) {
+ client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "script1", "{\"script\":\"2\"}").get();
+ } else {
+ client().prepareIndex(ScriptService.SCRIPT_INDEX, GroovyScriptEngineService.NAME, "script1").setSource("{\"script\":\"2\"}").get();
+ }
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ try {
+ client().prepareUpdate("test", "scriptTest", "1")
+ .setScript(new Script("script1", ScriptService.ScriptType.INDEXED, GroovyScriptEngineService.NAME, null)).get();
+ fail("update script should have been rejected");
+ } catch (Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(ExceptionsHelper.detailedMessage(e),
+ containsString("scripts of type [indexed], operation [update] and lang [groovy] are disabled"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testDisabledUpdateIndexedScriptsOnlyOldScriptAPI() {
+ if (randomBoolean()) {
+ client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "script1", "{\"script\":\"2\"}").get();
+ } else {
+ client().prepareIndex(ScriptService.SCRIPT_INDEX, GroovyScriptEngineService.NAME, "script1").setSource("{\"script\":\"2\"}")
+ .get();
+ }
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ try {
+ client().prepareUpdate("test", "scriptTest", "1").setScript("script1", ScriptService.ScriptType.INDEXED).setScriptLang(GroovyScriptEngineService.NAME).get();
+ fail("update script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [indexed], operation [update] and lang [groovy] are disabled"));
+ }
+ }
+
+ @Test
+ public void testDisabledAggsDynamicScripts() {
+ //dynamic scripts don't need to be enabled for an indexed script to be indexed and later on executed
+ if (randomBoolean()) {
+ client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "script1", "{\"script\":\"2\"}").get();
+ } else {
+ client().prepareIndex(ScriptService.SCRIPT_INDEX, GroovyScriptEngineService.NAME, "script1").setSource("{\"script\":\"2\"}").get();
+ }
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ refresh();
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_id\":\"script1\" } } } }";
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get();
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getAggregations().get("test"), notNullValue());
+ }
+
+ @Test
+ public void testAllOpsDisabledIndexedScripts() throws IOException {
+ if (randomBoolean()) {
+ client().preparePutIndexedScript(ExpressionScriptEngineService.NAME, "script1", "{\"script\":\"2\"}").get();
+ } else {
+ client().prepareIndex(ScriptService.SCRIPT_INDEX, ExpressionScriptEngineService.NAME, "script1").setSource("{\"script\":\"2\"}").get();
+ }
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ try {
+ client().prepareUpdate("test", "scriptTest", "1")
+ .setScript(new Script("script1", ScriptService.ScriptType.INDEXED, ExpressionScriptEngineService.NAME, null)).get();
+ fail("update script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(e.getCause().toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled"));
+ }
+ try {
+ String query = "{ \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"expression\" }}}";
+ client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ fail("search script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled"));
+ }
+ try {
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_id\":\"script1\", \"script_lang\":\"expression\" } } } }";
+ client().prepareSearch("test").setSource(source).get();
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testAllOpsDisabledIndexedScriptsOldScriptAPI() throws IOException {
+ if (randomBoolean()) {
+ client().preparePutIndexedScript(ExpressionScriptEngineService.NAME, "script1", "{\"script\":\"2\"}").get();
+ } else {
+ client().prepareIndex(ScriptService.SCRIPT_INDEX, ExpressionScriptEngineService.NAME, "script1")
+ .setSource("{\"script\":\"2\"}").get();
+ }
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ try {
+ client().prepareUpdate("test", "scriptTest", "1").setScript("script1", ScriptService.ScriptType.INDEXED).setScriptLang(ExpressionScriptEngineService.NAME).get();
+ fail("update script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(e.getCause().toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled"));
+ }
+ try {
+ String query = "{ \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"expression\" }}}";
+ client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ fail("search script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled"));
+ }
+ try {
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_id\":\"script1\", \"script_lang\":\"expression\" } } } }";
+ client().prepareSearch("test").setSource(source).get();
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java
new file mode 100644
index 0000000000..9d07b17a91
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.EnvironmentModule;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.threadpool.ThreadPoolModule;
+import org.elasticsearch.watcher.ResourceWatcherService;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class NativeScriptTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testNativeScript() throws InterruptedException {
+ Settings settings = Settings.settingsBuilder()
+ .put("script.native.my.type", MyNativeScriptFactory.class.getName())
+ .put("name", "testNativeScript")
+ .put("path.home", createTempDir())
+ .build();
+ Injector injector = new ModulesBuilder().add(
+ new EnvironmentModule(new Environment(settings)),
+ new ThreadPoolModule(new ThreadPool(settings)),
+ new SettingsModule(settings),
+ new ScriptModule(settings)).createInjector();
+
+ ScriptService scriptService = injector.getInstance(ScriptService.class);
+
+ ExecutableScript executable = scriptService.executable(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null),
+ ScriptContext.Standard.SEARCH);
+ assertThat(executable.run().toString(), equalTo("test"));
+ terminate(injector.getInstance(ThreadPool.class));
+ }
+
+ @Test
+ public void testFineGrainedSettingsDontAffectNativeScripts() throws IOException {
+ Settings.Builder builder = Settings.settingsBuilder();
+ if (randomBoolean()) {
+ ScriptType scriptType = randomFrom(ScriptType.values());
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + scriptType, randomFrom(ScriptMode.values()));
+ } else {
+ String scriptContext = randomFrom(ScriptContext.Standard.values()).getKey();
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + scriptContext, randomFrom(ScriptMode.values()));
+ }
+ Settings settings = builder.put("path.home", createTempDir()).build();
+ Environment environment = new Environment(settings);
+ ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, null);
+ Map<String, NativeScriptFactory> nativeScriptFactoryMap = new HashMap<>();
+ nativeScriptFactoryMap.put("my", new MyNativeScriptFactory());
+ Set<ScriptEngineService> scriptEngineServices = ImmutableSet.<ScriptEngineService>of(new NativeScriptEngineService(settings, nativeScriptFactoryMap));
+ ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Lists.<ScriptContext.Plugin>newArrayList());
+ ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry);
+
+ for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
+ assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext),
+ notNullValue());
+ }
+ }
+
+ static class MyNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new MyScript();
+ }
+ }
+
+ static class MyScript extends AbstractExecutableScript {
+ @Override
+ public Object run() {
+ return "test";
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java b/core/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java
new file mode 100644
index 0000000000..b49486f050
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+//Use Suite scope so that paths get set correctly
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class OnDiskScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ //Set path so ScriptService will pick up the test scripts
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ .put("path.conf", this.getDataPath("config"))
+ .put("script.engine.expression.file.aggs", "off")
+ .put("script.engine.mustache.file.aggs", "off")
+ .put("script.engine.mustache.file.search", "off")
+ .put("script.engine.mustache.file.mapping", "off")
+ .put("script.engine.mustache.file.update", "off").build();
+ }
+
+ @Test
+ public void testFieldOnDiskScript() throws ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "5").setSource("{\"theField\":\"bar\"}"));
+ indexRandom(true, builders);
+
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\" }, \"test2\" : { \"script_file\" : \"script2\", \"params\":{\"factor\":3} }}, size:1}";
+ SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ assertHitCount(searchResponse, 5);
+ assertTrue(searchResponse.getHits().hits().length == 1);
+ SearchHit sh = searchResponse.getHits().getAt(0);
+ assertThat((Integer)sh.field("test1").getValue(), equalTo(2));
+ assertThat((Integer)sh.field("test2").getValue(), equalTo(6));
+ }
+
+ @Test
+ public void testOnDiskScriptsSameNameDifferentLang() throws ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "5").setSource("{\"theField\":\"bar\"}"));
+ indexRandom(true, builders);
+
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\" }, \"test2\" : { \"script_file\" : \"script1\", \"lang\":\"expression\" }}, size:1}";
+ SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ assertHitCount(searchResponse, 5);
+ assertTrue(searchResponse.getHits().hits().length == 1);
+ SearchHit sh = searchResponse.getHits().getAt(0);
+ assertThat((Integer)sh.field("test1").getValue(), equalTo(2));
+ assertThat((Double)sh.field("test2").getValue(), equalTo(10d));
+ }
+
+ @Test
+ public void testPartiallyDisabledOnDiskScripts() throws ExecutionException, InterruptedException {
+ //test that although aggs are disabled for expression, search scripts work fine
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "2").setSource("{\"theField\":\"foo 2\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "3").setSource("{\"theField\":\"foo 3\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "4").setSource("{\"theField\":\"foo 4\"}"));
+ builders.add(client().prepareIndex("test", "scriptTest", "5").setSource("{\"theField\":\"bar\"}"));
+
+ indexRandom(true, builders);
+
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_file\":\"script1\", \"lang\": \"expression\" } } } }";
+ try {
+ client().prepareSearch("test").setSource(source).get();
+ fail("aggs script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [expression] are disabled"));
+ }
+
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"expression\" }}, size:1}";
+ SearchResponse searchResponse = client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ assertHitCount(searchResponse, 5);
+ assertTrue(searchResponse.getHits().hits().length == 1);
+ SearchHit sh = searchResponse.getHits().getAt(0);
+ assertThat((Double)sh.field("test1").getValue(), equalTo(10d));
+ }
+
+ @Test
+ public void testAllOpsDisabledOnDiskScripts() {
+ //whether we even compile or cache the on disk scripts doesn't change the end result (the returned error)
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ refresh();
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_file\":\"script1\", \"lang\": \"mustache\" } } } }";
+ try {
+ client().prepareSearch("test").setSource(source).get();
+ fail("aggs script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled"));
+ }
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"mustache\" }}, size:1}";
+ try {
+ client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ fail("search script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled"));
+ }
+ try {
+ client().prepareUpdate("test", "scriptTest", "1")
+ .setScript(new Script("script1", ScriptService.ScriptType.FILE, MustacheScriptEngineService.NAME, null)).get();
+ fail("update script should have been rejected");
+ } catch (Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(e.getCause().toString(),
+ containsString("scripts of type [file], operation [update] and lang [mustache] are disabled"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testAllOpsDisabledOnDiskScriptsOldScriptAPI() {
+ //whether we even compile or cache the on disk scripts doesn't change the end result (the returned error)
+ client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
+ refresh();
+ String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_file\":\"script1\", \"lang\": \"mustache\" } } } }";
+ try {
+ client().prepareSearch("test").setSource(source).get();
+ fail("aggs script should have been rejected");
+ } catch (Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled"));
+ }
+ String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"mustache\" }}, size:1}";
+ try {
+ client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
+ fail("search script should have been rejected");
+ } catch (Exception e) {
+ assertThat(e.toString(), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled"));
+ }
+ try {
+ client().prepareUpdate("test", "scriptTest", "1").setScript("script1", ScriptService.ScriptType.FILE).setScriptLang(MustacheScriptEngineService.NAME).get();
+ fail("update script should have been rejected");
+ } catch(Exception e) {
+ assertThat(e.getMessage(), containsString("failed to execute script"));
+ assertThat(e.getCause().toString(), containsString("scripts of type [file], operation [update] and lang [mustache] are disabled"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java b/core/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java
new file mode 100644
index 0000000000..b96391682f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class ScriptContextRegistryTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testValidateCustomScriptContextsOperation() throws IOException {
+ for (final String rejectedContext : ScriptContextRegistry.RESERVED_SCRIPT_CONTEXTS) {
+ try {
+ //try to register a prohibited script context
+ new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("test", rejectedContext)));
+ fail("ScriptContextRegistry initialization should have failed");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context"));
+ }
+ }
+ }
+
+ @Test
+ public void testValidateCustomScriptContextsPluginName() throws IOException {
+ for (final String rejectedContext : ScriptContextRegistry.RESERVED_SCRIPT_CONTEXTS) {
+ try {
+ //try to register a prohibited script context
+ new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin(rejectedContext, "test")));
+ fail("ScriptContextRegistry initialization should have failed");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context"));
+ }
+ }
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testValidateCustomScriptContextsEmptyPluginName() throws IOException {
+ new ScriptContext.Plugin(randomBoolean() ? null : "", "test");
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testValidateCustomScriptContextsEmptyOperation() throws IOException {
+ new ScriptContext.Plugin("test", randomBoolean() ? null : "");
+ }
+
+ @Test
+ public void testDuplicatedPluginScriptContexts() throws IOException {
+ try {
+ //try to register the same script context twice
+ new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("testplugin", "test"), new ScriptContext.Plugin("testplugin", "test")));
+ fail("ScriptContextRegistry initialization should have failed");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), Matchers.containsString("script context [testplugin_test] cannot be registered twice"));
+ }
+ }
+
+ @Test
+ public void testNonDuplicatedPluginScriptContexts() throws IOException {
+ new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("testplugin1", "test"), new ScriptContext.Plugin("testplugin2", "test")));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptFieldTests.java b/core/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
new file mode 100644
index 0000000000..81319f6779
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptFieldTests.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 3)
+public class ScriptFieldTests extends ElasticsearchIntegrationTest {
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("plugin.types", CustomScriptPlugin.class.getName()).build();
+    }
+
+    // Fixture arrays returned verbatim by the native scripts below; the tests
+    // assert that script fields round-trip these exact arrays.
+    static int[] intArray = { Integer.MAX_VALUE, Integer.MIN_VALUE, 3 };
+    // Uppercase 'L' suffix so the literal is not misread as the digit 1.
+    // NOTE(review): the third element equals Long.MAX_VALUE, duplicating the
+    // first — presumably intentional, but worth confirming.
+    static long[] longArray = { Long.MAX_VALUE, Long.MIN_VALUE, 9223372036854775807L };
+    static float[] floatArray = { Float.MAX_VALUE, Float.MIN_VALUE, 3.3f };
+    static double[] doubleArray = { Double.MAX_VALUE, Double.MIN_VALUE, 3.3d };
+
+    /**
+     * Indexes six docs and requests one script field per numeric type via the
+     * {@link Script} API; every hit must carry the exact fixture array.
+     */
+    public void testNativeScript() throws InterruptedException, ExecutionException {
+        indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("text", "doc1"), client()
+                .prepareIndex("test", "type1", "2").setSource("text", "doc2"),
+                client().prepareIndex("test", "type1", "3").setSource("text", "doc3"), client().prepareIndex("test", "type1", "4")
+                        .setSource("text", "doc4"), client().prepareIndex("test", "type1", "5").setSource("text", "doc5"), client()
+                        .prepareIndex("test", "type1", "6").setSource("text", "doc6"));
+
+        client().admin().indices().prepareFlush("test").execute().actionGet();
+        SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+                .addScriptField("int", new Script("int", ScriptType.INLINE, "native", null))
+                .addScriptField("float", new Script("float", ScriptType.INLINE, "native", null))
+                .addScriptField("double", new Script("double", ScriptType.INLINE, "native", null))
+                .addScriptField("long", new Script("long", ScriptType.INLINE, "native", null)).execute().actionGet();
+        assertThat(sr.getHits().hits().length, equalTo(6));
+        for (SearchHit hit : sr.getHits().getHits()) {
+            Object result = hit.getFields().get("int").getValues().get(0);
+            assertThat(result, equalTo((Object) intArray));
+            result = hit.getFields().get("long").getValues().get(0);
+            assertThat(result, equalTo((Object) longArray));
+            result = hit.getFields().get("float").getValues().get(0);
+            assertThat(result, equalTo((Object) floatArray));
+            result = hit.getFields().get("double").getValues().get(0);
+            assertThat(result, equalTo((Object) doubleArray));
+        }
+    }
+
+    /*
+     * Same scenario as testNativeScript but through the deprecated
+     * string-based addScriptField API.
+     * TODO Remove in 2.0
+     */
+    public void testNativeScriptOldScriptAPI() throws InterruptedException, ExecutionException {
+        indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("text", "doc1"), client()
+                .prepareIndex("test", "type1", "2").setSource("text", "doc2"),
+                client().prepareIndex("test", "type1", "3").setSource("text", "doc3"), client().prepareIndex("test", "type1", "4")
+                        .setSource("text", "doc4"), client().prepareIndex("test", "type1", "5").setSource("text", "doc5"), client()
+                        .prepareIndex("test", "type1", "6").setSource("text", "doc6"));
+
+        client().admin().indices().prepareFlush("test").execute().actionGet();
+        SearchResponse sr = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+                .addScriptField("int", "native", "int", null).addScriptField("float", "native", "float", null)
+                .addScriptField("double", "native", "double", null).addScriptField("long", "native", "long", null).execute().actionGet();
+        assertThat(sr.getHits().hits().length, equalTo(6));
+        for (SearchHit hit : sr.getHits().getHits()) {
+            Object result = hit.getFields().get("int").getValues().get(0);
+            assertThat(result, equalTo((Object) intArray));
+            result = hit.getFields().get("long").getValues().get(0);
+            assertThat(result, equalTo((Object) longArray));
+            result = hit.getFields().get("float").getValues().get(0);
+            assertThat(result, equalTo((Object) floatArray));
+            result = hit.getFields().get("double").getValues().get(0);
+            assertThat(result, equalTo((Object) doubleArray));
+        }
+    }
+
+    // One NativeScriptFactory/AbstractSearchScript pair per numeric type; each
+    // script simply returns the corresponding static fixture array.
+    static class IntArrayScriptFactory implements NativeScriptFactory {
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new IntScript();
+        }
+    }
+
+    static class IntScript extends AbstractSearchScript {
+        @Override
+        public Object run() {
+            return intArray;
+        }
+    }
+
+    static class LongArrayScriptFactory implements NativeScriptFactory {
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new LongScript();
+        }
+    }
+
+    static class LongScript extends AbstractSearchScript {
+        @Override
+        public Object run() {
+            return longArray;
+        }
+    }
+
+    static class FloatArrayScriptFactory implements NativeScriptFactory {
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new FloatScript();
+        }
+    }
+
+    static class FloatScript extends AbstractSearchScript {
+        @Override
+        public Object run() {
+            return floatArray;
+        }
+    }
+
+    static class DoubleArrayScriptFactory implements NativeScriptFactory {
+        @Override
+        public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+            return new DoubleScript();
+        }
+    }
+
+    static class DoubleScript extends AbstractSearchScript {
+        @Override
+        public Object run() {
+            return doubleArray;
+        }
+    }
+
+    // Test plugin that registers the four native scripts above under the names
+    // the search requests refer to ("int", "long", "float", "double").
+    public static class CustomScriptPlugin extends AbstractPlugin {
+
+        @Override
+        public String name() {
+            return "custom_script";
+        }
+
+        @Override
+        public String description() {
+            return "script ";
+        }
+
+        public void onModule(ScriptModule scriptModule) {
+            scriptModule.registerScript("int", IntArrayScriptFactory.class);
+            scriptModule.registerScript("long", LongArrayScriptFactory.class);
+            scriptModule.registerScript("float", FloatArrayScriptFactory.class);
+            scriptModule.registerScript("double", DoubleArrayScriptFactory.class);
+        }
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java b/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java
new file mode 100644
index 0000000000..db8770a220
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptIndexSettingsTest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script;
+
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptResponse;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class ScriptIndexSettingsTest extends ElasticsearchIntegrationTest {
+
+    /**
+     * Indexing a script must auto-create the dedicated script index with its
+     * fixed settings: one shard and replicas auto-expanded to "0-all".
+     */
+    @Test
+    public void testScriptIndexSettings() {
+        PutIndexedScriptResponse putIndexedScriptResponse =
+                client().preparePutIndexedScript().setId("foobar").setScriptLang("groovy").setSource("{ \"script\": 1 }")
+                        .get();
+        assertTrue(putIndexedScriptResponse.isCreated());
+        ensureGreen();
+
+        IndicesExistsRequest existsRequest = new IndicesExistsRequest();
+        existsRequest.indices(new String[] { ScriptService.SCRIPT_INDEX });
+
+        IndicesExistsResponse existsResponse = cluster().client().admin().indices().exists(existsRequest).actionGet();
+        assertTrue(existsResponse.isExists());
+
+        GetSettingsRequest settingsRequest = new GetSettingsRequest();
+        settingsRequest.indices(ScriptService.SCRIPT_INDEX);
+        settingsRequest.indicesOptions(IndicesOptions.strictExpandOpen());
+        GetSettingsResponse settingsResponse = client()
+                .admin()
+                .indices()
+                .getSettings(settingsRequest)
+                .actionGet();
+
+        String numberOfShards = settingsResponse.getSetting(ScriptService.SCRIPT_INDEX, "index.number_of_shards");
+        String numberOfReplicas = settingsResponse.getSetting(ScriptService.SCRIPT_INDEX, "index.auto_expand_replicas");
+
+        assertEquals("Number of shards should be 1", "1", numberOfShards);
+        assertEquals("Auto expand replicas should be 0-all", "0-all", numberOfReplicas);
+    }
+
+    /**
+     * Deleting the script index must be acknowledged, and fetching a
+     * previously indexed script afterwards must fail with
+     * {@link IndexMissingException}.
+     */
+    @Test
+    public void testDeleteScriptIndex() {
+        PutIndexedScriptResponse putIndexedScriptResponse =
+                client().preparePutIndexedScript().setId("foobar").setScriptLang("groovy").setSource("{ \"script\": 1 }")
+                        .get();
+        assertTrue(putIndexedScriptResponse.isCreated());
+        DeleteIndexResponse deleteResponse = client().admin().indices().prepareDelete(ScriptService.SCRIPT_INDEX).get();
+        assertTrue(deleteResponse.isAcknowledged());
+        ensureGreen();
+        try {
+            client().prepareGetIndexedScript("groovy","foobar").get();
+            fail("getting an indexed script after deleting the script index should have thrown IndexMissingException");
+        } catch (IndexMissingException expected) {
+            // expected: the script index no longer exists
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java
new file mode 100644
index 0000000000..1ea2145e03
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import com.google.common.collect.*;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.expression.ExpressionScriptEngineService;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+/**
+ * Tests for {@link ScriptModes}: verifies how script settings (generic per
+ * script type, generic per script context, and engine/type/context specific)
+ * resolve to a {@link ScriptMode} for every (lang, type, context) combination.
+ */
+public class ScriptModesTests extends ElasticsearchTestCase {
+
+    private static final Set<String> ALL_LANGS = ImmutableSet.of(GroovyScriptEngineService.NAME, MustacheScriptEngineService.NAME, ExpressionScriptEngineService.NAME, "custom", "test");
+
+    static final String[] ENABLE_VALUES = new String[]{"on", "true", "yes", "1"};
+    static final String[] DISABLE_VALUES = new String[]{"off", "false", "no", "0"};
+
+    ScriptContextRegistry scriptContextRegistry;
+    private ScriptContext[] scriptContexts;
+    private Map<String, ScriptEngineService> scriptEngines;
+    private ScriptModes scriptModes;
+    // every setting verified through assertScriptModes is recorded here and
+    // compared against the expected total in the @After hook below
+    private Set<String> checkedSettings;
+    private boolean assertAllSettingsWereChecked;
+    private boolean assertScriptModesNonNull;
+
+    @Before
+    public void setupScriptEngines() {
+        //randomly register custom script contexts
+        int randomInt = randomIntBetween(0, 3);
+        //prevent duplicates using map
+        Map<String, ScriptContext.Plugin> contexts = Maps.newHashMap();
+        for (int i = 0; i < randomInt; i++) {
+            String plugin = randomAsciiOfLength(randomIntBetween(1, 10));
+            String operation = randomAsciiOfLength(randomIntBetween(1, 30));
+            String context = plugin + "-" + operation;
+            contexts.put(context, new ScriptContext.Plugin(plugin, operation));
+        }
+        scriptContextRegistry = new ScriptContextRegistry(contexts.values());
+        scriptContexts = scriptContextRegistry.scriptContexts().toArray(new ScriptContext[scriptContextRegistry.scriptContexts().size()]);
+        scriptEngines = buildScriptEnginesByLangMap(ImmutableSet.of(
+                new GroovyScriptEngineService(Settings.EMPTY),
+                new MustacheScriptEngineService(Settings.EMPTY),
+                new ExpressionScriptEngineService(Settings.EMPTY),
+                //add the native engine just to make sure it gets filtered out
+                new NativeScriptEngineService(Settings.EMPTY, Collections.<String, NativeScriptFactory>emptyMap()),
+                new CustomScriptEngineService()));
+        checkedSettings = new HashSet<>();
+        assertAllSettingsWereChecked = true;
+        assertScriptModesNonNull = true;
+    }
+
+    // Native scripts must always resolve to ON, regardless of settings.
+    @After
+    public void assertNativeScriptsAreAlwaysAllowed() {
+        if (assertScriptModesNonNull) {
+            assertThat(scriptModes.getScriptMode(NativeScriptEngineService.NAME, randomFrom(ScriptType.values()), randomFrom(scriptContexts)), equalTo(ScriptMode.ON));
+        }
+    }
+
+    // Sanity check run after every test: the registry holds exactly the
+    // expected number of settings, and (unless a test opted out) the test
+    // asserted on every single one of them.
+    @After
+    public void assertAllSettingsWereChecked() {
+        if (assertScriptModesNonNull) {
+            assertThat(scriptModes, notNullValue());
+            //4 is the number of engines (native excluded), custom is counted twice though as it's associated with two different names
+            int numberOfSettings = 5 * ScriptType.values().length * scriptContextRegistry.scriptContexts().size();
+            assertThat(scriptModes.scriptModes.size(), equalTo(numberOfSettings));
+            if (assertAllSettingsWereChecked) {
+                assertThat(checkedSettings.size(), equalTo(numberOfSettings));
+            }
+        }
+    }
+
+    // With no settings: FILE scripts are ON, INDEXED and INLINE are SANDBOX.
+    @Test
+    public void testDefaultSettings() {
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, Settings.EMPTY);
+        assertScriptModesAllOps(ScriptMode.ON, ALL_LANGS, ScriptType.FILE);
+        assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED, ScriptType.INLINE);
+    }
+
+    // Looking up an unknown lang must throw rather than return a default.
+    @Test(expected = IllegalArgumentException.class)
+    public void testMissingSetting() {
+        assertAllSettingsWereChecked = false;
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, Settings.EMPTY);
+        scriptModes.getScriptMode("non_existing", randomFrom(ScriptType.values()), randomFrom(scriptContexts));
+    }
+
+    // script.<type> settings apply to all langs/contexts for that type; types
+    // not mentioned keep their defaults.
+    @Test
+    public void testScriptTypeGenericSettings() {
+        int randomInt = randomIntBetween(1, ScriptType.values().length - 1);
+        Set<ScriptType> randomScriptTypesSet = Sets.newHashSet();
+        ScriptMode[] randomScriptModes = new ScriptMode[randomInt];
+        for (int i = 0; i < randomInt; i++) {
+            boolean added = false;
+            while (added == false) {
+                added = randomScriptTypesSet.add(randomFrom(ScriptType.values()));
+            }
+            randomScriptModes[i] = randomFrom(ScriptMode.values());
+        }
+        ScriptType[] randomScriptTypes = randomScriptTypesSet.toArray(new ScriptType[randomScriptTypesSet.size()]);
+        Settings.Builder builder = Settings.builder();
+        for (int i = 0; i < randomInt; i++) {
+            builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + randomScriptTypes[i], randomScriptModes[i]);
+        }
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, builder.build());
+
+        for (int i = 0; i < randomInt; i++) {
+            assertScriptModesAllOps(randomScriptModes[i], ALL_LANGS, randomScriptTypes[i]);
+        }
+        if (randomScriptTypesSet.contains(ScriptType.FILE) == false) {
+            assertScriptModesAllOps(ScriptMode.ON, ALL_LANGS, ScriptType.FILE);
+        }
+        if (randomScriptTypesSet.contains(ScriptType.INDEXED) == false) {
+            assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED);
+        }
+        if (randomScriptTypesSet.contains(ScriptType.INLINE) == false) {
+            assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INLINE);
+        }
+    }
+
+    // script.<context> settings apply to all langs/types for that context;
+    // contexts not mentioned keep their defaults.
+    @Test
+    public void testScriptContextGenericSettings() {
+        int randomInt = randomIntBetween(1, scriptContexts.length - 1);
+        Set<ScriptContext> randomScriptContextsSet = Sets.newHashSet();
+        ScriptMode[] randomScriptModes = new ScriptMode[randomInt];
+        for (int i = 0; i < randomInt; i++) {
+            boolean added = false;
+            while (added == false) {
+                added = randomScriptContextsSet.add(randomFrom(scriptContexts));
+            }
+            randomScriptModes[i] = randomFrom(ScriptMode.values());
+        }
+        ScriptContext[] randomScriptContexts = randomScriptContextsSet.toArray(new ScriptContext[randomScriptContextsSet.size()]);
+        Settings.Builder builder = Settings.builder();
+        for (int i = 0; i < randomInt; i++) {
+            builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + randomScriptContexts[i].getKey(), randomScriptModes[i]);
+        }
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, builder.build());
+
+        for (int i = 0; i < randomInt; i++) {
+            assertScriptModesAllTypes(randomScriptModes[i], ALL_LANGS, randomScriptContexts[i]);
+        }
+
+        ScriptContext[] complementOf = complementOf(randomScriptContexts);
+        assertScriptModes(ScriptMode.ON, ALL_LANGS, new ScriptType[]{ScriptType.FILE}, complementOf);
+        assertScriptModes(ScriptMode.SANDBOX, ALL_LANGS, new ScriptType[]{ScriptType.INDEXED, ScriptType.INLINE}, complementOf);
+    }
+
+    @Test
+    public void testConflictingScriptTypeAndOpGenericSettings() {
+        ScriptContext scriptContext = randomFrom(scriptContexts);
+        Settings.Builder builder = Settings.builder().put(ScriptModes.SCRIPT_SETTINGS_PREFIX + scriptContext.getKey(), randomFrom(DISABLE_VALUES))
+                .put("script.indexed", randomFrom(ENABLE_VALUES)).put("script.inline", ScriptMode.SANDBOX);
+        //operations generic settings have precedence over script type generic settings
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, builder.build());
+        assertScriptModesAllTypes(ScriptMode.OFF, ALL_LANGS, scriptContext);
+        ScriptContext[] complementOf = complementOf(scriptContext);
+        assertScriptModes(ScriptMode.ON, ALL_LANGS, new ScriptType[]{ScriptType.FILE, ScriptType.INDEXED}, complementOf);
+        assertScriptModes(ScriptMode.SANDBOX, ALL_LANGS, new ScriptType[]{ScriptType.INLINE}, complementOf);
+    }
+
+    // Engine-specific settings apply only to that engine's lang; all other
+    // langs keep their defaults.
+    @Test
+    public void testEngineSpecificSettings() {
+        Settings.Builder builder = Settings.builder()
+                .put(specificEngineOpSettings(GroovyScriptEngineService.NAME, ScriptType.INLINE, ScriptContext.Standard.MAPPING), randomFrom(DISABLE_VALUES))
+                .put(specificEngineOpSettings(GroovyScriptEngineService.NAME, ScriptType.INLINE, ScriptContext.Standard.UPDATE), randomFrom(DISABLE_VALUES));
+        ImmutableSet<String> groovyLangSet = ImmutableSet.of(GroovyScriptEngineService.NAME);
+        Set<String> allButGroovyLangSet = new HashSet<>(ALL_LANGS);
+        allButGroovyLangSet.remove(GroovyScriptEngineService.NAME);
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, builder.build());
+        assertScriptModes(ScriptMode.OFF, groovyLangSet, new ScriptType[]{ScriptType.INLINE}, ScriptContext.Standard.MAPPING, ScriptContext.Standard.UPDATE);
+        assertScriptModes(ScriptMode.SANDBOX, groovyLangSet, new ScriptType[]{ScriptType.INLINE}, complementOf(ScriptContext.Standard.MAPPING, ScriptContext.Standard.UPDATE));
+        assertScriptModesAllOps(ScriptMode.SANDBOX, allButGroovyLangSet, ScriptType.INLINE);
+        assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED);
+        assertScriptModesAllOps(ScriptMode.ON, ALL_LANGS, ScriptType.FILE);
+    }
+
+    // Engine-specific settings take precedence over generic per-type settings.
+    @Test
+    public void testInteractionBetweenGenericAndEngineSpecificSettings() {
+        Settings.Builder builder = Settings.builder().put("script.inline", randomFrom(DISABLE_VALUES))
+                .put(specificEngineOpSettings(MustacheScriptEngineService.NAME, ScriptType.INLINE, ScriptContext.Standard.AGGS), randomFrom(ENABLE_VALUES))
+                .put(specificEngineOpSettings(MustacheScriptEngineService.NAME, ScriptType.INLINE, ScriptContext.Standard.SEARCH), randomFrom(ENABLE_VALUES));
+        ImmutableSet<String> mustacheLangSet = ImmutableSet.of(MustacheScriptEngineService.NAME);
+        Set<String> allButMustacheLangSet = new HashSet<>(ALL_LANGS);
+        allButMustacheLangSet.remove(MustacheScriptEngineService.NAME);
+        this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, builder.build());
+        assertScriptModes(ScriptMode.ON, mustacheLangSet, new ScriptType[]{ScriptType.INLINE}, ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH);
+        assertScriptModes(ScriptMode.OFF, mustacheLangSet, new ScriptType[]{ScriptType.INLINE}, complementOf(ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH));
+        assertScriptModesAllOps(ScriptMode.OFF, allButMustacheLangSet, ScriptType.INLINE);
+        assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED);
+        assertScriptModesAllOps(ScriptMode.ON, ALL_LANGS, ScriptType.FILE);
+    }
+
+    // Assert the expected mode for the given langs/types across ALL contexts.
+    private void assertScriptModesAllOps(ScriptMode expectedScriptMode, Set<String> langs, ScriptType... scriptTypes) {
+        assertScriptModes(expectedScriptMode, langs, scriptTypes, scriptContexts);
+    }
+
+    // Assert the expected mode for the given langs/contexts across ALL types.
+    private void assertScriptModesAllTypes(ScriptMode expectedScriptMode, Set<String> langs, ScriptContext... scriptContexts) {
+        assertScriptModes(expectedScriptMode, langs, ScriptType.values(), scriptContexts);
+    }
+
+    // Core assertion: checks every (lang, type, context) combination and
+    // records it in checkedSettings for the @After completeness check.
+    private void assertScriptModes(ScriptMode expectedScriptMode, Set<String> langs, ScriptType[] scriptTypes, ScriptContext... scriptContexts) {
+        assert langs.size() > 0;
+        assert scriptTypes.length > 0;
+        assert scriptContexts.length > 0;
+        for (String lang : langs) {
+            for (ScriptType scriptType : scriptTypes) {
+                for (ScriptContext scriptContext : scriptContexts) {
+                    assertThat(lang + "." + scriptType + "." + scriptContext.getKey() + " doesn't have the expected value", scriptModes.getScriptMode(lang, scriptType, scriptContext), equalTo(expectedScriptMode));
+                    checkedSettings.add(lang + "." + scriptType + "." + scriptContext);
+                }
+            }
+        }
+    }
+
+    // Returns all registered contexts except the given ones.
+    private ScriptContext[] complementOf(ScriptContext... scriptContexts) {
+        Map<String, ScriptContext> copy = Maps.newHashMap();
+        for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
+            copy.put(scriptContext.getKey(), scriptContext);
+        }
+        for (ScriptContext scriptContext : scriptContexts) {
+            copy.remove(scriptContext.getKey());
+        }
+        return copy.values().toArray(new ScriptContext[copy.size()]);
+    }
+
+    // Builds the "script.engine.<lang>.<type>.<context>" setting key.
+    private static String specificEngineOpSettings(String lang, ScriptType scriptType, ScriptContext scriptContext) {
+        return ScriptModes.ENGINE_SETTINGS_PREFIX + "." + lang + "." + scriptType + "." + scriptContext.getKey();
+    }
+
+    // Maps every lang name each engine declares to that engine; "custom"
+    // and "test" therefore both map to the CustomScriptEngineService below.
+    static ImmutableMap<String, ScriptEngineService> buildScriptEnginesByLangMap(Set<ScriptEngineService> scriptEngines) {
+        ImmutableMap.Builder<String, ScriptEngineService> builder = ImmutableMap.builder();
+        for (ScriptEngineService scriptEngine : scriptEngines) {
+            for (String type : scriptEngine.types()) {
+                builder.put(type, scriptEngine);
+            }
+        }
+        return builder.build();
+    }
+
+    // Minimal non-sandboxed engine registered under two names ("custom" and
+    // "test"); only types() and sandboxed() matter for these tests.
+    private static class CustomScriptEngineService implements ScriptEngineService {
+        @Override
+        public String[] types() {
+            return new String[]{"custom", "test"};
+        }
+
+        @Override
+        public String[] extensions() {
+            return new String[0];
+        }
+
+        @Override
+        public boolean sandboxed() {
+            return false;
+        }
+
+        @Override
+        public Object compile(String script) {
+            return null;
+        }
+
+        @Override
+        public ExecutableScript executable(Object compiledScript, @Nullable Map<String, Object> vars) {
+            return null;
+        }
+
+        @Override
+        public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+            return null;
+        }
+
+        @Override
+        public Object execute(Object compiledScript, Map<String, Object> vars) {
+            return null;
+        }
+
+        @Override
+        public Object unwrap(Object value) {
+            return null;
+        }
+
+        @Override
+        public void close() {
+
+        }
+
+        @Override
+        public void scriptRemoved(@Nullable CompiledScript script) {
+
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptParameterParserTest.java b/core/src/test/java/org/elasticsearch/script/ScriptParameterParserTest.java
new file mode 100644
index 0000000000..e0c66fd09d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptParameterParserTest.java
@@ -0,0 +1,1269 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.ToXContent.MapParams;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+import org.elasticsearch.script.Script.ScriptParseException;
+import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class ScriptParameterParserTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testTokenDefaultInline() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"script\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ paramParser = new ScriptParameterParser(null);
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ paramParser = new ScriptParameterParser(new HashSet<String>());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenDefaultFile() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"script_file\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+
+ parser = XContentHelper.createParser(new BytesArray("{ \"scriptFile\" : \"scriptValue\" }"));
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenDefaultIndexed() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"script_id\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+
+ parser = XContentHelper.createParser(new BytesArray("{ \"scriptId\" : \"scriptValue\" }"));
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenDefaultNotFound() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"bar\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(false));
+ assertThat(paramParser.getDefaultScriptParameterValue(), nullValue());
+ assertThat(paramParser.getScriptParameterValue("script"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenSingleParameter() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenSingleParameterFile() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_file\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenSingleParameterIndexed() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_id\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceInlineFile() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"scriptValue\", \"foo_file\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceInlineIndexed() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"scriptValue\", \"foo_id\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceFileInline() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_file\" : \"scriptValue\", \"foo\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceFileIndexed() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_file\" : \"scriptValue\", \"foo_id\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceIndexedInline() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_id\" : \"scriptValue\", \"foo\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testTokenSingleParameterDelcaredTwiceIndexedFile() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo_id\" : \"scriptValue\", \"foo_file\" : \"scriptValue\" }"));
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ paramParser.token(parser.currentName(), parser.currentToken(), parser);
+ }
+
+ @Test
+ public void testTokenMultipleParameters() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"fooScriptValue\", \"bar_file\" : \"barScriptValue\", \"baz_id\" : \"bazScriptValue\" }"));
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenMultipleParametersWithLang() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"fooScriptValue\", \"bar_file\" : \"barScriptValue\", \"lang\" : \"myLang\", \"baz_id\" : \"bazScriptValue\" }"));
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ }
+
+ @Test
+ public void testTokenMultipleParametersNotFound() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"other\" : \"scriptValue\" }"));
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(false));
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenMultipleParametersSomeNotFound() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"fooScriptValue\", \"other_file\" : \"barScriptValue\", \"baz_id\" : \"bazScriptValue\" }"));
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ Token token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(false));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ token = parser.nextToken();
+ while (token != Token.VALUE_STRING) {
+ token = parser.nextToken();
+ }
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(true));
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testTokenMultipleParametersWrongType() throws IOException {
+ XContentParser parser = XContentHelper.createParser(new BytesArray("{ \"foo\" : \"fooScriptValue\", \"bar_file\" : \"barScriptValue\", \"baz_id\" : \"bazScriptValue\" }"));
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(paramParser.token(parser.currentName(), parser.currentToken(), parser), equalTo(false));
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test(expected=IllegalArgumentException.class)
+ public void testReservedParameters() {
+ Set<String> parameterNames = Collections.singleton("lang");
+ new ScriptParameterParser(parameterNames );
+ }
+
+ @Test
+ public void testConfigDefaultInline() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("script", "scriptValue");
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ config = new HashMap<>();
+ config.put("script", "scriptValue");
+ paramParser = new ScriptParameterParser(null);
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ config = new HashMap<>();
+ config.put("script", "scriptValue");
+ paramParser = new ScriptParameterParser(new HashSet<String>());
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigDefaultFile() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("script_file", "scriptValue");
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+
+ config = new HashMap<>();
+ config.put("scriptFile", "scriptValue");
+ paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigDefaultIndexed() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("script_id", "scriptValue");
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+
+ config = new HashMap<>();
+ config.put("scriptId", "scriptValue");
+ paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigDefaultIndexedNoRemove() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("script_id", "scriptValue");
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, false);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.size(), equalTo(1));
+ assertThat((String) config.get("script_id"), equalTo("scriptValue"));
+
+ config = new HashMap<>();
+ config.put("scriptId", "scriptValue");
+ paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, false);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.size(), equalTo(1));
+ assertThat((String) config.get("scriptId"), equalTo("scriptValue"));
+ }
+
+ @Test
+ public void testConfigDefaultNotFound() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "bar");
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseConfig(config, true);
+ assertThat(paramParser.getDefaultScriptParameterValue(), nullValue());
+ assertThat(paramParser.getScriptParameterValue("script"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.size(), equalTo(1));
+ assertThat((String) config.get("foo"), equalTo("bar"));
+ }
+
+ @Test
+ public void testConfigSingleParameter() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigSingleParameterFile() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigSingleParameterIndexed() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceInlineFile() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo", "scriptValue");
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceInlineIndexed() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo", "scriptValue");
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceFileInline() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo_file", "scriptValue");
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceFileIndexed() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo_file", "scriptValue");
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceIndexedInline() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo_id", "scriptValue");
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigSingleParameterDelcaredTwiceIndexedFile() throws IOException {
+ Map<String, Object> config = new LinkedHashMap<>();
+ config.put("foo_id", "scriptValue");
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test
+ public void testConfigMultipleParameters() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigMultipleParametersWithLang() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("lang", "myLang");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ assertThat(config.isEmpty(), equalTo(true));
+ }
+
+ @Test
+ public void testConfigMultipleParametersWithLangNoRemove() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("lang", "myLang");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, false);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ assertThat(config.size(), equalTo(4));
+ assertThat((String) config.get("foo"), equalTo("fooScriptValue"));
+ assertThat((String) config.get("bar_file"), equalTo("barScriptValue"));
+ assertThat((String) config.get("baz_id"), equalTo("bazScriptValue"));
+ assertThat((String) config.get("lang"), equalTo("myLang"));
+ }
+
+ @Test
+ public void testConfigMultipleParametersNotFound() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("other", "scriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.size(), equalTo(1));
+ assertThat((String) config.get("other"), equalTo("scriptValue"));
+ }
+
+ @Test
+ public void testConfigMultipleParametersSomeNotFound() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("other_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ assertThat(config.size(), equalTo(1));
+ assertThat((String) config.get("other_file"), equalTo("barScriptValue"));
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigMultipleParametersInlineWrongType() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", 1l);
+ config.put("bar_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ config.put("lang", "myLang");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigMultipleParametersFileWrongType() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", 1l);
+ config.put("baz_id", "bazScriptValue");
+ config.put("lang", "myLang");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigMultipleParametersIndexedWrongType() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("baz_id", 1l);
+ config.put("lang", "myLang");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testConfigMultipleParametersLangWrongType() throws IOException {
+ Map<String, Object> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ config.put("lang", 1l);
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ paramParser.parseConfig(config, true);
+ }
+
+ @Test
+ public void testParamsDefaultInline() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("script", "scriptValue");
+ MapParams params = new MapParams(config);
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseParams(params);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+
+ paramParser = new ScriptParameterParser(null);
+ paramParser.parseParams(params);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+
+ paramParser = new ScriptParameterParser(new HashSet<String>());
+ paramParser.parseParams(params);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsDefaultFile() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("script_file", "scriptValue");
+ MapParams params = new MapParams(config);
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseParams(params);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsDefaultIndexed() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("script_id", "scriptValue");
+ MapParams params = new MapParams(config);
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseParams(params);
+ assertDefaultParameterValue(paramParser, "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsDefaultNotFound() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "bar");
+ MapParams params = new MapParams(config);
+ ScriptParameterParser paramParser = new ScriptParameterParser();
+ paramParser.parseParams(params);
+ assertThat(paramParser.getDefaultScriptParameterValue(), nullValue());
+ assertThat(paramParser.getScriptParameterValue("script"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsSingleParameter() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INLINE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsSingleParameterFile() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.FILE);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsSingleParameterIndexed() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "scriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceInlineFile() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo", "scriptValue");
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceInlineIndexed() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo", "scriptValue");
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceFileInline() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo_file", "scriptValue");
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceFileIndexed() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo_file", "scriptValue");
+ config.put("foo_id", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceIndexedInline() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo_id", "scriptValue");
+ config.put("foo", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test(expected = ScriptParseException.class)
+ public void testParamsSingleParameterDelcaredTwiceIndexedFile() throws IOException {
+ Map<String, String> config = new LinkedHashMap<>();
+ config.put("foo_id", "scriptValue");
+ config.put("foo_file", "scriptValue");
+ Set<String> parameters = Collections.singleton("foo");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ }
+
+ @Test
+ public void testParamsMultipleParameters() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsMultipleParametersWithLang() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("lang", "myLang");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ }
+
+ @Test
+ public void testParamsMultipleParametersWithLangNoRemove() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("bar_file", "barScriptValue");
+ config.put("lang", "myLang");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertParameterValue(paramParser, "bar", "barScriptValue", ScriptType.FILE);
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), equalTo("myLang"));
+ }
+
+ @Test
+ public void testParamsMultipleParametersNotFound() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("other", "scriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ @Test
+ public void testParamsMultipleParametersSomeNotFound() throws IOException {
+ Map<String, String> config = new HashMap<>();
+ config.put("foo", "fooScriptValue");
+ config.put("other_file", "barScriptValue");
+ config.put("baz_id", "bazScriptValue");
+ Set<String> parameters = new HashSet<>();
+ parameters.add("foo");
+ parameters.add("bar");
+ parameters.add("baz");
+ ScriptParameterParser paramParser = new ScriptParameterParser(parameters);
+ assertThat(paramParser.getScriptParameterValue("foo"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ MapParams params = new MapParams(config);
+ paramParser.parseParams(params);
+ assertParameterValue(paramParser, "foo", "fooScriptValue", ScriptType.INLINE);
+ assertThat(paramParser.getScriptParameterValue("bar"), nullValue());
+ assertParameterValue(paramParser, "baz", "bazScriptValue", ScriptType.INDEXED);
+ assertThat(paramParser.getScriptParameterValue("bar_file"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("baz_id"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other"), nullValue());
+ assertThat(paramParser.getScriptParameterValue("other_file"), nullValue());
+ assertThat(paramParser.lang(), nullValue());
+ }
+
+ private void assertDefaultParameterValue(ScriptParameterParser paramParser, String expectedScript, ScriptType expectedScriptType) throws IOException {
+ ScriptParameterValue defaultValue = paramParser.getDefaultScriptParameterValue();
+ ScriptParameterValue defaultValueByName = paramParser.getScriptParameterValue("script");
+ assertThat(defaultValue.scriptType(), equalTo(expectedScriptType));
+ assertThat(defaultValue.script(), equalTo(expectedScript));
+ assertThat(defaultValueByName.scriptType(), equalTo(expectedScriptType));
+ assertThat(defaultValueByName.script(), equalTo(expectedScript));
+ }
+
+ private void assertParameterValue(ScriptParameterParser paramParser, String parameterName, String expectedScript, ScriptType expectedScriptType) throws IOException {
+ ScriptParameterValue value = paramParser.getScriptParameterValue(parameterName);
+ assertThat(value.scriptType(), equalTo(expectedScriptType));
+ assertThat(value.script(), equalTo(expectedScript));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
new file mode 100644
index 0000000000..a43edcbc0b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
@@ -0,0 +1,445 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.expression.ExpressionScriptEngineService;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.watcher.ResourceWatcherService;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+// Unit tests for ScriptService: file/indexed/inline script compilation and the
+// fine-grained "script mode" settings (ON/OFF/SANDBOX) resolved per engine,
+// per script type, and per script context.
+public class ScriptServiceTests extends ElasticsearchTestCase {
+
+ private ResourceWatcherService resourceWatcherService;
+ private Set<ScriptEngineService> scriptEngineServices;
+ private Map<String, ScriptEngineService> scriptEnginesByLangMap;
+ private ScriptContextRegistry scriptContextRegistry;
+ private ScriptContext[] scriptContexts;
+ private ScriptService scriptService;
+ // Directory (config/scripts) watched by the ResourceWatcherService for file scripts.
+ private Path scriptsFilePath;
+ private Settings baseSettings;
+
+ // Expected fallback mode per script type when no explicit setting is given:
+ // file scripts are always on; indexed/inline only run if the engine is sandboxed.
+ private static final Map<ScriptType, ScriptMode> DEFAULT_SCRIPT_MODES = new HashMap<>();
+
+ static {
+ DEFAULT_SCRIPT_MODES.put(ScriptType.FILE, ScriptMode.ON);
+ DEFAULT_SCRIPT_MODES.put(ScriptType.INDEXED, ScriptMode.SANDBOX);
+ DEFAULT_SCRIPT_MODES.put(ScriptType.INLINE, ScriptMode.SANDBOX);
+ }
+
+ @Before
+ public void setup() throws IOException {
+ Path genericConfigFolder = createTempDir();
+ baseSettings = settingsBuilder()
+ .put("path.home", createTempDir().toString())
+ .put("path.conf", genericConfigFolder)
+ .build();
+ resourceWatcherService = new ResourceWatcherService(baseSettings, null);
+ // One custom engine plus the three built-in engines; groovy is the only
+ // non-sandboxed one of these (relied on by the tests below).
+ scriptEngineServices = ImmutableSet.of(new TestEngineService(), new GroovyScriptEngineService(baseSettings),
+ new ExpressionScriptEngineService(baseSettings), new MustacheScriptEngineService(baseSettings));
+ scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(scriptEngineServices);
+ //randomly register custom script contexts
+ int randomInt = randomIntBetween(0, 3);
+ //prevent duplicates using map
+ Map<String, ScriptContext.Plugin> contexts = Maps.newHashMap();
+ for (int i = 0; i < randomInt; i++) {
+ String plugin;
+ do {
+ plugin = randomAsciiOfLength(randomIntBetween(1, 10));
+ } while (ScriptContextRegistry.RESERVED_SCRIPT_CONTEXTS.contains(plugin));
+ String operation;
+ do {
+ operation = randomAsciiOfLength(randomIntBetween(1, 30));
+ } while (ScriptContextRegistry.RESERVED_SCRIPT_CONTEXTS.contains(operation));
+ String context = plugin + "_" + operation;
+ contexts.put(context, new ScriptContext.Plugin(plugin, operation));
+ }
+ scriptContextRegistry = new ScriptContextRegistry(contexts.values());
+ scriptContexts = scriptContextRegistry.scriptContexts().toArray(new ScriptContext[scriptContextRegistry.scriptContexts().size()]);
+ logger.info("--> setup script service");
+ scriptsFilePath = genericConfigFolder.resolve("scripts");
+ Files.createDirectories(scriptsFilePath);
+ }
+
+ // Builds the ScriptService under test with baseSettings plus the given
+ // overrides; indexed-script lookups are stubbed out to avoid needing a node.
+ private void buildScriptService(Settings additionalSettings) throws IOException {
+ Settings finalSettings = Settings.builder().put(baseSettings).put(additionalSettings).build();
+ Environment environment = new Environment(finalSettings);
+ scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) {
+ @Override
+ String getScriptFromIndex(String scriptLang, String id) {
+ //mock the script that gets retrieved from an index
+ return "100";
+ }
+ };
+ }
+
+ // The legacy coarse-grained script.disable_dynamic setting must be rejected
+ // with a pointer to the fine-grained replacement settings.
+ @Test
+ public void testNotSupportedDisableDynamicSetting() throws IOException {
+ try {
+ buildScriptService(Settings.builder().put(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING, randomUnicodeOfLength(randomIntBetween(1, 10))).build());
+ fail("script service should have thrown exception due to non supported script.disable_dynamic setting");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings"));
+ }
+ }
+
+ // Only files with a known engine extension are picked up; removal of the
+ // file must also remove the compiled script.
+ @Test
+ public void testScriptsWithoutExtensions() throws IOException {
+ buildScriptService(Settings.EMPTY);
+ logger.info("--> setup two test files one with extension and another without");
+ Path testFileNoExt = scriptsFilePath.resolve("test_no_ext");
+ Path testFileWithExt = scriptsFilePath.resolve("test_script.tst");
+ Streams.copy("test_file_no_ext".getBytes("UTF-8"), Files.newOutputStream(testFileNoExt));
+ Streams.copy("test_file".getBytes("UTF-8"), Files.newOutputStream(testFileWithExt));
+ resourceWatcherService.notifyNow();
+
+ logger.info("--> verify that file with extension was correctly processed");
+ // TestEngineService.compile prefixes the file content with "compiled_".
+ CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null),
+ ScriptContext.Standard.SEARCH);
+ assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file"));
+
+ logger.info("--> delete both files");
+ Files.delete(testFileNoExt);
+ Files.delete(testFileWithExt);
+ resourceWatcherService.notifyNow();
+
+ logger.info("--> verify that file with extension was correctly removed");
+ try {
+ scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH);
+ fail("the script test_script should no longer exist");
+ } catch (IllegalArgumentException ex) {
+ assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script"));
+ }
+ }
+
+ // Two file scripts sharing a name but with different language extensions
+ // must compile independently under their respective engines.
+ @Test
+ public void testScriptsSameNameDifferentLanguage() throws IOException {
+ buildScriptService(Settings.EMPTY);
+ createFileScripts("groovy", "expression");
+ CompiledScript groovyScript = scriptService.compile(
+ new Script("file_script", ScriptType.FILE, GroovyScriptEngineService.NAME, null), randomFrom(scriptContexts));
+ assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME));
+ CompiledScript expressionScript = scriptService.compile(new Script("file_script", ScriptType.FILE,
+ ExpressionScriptEngineService.NAME, null), randomFrom(scriptContexts));
+ assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME));
+ }
+
+ // "test" and "test2" are aliases of the same engine, so compiling under
+ // either must hit the same cache entry (same CompiledScript instance).
+ @Test
+ public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+ buildScriptService(Settings.EMPTY);
+ CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null),
+ randomFrom(scriptContexts));
+ CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null),
+ randomFrom(scriptContexts));
+ assertThat(compiledScript1, sameInstance(compiledScript2));
+ }
+
+ // Same alias-deduplication guarantee as above, but for file scripts.
+ @Test
+ public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+ buildScriptService(Settings.EMPTY);
+ createFileScripts("test");
+ CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null),
+ randomFrom(scriptContexts));
+ CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null),
+ randomFrom(scriptContexts));
+ assertThat(compiledScript1, sameInstance(compiledScript2));
+ }
+
+ // With no (or only default-valued) settings, behaviour must match
+ // DEFAULT_SCRIPT_MODES: file scripts always run, inline/indexed only for
+ // sandboxed engines.
+ @Test
+ public void testDefaultBehaviourFineGrainedSettings() throws IOException {
+ Settings.Builder builder = Settings.builder();
+ //rarely inject the default settings, which have no effect
+ if (rarely()) {
+ builder.put("script.file", randomFrom(ScriptModesTests.ENABLE_VALUES));
+ }
+ if (rarely()) {
+ builder.put("script.indexed", ScriptMode.SANDBOX);
+ }
+ if (rarely()) {
+ builder.put("script.inline", ScriptMode.SANDBOX);
+ }
+ buildScriptService(builder.build());
+ createFileScripts("groovy", "expression", "mustache", "test");
+
+ for (ScriptContext scriptContext : scriptContexts) {
+ //groovy is not sandboxed, only file scripts are enabled by default
+ assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
+ assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
+ assertCompileAccepted(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+ //expression engine is sandboxed, all scripts are enabled by default
+ assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
+ assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
+ assertCompileAccepted(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+ //mustache engine is sandboxed, all scripts are enabled by default
+ assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
+ assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
+ assertCompileAccepted(MustacheScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+ //custom engine is sandboxed, all scripts are enabled by default
+ assertCompileAccepted("test", "script", ScriptType.INLINE, scriptContext);
+ assertCompileAccepted("test", "script", ScriptType.INDEXED, scriptContext);
+ assertCompileAccepted("test", "file_script", ScriptType.FILE, scriptContext);
+ }
+ }
+
+ // Randomizes settings at all three granularities and checks that the
+ // resolution order is: engine-specific > context-based > source-based >
+ // built-in default.
+ @Test
+ public void testFineGrainedSettings() throws IOException {
+ //collect the fine-grained settings to set for this run
+ int numScriptSettings = randomIntBetween(0, ScriptType.values().length);
+ Map<ScriptType, ScriptMode> scriptSourceSettings = new HashMap<>();
+ for (int i = 0; i < numScriptSettings; i++) {
+ ScriptType scriptType;
+ do {
+ scriptType = randomFrom(ScriptType.values());
+ } while (scriptSourceSettings.containsKey(scriptType));
+ scriptSourceSettings.put(scriptType, randomFrom(ScriptMode.values()));
+ }
+ int numScriptContextSettings = randomIntBetween(0, this.scriptContextRegistry.scriptContexts().size());
+ Map<String, ScriptMode> scriptContextSettings = new HashMap<>();
+ for (int i = 0; i < numScriptContextSettings; i++) {
+ String scriptContext;
+ do {
+ scriptContext = randomFrom(this.scriptContexts).getKey();
+ } while (scriptContextSettings.containsKey(scriptContext));
+ scriptContextSettings.put(scriptContext, randomFrom(ScriptMode.values()));
+ }
+ int numEngineSettings = randomIntBetween(0, 10);
+ Map<String, ScriptMode> engineSettings = new HashMap<>();
+ for (int i = 0; i < numEngineSettings; i++) {
+ // Keys are built from the engine's canonical (first) type name so that
+ // the expectation lookup below can reconstruct them deterministically.
+ String settingKey;
+ do {
+ ScriptEngineService[] scriptEngineServices = this.scriptEngineServices.toArray(new ScriptEngineService[this.scriptEngineServices.size()]);
+ ScriptEngineService scriptEngineService = randomFrom(scriptEngineServices);
+ ScriptType scriptType = randomFrom(ScriptType.values());
+ ScriptContext scriptContext = randomFrom(this.scriptContexts);
+ settingKey = scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey();
+ } while (engineSettings.containsKey(settingKey));
+ engineSettings.put(settingKey, randomFrom(ScriptMode.values()));
+ }
+ //set the selected fine-grained settings
+ Settings.Builder builder = Settings.builder();
+ for (Map.Entry<ScriptType, ScriptMode> entry : scriptSourceSettings.entrySet()) {
+ switch (entry.getValue()) {
+ case ON:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), randomFrom(ScriptModesTests.ENABLE_VALUES));
+ break;
+ case OFF:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), randomFrom(ScriptModesTests.DISABLE_VALUES));
+ break;
+ case SANDBOX:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), ScriptMode.SANDBOX);
+ break;
+ }
+ }
+ for (Map.Entry<String, ScriptMode> entry : scriptContextSettings.entrySet()) {
+ switch (entry.getValue()) {
+ case ON:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), randomFrom(ScriptModesTests.ENABLE_VALUES));
+ break;
+ case OFF:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), randomFrom(ScriptModesTests.DISABLE_VALUES));
+ break;
+ case SANDBOX:
+ builder.put(ScriptModes.SCRIPT_SETTINGS_PREFIX + entry.getKey(), ScriptMode.SANDBOX);
+ break;
+ }
+ }
+ for (Map.Entry<String, ScriptMode> entry : engineSettings.entrySet()) {
+ int delimiter = entry.getKey().indexOf('.');
+ String part1 = entry.getKey().substring(0, delimiter);
+ String part2 = entry.getKey().substring(delimiter + 1);
+
+ // Any alias of the engine may be used in the setting key; resolution
+ // must still apply to all of that engine's types.
+ String lang = randomFrom(scriptEnginesByLangMap.get(part1).types());
+ switch (entry.getValue()) {
+ case ON:
+ builder.put(ScriptModes.ENGINE_SETTINGS_PREFIX + "." + lang + "." + part2, randomFrom(ScriptModesTests.ENABLE_VALUES));
+ break;
+ case OFF:
+ builder.put(ScriptModes.ENGINE_SETTINGS_PREFIX + "." + lang + "." + part2, randomFrom(ScriptModesTests.DISABLE_VALUES));
+ break;
+ case SANDBOX:
+ builder.put(ScriptModes.ENGINE_SETTINGS_PREFIX + "." + lang + "." + part2, ScriptMode.SANDBOX);
+ break;
+ }
+ }
+
+ buildScriptService(builder.build());
+ createFileScripts("groovy", "expression", "mustache", "test");
+
+ for (ScriptEngineService scriptEngineService : scriptEngineServices) {
+ for (ScriptType scriptType : ScriptType.values()) {
+ //make sure file scripts have a different name than inline ones.
+ //Otherwise they are always considered file ones as they can be found in the static cache.
+ String script = scriptType == ScriptType.FILE ? "file_script" : "script";
+ for (ScriptContext scriptContext : this.scriptContexts) {
+ //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings
+ ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey());
+ if (scriptMode == null) {
+ scriptMode = scriptContextSettings.get(scriptContext.getKey());
+ }
+ if (scriptMode == null) {
+ scriptMode = scriptSourceSettings.get(scriptType);
+ }
+ if (scriptMode == null) {
+ scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType);
+ }
+
+ for (String lang : scriptEngineService.types()) {
+ switch (scriptMode) {
+ case ON:
+ assertCompileAccepted(lang, script, scriptType, scriptContext);
+ break;
+ case OFF:
+ assertCompileRejected(lang, script, scriptType, scriptContext);
+ break;
+ case SANDBOX:
+ if (scriptEngineService.sandboxed()) {
+ assertCompileAccepted(lang, script, scriptType, scriptContext);
+ } else {
+ assertCompileRejected(lang, script, scriptType, scriptContext);
+ }
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Compiling against a context that was never registered must be rejected
+ // regardless of engine or script type.
+ @Test
+ public void testCompileNonRegisteredContext() throws IOException {
+ buildScriptService(Settings.EMPTY);
+ String pluginName;
+ String unknownContext;
+ do {
+ pluginName = randomAsciiOfLength(randomIntBetween(1, 10));
+ unknownContext = randomAsciiOfLength(randomIntBetween(1, 30));
+ } while(scriptContextRegistry.isSupportedContext(new ScriptContext.Plugin(pluginName, unknownContext)));
+
+ for (ScriptEngineService scriptEngineService : scriptEngineServices) {
+ for (String type : scriptEngineService.types()) {
+ try {
+ scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin(
+ pluginName, unknownContext));
+ fail("script compilation should have been rejected");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported"));
+ }
+ }
+ }
+ }
+
+ // Writes "file_script.<lang>" stubs into the watched scripts directory and
+ // triggers an immediate rescan so they are picked up synchronously.
+ private void createFileScripts(String... langs) throws IOException {
+ for (String lang : langs) {
+ Path scriptPath = scriptsFilePath.resolve("file_script." + lang);
+ Streams.copy("10".getBytes("UTF-8"), Files.newOutputStream(scriptPath));
+ }
+ resourceWatcherService.notifyNow();
+ }
+
+ // Expects compilation to fail with a ScriptException (mode says "rejected").
+ private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
+ try {
+ scriptService.compile(new Script(script, scriptType, lang, null), scriptContext);
+ fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]");
+ } catch(ScriptException e) {
+ //all good
+ }
+ }
+
+ // Expects compilation to succeed and yield a non-null CompiledScript.
+ private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
+ assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext), notNullValue());
+ }
+
+ // Minimal sandboxed engine used by the tests above. It registers two type
+ // aliases ("test"/"test2") and "compiles" by prefixing the source with
+ // "compiled_"; all execution entry points are unused stubs.
+ public static class TestEngineService implements ScriptEngineService {
+
+ @Override
+ public String[] types() {
+ return new String[] {"test", "test2"};
+ }
+
+ @Override
+ public String[] extensions() {
+ return new String[] {"test", "tst"};
+ }
+
+ @Override
+ public boolean sandboxed() {
+ return true;
+ }
+
+ @Override
+ public Object compile(String script) {
+ return "compiled_" + script;
+ }
+
+ @Override
+ public ExecutableScript executable(final Object compiledScript, @Nullable Map<String, Object> vars) {
+ return null;
+ }
+
+ @Override
+ public SearchScript search(Object compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ return null;
+ }
+
+ @Override
+ public Object execute(Object compiledScript, Map<String, Object> vars) {
+ return null;
+ }
+
+ @Override
+ public Object unwrap(Object value) {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ @Override
+ public void scriptRemoved(CompiledScript script) {
+ // Nothing to do here
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java
new file mode 100644
index 0000000000..065e03cb73
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script.expression;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class ExpressionScriptTests extends ElasticsearchIntegrationTest {
+
+ // Builds a match_all search on index "test" that sorts by _uid and attaches
+ // the given expression script as script field "foo". The varargs are
+ // alternating key/value parameter pairs (hence the even-length assert).
+ private SearchRequestBuilder buildRequest(String script, Object... params) {
+ ensureGreen("test");
+
+ Map<String, Object> paramsMap = new HashMap<>();
+ assert(params.length % 2 == 0);
+ for (int i = 0; i < params.length; i += 2) {
+ paramsMap.put(params[i].toString(), params[i + 1]);
+ }
+
+ SearchRequestBuilder req = client().prepareSearch().setIndices("test");
+ req.setQuery(QueryBuilders.matchAllQuery())
+ .addSort(SortBuilders.fieldSort("_uid")
+.order(SortOrder.ASC))
+ .addScriptField("foo", new Script(script, ScriptType.INLINE, "expression", paramsMap));
+ return req;
+ }
+
+ // A single-value numeric field can be used directly in an expression
+ // (implicit .value access): doc['foo'] + 1 with foo=4 yields 5.0.
+ public void testBasic() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get();
+ SearchResponse rsp = buildRequest("doc['foo'] + 1").get();
+ assertEquals(1, rsp.getHits().getTotalHits());
+ assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue());
+ }
+
+ // Same computation as testBasic but spelled with the explicit .value
+ // accessor; both forms must be equivalent.
+ public void testBasicUsingDotValue() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get();
+ SearchResponse rsp = buildRequest("doc['foo'].value + 1").get();
+ assertEquals(1, rsp.getHits().getTotalHits());
+ assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue());
+ }
+
+ // _score is accessible from an expression used as a score function:
+ // scoring by 1/_score inverts the tf ranking, so the doc with the fewest
+ // "hello" occurrences comes first.
+ // NOTE(review): "goodebye" in doc 3 looks like a typo, but it is harmless —
+ // the query only matches the term "hello".
+ public void testScore() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("text", "hello goodbye"),
+ client().prepareIndex("test", "doc", "2").setSource("text", "hello hello hello goodbye"),
+ client().prepareIndex("test", "doc", "3").setSource("text", "hello hello goodebye"));
+ ScoreFunctionBuilder score = ScoreFunctionBuilders.scriptFunction(new Script("1 / _score", ScriptType.INLINE, "expression", null));
+ SearchRequestBuilder req = client().prepareSearch().setIndices("test");
+ req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode("replace"));
+ req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent
+ SearchResponse rsp = req.get();
+ assertSearchResponse(rsp);
+ SearchHits hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals("1", hits.getAt(0).getId());
+ assertEquals("3", hits.getAt(1).getId());
+ assertEquals("2", hits.getAt(2).getId());
+ }
+
+ // Date fields expose calendar accessors (getSeconds/getMinutes/getHourOfDay/
+ // getDayOfMonth/getMonth/getYear) inside expressions. Hits are sorted by _uid,
+ // so doc "1" is always the first hit.
+ public void testDateMethods() throws Exception {
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "date0", "type=date", "date1", "type=date"));
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"),
+ client().prepareIndex("test", "doc", "2").setSource("date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z"));
+ SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get();
+ assertEquals(2, rsp.getHits().getTotalHits());
+ SearchHits hits = rsp.getHits();
+ assertEquals(5.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(-11.0, hits.getAt(1).field("foo").getValue());
+ rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get();
+ assertEquals(2, rsp.getHits().getTotalHits());
+ hits = rsp.getHits();
+ assertEquals(5.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(24.0, hits.getAt(1).field("foo").getValue());
+ // getMonth() is zero-based, hence the +1 to recover the calendar month.
+ rsp = buildRequest("doc['date1'].getMonth() + 1").get();
+ assertEquals(2, rsp.getHits().getTotalHits());
+ hits = rsp.getHits();
+ assertEquals(9.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(10.0, hits.getAt(1).field("foo").getValue());
+ rsp = buildRequest("doc['date1'].getYear()").get();
+ assertEquals(2, rsp.getHits().getTotalHits());
+ hits = rsp.getHits();
+ assertEquals(1985.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(1983.0, hits.getAt(1).field("foo").getValue());
+ }
+
+ // Multi-valued numeric fields expose aggregate accessors in expressions:
+ // count/sum/avg/median/min/max. Repeating the same field name in setSource
+ // is how these docs get multiple values per field — presumably the repeated
+ // keys accumulate into a multi-valued field; verify against setSource docs.
+ public void testMultiValueMethods() throws Exception {
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "double0", "type=double", "double1", "type=double"));
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("double0", "5.0", "double0", "1.0", "double0", "1.5", "double1", "1.2", "double1", "2.4"),
+ client().prepareIndex("test", "doc", "2").setSource("double0", "5.0", "double1", "3.0"),
+ client().prepareIndex("test", "doc", "3").setSource("double0", "5.0", "double0", "1.0", "double0", "1.5", "double0", "-1.5", "double1", "4.0"));
+
+
+ SearchResponse rsp = buildRequest("doc['double0'].count() + doc['double1'].count()").get();
+ assertSearchResponse(rsp);
+ SearchHits hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(5.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(2.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].sum()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(7.5, hits.getAt(0).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(6.0, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].avg() + doc['double1'].avg()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(4.3, hits.getAt(0).field("foo").getValue());
+ assertEquals(8.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(5.5, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].median()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(1.5, hits.getAt(0).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(1.25, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].min()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(1.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(-1.5, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].max()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(5.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(2).field("foo").getValue());
+
+ rsp = buildRequest("doc['double0'].sum()/doc['double0'].count()").get();
+ assertSearchResponse(rsp);
+ hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(2.5, hits.getAt(0).field("foo").getValue());
+ assertEquals(5.0, hits.getAt(1).field("foo").getValue());
+ assertEquals(1.5, hits.getAt(2).field("foo").getValue());
+ }
+
+ // Calling a date accessor on a non-date field must fail with an
+ // IllegalArgumentException mentioning the date-field restriction.
+ public void testInvalidDateMethodCall() throws Exception {
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "double", "type=double"));
+ ensureGreen("test");
+ indexRandom(true, client().prepareIndex("test", "doc", "1").setSource("double", "178000000.0"));
+ try {
+ buildRequest("doc['double'].getYear()").get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ // Leading space added so the assertion reason reads
+ // "<exception> should have contained ..." instead of running the words together.
+ assertThat(e.toString() + " should have contained IllegalArgumentException",
+ e.toString().contains("IllegalArgumentException"), equalTo(true));
+ assertThat(e.toString() + " should have contained can only be used with a date field type",
+ e.toString().contains("can only be used with a date field type"), equalTo(true));
+ }
+ }
+
+ // A document missing the referenced field contributes the field's default
+ // (0), so doc 2 (no 'x') evaluates doc['x'] + 1 to 1.0.
+ public void testSparseField() throws Exception {
+ ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "x", "type=long", "y", "type=long"));
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("x", 4),
+ client().prepareIndex("test", "doc", "2").setSource("y", 2));
+ SearchResponse rsp = buildRequest("doc['x'] + 1").get();
+ ElasticsearchAssertions.assertSearchResponse(rsp);
+ SearchHits hits = rsp.getHits();
+ assertEquals(2, rsp.getHits().getTotalHits());
+ assertEquals(5.0, hits.getAt(0).field("foo").getValue());
+ assertEquals(1.0, hits.getAt(1).field("foo").getValue());
+ }
+
+ public void testMissingField() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ client().prepareIndex("test", "doc", "1").setSource("x", 4).setRefresh(true).get();
+ try {
+ buildRequest("doc['bogus']").get();
+ fail("Expected missing field to cause failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained missing field error",
+ e.toString().contains("does not exist in mappings"), equalTo(true));
+ }
+ }
+
+ // Parameters of mixed numeric types (int, double, long) are usable in one
+ // expression; the long comparison exercises values beyond int range.
+ public void testParams() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("x", 10),
+ client().prepareIndex("test", "doc", "2").setSource("x", 3),
+ client().prepareIndex("test", "doc", "3").setSource("x", 5));
+ // a = int, b = double, c = long
+ String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 1 : 0)";
+ SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get();
+ SearchHits hits = rsp.getHits();
+ assertEquals(3, hits.getTotalHits());
+ assertEquals(24.5, hits.getAt(0).field("foo").getValue());
+ assertEquals(9.5, hits.getAt(1).field("foo").getValue());
+ assertEquals(13.5, hits.getAt(2).field("foo").getValue());
+ }
+
+ // Syntactically invalid expression source must be reported as an
+ // ExpressionScriptCompilationException / parse failure.
+ public void testCompileFailure() {
+ client().prepareIndex("test", "doc", "1").setSource("x", 1).setRefresh(true).get();
+ try {
+ buildRequest("garbage%@#%@").get();
+ fail("Expected expression compilation failure");
+ } catch (SearchPhaseExecutionException e) {
+ // Leading space added to keep the assertion reason readable.
+ assertThat(e.toString() + " should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + " should have contained compilation failure",
+ e.toString().contains("Failed to parse expression"), equalTo(true));
+ }
+ }
+
+ public void testNonNumericParam() {
+ client().prepareIndex("test", "doc", "1").setSource("x", 1).setRefresh(true).get();
+ try {
+ buildRequest("a", "a", "astring").get();
+ fail("Expected string parameter to cause failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained non-numeric parameter error",
+ e.toString().contains("must be a numeric type"), equalTo(true));
+ }
+ }
+
+ public void testNonNumericField() {
+ client().prepareIndex("test", "doc", "1").setSource("text", "this is not a number").setRefresh(true).get();
+ try {
+ buildRequest("doc['text']").get();
+ fail("Expected text field to cause execution failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained non-numeric field error",
+ e.toString().contains("must be numeric"), equalTo(true));
+ }
+ }
+
+ public void testInvalidGlobalVariable() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
+ try {
+ buildRequest("bogus").get();
+ fail("Expected bogus variable to cause execution failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained unknown variable error",
+ e.toString().contains("Unknown variable"), equalTo(true));
+ }
+ }
+
+ public void testDocWithoutField() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
+ try {
+ buildRequest("doc").get();
+ fail("Expected doc variable without field to cause execution failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained a missing specific field error",
+ e.toString().contains("must be used with a specific field"), equalTo(true));
+ }
+ }
+
+ public void testInvalidFieldMember() {
+ client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
+ try {
+ buildRequest("doc['foo'].bogus").get();
+ fail("Expected bogus field member to cause execution failure");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString() + "should have contained ExpressionScriptCompilationException",
+ e.toString().contains("ExpressionScriptCompilationException"), equalTo(true));
+ assertThat(e.toString() + "should have contained member variable [value] or member methods may be accessed",
+ e.toString().contains("member variable [value] or member methods may be accessed"), equalTo(true));
+ }
+ }
+
+ public void testSpecialValueVariable() throws Exception {
+ // i.e. _value for aggregations
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("x", 5, "y", 1.2),
+ client().prepareIndex("test", "doc", "2").setSource("x", 10, "y", 1.4),
+ client().prepareIndex("test", "doc", "3").setSource("x", 13, "y", 1.8));
+
+ SearchRequestBuilder req = client().prepareSearch().setIndices("test");
+ req.setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(AggregationBuilders.stats("int_agg").field("x").script("_value * 3").lang(ExpressionScriptEngineService.NAME))
+ .addAggregation(AggregationBuilders.stats("double_agg").field("y").script("_value - 1.1").lang(ExpressionScriptEngineService.NAME));
+
+ SearchResponse rsp = req.get();
+ assertEquals(3, rsp.getHits().getTotalHits());
+
+ Stats stats = rsp.getAggregations().get("int_agg");
+ assertEquals(39.0, stats.getMax(), 0.0001);
+ assertEquals(15.0, stats.getMin(), 0.0001);
+
+ stats = rsp.getAggregations().get("double_agg");
+ assertEquals(0.7, stats.getMax(), 0.0001);
+ assertEquals(0.1, stats.getMin(), 0.0001);
+ }
+
+ public void testStringSpecialValueVariable() throws Exception {
+ // i.e. expression script for term aggregations, which is not allowed
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("text", "hello"),
+ client().prepareIndex("test", "doc", "2").setSource("text", "goodbye"),
+ client().prepareIndex("test", "doc", "3").setSource("text", "hello"));
+
+ SearchRequestBuilder req = client().prepareSearch().setIndices("test");
+ req.setQuery(QueryBuilders.matchAllQuery())
+.addAggregation(
+ AggregationBuilders.terms("term_agg").field("text")
+ .script(new Script("_value", ScriptType.INLINE, ExpressionScriptEngineService.NAME, null)));
+
+ String message;
+ try {
+ // shards that don't have docs with the "text" field will not fail,
+ // so we may or may not get a total failure
+ SearchResponse rsp = req.get();
+ assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed
+ message = rsp.getShardFailures()[0].reason();
+ } catch (SearchPhaseExecutionException e) {
+ message = e.toString();
+ }
+ assertThat(message + "should have contained ExpressionScriptExecutionException",
+ message.contains("ExpressionScriptExecutionException"), equalTo(true));
+ assertThat(message + "should have contained text variable error",
+ message.contains("text variable"), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java b/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java
new file mode 100644
index 0000000000..ed7de33cde
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script.mustache;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringWriter;
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Mustache based templating test
+ */
+public class MustacheScriptEngineTest extends ElasticsearchTestCase {
+ private MustacheScriptEngineService qe;
+ private JsonEscapingMustacheFactory escaper;
+
+ @Before
+ public void setup() {
+ qe = new MustacheScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
+ escaper = new JsonEscapingMustacheFactory();
+ }
+
+ @Test
+ public void testSimpleParameterReplace() {
+ {
+ String template = "GET _search {\"query\": " + "{\"boosting\": {" + "\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}";
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("boost_val", "0.3");
+ BytesReference o = (BytesReference) qe.execute(qe.compile(template), vars);
+ assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}",
+ new String(o.toBytes(), Charset.forName("UTF-8")));
+ }
+ {
+ String template = "GET _search {\"query\": " + "{\"boosting\": {" + "\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"{{body_val}}\"}" + "}}, \"negative_boost\": {{boost_val}} } }}";
+ Map<String, Object> vars = new HashMap<>();
+ vars.put("boost_val", "0.3");
+ vars.put("body_val", "\"quick brown\"");
+ BytesReference o = (BytesReference) qe.execute(qe.compile(template), vars);
+ assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}",
+ new String(o.toBytes(), Charset.forName("UTF-8")));
+ }
+ }
+
+ @Test
+ public void testEscapeJson() throws IOException {
+ {
+ StringWriter writer = new StringWriter();
+ escaper.encode("hello \n world", writer);
+ assertThat(writer.toString(), equalTo("hello \\n world"));
+ }
+ {
+ StringWriter writer = new StringWriter();
+ escaper.encode("\n", writer);
+ assertThat(writer.toString(), equalTo("\\n"));
+ }
+
+ Character[] specialChars = new Character[]{
+ '\"',
+ '\\',
+ '\u0000',
+ '\u0001',
+ '\u0002',
+ '\u0003',
+ '\u0004',
+ '\u0005',
+ '\u0006',
+ '\u0007',
+ '\u0008',
+ '\u0009',
+ '\u000B',
+ '\u000C',
+ '\u000E',
+ '\u000F',
+ '\u001F'};
+ String[] escapedChars = new String[]{
+ "\\\"",
+ "\\\\",
+ "\\u0000",
+ "\\u0001",
+ "\\u0002",
+ "\\u0003",
+ "\\u0004",
+ "\\u0005",
+ "\\u0006",
+ "\\u0007",
+ "\\u0008",
+ "\\u0009",
+ "\\u000B",
+ "\\u000C",
+ "\\u000E",
+ "\\u000F",
+ "\\u001F"};
+ int iters = scaledRandomIntBetween(100, 1000);
+ for (int i = 0; i < iters; i++) {
+ int rounds = scaledRandomIntBetween(1, 20);
+ StringWriter expect = new StringWriter();
+ StringWriter writer = new StringWriter();
+ for (int j = 0; j < rounds; j++) {
+ String s = getChars();
+ writer.write(s);
+ expect.write(s);
+
+ int charIndex = randomInt(7);
+ writer.append(specialChars[charIndex]);
+ expect.append(escapedChars[charIndex]);
+ }
+ StringWriter target = new StringWriter();
+ escaper.encode(writer.toString(), target);
+ assertThat(expect.toString(), equalTo(target.toString()));
+ }
+ }
+
+ private String getChars() {
+ String string = randomRealisticUnicodeOfCodepointLengthBetween(0, 10);
+ for (int i = 0; i < string.length(); i++) {
+ if (isEscapeChar(string.charAt(i))) {
+ return string.substring(0, i);
+ }
+ }
+ return string;
+ }
+
+ /**
+ * From https://www.ietf.org/rfc/rfc4627.txt:
+ *
+ * All Unicode characters may be placed within the
+ * quotation marks except for the characters that must be escaped:
+ * quotation mark, reverse solidus, and the control characters (U+0000
+ * through U+001F).
+ * */
+ private static boolean isEscapeChar(char c) {
+ switch (c) {
+ case '"':
+ case '\\':
+ return true;
+ }
+
+ if (c < '\u002F')
+ return true;
+ return false;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/script/mustache/MustacheTest.java b/core/src/test/java/org/elasticsearch/script/mustache/MustacheTest.java
new file mode 100644
index 0000000000..a793b279ec
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/script/mustache/MustacheTest.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.script.mustache;
+
+import com.github.mustachejava.DefaultMustacheFactory;
+import com.github.mustachejava.Mustache;
+import com.github.mustachejava.MustacheFactory;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.HashMap;
+
+/**
+ * Figure out how Mustache works for the simplest use case. Leaving in here for now for reference.
+ * */
+public class MustacheTest extends ElasticsearchTestCase {
+
+ @Test
+ public void test() {
+ HashMap<String, Object> scopes = new HashMap<>();
+ scopes.put("boost_val", "0.2");
+
+ String template = "GET _search {\"query\": " + "{\"boosting\": {"
+ + "\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}"
+ + "}}, \"negative_boost\": {{boost_val}} } }}";
+ MustacheFactory f = new DefaultMustacheFactory();
+ Mustache mustache = f.compile(new StringReader(template), "example");
+ StringWriter writer = new StringWriter();
+ mustache.execute(writer, scopes);
+ writer.flush();
+ assertEquals(
+ "Mustache templating broken",
+ "GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}},"
+ + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.2 } }}",
+ writer.toString());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/CountSearchTests.java b/core/src/test/java/org/elasticsearch/search/CountSearchTests.java
new file mode 100644
index 0000000000..fd7f028a92
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/CountSearchTests.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+/**
+ * {@link SearchType#COUNT} is deprecated but let's make sure it still works as expected.
+ */
+public class CountSearchTests extends ElasticsearchIntegrationTest {
+
+ public void testDuelCountQueryThenFetch() throws Exception {
+ createIndex("idx");
+ ensureYellow();
+ indexRandom(true,
+ client().prepareIndex("idx", "type", "1").setSource("foo", "bar", "bar", 3),
+ client().prepareIndex("idx", "type", "2").setSource("foo", "baz", "bar", 10),
+ client().prepareIndex("idx", "type", "3").setSource("foo", "foo", "bar", 7));
+
+ final SearchResponse resp1 = client().prepareSearch("idx").setSize(0).addAggregation(AggregationBuilders.sum("bar").field("bar")).execute().get();
+ assertSearchResponse(resp1);
+ final SearchResponse resp2 = client().prepareSearch("idx").setSearchType(SearchType.COUNT).addAggregation(AggregationBuilders.sum("bar").field("bar")).execute().get();
+ assertSearchResponse(resp2);
+
+ assertEquals(resp1.getHits().getTotalHits(), resp2.getHits().getTotalHits());
+ Sum sum1 = resp1.getAggregations().get("bar");
+ Sum sum2 = resp2.getAggregations().get("bar");
+ assertEquals(sum1.getValue(), sum2.getValue(), 0d);
+ }
+
+ public void testCloseContextEvenWithExplicitSize() throws Exception {
+ createIndex("idx");
+ ensureYellow();
+ indexRandom(true,
+ client().prepareIndex("idx", "type", "1").setSource("foo", "bar", "bar", 3),
+ client().prepareIndex("idx", "type", "2").setSource("foo", "baz", "bar", 10),
+ client().prepareIndex("idx", "type", "3").setSource("foo", "foo", "bar", 7));
+
+ client().prepareSearch("idx").setSearchType(SearchType.COUNT).setSize(2).addAggregation(AggregationBuilders.sum("bar").field("bar")).execute().get();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/MultiValueModeTests.java b/core/src/test/java/org/elasticsearch/search/MultiValueModeTests.java
new file mode 100644
index 0000000000..197c6f8cba
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/MultiValueModeTests.java
@@ -0,0 +1,736 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BitDocIdSet;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.index.fielddata.NumericDoubleValues;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+public class MultiValueModeTests extends ElasticsearchTestCase {
+
+ private static FixedBitSet randomRootDocs(int maxDoc) {
+ FixedBitSet set = new FixedBitSet(maxDoc);
+ for (int i = 0; i < maxDoc; ++i) {
+ if (randomBoolean()) {
+ set.set(i);
+ }
+ }
+ // the last doc must be a root doc
+ set.set(maxDoc - 1);
+ return set;
+ }
+
+ private static FixedBitSet randomInnerDocs(FixedBitSet rootDocs) {
+ FixedBitSet innerDocs = new FixedBitSet(rootDocs.length());
+ for (int i = 0; i < innerDocs.length(); ++i) {
+ if (!rootDocs.get(i) && randomBoolean()) {
+ innerDocs.set(i);
+ }
+ }
+ return innerDocs;
+ }
+
+ public void testSingleValuedLongs() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final long[] array = new long[numDocs];
+ final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs);
+ for (int i = 0; i < array.length; ++i) {
+ if (randomBoolean()) {
+ array[i] = randomLong();
+ if (docsWithValue != null) {
+ docsWithValue.set(i);
+ }
+ } else if (docsWithValue != null && randomBoolean()) {
+ docsWithValue.set(i);
+ }
+ }
+ final NumericDocValues singleValues = new NumericDocValues() {
+ @Override
+ public long get(int docID) {
+ return array[docID];
+ }
+ };
+ final SortedNumericDocValues multiValues = DocValues.singleton(singleValues, docsWithValue);
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ public void testMultiValuedLongs() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final long[][] array = new long[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final long[] values = new long[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = randomLong();
+ }
+ Arrays.sort(values);
+ array[i] = values;
+ }
+ final SortedNumericDocValues multiValues = new SortedNumericDocValues() {
+ int doc;
+
+ @Override
+ public long valueAt(int index) {
+ return array[doc][index];
+ }
+
+ @Override
+ public void setDocument(int doc) {
+ this.doc = doc;
+ }
+
+ @Override
+ public int count() {
+ return array[doc].length;
+ }
+ };
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ private void verify(SortedNumericDocValues values, int maxDoc) {
+ for (long missingValue : new long[] { 0, randomLong() }) {
+ for (MultiValueMode mode : MultiValueMode.values()) {
+ final NumericDocValues selected = mode.select(values, missingValue);
+ for (int i = 0; i < maxDoc; ++i) {
+ final long actual = selected.get(i);
+ long expected = 0;
+ values.setDocument(i);
+ int numValues = values.count();
+ if (numValues == 0) {
+ expected = missingValue;
+ } else {
+ if (mode == MultiValueMode.MAX) {
+ expected = Long.MIN_VALUE;
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Long.MAX_VALUE;
+ }
+ for (int j = 0; j < numValues; ++j) {
+ if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+ expected += values.valueAt(j);
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Math.min(expected, values.valueAt(j));
+ } else if (mode == MultiValueMode.MAX) {
+ expected = Math.max(expected, values.valueAt(j));
+ }
+ }
+ if (mode == MultiValueMode.AVG) {
+ expected = numValues > 1 ? Math.round((double)expected/(double)numValues) : expected;
+ } else if (mode == MultiValueMode.MEDIAN) {
+ int value = numValues/2;
+ if (numValues % 2 == 0) {
+ expected = Math.round((values.valueAt(value - 1) + values.valueAt(value))/2.0);
+ } else {
+ expected = values.valueAt(value);
+ }
+ }
+ }
+
+ assertEquals(mode.toString() + " docId=" + i, expected, actual);
+ }
+ }
+ }
+ }
+
+ private void verify(SortedNumericDocValues values, int maxDoc, FixedBitSet rootDocs, FixedBitSet innerDocs) throws IOException {
+ for (long missingValue : new long[] { 0, randomLong() }) {
+ for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX, MultiValueMode.SUM, MultiValueMode.AVG}) {
+ final NumericDocValues selected = mode.select(values, missingValue, rootDocs, new BitDocIdSet(innerDocs), maxDoc);
+ int prevRoot = -1;
+ for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) {
+ final long actual = selected.get(root);
+ long expected = 0;
+ if (mode == MultiValueMode.MAX) {
+ expected = Long.MIN_VALUE;
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Long.MAX_VALUE;
+ }
+ int numValues = 0;
+ for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit(child + 1)) {
+ values.setDocument(child);
+ for (int j = 0; j < values.count(); ++j) {
+ if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+ expected += values.valueAt(j);
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Math.min(expected, values.valueAt(j));
+ } else if (mode == MultiValueMode.MAX) {
+ expected = Math.max(expected, values.valueAt(j));
+ }
+ ++numValues;
+ }
+ }
+ if (numValues == 0) {
+ expected = missingValue;
+ } else if (mode == MultiValueMode.AVG) {
+ expected = numValues > 1 ? Math.round((double) expected / (double) numValues) : expected;
+ }
+
+ assertEquals(mode.toString() + " docId=" + root, expected, actual);
+
+ prevRoot = root;
+ }
+ }
+ }
+ }
+
+ public void testSingleValuedDoubles() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final double[] array = new double[numDocs];
+ final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs);
+ for (int i = 0; i < array.length; ++i) {
+ if (randomBoolean()) {
+ array[i] = randomDouble();
+ if (docsWithValue != null) {
+ docsWithValue.set(i);
+ }
+ } else if (docsWithValue != null && randomBoolean()) {
+ docsWithValue.set(i);
+ }
+ }
+ final NumericDoubleValues singleValues = new NumericDoubleValues() {
+ @Override
+ public double get(int docID) {
+ return array[docID];
+ }
+ };
+ final SortedNumericDoubleValues multiValues = FieldData.singleton(singleValues, docsWithValue);
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ public void testMultiValuedDoubles() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final double[][] array = new double[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final double[] values = new double[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = randomDouble();
+ }
+ Arrays.sort(values);
+ array[i] = values;
+ }
+ final SortedNumericDoubleValues multiValues = new SortedNumericDoubleValues() {
+ int doc;
+
+ @Override
+ public double valueAt(int index) {
+ return array[doc][index];
+ }
+
+ @Override
+ public void setDocument(int doc) {
+ this.doc = doc;
+ }
+
+ @Override
+ public int count() {
+ return array[doc].length;
+ }
+ };
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ private void verify(SortedNumericDoubleValues values, int maxDoc) {
+ for (long missingValue : new long[] { 0, randomLong() }) {
+ for (MultiValueMode mode : MultiValueMode.values()) {
+ if (MultiValueMode.MEDIAN.equals(mode)) {
+ continue;
+ }
+ final NumericDoubleValues selected = mode.select(values, missingValue);
+ for (int i = 0; i < maxDoc; ++i) {
+ final double actual = selected.get(i);
+ double expected = 0.0;
+ values.setDocument(i);
+ int numValues = values.count();
+ if (numValues == 0) {
+ expected = missingValue;
+ } else {
+ if (mode == MultiValueMode.MAX) {
+ expected = Long.MIN_VALUE;
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Long.MAX_VALUE;
+ }
+ for (int j = 0; j < numValues; ++j) {
+ if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+ expected += values.valueAt(j);
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Math.min(expected, values.valueAt(j));
+ } else if (mode == MultiValueMode.MAX) {
+ expected = Math.max(expected, values.valueAt(j));
+ }
+ }
+ if (mode == MultiValueMode.AVG) {
+ expected = expected/numValues;
+ } else if (mode == MultiValueMode.MEDIAN) {
+ int value = numValues/2;
+ if (numValues % 2 == 0) {
+ expected = (values.valueAt(value - 1) + values.valueAt(value))/2.0;
+ } else {
+ expected = values.valueAt(value);
+ }
+ }
+ }
+
+ assertEquals(mode.toString() + " docId=" + i, expected, actual, 0.1);
+ }
+ }
+ }
+ }
+
+ private void verify(SortedNumericDoubleValues values, int maxDoc, FixedBitSet rootDocs, FixedBitSet innerDocs) throws IOException {
+ for (long missingValue : new long[] { 0, randomLong() }) {
+ for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX, MultiValueMode.SUM, MultiValueMode.AVG}) {
+ final NumericDoubleValues selected = mode.select(values, missingValue, rootDocs, new BitDocIdSet(innerDocs), maxDoc);
+ int prevRoot = -1;
+ for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) {
+ final double actual = selected.get(root);
+ double expected = 0.0;
+ if (mode == MultiValueMode.MAX) {
+ expected = Long.MIN_VALUE;
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Long.MAX_VALUE;
+ }
+ int numValues = 0;
+ for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit(child + 1)) {
+ values.setDocument(child);
+ for (int j = 0; j < values.count(); ++j) {
+ if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+ expected += values.valueAt(j);
+ } else if (mode == MultiValueMode.MIN) {
+ expected = Math.min(expected, values.valueAt(j));
+ } else if (mode == MultiValueMode.MAX) {
+ expected = Math.max(expected, values.valueAt(j));
+ }
+ ++numValues;
+ }
+ }
+ if (numValues == 0) {
+ expected = missingValue;
+ } else if (mode == MultiValueMode.AVG) {
+ expected = expected/numValues;
+ }
+
+ assertEquals(mode.toString() + " docId=" + root, expected, actual, 0.1);
+
+ prevRoot = root;
+ }
+ }
+ }
+ }
+
+ public void testSingleValuedStrings() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final BytesRef[] array = new BytesRef[numDocs];
+ final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs);
+ for (int i = 0; i < array.length; ++i) {
+ if (randomBoolean()) {
+ array[i] = new BytesRef(RandomStrings.randomAsciiOfLength(getRandom(), 8));
+ if (docsWithValue != null) {
+ docsWithValue.set(i);
+ }
+ } else {
+ array[i] = new BytesRef();
+ if (docsWithValue != null && randomBoolean()) {
+ docsWithValue.set(i);
+ }
+ }
+ }
+ final BinaryDocValues singleValues = new BinaryDocValues() {
+ @Override
+ public BytesRef get(int docID) {
+ return BytesRef.deepCopyOf(array[docID]);
+ }
+ };
+ final SortedBinaryDocValues multiValues = FieldData.singleton(singleValues, docsWithValue);
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ public void testMultiValuedStrings() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1, 100);
+ final BytesRef[][] array = new BytesRef[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final BytesRef[] values = new BytesRef[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = new BytesRef(RandomStrings.randomAsciiOfLength(getRandom(), 8));
+ }
+ Arrays.sort(values);
+ array[i] = values;
+ }
+ final SortedBinaryDocValues multiValues = new SortedBinaryDocValues() {
+ int doc;
+
+ @Override
+ public BytesRef valueAt(int index) {
+ return BytesRef.deepCopyOf(array[doc][index]);
+ }
+
+ @Override
+ public void setDocument(int doc) {
+ this.doc = doc;
+ }
+
+ @Override
+ public int count() {
+ return array[doc].length;
+ }
+ };
+ verify(multiValues, numDocs);
+ final FixedBitSet rootDocs = randomRootDocs(numDocs);
+ final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+ verify(multiValues, numDocs, rootDocs, innerDocs);
+ }
+
+ private void verify(SortedBinaryDocValues values, int maxDoc) {
+ for (BytesRef missingValue : new BytesRef[] { new BytesRef(), new BytesRef(RandomStrings.randomAsciiOfLength(getRandom(), 8)) }) {
+ for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX}) {
+ final BinaryDocValues selected = mode.select(values, missingValue);
+ for (int i = 0; i < maxDoc; ++i) {
+ final BytesRef actual = selected.get(i);
+ BytesRef expected = null;
+ values.setDocument(i);
+ int numValues = values.count();
+ if (numValues == 0) {
+ expected = missingValue;
+ } else {
+ for (int j = 0; j < numValues; ++j) {
+ if (expected == null) {
+ expected = BytesRef.deepCopyOf(values.valueAt(j));
+ } else {
+ if (mode == MultiValueMode.MIN) {
+ expected = expected.compareTo(values.valueAt(j)) <= 0 ? expected : BytesRef.deepCopyOf(values.valueAt(j));
+ } else if (mode == MultiValueMode.MAX) {
+ expected = expected.compareTo(values.valueAt(j)) > 0 ? expected : BytesRef.deepCopyOf(values.valueAt(j));
+ }
+ }
+ }
+ if (expected == null) {
+ expected = missingValue;
+ }
+ }
+
+ assertEquals(mode.toString() + " docId=" + i, expected, actual);
+ }
+ }
+ }
+ }
+
+    /**
+     * Nested (block-join) variant: for each root doc the selected value must equal the
+     * min/max over the values of all inner docs strictly between the previous root and
+     * this root (the root's own values are excluded), or {@code missingValue} when
+     * there are no such inner values.
+     */
+    private void verify(SortedBinaryDocValues values, int maxDoc, FixedBitSet rootDocs, FixedBitSet innerDocs) throws IOException {
+        for (BytesRef missingValue : new BytesRef[] { new BytesRef(), new BytesRef(RandomStrings.randomAsciiOfLength(getRandom(), 8)) }) {
+            for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX}) {
+                final BinaryDocValues selected = mode.select(values, missingValue, rootDocs, new BitDocIdSet(innerDocs), maxDoc);
+                int prevRoot = -1;
+                // Walk root docs in order; the ternary guards nextSetBit against
+                // being called with an index >= maxDoc (out of the bitset's range).
+                for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) {
+                    final BytesRef actual = selected.get(root);
+                    BytesRef expected = null;
+                    // Children of this root: inner docs in (prevRoot, root).
+                    for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit(child + 1)) {
+                        values.setDocument(child);
+                        for (int j = 0; j < values.count(); ++j) {
+                            if (expected == null) {
+                                expected = BytesRef.deepCopyOf(values.valueAt(j));
+                            } else {
+                                if (mode == MultiValueMode.MIN) {
+                                    expected = expected.compareTo(values.valueAt(j)) <= 0 ? expected : BytesRef.deepCopyOf(values.valueAt(j));
+                                } else if (mode == MultiValueMode.MAX) {
+                                    expected = expected.compareTo(values.valueAt(j)) > 0 ? expected : BytesRef.deepCopyOf(values.valueAt(j));
+                                }
+                            }
+                        }
+                    }
+                    // No inner docs (or none with values) for this root -> missing value.
+                    if (expected == null) {
+                        expected = missingValue;
+                    }
+
+                    assertEquals(mode.toString() + " docId=" + root, expected, actual);
+
+                    prevRoot = root;
+                }
+            }
+        }
+    }
+
+
+    /**
+     * Single-valued ordinals: each doc either carries a random ord in [0, 1000]
+     * or no value at all (ord -1). The singleton wrapper is verified both flat
+     * and through the nested (block-join) path.
+     */
+    public void testSingleValuedOrds() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final int[] ords = new int[numDocs];
+        for (int doc = 0; doc < ords.length; ++doc) {
+            ords[doc] = randomBoolean() ? randomInt(1000) : -1;
+        }
+        final SortedDocValues sorted = new SortedDocValues() {
+            @Override
+            public int getOrd(int docID) {
+                return ords[docID];
+            }
+
+            @Override
+            public BytesRef lookupOrd(int ord) {
+                // The verifier only compares ordinals, never terms.
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public int getValueCount() {
+                return 1 << 20;
+            }
+        };
+        final RandomAccessOrds multiValues = (RandomAccessOrds) DocValues.singleton(sorted);
+        verify(multiValues, numDocs);
+        final FixedBitSet rootDocs = randomRootDocs(numDocs);
+        final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+        verify(multiValues, numDocs, rootDocs, innerDocs);
+    }
+
+    /**
+     * Multi-valued ordinals: each doc gets 0-4 ords, generated strictly increasing
+     * per doc (as the ord contract requires), then verified flat and nested.
+     */
+    public void testMultiValuedOrds() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final long[][] array = new long[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            final long[] values = new long[randomInt(4)];
+            for (int j = 0; j < values.length; ++j) {
+                // Each subsequent ord is strictly greater than the previous one.
+                values[j] = j == 0 ? randomInt(1000) : values[j - 1] + 1 + randomInt(1000);
+            }
+            array[i] = values;
+        }
+        // Minimal random-access stub; iteration via nextOrd()/lookupOrd() is not
+        // exercised by the verifier, hence unsupported.
+        final RandomAccessOrds multiValues = new RandomAccessOrds() {
+            int doc;
+
+            @Override
+            public long ordAt(int index) {
+                return array[doc][index];
+            }
+
+            @Override
+            public int cardinality() {
+                return array[doc].length;
+            }
+
+            @Override
+            public long nextOrd() {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public void setDocument(int docID) {
+                this.doc = docID;
+            }
+
+            @Override
+            public BytesRef lookupOrd(long ord) {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public long getValueCount() {
+                return 1 << 20;
+            }
+        };
+        verify(multiValues, numDocs);
+        final FixedBitSet rootDocs = randomRootDocs(numDocs);
+        final FixedBitSet innerDocs = randomInnerDocs(rootDocs);
+        verify(multiValues, numDocs, rootDocs, innerDocs);
+    }
+
+    /**
+     * Flat check for ordinal selection: the selected ord for each doc must equal
+     * the smallest (MIN) or largest (MAX) ord of that doc, or -1 when it has none.
+     */
+    private void verify(RandomAccessOrds values, int maxDoc) {
+        for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX}) {
+            final SortedDocValues selected = mode.select(values);
+            for (int doc = 0; doc < maxDoc; ++doc) {
+                final long actual = selected.getOrd(doc);
+                values.setDocument(doc);
+                final int count = values.cardinality();
+                int expected = -1; // -1 means "no value for this doc"
+                if (count > 0) {
+                    expected = (int) values.ordAt(0);
+                    for (int j = 1; j < count; ++j) {
+                        final int ord = (int) values.ordAt(j);
+                        expected = mode == MultiValueMode.MIN ? Math.min(expected, ord) : Math.max(expected, ord);
+                    }
+                }
+
+                assertEquals(mode.toString() + " docId=" + doc, expected, actual);
+            }
+        }
+    }
+
+    /**
+     * Nested (block-join) check for ordinal selection: for each root doc the selected
+     * ord must equal the min/max ord over all inner docs strictly between the previous
+     * root and this root, or -1 when there are no such values.
+     */
+    private void verify(RandomAccessOrds values, int maxDoc, FixedBitSet rootDocs, FixedBitSet innerDocs) throws IOException {
+        for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX}) {
+            final SortedDocValues selected = mode.select(values, rootDocs, new BitDocIdSet(innerDocs));
+            int prevRoot = -1;
+            // Walk roots in order; guard nextSetBit against indices >= maxDoc.
+            for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? rootDocs.nextSetBit(root + 1) : -1) {
+                final int actual = selected.getOrd(root);
+                int expected = -1;
+                // Children of this root: inner docs in (prevRoot, root).
+                for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit(child + 1)) {
+                    values.setDocument(child);
+                    for (int j = 0; j < values.cardinality(); ++j) {
+                        if (expected == -1) {
+                            expected = (int) values.ordAt(j);
+                        } else {
+                            if (mode == MultiValueMode.MIN) {
+                                expected = Math.min(expected, (int)values.ordAt(j));
+                            } else if (mode == MultiValueMode.MAX) {
+                                expected = Math.max(expected, (int)values.ordAt(j));
+                            }
+                        }
+                    }
+                }
+
+                assertEquals(mode.toString() + " docId=" + root, expected, actual);
+
+                prevRoot = root;
+            }
+        }
+    }
+
+    /**
+     * MIN/MAX/SUM/AVG selection over an arbitrary (unsorted) source of doubles backed
+     * by single-valued data: cos() is applied on top of a singleton
+     * SortedNumericDoubleValues so the produced values are deliberately not sorted.
+     */
+    public void testUnsortedSingleValuedDoubles() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final double[] array = new double[numDocs];
+        // When non-null, docsWithValue marks docs that "have" a value; a doc may also
+        // be marked without receiving a value to exercise the present-but-zero case.
+        final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs);
+        for (int i = 0; i < array.length; ++i) {
+            if (randomBoolean()) {
+                array[i] = randomDouble();
+                if (docsWithValue != null) {
+                    docsWithValue.set(i);
+                }
+            } else if (docsWithValue != null && randomBoolean()) {
+                docsWithValue.set(i);
+            }
+        }
+        final NumericDoubleValues singleValues = new NumericDoubleValues() {
+            @Override
+            public double get(int docID) {
+                return array[docID];
+            }
+        };
+        final SortedNumericDoubleValues singletonValues = FieldData.singleton(singleValues, docsWithValue);
+        // Delegates entirely to singletonValues; the removed "int doc" field of the
+        // original was dead code since this wrapper keeps no per-doc state of its own.
+        final MultiValueMode.UnsortedNumericDoubleValues multiValues = new MultiValueMode.UnsortedNumericDoubleValues() {
+            @Override
+            public int count() {
+                return singletonValues.count();
+            }
+
+            @Override
+            public void setDocument(int doc) {
+                singletonValues.setDocument(doc);
+            }
+
+            @Override
+            public double valueAt(int index) {
+                return Math.cos(singletonValues.valueAt(index));
+            }
+        };
+        verify(multiValues, numDocs);
+    }
+
+    /**
+     * MIN/MAX/SUM/AVG selection over multi-valued unsorted doubles: per-doc values are
+     * sorted first, then sin() is applied when read, which destroys the ordering —
+     * exactly what the UnsortedNumericDoubleValues contract allows.
+     */
+    public void testUnsortedMultiValuedDoubles() throws Exception {
+        final int numDocs = scaledRandomIntBetween(1, 100);
+        final double[][] array = new double[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            final double[] values = new double[randomInt(4)];
+            for (int j = 0; j < values.length; ++j) {
+                values[j] = randomDouble();
+            }
+            Arrays.sort(values);
+            array[i] = values;
+        }
+        final MultiValueMode.UnsortedNumericDoubleValues multiValues = new MultiValueMode.UnsortedNumericDoubleValues() {
+            // Current document, set by setDocument() and read by count()/valueAt().
+            int doc;
+
+            @Override
+            public int count() {
+                return array[doc].length;
+            }
+
+            @Override
+            public void setDocument(int doc) {
+                this.doc = doc;
+            }
+
+            @Override
+            public double valueAt(int index) {
+                // sin() makes the returned sequence non-monotonic on purpose.
+                return Math.sin(array[doc][index]);
+            }
+        };
+        verify(multiValues, numDocs);
+    }
+
+    /**
+     * Reference check for MIN/MAX/SUM/AVG over unsorted doubles: recomputes each doc's
+     * expected value with a naive scan and compares against mode.select(), allowing a
+     * 0.1 delta for floating-point accumulation differences.
+     */
+    private void verify(MultiValueMode.UnsortedNumericDoubleValues values, int maxDoc) {
+        for (double missingValue : new double[] { 0, randomDouble() }) {
+            for (MultiValueMode mode : new MultiValueMode[] {MultiValueMode.MIN, MultiValueMode.MAX, MultiValueMode.SUM, MultiValueMode.AVG}) {
+                final NumericDoubleValues selected = mode.select(values, missingValue);
+                for (int i = 0; i < maxDoc; ++i) {
+                    final double actual = selected.get(i);
+                    double expected = 0.0;
+                    values.setDocument(i);
+                    int numValues = values.count();
+                    if (numValues == 0) {
+                        expected = missingValue;
+                    } else {
+                        // Sentinels for the running min/max; Long.MIN/MAX_VALUE are far
+                        // outside the sin/cos value range used by the callers.
+                        if (mode == MultiValueMode.MAX) {
+                            expected = Long.MIN_VALUE;
+                        } else if (mode == MultiValueMode.MIN) {
+                            expected = Long.MAX_VALUE;
+                        }
+                        for (int j = 0; j < numValues; ++j) {
+                            if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) {
+                                expected += values.valueAt(j);
+                            } else if (mode == MultiValueMode.MIN) {
+                                expected = Math.min(expected, values.valueAt(j));
+                            } else if (mode == MultiValueMode.MAX) {
+                                expected = Math.max(expected, values.valueAt(j));
+                            }
+                        }
+                        if (mode == MultiValueMode.AVG) {
+                            expected = expected/numValues;
+                        }
+                    }
+
+                    assertEquals(mode.toString() + " docId=" + i, expected, actual, 0.1);
+                }
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
new file mode 100644
index 0000000000..2578257af7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search;
+
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Verifies that SearchService releases all active search contexts when it is
+ * closed, stopped, or when the index owning the contexts is deleted.
+ */
+public class SearchServiceTests extends ElasticsearchSingleNodeTest {
+
+    @Override
+    protected boolean resetNodeAfterTest() {
+        // doClose()/doStop() below leave the node's SearchService unusable,
+        // so the node must be rebuilt after every test method.
+        return true;
+    }
+
+    public void testClearOnClose() throws ExecutionException, InterruptedException {
+        createIndex("index");
+        client().prepareIndex("index", "type", "1").setSource("field", "value").setRefresh(true).get();
+        // Opening a scroll keeps a search context alive on the node.
+        SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get();
+        assertThat(searchResponse.getScrollId(), is(notNullValue()));
+        SearchService service = getInstanceFromNode(SearchService.class);
+
+        assertEquals(1, service.getActiveContexts());
+        service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test
+        assertEquals(0, service.getActiveContexts());
+    }
+
+    public void testClearOnStop() throws ExecutionException, InterruptedException {
+        createIndex("index");
+        client().prepareIndex("index", "type", "1").setSource("field", "value").setRefresh(true).get();
+        SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get();
+        assertThat(searchResponse.getScrollId(), is(notNullValue()));
+        SearchService service = getInstanceFromNode(SearchService.class);
+
+        assertEquals(1, service.getActiveContexts());
+        // Stopping (as opposed to closing) must also free the open context.
+        service.doStop();
+        assertEquals(0, service.getActiveContexts());
+    }
+
+    public void testClearIndexDelete() throws ExecutionException, InterruptedException {
+        createIndex("index");
+        client().prepareIndex("index", "type", "1").setSource("field", "value").setRefresh(true).get();
+        SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get();
+        assertThat(searchResponse.getScrollId(), is(notNullValue()));
+        SearchService service = getInstanceFromNode(SearchService.class);
+
+        assertEquals(1, service.getActiveContexts());
+        // Deleting the index must tear down contexts that reference its shards.
+        assertAcked(client().admin().indices().prepareDelete("index"));
+        assertEquals(0, service.getActiveContexts());
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsTests.java b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsTests.java
new file mode 100644
index 0000000000..322679a584
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/SearchWithRejectionsTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+@LuceneTestCase.Slow
+/**
+ * Verifies that search contexts are not leaked when the search thread pool rejects
+ * requests: after a burst of concurrent searches against a deliberately tiny pool,
+ * the open-context count must return to zero.
+ */
+public class SearchWithRejectionsTests extends ElasticsearchIntegrationTest {
+    @Override
+    public Settings nodeSettings(int nodeOrdinal) {
+        // A single search thread with a queue of one makes rejections very likely
+        // when several searches are fired concurrently below.
+        return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+                .put("threadpool.search.type", "fixed")
+                .put("threadpool.search.size", 1)
+                .put("threadpool.search.queue_size", 1)
+                .build();
+    }
+
+    @Test
+    public void testOpenContextsAfterRejections() throws InterruptedException {
+        createIndex("test");
+        ensureGreen("test");
+        final int docs = scaledRandomIntBetween(20, 50);
+        for (int i = 0; i < docs; i++) {
+            client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+        }
+        IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+        assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+        refresh();
+
+        int numSearches = 10;
+        Future<SearchResponse>[] responses = new Future[numSearches];
+        SearchType searchType = randomFrom(SearchType.DEFAULT, SearchType.QUERY_AND_FETCH, SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_AND_FETCH, SearchType.DFS_QUERY_THEN_FETCH);
+        logger.info("search type is {}", searchType);
+        // Fire all searches concurrently so some of them get rejected.
+        for (int i = 0; i < numSearches; i++) {
+            responses[i] = client().prepareSearch()
+                    .setQuery(matchAllQuery())
+                    .setSearchType(searchType)
+                    .execute();
+        }
+        for (int i = 0; i < numSearches; i++) {
+            try {
+                responses[i].get();
+            } catch (Throwable t) {
+                // Rejections are expected here; only the context count matters below.
+            }
+        }
+        awaitBusy(new Predicate<Object>() {
+            @Override
+            public boolean apply(Object input) {
+                // we must wait here because the requests to release search contexts might still be in flight
+                // although the search request has already returned
+                return client().admin().indices().prepareStats().execute().actionGet().getTotal().getSearch().getOpenContexts() == 0;
+            }
+        }, 1, TimeUnit.SECONDS);
+        indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+        assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java b/core/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
new file mode 100644
index 0000000000..e83f477d33
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/StressSearchServiceReaperTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+
+@ClusterScope(scope = SUITE)
+/**
+ * Stress test racing the search-context keep-alive reaper (run every millisecond)
+ * against many consecutive searches: no search may fail because the reaper freed
+ * its context prematurely.
+ */
+public class StressSearchServiceReaperTest extends ElasticsearchIntegrationTest {
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        // very frequent checks
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
+                .put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(1)).build();
+    }
+
+    @Slow
+    @Test // see issue #5165 - this test fails each time without the fix in pull #5170
+    public void testStressReaper() throws ExecutionException, InterruptedException {
+        int num = randomIntBetween(100, 150);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex("test", "type", "" + i).setSource("f", English.intToEnglish(i));
+        }
+        createIndex("test");
+        indexRandom(true, builders);
+        ensureYellow();
+        // Every iteration must see all docs; a prematurely-reaped context would
+        // surface as a search failure or a short hit count.
+        final int iterations = scaledRandomIntBetween(500, 1000);
+        for (int i = 0; i < iterations; i++) {
+            SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(num).get();
+            assertNoFailures(searchResponse);
+            assertHitCount(searchResponse, num);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java
new file mode 100644
index 0000000000..dcb3dc2f16
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+
+import java.io.IOException;
+
+/**
+ * Checks that Aggregator.needsScores() is reported correctly for field-based,
+ * script-based, nested, and top_hits aggregations, by parsing real aggregation
+ * JSON against a single-node index.
+ */
+public class AggregationCollectorTests extends ElasticsearchSingleNodeTest {
+
+    public void testNeedsScores() throws Exception {
+        IndexService index = createIndex("idx");
+        client().prepareIndex("idx", "type", "1").setSource("f", 5).execute().get();
+        client().admin().indices().prepareRefresh("idx").get();
+
+        // simple field aggregation, no scores needed
+        String fieldAgg = "{ \"my_terms\": {\"terms\": {\"field\": \"f\"}}}";
+        assertFalse(needsScores(index, fieldAgg));
+
+        // agg on a script => scores are needed
+        String scriptAgg = "{ \"my_terms\": {\"terms\": {\"script\": \"doc['f'].value\"}}}";
+        assertTrue(needsScores(index, scriptAgg));
+
+        // make sure the information is propagated to sub aggregations
+        String subFieldAgg = "{ \"my_outer_terms\": { \"terms\": { \"field\": \"f\" }, \"aggs\": " + fieldAgg + "}}";
+        assertFalse(needsScores(index, subFieldAgg));
+
+        String subScriptAgg = "{ \"my_outer_terms\": { \"terms\": { \"field\": \"f\" }, \"aggs\": " + scriptAgg + "}}";
+        assertTrue(needsScores(index, subScriptAgg));
+
+        // top_hits is a particular example of an aggregation that needs scores
+        String topHitsAgg = "{ \"my_hits\": {\"top_hits\": {}}}";
+        assertTrue(needsScores(index, topHitsAgg));
+    }
+
+    /**
+     * Parses the given aggregation JSON into a top-level Aggregator on the test
+     * index and returns whether that aggregator requests scores.
+     */
+    private boolean needsScores(IndexService index, String agg) throws IOException {
+        AggregatorParsers parser = getInstanceFromNode(AggregatorParsers.class);
+        XContentParser aggParser = JsonXContent.jsonXContent.createParser(agg);
+        // Advance to the first token so parseAggregators starts inside the object.
+        aggParser.nextToken();
+        SearchContext searchContext = createSearchContext(index);
+        final AggregatorFactories factories = parser.parseAggregators(aggParser, searchContext);
+        AggregationContext aggregationContext = new AggregationContext(searchContext);
+        final Aggregator[] aggregators = factories.createTopLevelAggregators(aggregationContext);
+        assertEquals(1, aggregators.length);
+        return aggregators[0].needsScores();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsBinaryTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsBinaryTests.java
new file mode 100644
index 0000000000..2e27c68304
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsBinaryTests.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+/**
+ * Verifies that sub-aggregations can be attached as pre-serialized XContent
+ * ("binary aggregations") — both with the default JSON content type and with the
+ * request's own content type — and still produce the same results as builders.
+ */
+public class AggregationsBinaryTests extends ElasticsearchIntegrationTest {
+
+    private static final String STRING_FIELD_NAME = "s_value";
+    private static final String INT_FIELD_NAME = "i_value";
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        // Five docs: ("val0",0) .. ("val4",4) — one bucket per string value below.
+        for (int i = 0; i < 5; i++) {
+            builders.add(client().prepareIndex("idx", "type").setSource(
+                    jsonBuilder().startObject().field(STRING_FIELD_NAME, "val" + i).field(INT_FIELD_NAME, i).endObject()));
+        }
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    @Test
+    public void testAggregationsBinary() throws Exception {
+        TermsBuilder termsBuilder = AggregationBuilders.terms("terms").field(STRING_FIELD_NAME);
+        TermsBuilder subTerm = AggregationBuilders.terms("subterms").field(INT_FIELD_NAME);
+
+        // Create an XContentBuilder from sub aggregation
+        XContentBuilder subTermContentBuilder = JsonXContent.contentBuilder().startObject();
+        subTerm.toXContent(subTermContentBuilder, ToXContent.EMPTY_PARAMS);
+        subTermContentBuilder.endObject();
+
+        // Add sub aggregation as a XContentBuilder (binary_aggregation)
+        termsBuilder.subAggregation(subTermContentBuilder);
+
+        SearchResponse response = client().prepareSearch("idx").setTypes("type").addAggregation(termsBuilder).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        // Each string bucket must hold exactly one sub-terms bucket with the
+        // matching integer key.
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+            assertThat(bucket, notNullValue());
+            assertThat(bucket.getKeyAsString(), equalTo("val" + i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            Aggregations subAggs = bucket.getAggregations();
+            assertThat(subAggs, notNullValue());
+            assertThat(subAggs.asList().size(), equalTo(1));
+            Terms subTerms = subAggs.get("subterms");
+            assertThat(subTerms, notNullValue());
+            List<Bucket> subTermsBuckets = subTerms.getBuckets();
+            assertThat(subTermsBuckets, notNullValue());
+            assertThat(subTermsBuckets.size(), equalTo(1));
+            assertThat(((Number) subTermsBuckets.get(0).getKey()).intValue(), equalTo(i));
+            assertThat(subTermsBuckets.get(0).getDocCount(), equalTo(1l));
+        }
+    }
+
+    @Test
+    public void testAggregationsBinarySameContentType() throws Exception {
+        TermsBuilder termsBuilder = AggregationBuilders.terms("terms").field(STRING_FIELD_NAME);
+        TermsBuilder subTerm = AggregationBuilders.terms("subterms").field(INT_FIELD_NAME);
+
+        // Create an XContentBuilder from sub aggregation
+        // Same as above but using the request's default content type rather than JSON.
+        XContentBuilder subTermContentBuilder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
+        subTermContentBuilder.startObject();
+        subTerm.toXContent(subTermContentBuilder, ToXContent.EMPTY_PARAMS);
+        subTermContentBuilder.endObject();
+
+        // Add sub aggregation as a XContentBuilder (binary_aggregation)
+        termsBuilder.subAggregation(subTermContentBuilder);
+
+        SearchResponse response = client().prepareSearch("idx").setTypes("type").addAggregation(termsBuilder).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+            assertThat(bucket, notNullValue());
+            assertThat(bucket.getKeyAsString(), equalTo("val" + i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            Aggregations subAggs = bucket.getAggregations();
+            assertThat(subAggs, notNullValue());
+            assertThat(subAggs.asList().size(), equalTo(1));
+            Terms subTerms = subAggs.get("subterms");
+            assertThat(subTerms, notNullValue());
+            List<Bucket> subTermsBuckets = subTerms.getBuckets();
+            assertThat(subTermsBuckets, notNullValue());
+            assertThat(subTermsBuckets.size(), equalTo(1));
+            assertThat(((Number) subTermsBuckets.get(0).getKey()).intValue(), equalTo(i));
+            assertThat(subTermsBuckets.get(0).getDocCount(), equalTo(1l));
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java
new file mode 100644
index 0000000000..640ac7ffb8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+/**
+ * Integration tests for aggregations combined with scan and scroll searches:
+ * scan must reject aggregations outright, while scroll returns them only on the
+ * initial response.
+ */
+public class AggregationsIntegrationTests extends ElasticsearchIntegrationTest {
+
+    static int numDocs;
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        assertAcked(prepareCreate("index").addMapping("type", "f", "type=string").get());
+        ensureYellow("index");
+        numDocs = randomIntBetween(1, 20);
+        List<IndexRequestBuilder> docs = Lists.newArrayList();
+        // Field f groups every three docs into the same term (i / 3).
+        for (int i = 0; i < numDocs; ++i) {
+            docs.add(client().prepareIndex("index", "type").setSource("f", Integer.toString(i / 3)));
+        }
+        indexRandom(true, docs);
+    }
+
+    public void testScan() {
+        // search_type=scan must refuse aggregations with a clear error message.
+        try {
+            client().prepareSearch("index").setSearchType(SearchType.SCAN).setScroll(new TimeValue(500)).addAggregation(terms("f").field("f")).get();
+            fail();
+        } catch (SearchPhaseExecutionException e) {
+            assertTrue(e.toString(), e.toString().contains("aggregations are not supported with search_type=scan"));
+        }
+    }
+
+    public void testScroll() {
+        final int size = randomIntBetween(1, 4);
+        SearchResponse response = client().prepareSearch("index")
+                .setSize(size).setScroll(new TimeValue(500))
+                .addAggregation(terms("f").field("f")).get();
+        assertSearchResponse(response);
+        // Aggregations are computed once, on the first scroll response.
+        Aggregations aggregations = response.getAggregations();
+        assertNotNull(aggregations);
+        Terms terms = aggregations.get("f");
+        assertEquals(Math.min(numDocs, 3L), terms.getBucketByKey("0").getDocCount());
+
+        // Subsequent scroll pages carry hits but no aggregations.
+        int total = response.getHits().getHits().length;
+        while (response.getHits().hits().length > 0) {
+            response = client().prepareSearchScroll(response.getScrollId())
+                    .setScroll(new TimeValue(500))
+                    .execute().actionGet();
+            assertNull(response.getAggregations());
+            total += response.getHits().hits().length;
+        }
+        clearScroll(response.getScrollId());
+        assertEquals(numDocs, total);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
new file mode 100644
index 0000000000..4888996f35
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/CombiTests.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntIntMap;
+import com.carrotsearch.hppc.IntIntHashMap;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Tests that combine several aggregations (or agg trees) in a single search request.
+ */
+public class CombiTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * Making sure that if there are multiple aggregations, working on the same field, yet require different
+ * value source type, they can all still work. It used to fail as we used to cache the ValueSource by the
+ * field name. If the cached value source was of type "bytes" and another aggregation on the field required to see
+ * it as "numeric", it didn't work. Now we cache the Value Sources by a custom key (field name + ValueSource type)
+ * so there's no conflict there.
+ */
+ @Test
+ public void multipleAggs_OnSameField_WithDifferentRequiredValueSourceType() throws Exception {
+
+ createIndex("idx");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
+ // doc count per distinct "value"; drained again below when verifying buckets
+ IntIntMap values = new IntIntHashMap();
+ long missingValues = 0;
+ for (int i = 0; i < builders.length; i++) {
+ String name = "name_" + randomIntBetween(1, 10);
+ if (rarely()) {
+ // index a doc without the "value" field so the missing agg counts it
+ missingValues++;
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .endObject());
+ } else {
+ int value = randomIntBetween(1, 10);
+ values.put(value, values.getOrDefault(value, 0) + 1);
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .field("value", value)
+ .endObject());
+ }
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+
+
+ // "missing" reads the field as bytes while "terms" needs it as numeric;
+ // both must work side by side in one request
+ SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_values").field("value"))
+ .addAggregation(terms("values").field("value")
+ .collectMode(aggCollectionMode ))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Aggregations aggs = response.getAggregations();
+
+ Missing missing = aggs.get("missing_values");
+ assertNotNull(missing);
+ assertThat(missing.getDocCount(), equalTo(missingValues));
+
+ Terms terms = aggs.get("values");
+ assertNotNull(terms);
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(values.size()));
+ for (Terms.Bucket bucket : buckets) {
+ values.remove(((Number) bucket.getKey()).intValue());
+ }
+ // every expected value must have shown up as exactly one bucket
+ assertTrue(values.isEmpty());
+ }
+
+
+ /**
+ * Some top aggs (e.g. date_histogram/histogram) that are executed on unmapped fields will generate an
+ * estimated count of buckets - zero. When the sub aggregator is then created, it will take this estimation
+ * into account. This used to cause an ArrayIndexOutOfBoundsException...
+ */
+ @Test
+ public void subAggregationForTopAggregationOnUnmappedField() throws Exception {
+
+ prepareCreate("idx").addMapping("type", jsonBuilder()
+ .startObject()
+ .startObject("type").startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("value").field("type", "integer").endObject()
+ .endObject().endObject()
+ .endObject()).execute().actionGet();
+
+ ensureSearchable("idx");
+
+ SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+ // NOTE: "value1" is intentionally unmapped (the mapping above only defines "value")
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .addAggregation(histogram("values").field("value1").interval(1)
+ .subAggregation(terms("names").field("name")
+ .collectMode(aggCollectionMode )))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(0l));
+ Histogram values = searchResponse.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getBuckets().isEmpty(), is(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java
new file mode 100644
index 0000000000..109d0951a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java
@@ -0,0 +1,444 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
+import org.elasticsearch.search.aggregations.bucket.range.RangeBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like
+ * the growth of dynamic arrays is tested.
+ */
+@Slow
+public class EquivalenceTests extends ElasticsearchIntegrationTest {
+
+ // Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported
+ // Duel with filters
+ public void testRandomRanges() throws Exception {
+ final int numDocs = scaledRandomIntBetween(500, 5000);
+ final double[][] docs = new double[numDocs][];
+ for (int i = 0; i < numDocs; ++i) {
+ final int numValues = randomInt(5);
+ docs[i] = new double[numValues];
+ for (int j = 0; j < numValues; ++j) {
+ docs[i][j] = randomDouble() * 100;
+ }
+ }
+
+ createIndex("idx");
+ for (int i = 0; i < docs.length; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .startArray("values");
+ for (int j = 0; j < docs[i].length; ++j) {
+ source = source.value(docs[i][j]);
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+
+ // build a random set of ranges: unbounded-below, unbounded-above, or bounded
+ // (possibly reversed/overlapping/disjoint)
+ final int numRanges = randomIntBetween(1, 20);
+ final double[][] ranges = new double[numRanges][];
+ for (int i = 0; i < ranges.length; ++i) {
+ switch (randomInt(2)) {
+ case 0:
+ ranges[i] = new double[] { Double.NEGATIVE_INFINITY, randomInt(100) };
+ break;
+ case 1:
+ ranges[i] = new double[] { randomInt(100), Double.POSITIVE_INFINITY };
+ break;
+ case 2:
+ ranges[i] = new double[] { randomInt(100), randomInt(100) };
+ break;
+ default:
+ throw new AssertionError();
+ }
+ }
+
+ RangeBuilder query = range("range").field("values");
+ for (int i = 0; i < ranges.length; ++i) {
+ String key = Integer.toString(i);
+ if (ranges[i][0] == Double.NEGATIVE_INFINITY) {
+ query.addUnboundedTo(key, ranges[i][1]);
+ } else if (ranges[i][1] == Double.POSITIVE_INFINITY) {
+ query.addUnboundedFrom(key, ranges[i][0]);
+ } else {
+ query.addRange(key, ranges[i][0], ranges[i][1]);
+ }
+ }
+
+ // one range filter agg per range to duel against the range agg buckets
+ SearchRequestBuilder reqBuilder = client().prepareSearch("idx").addAggregation(query);
+ for (int i = 0; i < ranges.length; ++i) {
+ RangeQueryBuilder filter = QueryBuilders.rangeQuery("values");
+ if (ranges[i][0] != Double.NEGATIVE_INFINITY) {
+ filter = filter.from(ranges[i][0]);
+ }
+ if (ranges[i][1] != Double.POSITIVE_INFINITY){
+ filter = filter.to(ranges[i][1]);
+ }
+ reqBuilder = reqBuilder.addAggregation(filter("filter" + i).filter(filter));
+ }
+
+ SearchResponse resp = reqBuilder.execute().actionGet();
+ Range range = resp.getAggregations().get("range");
+ List<? extends Bucket> buckets = range.getBuckets();
+
+ HashMap<String, Bucket> bucketMap = Maps.newHashMapWithExpectedSize(buckets.size());
+ for (Bucket bucket : buckets) {
+ bucketMap.put(bucket.getKeyAsString(), bucket);
+ }
+
+ for (int i = 0; i < ranges.length; ++i) {
+
+ // recompute the expected doc count client-side: a doc matches if any of its
+ // values falls in [from, to)
+ long count = 0;
+ for (double[] values : docs) {
+ for (double value : values) {
+ if (value >= ranges[i][0] && value < ranges[i][1]) {
+ ++count;
+ break;
+ }
+ }
+ }
+
+ final Range.Bucket bucket = bucketMap.get(Integer.toString(i));
+ assertEquals(bucket.getKeyAsString(), Integer.toString(i), bucket.getKeyAsString());
+ assertEquals(bucket.getKeyAsString(), count, bucket.getDocCount());
+
+ final Filter filter = resp.getAggregations().get("filter" + i);
+ assertThat(filter.getDocCount(), equalTo(count));
+ }
+ }
+
+ // test long/double/string terms aggs with high number of buckets that require array growth
+ public void testDuelTerms() throws Exception {
+ final int numDocs = scaledRandomIntBetween(1000, 2000);
+ final int maxNumTerms = randomIntBetween(10, 5000);
+
+ final IntHashSet valuesSet = new IntHashSet();
+ cluster().wipeIndices("idx");
+ prepareCreate("idx")
+ .addMapping("type", jsonBuilder().startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("string_values")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .startObject("fields")
+ .startObject("doc_values")
+ .field("type", "string")
+ .field("index", "no")
+ .startObject("fielddata")
+ .field("format", "doc_values")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("long_values")
+ .field("type", "long")
+ .endObject()
+ .startObject("double_values")
+ .field("type", "double")
+ .endObject()
+ .endObject()
+ .endObject()).execute().actionGet();
+
+ List<IndexRequestBuilder> indexingRequests = Lists.newArrayList();
+ for (int i = 0; i < numDocs; ++i) {
+ // each doc carries the same random ints as long, double and string values
+ // so all five terms aggs below must produce equivalent buckets
+ final int[] values = new int[randomInt(4)];
+ for (int j = 0; j < values.length; ++j) {
+ values[j] = randomInt(maxNumTerms - 1) - 1000;
+ valuesSet.add(values[j]);
+ }
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("long_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(values[j]);
+ }
+ source = source.endArray().startArray("double_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value((double) values[j]);
+ }
+ source = source.endArray().startArray("string_values");
+ for (int j = 0; j < values.length; ++j) {
+ source = source.value(Integer.toString(values[j]));
+ }
+ source = source.endArray().endObject();
+ indexingRequests.add(client().prepareIndex("idx", "type").setSource(source));
+ }
+ indexRandom(true, indexingRequests);
+
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+
+ TermsAggregatorFactory.ExecutionMode[] globalOrdinalModes = new TermsAggregatorFactory.ExecutionMode[]{
+ TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS_HASH,
+ TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS
+ };
+
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("long").field("long_values").size(maxNumTerms).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(min("min").field("num")))
+ .addAggregation(terms("double").field("double_values").size(maxNumTerms).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(max("max").field("num")))
+ .addAggregation(terms("string_map").field("string_values").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString()).size(maxNumTerms).subAggregation(stats("stats").field("num")))
+ .addAggregation(terms("string_global_ordinals").field("string_values").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(globalOrdinalModes[randomInt(globalOrdinalModes.length - 1)].toString()).size(maxNumTerms).subAggregation(extendedStats("stats").field("num")))
+ .addAggregation(terms("string_global_ordinals_doc_values").field("string_values.doc_values").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(globalOrdinalModes[randomInt(globalOrdinalModes.length - 1)].toString()).size(maxNumTerms).subAggregation(extendedStats("stats").field("num")))
+ .execute().actionGet();
+ assertAllSuccessful(resp);
+ assertEquals(numDocs, resp.getHits().getTotalHits());
+
+ final Terms longTerms = resp.getAggregations().get("long");
+ final Terms doubleTerms = resp.getAggregations().get("double");
+ final Terms stringMapTerms = resp.getAggregations().get("string_map");
+ final Terms stringGlobalOrdinalsTerms = resp.getAggregations().get("string_global_ordinals");
+ final Terms stringGlobalOrdinalsDVTerms = resp.getAggregations().get("string_global_ordinals_doc_values");
+
+ assertEquals(valuesSet.size(), longTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), doubleTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size());
+ assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size());
+ for (Terms.Bucket bucket : longTerms.getBuckets()) {
+ final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(Double.toString(Long.parseLong(bucket.getKeyAsString())));
+ final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsString());
+ final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsString());
+ final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsString());
+ assertNotNull(doubleBucket);
+ assertNotNull(stringMapBucket);
+ assertNotNull(stringGlobalOrdinalsBucket);
+ assertNotNull(stringGlobalOrdinalsDVBucket);
+ assertEquals(bucket.getDocCount(), doubleBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount());
+ assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount());
+ }
+ }
+
+ // Duel between histograms and scripted terms
+ public void testDuelTermsHistogram() throws Exception {
+ createIndex("idx");
+
+ final int numDocs = scaledRandomIntBetween(500, 5000);
+ final int maxNumTerms = randomIntBetween(10, 2000);
+ final int interval = randomIntBetween(1, 100);
+
+ final Integer[] values = new Integer[maxNumTerms];
+ for (int i = 0; i < values.length; ++i) {
+ values[i] = randomInt(maxNumTerms * 3) - maxNumTerms;
+ }
+
+ for (int i = 0; i < numDocs; ++i) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("num", randomDouble())
+ .startArray("values");
+ final int numValues = randomInt(4);
+ for (int j = 0; j < numValues; ++j) {
+ source = source.value(randomFrom(values));
+ }
+ source = source.endArray().endObject();
+ client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
+ }
+ assertNoFailures(client().admin().indices().prepareRefresh("idx").setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+
+ // a terms agg over floor(value / interval) must bucket exactly like a histogram
+ // with the same interval
+ Map<String, Object> params = new HashMap<>();
+ params.put("interval", interval);
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(
+ terms("terms").field("values").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script(new Script("floor(_value / interval)", ScriptType.INLINE, null, params)).size(maxNumTerms))
+ .addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1))
+ .execute().actionGet();
+
+ assertSearchResponse(resp);
+
+ Terms terms = resp.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ Histogram histo = resp.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ final double key = ((Number) bucket.getKey()).doubleValue() / interval;
+ final Terms.Bucket termsBucket = terms.getBucketByKey(String.valueOf(key));
+ assertEquals(bucket.getDocCount(), termsBucket.getDocCount());
+ }
+ }
+
+ public void testLargeNumbersOfPercentileBuckets() throws Exception {
+ // test high numbers of percentile buckets to make sure paging and release work correctly
+ createIndex("idx");
+
+ final int numDocs = scaledRandomIntBetween(2500, 5000);
+ logger.info("Indexing [" + numDocs +"] docs");
+ List<IndexRequestBuilder> indexingRequests = Lists.newArrayList();
+ for (int i = 0; i < numDocs; ++i) {
+ indexingRequests.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource("double_value", randomDouble()));
+ }
+ indexRandom(true, indexingRequests);
+
+ SearchResponse response = client().prepareSearch("idx").addAggregation(terms("terms").field("double_value").collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(percentiles("pcts").field("double_value"))).execute().actionGet();
+ assertAllSuccessful(response);
+ assertEquals(numDocs, response.getHits().getTotalHits());
+ }
+
+ // https://github.com/elasticsearch/elasticsearch/issues/6435
+ public void testReduce() throws Exception {
+ createIndex("idx");
+ final int value = randomIntBetween(0, 10);
+ indexRandom(true, client().prepareIndex("idx", "type").setSource("f", value));
+ ensureYellow("idx"); // only one document let's make sure all shards have an active primary
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("filter").filter(QueryBuilders.matchAllQuery())
+ .subAggregation(range("range")
+ .field("f")
+ .addUnboundedTo(6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum").field("f"))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("filter");
+ assertNotNull(filter);
+ assertEquals(1, filter.getDocCount());
+
+ Range range = filter.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getDocCount(), equalTo(value < 6 ? 1L : 0L));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertEquals(value < 6 ? value : 0, sum.getValue(), 0d);
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getDocCount(), equalTo(value >= 6 ? 1L : 0L));
+ sum = bucket.getAggregations().get("sum");
+ assertEquals(value >= 6 ? value : 0, sum.getValue(), 0d);
+ }
+
+ /**
+ * Asserts that two terms aggregations carry identical buckets: same number of
+ * buckets, and pairwise-equal keys and doc counts in iteration order.
+ */
+ private void assertEquals(Terms t1, Terms t2) {
+ List<Terms.Bucket> t1Buckets = t1.getBuckets();
+ // FIX: was t1.getBuckets() — the helper compared t1 against itself, so a
+ // difference in t2 could never be detected and the duel assertion was vacuous
+ List<Terms.Bucket> t2Buckets = t2.getBuckets();
+ assertEquals(t1Buckets.size(), t2Buckets.size());
+ for (Iterator<Terms.Bucket> it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext(); ) {
+ final Terms.Bucket b1 = it1.next();
+ final Terms.Bucket b2 = it2.next();
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ assertEquals(b1.getKey(), b2.getKey());
+ }
+ }
+
+ public void testDuelDepthBreadthFirst() throws Exception {
+ createIndex("idx");
+ final int numDocs = randomIntBetween(100, 500);
+ List<IndexRequestBuilder> reqs = new ArrayList<>();
+ for (int i = 0; i < numDocs; ++i) {
+ final int v1 = randomInt(1 << randomInt(7));
+ final int v2 = randomInt(1 << randomInt(7));
+ final int v3 = randomInt(1 << randomInt(7));
+ reqs.add(client().prepareIndex("idx", "type").setSource("f1", v1, "f2", v2, "f3", v3));
+ }
+ indexRandom(true, reqs);
+
+ // same three-level terms tree, collected depth-first vs breadth-first: the
+ // resulting buckets must be identical at every level
+ final SearchResponse r1 = client().prepareSearch("idx").addAggregation(
+ terms("f1").field("f1").collectMode(SubAggCollectionMode.DEPTH_FIRST)
+ .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.DEPTH_FIRST)
+ .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.DEPTH_FIRST)))).get();
+ assertSearchResponse(r1);
+ final SearchResponse r2 = client().prepareSearch("idx").addAggregation(
+ terms("f1").field("f1").collectMode(SubAggCollectionMode.BREADTH_FIRST)
+ .subAggregation(terms("f2").field("f2").collectMode(SubAggCollectionMode.BREADTH_FIRST)
+ .subAggregation(terms("f3").field("f3").collectMode(SubAggCollectionMode.BREADTH_FIRST)))).get();
+ assertSearchResponse(r2);
+
+ final Terms t1 = r1.getAggregations().get("f1");
+ final Terms t2 = r2.getAggregations().get("f1");
+ assertEquals(t1, t2);
+ for (Terms.Bucket b1 : t1.getBuckets()) {
+ final Terms.Bucket b2 = t2.getBucketByKey(b1.getKeyAsString());
+ final Terms sub1 = b1.getAggregations().get("f2");
+ final Terms sub2 = b2.getAggregations().get("f2");
+ assertEquals(sub1, sub2);
+ for (Terms.Bucket subB1 : sub1.getBuckets()) {
+ final Terms.Bucket subB2 = sub2.getBucketByKey(subB1.getKeyAsString());
+ final Terms subSub1 = subB1.getAggregations().get("f3");
+ final Terms subSub2 = subB2.getAggregations().get("f3");
+ assertEquals(subSub1, subSub2);
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataTests.java
new file mode 100644
index 0000000000..10e399df6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/MetaDataTests.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.hppc.IntIntMap;
+import com.carrotsearch.hppc.IntIntHashMap;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests that metadata attached to an aggregation request is echoed back, unchanged,
+ * on the corresponding aggregation in the search response.
+ */
+public class MetaDataTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * Attaches a metadata map (including numeric, boolean and nested values) to a "missing"
+ * aggregation and verifies that the exact same map is returned on the aggregation result.
+ */
+ @Test
+ public void meta_data_set_on_aggregation_result() throws Exception {
+
+ createIndex("idx");
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[randomInt(30)];
+ IntIntMap values = new IntIntHashMap();
+ // docs indexed without the "value" field; counted by the missing agg below
+ long missingValues = 0;
+ for (int i = 0; i < builders.length; i++) {
+ String name = "name_" + randomIntBetween(1, 10);
+ if (rarely()) {
+ missingValues++;
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .endObject());
+ } else {
+ int value = randomIntBetween(1, 10);
+ values.put(value, values.getOrDefault(value, 0) + 1);
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("name", name)
+ .field("value", value)
+ .endObject());
+ }
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+
+ // nested map to verify that complex metadata values round-trip too
+ final Map<String, Object> nestedMetaData = new HashMap<String, Object>() {{
+ put("nested", "value");
+ }};
+
+ Map<String, Object> missingValueMetaData = new HashMap<String, Object>() {{
+ put("key", "value");
+ put("numeric", 1.2);
+ put("bool", true);
+ put("complex", nestedMetaData);
+ }};
+
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_values").field("value").setMetaData(missingValueMetaData))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Aggregations aggs = response.getAggregations();
+ assertNotNull(aggs);
+
+ Missing missing = aggs.get("missing_values");
+ assertNotNull(missing);
+ assertThat(missing.getDocCount(), equalTo(missingValues));
+
+ // the returned metadata must contain exactly the four entries set on the request
+ Map<String, Object> returnedMetaData = missing.getMetaData();
+ assertNotNull(returnedMetaData);
+ assertEquals(4, returnedMetaData.size());
+ assertEquals("value", returnedMetaData.get("key"));
+ assertEquals(1.2, returnedMetaData.get("numeric"));
+ assertEquals(true, returnedMetaData.get("bool"));
+
+ Object nestedObject = returnedMetaData.get("complex");
+ assertNotNull(nestedObject);
+
+ Map<String, Object> nestedMap = (Map<String, Object>)nestedObject;
+ assertEquals("value", nestedMap.get("nested"));
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/MissingValueTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/MissingValueTests.java
new file mode 100644
index 0000000000..157dc528f8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/MissingValueTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
+import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geoBounds;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MissingValueTests extends ElasticsearchIntegrationTest { // integration tests for the per-aggregation `missing(...)` option, which substitutes a value for docs lacking the field
+
+    @Override
+    protected int maximumNumberOfShards() {
+        return 2; // keep shard count small; the assertions below depend on exact doc counts, not shard layout
+    }
+
+    @Override
+    protected void setupSuiteScopeCluster() throws Exception {
+        assertAcked(prepareCreate("idx").addMapping("type", "date", "type=date", "location", "type=geo_point").get()); // only "date" and "location" are explicitly mapped; the rest are dynamic
+        indexRandom(true,
+                client().prepareIndex("idx", "type", "1").setSource(), // doc 1: empty source -- every field is missing for it
+                client().prepareIndex("idx", "type", "2").setSource("str", "foo", "long", 3L, "double", 5.5, "date", "2015-05-07", "location", "1,2")); // doc 2: has all fields
+    }
+
+    public void testUnmappedTerms() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("non_existing_field").missing("bar")).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(1, terms.getBuckets().size());
+        assertEquals(2, terms.getBucketByKey("bar").getDocCount()); // field unmapped everywhere -> both docs take the missing value
+    }
+
+    public void testStringTerms() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("bar")).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(2, terms.getBuckets().size()); // doc 2 keeps its real value "foo", doc 1 becomes "bar"
+        assertEquals(1, terms.getBucketByKey("foo").getDocCount());
+        assertEquals(1, terms.getBucketByKey("bar").getDocCount());
+
+        response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("str").missing("foo")).get(); // missing value equal to an existing value must merge into one bucket
+        assertSearchResponse(response);
+        terms = response.getAggregations().get("my_terms");
+        assertEquals(1, terms.getBuckets().size());
+        assertEquals(2, terms.getBucketByKey("foo").getDocCount());
+    }
+
+    public void testLongTerms() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(4)).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(2, terms.getBuckets().size()); // real value 3 and substituted value 4
+        assertEquals(1, terms.getBucketByKey("3").getDocCount());
+        assertEquals(1, terms.getBucketByKey("4").getDocCount());
+
+        response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("long").missing(3)).get(); // missing value collides with the existing value
+        assertSearchResponse(response);
+        terms = response.getAggregations().get("my_terms");
+        assertEquals(1, terms.getBuckets().size());
+        assertEquals(2, terms.getBucketByKey("3").getDocCount());
+    }
+
+    public void testDoubleTerms() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(4.5)).get();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("my_terms");
+        assertEquals(2, terms.getBuckets().size()); // real value 5.5 and substituted value 4.5
+        assertEquals(1, terms.getBucketByKey("4.5").getDocCount());
+        assertEquals(1, terms.getBucketByKey("5.5").getDocCount());
+
+        response = client().prepareSearch("idx").addAggregation(terms("my_terms").field("double").missing(5.5)).get(); // missing value collides with the existing value
+        assertSearchResponse(response);
+        terms = response.getAggregations().get("my_terms");
+        assertEquals(1, terms.getBuckets().size());
+        assertEquals(2, terms.getBucketByKey("5.5").getDocCount());
+    }
+
+    public void testUnmappedHistogram() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(histogram("my_histogram").field("non-existing_field").interval(5).missing(12)).get();
+        assertSearchResponse(response);
+        Histogram histogram = response.getAggregations().get("my_histogram");
+        assertEquals(1, histogram.getBuckets().size()); // both docs get 12, which falls in the [10,15) bucket
+        assertEquals(10L, histogram.getBuckets().get(0).getKey());
+        assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+    }
+
+    public void testHistogram() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(7)).get();
+        assertSearchResponse(response);
+        Histogram histogram = response.getAggregations().get("my_histogram");
+        assertEquals(2, histogram.getBuckets().size()); // real value 3 -> bucket 0, substituted 7 -> bucket 5
+        assertEquals(0L, histogram.getBuckets().get(0).getKey());
+        assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+        assertEquals(5L, histogram.getBuckets().get(1).getKey());
+        assertEquals(1, histogram.getBuckets().get(1).getDocCount());
+
+        response = client().prepareSearch("idx").addAggregation(histogram("my_histogram").field("long").interval(5).missing(3)).get(); // missing value lands in the same bucket as the real value
+        assertSearchResponse(response);
+        histogram = response.getAggregations().get("my_histogram");
+        assertEquals(1, histogram.getBuckets().size());
+        assertEquals(0L, histogram.getBuckets().get(0).getKey());
+        assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+    }
+
+    public void testDateHistogram() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(dateHistogram("my_histogram").field("date").interval(DateHistogramInterval.YEAR).missing("2014-05-07")).get();
+        assertSearchResponse(response);
+        Histogram histogram = response.getAggregations().get("my_histogram");
+        assertEquals(2, histogram.getBuckets().size()); // substituted 2014 date and the indexed 2015 date each get a yearly bucket
+        assertEquals("2014-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
+        assertEquals(1, histogram.getBuckets().get(0).getDocCount());
+        assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(1).getKeyAsString());
+        assertEquals(1, histogram.getBuckets().get(1).getDocCount());
+
+        response = client().prepareSearch("idx").addAggregation(dateHistogram("my_histogram").field("date").interval(DateHistogramInterval.YEAR).missing("2015-05-07")).get(); // missing value falls in the same year as the indexed date
+        assertSearchResponse(response);
+        histogram = response.getAggregations().get("my_histogram");
+        assertEquals(1, histogram.getBuckets().size());
+        assertEquals("2015-01-01T00:00:00.000Z", histogram.getBuckets().get(0).getKeyAsString());
+        assertEquals(2, histogram.getBuckets().get(0).getDocCount());
+    }
+
+    public void testCardinality() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(cardinality("card").field("long").missing(2)).get();
+        assertSearchResponse(response);
+        Cardinality cardinality = response.getAggregations().get("card");
+        assertEquals(2, cardinality.getValue()); // distinct values: real 3 and substituted 2
+    }
+
+    public void testPercentiles() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(percentiles("percentiles").field("long").missing(1000)).get();
+        assertSearchResponse(response);
+        Percentiles percentiles = response.getAggregations().get("percentiles");
+        assertEquals(1000, percentiles.percentile(100), 0); // substituted 1000 dominates, so it is the max (100th percentile)
+    }
+
+    public void testStats() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(stats("stats").field("long").missing(5)).get();
+        assertSearchResponse(response);
+        Stats stats = response.getAggregations().get("stats");
+        assertEquals(2, stats.getCount()); // both docs contribute once the missing value is substituted
+        assertEquals(4, stats.getAvg(), 0); // avg of real 3 and substituted 5
+    }
+
+    public void testUnmappedGeoBounds() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(geoBounds("bounds").field("non_existing_field").missing("2,1")).get();
+        assertSearchResponse(response);
+        GeoBounds bounds = response.getAggregations().get("bounds");
+        assertEquals(new GeoPoint(2,1), bounds.bottomRight()); // both docs collapse onto the single substituted point
+        assertEquals(new GeoPoint(2,1), bounds.topLeft());
+    }
+
+    public void testGeoBounds() {
+        SearchResponse response = client().prepareSearch("idx").addAggregation(geoBounds("bounds").field("location").missing("2,1")).get();
+        assertSearchResponse(response);
+        GeoBounds bounds = response.getAggregations().get("bounds");
+        assertEquals(new GeoPoint(1,2), bounds.bottomRight()); // bounds must span the indexed point "1,2" and the substituted "2,1"
+        assertEquals(new GeoPoint(2,1), bounds.topLeft());
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
new file mode 100644
index 0000000000..9007611aa9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/ParsingTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.security.SecureRandom;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class ParsingTests extends ElasticsearchIntegrationTest { // verifies that malformed aggregation request bodies are rejected with SearchPhaseExecutionException
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testTwoTypes() throws Exception { // one aggregation object declaring two aggregation types ("filter" and "terms") is illegal
+        createIndex("idx");
+        ensureGreen();
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject("in_stock")
+                        .startObject("filter")
+                            .startObject("range")
+                                .startObject("stock")
+                                    .field("gt", 0)
+                                .endObject()
+                            .endObject()
+                        .endObject()
+                        .startObject("terms") // second type under the same aggregation name -- must be rejected
+                            .field("field", "stock")
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testTwoAggs() throws Exception { // duplicate "aggs" sections under one aggregation are illegal
+        createIndex("idx");
+        ensureGreen();
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject("by_date")
+                        .startObject("date_histogram")
+                            .field("field", "timestamp")
+                            .field("interval", "month")
+                        .endObject()
+                        .startObject("aggs")
+                            .startObject("tag_count")
+                                .startObject("cardinality")
+                                    .field("field", "tag")
+                                .endObject()
+                            .endObject()
+                        .endObject()
+                        .startObject("aggs") // 2nd "aggs": illegal
+                            .startObject("tag_count2")
+                                .startObject("cardinality")
+                                    .field("field", "tag")
+                                .endObject()
+                            .endObject()
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testInvalidAggregationName() throws Exception { // aggregation names may not contain '[', ']' or '>'
+
+        Matcher matcher = Pattern.compile("[^\\[\\]>]+").matcher(""); // matches only names made entirely of allowed characters
+        String name;
+        SecureRandom rand = new SecureRandom();
+        int len = randomIntBetween(1, 5);
+        char[] word = new char[len];
+        while(true) { // keep generating random names until one contains a forbidden character
+            for (int i = 0; i < word.length; i++) {
+                word[i] = (char) rand.nextInt(127);
+            }
+            name = String.valueOf(word);
+            if (!matcher.reset(name).matches()) { // non-match means the name contains at least one forbidden char
+                break;
+            }
+        }
+
+        createIndex("idx");
+        ensureGreen();
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject(name) // invalid name -- request must fail
+                        .startObject("filter")
+                            .startObject("range")
+                                .startObject("stock")
+                                    .field("gt", 0)
+                                .endObject()
+                            .endObject()
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testSameAggregationName() throws Exception { // two sibling aggregations with the same name are illegal
+        createIndex("idx");
+        ensureGreen();
+        final String name = RandomStrings.randomAsciiOfLength(getRandom(), 10);
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject(name)
+                        .startObject("terms")
+                            .field("field", "a")
+                        .endObject()
+                    .endObject()
+                    .startObject(name) // duplicate sibling name -- must be rejected
+                        .startObject("terms")
+                            .field("field", "b")
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testMissingName() throws Exception { // a sub-aggregation without a name wrapper object is illegal
+        createIndex("idx");
+        ensureGreen();
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject("by_date")
+                        .startObject("date_histogram")
+                            .field("field", "timestamp")
+                            .field("interval", "month")
+                        .endObject()
+                        .startObject("aggs")
+                        // the aggregation name is missing
+                            //.startObject("tag_count")
+                            .startObject("cardinality")
+                                .field("field", "tag")
+                            .endObject()
+                            //.endObject()
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+    @Test(expected=SearchPhaseExecutionException.class)
+    public void testMissingType() throws Exception { // a named sub-aggregation without an aggregation type object is illegal
+        createIndex("idx");
+        ensureGreen();
+        client().prepareSearch("idx").setAggregations(JsonXContent.contentBuilder()
+                .startObject()
+                    .startObject("by_date")
+                        .startObject("date_histogram")
+                            .field("field", "timestamp")
+                            .field("interval", "month")
+                        .endObject()
+                        .startObject("aggs")
+                            .startObject("tag_count")
+                            // the aggregation type is missing
+                                //.startObject("cardinality")
+                                .field("field", "tag")
+                                //.endObject()
+                            .endObject()
+                        .endObject()
+                    .endObject()
+                .endObject()).execute().actionGet();
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTests.java
new file mode 100644
index 0000000000..2394795646
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTests.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Ignore;
+
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+@Ignore // abstract base class -- must not be picked up and run directly by the JUnit runner
+public abstract class AbstractTermsTests extends ElasticsearchIntegrationTest {
+
+    public String randomExecutionHint() {
+        return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString(); // null means "let the engine pick"; otherwise force a random execution mode
+    }
+
+    private static long sumOfDocCounts(Terms terms) { // total docs accounted for: bucketed docs plus the reported "other" docs
+        long sumOfDocCounts = terms.getSumOfOtherDocCounts();
+        for (Terms.Bucket b : terms.getBuckets()) {
+            sumOfDocCounts += b.getDocCount();
+        }
+        return sumOfDocCounts;
+    }
+
+    public void testOtherDocCount(String... fieldNames) { // checks sum_other_doc_count bookkeeping is consistent across size/shard_size combinations
+        for (String fieldName : fieldNames) {
+            SearchResponse allTerms = client().prepareSearch("idx")
+                    .addAggregation(terms("terms")
+                            .executionHint(randomExecutionHint())
+                            .field(fieldName)
+                            .size(0) // size 0 returns all terms, establishing the reference totals
+                            .collectMode(randomFrom(SubAggCollectionMode.values())))
+                    .get();
+            assertSearchResponse(allTerms);
+
+            Terms terms = allTerms.getAggregations().get("terms");
+            assertEquals(0, terms.getSumOfOtherDocCounts()); // size is 0
+            final long sumOfDocCounts = sumOfDocCounts(terms);
+            final int totalNumTerms = terms.getBuckets().size();
+
+            for (int size = 1; size < totalNumTerms + 2; size += randomIntBetween(1, 5)) {
+                for (int shardSize = size; shardSize <= totalNumTerms + 2; shardSize += randomIntBetween(1, 5)) { // shardSize >= size by construction
+                    SearchResponse resp = client().prepareSearch("idx")
+                            .addAggregation(terms("terms")
+                                    .executionHint(randomExecutionHint())
+                                    .field(fieldName)
+                                    .size(size)
+                                    .shardSize(shardSize)
+                                    .collectMode(randomFrom(SubAggCollectionMode.values())))
+                            .get();
+                    assertSearchResponse(resp);
+                    terms = resp.getAggregations().get("terms");
+                    assertEquals(Math.min(size, totalNumTerms), terms.getBuckets().size());
+                    assertEquals(sumOfDocCounts, sumOfDocCounts(terms)); // invariant: buckets + "other" always covers the same doc total
+                }
+            }
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsTests.java
new file mode 100644
index 0000000000..20378c96e2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsTests.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Integration tests for the terms aggregation on boolean fields (single- and multi-valued).
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class BooleanTermsTests extends ElasticsearchIntegrationTest {
+
+    private static final String SINGLE_VALUED_FIELD_NAME = "b_value";
+    private static final String MULTI_VALUED_FIELD_NAME = "b_values";
+
+    static int numSingleTrues, numSingleFalses, numMultiTrues, numMultiFalses; // expected per-value doc counts, tallied while indexing
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        createIndex("idx_unmapped"); // never indexed into -- used by unmapped()
+        ensureSearchable();
+        final int numDocs = randomInt(5); // may be 0: tests below tolerate empty indices
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < builders.length; i++) {
+            final boolean singleValue = randomBoolean();
+            if (singleValue) {
+                numSingleTrues++;
+            } else {
+                numSingleFalses++;
+            }
+            final boolean[] multiValue;
+            switch (randomInt(3)) { // multi-valued field: empty, {false}, {true}, or {false,true}
+            case 0:
+                multiValue = new boolean[0];
+                break;
+            case 1:
+                numMultiFalses++;
+                multiValue = new boolean[] {false};
+                break;
+            case 2:
+                numMultiTrues++;
+                multiValue = new boolean[] {true};
+                break;
+            case 3:
+                numMultiFalses++; numMultiTrues++;
+                multiValue = new boolean[] {false, true};
+                break;
+            default:
+                throw new AssertionError(); // randomInt(3) is bounded to [0,3]
+            }
+            builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+                    .startObject()
+                        .field(SINGLE_VALUED_FIELD_NAME, singleValue)
+                        .field(MULTI_VALUED_FIELD_NAME, multiValue)
+                    .endObject());
+        }
+        indexRandom(true, builders);
+    }
+
+    @Test
+    public void singleValueField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0; // one bucket per boolean value actually seen
+        assertThat(terms.getBuckets().size(), equalTo(bucketCount));
+
+        Terms.Bucket bucket = terms.getBucketByKey("0"); // booleans are keyed "0" (false) and "1" (true)
+        if (numSingleFalses == 0) {
+            assertNull(bucket);
+        } else {
+            assertNotNull(bucket);
+            assertEquals(numSingleFalses, bucket.getDocCount());
+        }
+
+        bucket = terms.getBucketByKey("1");
+        if (numSingleTrues == 0) {
+            assertNull(bucket);
+        } else {
+            assertNotNull(bucket);
+            assertEquals(numSingleTrues, bucket.getDocCount());
+        }
+    }
+
+    @Test
+    public void multiValueField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(MULTI_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0; // same logic as singleValueField but against the multi-value tallies
+        assertThat(terms.getBuckets().size(), equalTo(bucketCount));
+
+        Terms.Bucket bucket = terms.getBucketByKey("0");
+        if (numMultiFalses == 0) {
+            assertNull(bucket);
+        } else {
+            assertNotNull(bucket);
+            assertEquals(numMultiFalses, bucket.getDocCount());
+        }
+
+        bucket = terms.getBucketByKey("1");
+        if (numMultiTrues == 0) {
+            assertNull(bucket);
+        } else {
+            assertNotNull(bucket);
+            assertEquals(numMultiTrues, bucket.getDocCount());
+        }
+    }
+
+    @Test
+    public void unmapped() throws Exception {
+        SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type") // index exists but the field was never mapped
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .size(randomInt(5))
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(0)); // no mapped values -> empty bucket list, not an error
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java
new file mode 100644
index 0000000000..6379ba4709
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenTests.java
@@ -0,0 +1,399 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.bucket.children.Children;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class ChildrenTests extends ElasticsearchIntegrationTest {
+
+ private final static Map<String, Control> categoryToControl = new HashMap<>();
+
+ // Builds the suite-scoped fixture: "article" parent docs spread over random categories and
+ // "comment" child docs per article, while recording the expected counts in categoryToControl.
+ // Also indexes a small fixed set of non-randomized docs used by testParentWithMultipleBuckets.
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ assertAcked(
+ prepareCreate("test")
+ .addMapping("article")
+ .addMapping("comment", "_parent", "type=article")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ String[] uniqueCategories = new String[randomIntBetween(1, 25)];
+ for (int i = 0; i < uniqueCategories.length; i++) {
+ uniqueCategories[i] = Integer.toString(i);
+ }
+ int catIndex = 0;
+
+ int numParentDocs = randomIntBetween(uniqueCategories.length, uniqueCategories.length * 5);
+ for (int i = 0; i < numParentDocs; i++) {
+ String id = Integer.toString(i);
+
+ // TODO: this array is always of length 1, and testChildrenAggs fails if this is changed
+ String[] categories = new String[randomIntBetween(1,1)];
+ for (int j = 0; j < categories.length; j++) {
+ // Round-robin categories so every category receives at least one article.
+ String category = categories[j] = uniqueCategories[catIndex++ % uniqueCategories.length];
+ Control control = categoryToControl.get(category);
+ if (control == null) {
+ categoryToControl.put(category, control = new Control(category));
+ }
+ control.articleIds.add(id);
+ }
+
+ requests.add(client().prepareIndex("test", "article", id).setCreate(true).setSource("category", categories, "randomized", true));
+ }
+
+ String[] commenters = new String[randomIntBetween(5, 50)];
+ for (int i = 0; i < commenters.length; i++) {
+ commenters[i] = Integer.toString(i);
+ }
+
+ // Index 0-5 comment children per article, tracking comment ids per commenter in the control.
+ int id = 0;
+ for (Control control : categoryToControl.values()) {
+ for (String articleId : control.articleIds) {
+ int numChildDocsPerParent = randomIntBetween(0, 5);
+ for (int i = 0; i < numChildDocsPerParent; i++) {
+ String commenter = commenters[id % commenters.length];
+ String idValue = Integer.toString(id++);
+ control.commentIds.add(idValue);
+ Set<String> ids = control.commenterToCommentId.get(commenter);
+ if (ids == null) {
+ control.commenterToCommentId.put(commenter, ids = new HashSet<>());
+ }
+ ids.add(idValue);
+ requests.add(client().prepareIndex("test", "comment", idValue).setCreate(true).setParent(articleId).setSource("commenter", commenter));
+ }
+ }
+ }
+
+ // Fixed, non-randomized docs ("randomized": false) with known category overlap for the
+ // multi-bucket parent test; comments "a" and "c" are attached to articles "a" and "c".
+ requests.add(client().prepareIndex("test", "article", "a").setSource("category", new String[]{"a"}, "randomized", false));
+ requests.add(client().prepareIndex("test", "article", "b").setSource("category", new String[]{"a", "b"}, "randomized", false));
+ requests.add(client().prepareIndex("test", "article", "c").setSource("category", new String[]{"a", "b", "c"}, "randomized", false));
+ requests.add(client().prepareIndex("test", "article", "d").setSource("category", new String[]{"c"}, "randomized", false));
+ requests.add(client().prepareIndex("test", "comment", "a").setParent("a").setSource("{}"));
+ requests.add(client().prepareIndex("test", "comment", "c").setParent("c").setSource("{}"));
+
+ indexRandom(true, requests);
+ ensureSearchable("test");
+ }
+
+ // category terms -> children(to_comment) -> commenters terms -> top_hits: every level of the
+ // nested aggregation is checked against the counts recorded in categoryToControl during setup.
+ @Test
+ public void testChildrenAggs() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("randomized", true))
+ .addAggregation(
+ terms("category").field("category").size(0).subAggregation(
+ children("to_comment").childType("comment").subAggregation(
+ terms("commenters").field("commenter").subAggregation(
+ topHits("top_comments")
+ ))
+ )
+ ).get();
+ assertSearchResponse(searchResponse);
+
+ Terms categoryTerms = searchResponse.getAggregations().get("category");
+ assertThat(categoryTerms.getBuckets().size(), equalTo(categoryToControl.size()));
+ for (Map.Entry<String, Control> entry1 : categoryToControl.entrySet()) {
+ Terms.Bucket categoryBucket = categoryTerms.getBucketByKey(entry1.getKey());
+ assertThat(categoryBucket.getKeyAsString(), equalTo(entry1.getKey()));
+ assertThat(categoryBucket.getDocCount(), equalTo((long) entry1.getValue().articleIds.size()));
+
+ // The children agg doc count must match the number of comments recorded for this category,
+ // both via the typed API and via the generic getProperty("_count") accessor.
+ Children childrenBucket = categoryBucket.getAggregations().get("to_comment");
+ assertThat(childrenBucket.getName(), equalTo("to_comment"));
+ assertThat(childrenBucket.getDocCount(), equalTo((long) entry1.getValue().commentIds.size()));
+ assertThat((long) childrenBucket.getProperty("_count"), equalTo((long) entry1.getValue().commentIds.size()));
+
+ Terms commentersTerms = childrenBucket.getAggregations().get("commenters");
+ assertThat((Terms) childrenBucket.getProperty("commenters"), sameInstance(commentersTerms));
+ assertThat(commentersTerms.getBuckets().size(), equalTo(entry1.getValue().commenterToCommentId.size()));
+ for (Map.Entry<String, Set<String>> entry2 : entry1.getValue().commenterToCommentId.entrySet()) {
+ Terms.Bucket commentBucket = commentersTerms.getBucketByKey(entry2.getKey());
+ assertThat(commentBucket.getKeyAsString(), equalTo(entry2.getKey()));
+ assertThat(commentBucket.getDocCount(), equalTo((long) entry2.getValue().size()));
+
+ // Every returned top hit must be one of the comment ids recorded for this commenter.
+ TopHits topHits = commentBucket.getAggregations().get("top_comments");
+ for (SearchHit searchHit : topHits.getHits().getHits()) {
+ assertThat(entry2.getValue().contains(searchHit.getId()), is(true));
+ }
+ }
+ }
+ }
+
+ // Uses the fixed (non-randomized) docs: articles with overlapping categories mean one parent
+ // falls into several category buckets; verifies the children doc counts and sorted top hits
+ // per category bucket ("a", "b", "c").
+ @Test
+ public void testParentWithMultipleBuckets() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("randomized", false))
+ .addAggregation(
+ terms("category").field("category").size(0).subAggregation(
+ children("to_comment").childType("comment").subAggregation(topHits("top_comments").addSort("_uid", SortOrder.ASC))
+ )
+ ).get();
+ assertSearchResponse(searchResponse);
+
+ Terms categoryTerms = searchResponse.getAggregations().get("category");
+ assertThat(categoryTerms.getBuckets().size(), equalTo(3));
+
+ // Diagnostic logging only; the actual assertions follow below.
+ for (Terms.Bucket bucket : categoryTerms.getBuckets()) {
+ logger.info("bucket=" + bucket.getKey());
+ Children childrenBucket = bucket.getAggregations().get("to_comment");
+ TopHits topHits = childrenBucket.getAggregations().get("top_comments");
+ logger.info("total_hits={}", topHits.getHits().getTotalHits());
+ for (SearchHit searchHit : topHits.getHits()) {
+ logger.info("hit= {} {} {}", searchHit.sortValues()[0], searchHit.getType(), searchHit.getId());
+ }
+ }
+
+ // Category "a": articles a, b, c -> 3 parents; comments "a" (on a) and "c" (on c) -> 2 children.
+ Terms.Bucket categoryBucket = categoryTerms.getBucketByKey("a");
+ assertThat(categoryBucket.getKeyAsString(), equalTo("a"));
+ assertThat(categoryBucket.getDocCount(), equalTo(3l));
+
+ Children childrenBucket = categoryBucket.getAggregations().get("to_comment");
+ assertThat(childrenBucket.getName(), equalTo("to_comment"));
+ assertThat(childrenBucket.getDocCount(), equalTo(2l));
+ TopHits topHits = childrenBucket.getAggregations().get("top_comments");
+ assertThat(topHits.getHits().totalHits(), equalTo(2l));
+ assertThat(topHits.getHits().getAt(0).getId(), equalTo("a"));
+ assertThat(topHits.getHits().getAt(0).getType(), equalTo("comment"));
+ assertThat(topHits.getHits().getAt(1).getId(), equalTo("c"));
+ assertThat(topHits.getHits().getAt(1).getType(), equalTo("comment"));
+
+ // Category "b": articles b, c -> 2 parents; only comment "c" -> 1 child.
+ categoryBucket = categoryTerms.getBucketByKey("b");
+ assertThat(categoryBucket.getKeyAsString(), equalTo("b"));
+ assertThat(categoryBucket.getDocCount(), equalTo(2l));
+
+ childrenBucket = categoryBucket.getAggregations().get("to_comment");
+ assertThat(childrenBucket.getName(), equalTo("to_comment"));
+ assertThat(childrenBucket.getDocCount(), equalTo(1l));
+ topHits = childrenBucket.getAggregations().get("top_comments");
+ assertThat(topHits.getHits().totalHits(), equalTo(1l));
+ assertThat(topHits.getHits().getAt(0).getId(), equalTo("c"));
+ assertThat(topHits.getHits().getAt(0).getType(), equalTo("comment"));
+
+ // Category "c": articles c, d -> 2 parents; only comment "c" -> 1 child.
+ categoryBucket = categoryTerms.getBucketByKey("c");
+ assertThat(categoryBucket.getKeyAsString(), equalTo("c"));
+ assertThat(categoryBucket.getDocCount(), equalTo(2l));
+
+ childrenBucket = categoryBucket.getAggregations().get("to_comment");
+ assertThat(childrenBucket.getName(), equalTo("to_comment"));
+ assertThat(childrenBucket.getDocCount(), equalTo(1l));
+ topHits = childrenBucket.getAggregations().get("top_comments");
+ assertThat(topHits.getHits().totalHits(), equalTo(1l));
+ assertThat(topHits.getHits().getAt(0).getId(), equalTo("c"));
+ assertThat(topHits.getHits().getAt(0).getType(), equalTo("comment"));
+ }
+
+ // Repeatedly updates one child doc (which bumps its version) and re-runs the children agg,
+ // verifying the doc count and summed "count" field stay stable at 4 across updates/refreshes.
+ @Test
+ public void testWithDeletes() throws Exception {
+ String indexName = "xyz";
+ assertAcked(
+ prepareCreate(indexName)
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent", "count", "type=long")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex(indexName, "parent", "1").setSource("{}"));
+ requests.add(client().prepareIndex(indexName, "child", "0").setParent("1").setSource("count", 1));
+ requests.add(client().prepareIndex(indexName, "child", "1").setParent("1").setSource("count", 1));
+ requests.add(client().prepareIndex(indexName, "child", "2").setParent("1").setSource("count", 1));
+ requests.add(client().prepareIndex(indexName, "child", "3").setParent("1").setSource("count", 1));
+ indexRandom(true, requests);
+
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch(indexName)
+ .addAggregation(children("children").childType("child").subAggregation(sum("counts").field("count")))
+ .get();
+
+ assertNoFailures(searchResponse);
+ Children children = searchResponse.getAggregations().get("children");
+ assertThat(children.getDocCount(), equalTo(4l));
+
+ Sum count = children.getAggregations().get("counts");
+ assertThat(count.getValue(), equalTo(4.));
+
+ // Update a random child with the same value; the version bump confirms the update happened.
+ String idToUpdate = Integer.toString(randomInt(3));
+ UpdateResponse updateResponse = client().prepareUpdate(indexName, "child", idToUpdate)
+ .setParent("1")
+ .setDoc("count", 1)
+ .get();
+ assertThat(updateResponse.getVersion(), greaterThan(1l));
+ refresh();
+ }
+ }
+
+ // A children agg targeting a child type that is not mapped must not fail;
+ // it returns an empty aggregation with a doc count of 0.
+ @Test
+ public void testNonExistingChildType() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .addAggregation(
+ children("non-existing").childType("xyz")
+ ).get();
+ assertSearchResponse(searchResponse);
+
+ Children children = searchResponse.getAggregations().get("non-existing");
+ assertThat(children.getName(), equalTo("non-existing"));
+ assertThat(children.getDocCount(), equalTo(0l));
+ }
+
+ // Query matches only the parent that has an "orange" child (Wrangler, 7 children), but the
+ // children agg must still collect ALL children of that matching parent — not just the one
+ // matched by the has_child query — so color/size sub-aggs see all 7 child docs.
+ @Test
+ public void testPostCollection() throws Exception {
+ String indexName = "prodcatalog";
+ String masterType = "masterprod";
+ String childType = "variantsku";
+ assertAcked(
+ prepareCreate(indexName)
+ .addMapping(masterType, "brand", "type=string", "name", "type=string", "material", "type=string")
+ .addMapping(childType, "_parent", "type=masterprod", "color", "type=string", "size", "type=string")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex(indexName, masterType, "1").setSource("brand", "Levis", "name", "Style 501", "material", "Denim"));
+ requests.add(client().prepareIndex(indexName, childType, "0").setParent("1").setSource("color", "blue", "size", "32"));
+ requests.add(client().prepareIndex(indexName, childType, "1").setParent("1").setSource("color", "blue", "size", "34"));
+ requests.add(client().prepareIndex(indexName, childType, "2").setParent("1").setSource("color", "blue", "size", "36"));
+ requests.add(client().prepareIndex(indexName, childType, "3").setParent("1").setSource("color", "black", "size", "38"));
+ requests.add(client().prepareIndex(indexName, childType, "4").setParent("1").setSource("color", "black", "size", "40"));
+ requests.add(client().prepareIndex(indexName, childType, "5").setParent("1").setSource("color", "gray", "size", "36"));
+
+ requests.add(client().prepareIndex(indexName, masterType, "2").setSource("brand", "Wrangler", "name", "Regular Cut", "material", "Leather"));
+ requests.add(client().prepareIndex(indexName, childType, "6").setParent("2").setSource("color", "blue", "size", "32"));
+ requests.add(client().prepareIndex(indexName, childType, "7").setParent("2").setSource("color", "blue", "size", "34"));
+ requests.add(client().prepareIndex(indexName, childType, "8").setParent("2").setSource("color", "black", "size", "36"));
+ requests.add(client().prepareIndex(indexName, childType, "9").setParent("2").setSource("color", "black", "size", "38"));
+ requests.add(client().prepareIndex(indexName, childType, "10").setParent("2").setSource("color", "black", "size", "40"));
+ requests.add(client().prepareIndex(indexName, childType, "11").setParent("2").setSource("color", "orange", "size", "36"));
+ requests.add(client().prepareIndex(indexName, childType, "12").setParent("2").setSource("color", "green", "size", "44"));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch(indexName).setTypes(masterType)
+ .setQuery(hasChildQuery(childType, termQuery("color", "orange")))
+ .addAggregation(children("my-refinements")
+ .childType(childType)
+ .subAggregation(terms("my-colors").field("color"))
+ .subAggregation(terms("my-sizes").field("size"))
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+
+ Children childrenAgg = response.getAggregations().get("my-refinements");
+ assertThat(childrenAgg.getDocCount(), equalTo(7l));
+
+ // All 7 children of parent "2" are counted, not only the single "orange" one.
+ Terms termsAgg = childrenAgg.getAggregations().get("my-colors");
+ assertThat(termsAgg.getBuckets().size(), equalTo(4));
+ assertThat(termsAgg.getBucketByKey("black").getDocCount(), equalTo(3l));
+ assertThat(termsAgg.getBucketByKey("blue").getDocCount(), equalTo(2l));
+ assertThat(termsAgg.getBucketByKey("green").getDocCount(), equalTo(1l));
+ assertThat(termsAgg.getBucketByKey("orange").getDocCount(), equalTo(1l));
+
+ termsAgg = childrenAgg.getAggregations().get("my-sizes");
+ assertThat(termsAgg.getBuckets().size(), equalTo(6));
+ assertThat(termsAgg.getBucketByKey("36").getDocCount(), equalTo(2l));
+ assertThat(termsAgg.getBucketByKey("32").getDocCount(), equalTo(1l));
+ assertThat(termsAgg.getBucketByKey("34").getDocCount(), equalTo(1l));
+ assertThat(termsAgg.getBucketByKey("38").getDocCount(), equalTo(1l));
+ assertThat(termsAgg.getBucketByKey("40").getDocCount(), equalTo(1l));
+ assertThat(termsAgg.getBucketByKey("44").getDocCount(), equalTo(1l));
+ }
+
+ // Three-level hierarchy (continent -> country -> city) on a single-shard index:
+ // nested children aggs drill from grandparent through parent to child, and the grandchild's
+ // terms agg must surface the city name. The grandchild is routed to the grandparent's shard.
+ @Test
+ public void testHierarchicalChildrenAggs() {
+ String indexName = "geo";
+ String grandParentType = "continent";
+ String parentType = "country";
+ String childType = "city";
+ assertAcked(
+ prepareCreate(indexName)
+ .setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ )
+ .addMapping(grandParentType)
+ .addMapping(parentType, "_parent", "type=" + grandParentType)
+ .addMapping(childType, "_parent", "type=" + parentType)
+ );
+
+ client().prepareIndex(indexName, grandParentType, "1").setSource("name", "europe").get();
+ client().prepareIndex(indexName, parentType, "2").setParent("1").setSource("name", "belgium").get();
+ // Explicit routing keeps the grandchild on the same shard as its grandparent.
+ client().prepareIndex(indexName, childType, "3").setParent("2").setRouting("1").setSource("name", "brussels").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch(indexName)
+ .setQuery(matchQuery("name", "europe"))
+ .addAggregation(
+ children(parentType).childType(parentType).subAggregation(
+ children(childType).childType(childType).subAggregation(
+ terms("name").field("name")
+ )
+ )
+ )
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+
+ Children children = response.getAggregations().get(parentType);
+ assertThat(children.getName(), equalTo(parentType));
+ assertThat(children.getDocCount(), equalTo(1l));
+ children = children.getAggregations().get(childType);
+ assertThat(children.getName(), equalTo(childType));
+ assertThat(children.getDocCount(), equalTo(1l));
+ Terms terms = children.getAggregations().get("name");
+ assertThat(terms.getBuckets().size(), equalTo(1));
+ assertThat(terms.getBuckets().get(0).getKey().toString(), equalTo("brussels"));
+ assertThat(terms.getBuckets().get(0).getDocCount(), equalTo(1l));
+ }
+
+ // Per-category bookkeeping built during setup and asserted against in the tests:
+ // which articles belong to the category, which comments those articles received,
+ // and which commenter wrote which comments.
+ private static final class Control {
+
+ final String category;
+ final Set<String> articleIds = new HashSet<>();
+ final Set<String> commentIds = new HashSet<>();
+ final Map<String, Set<String>> commenterToCommentId = new HashMap<>();
+
+ private Control(String category) {
+ this.category = category;
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetTests.java
new file mode 100644
index 0000000000..b4ec3e3342
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetTests.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * The serialisation of offsets for the date histogram aggregation was corrected in version 1.4 to allow negative offsets and as such the
+ * serialisation of negative offsets in these tests would break in pre 1.4 versions. These tests are separated from the other DateHistogramTests so the
+ * AssertingLocalTransport for these tests can be set to only use versions 1.4 onwards while keeping the other tests using all versions
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE)
+public class DateHistogramOffsetTests extends ElasticsearchIntegrationTest {
+
+ private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss";
+
+ // Parses a date string with the default date-field formatter used by the mapper.
+ private DateTime date(String date) {
+ return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date);
+ }
+
+ // Restricts the asserting transport to versions >= 1.4.0.Beta1, since negative histogram
+ // offsets do not serialise correctly on earlier wire versions (see class javadoc).
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put(AssertingLocalTransport.ASSERTING_TRANSPORT_MIN_VERSION_KEY, Version.V_1_4_0_Beta1).build();
+ }
+
+ // Creates the "idx2" index with a date-mapped field before every test.
+ @Before
+ public void beforeEachTest() throws IOException {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ }
+
+ // Drops "idx2" after every test so each test starts from a clean index.
+ @After
+ public void afterEachTest() throws IOException {
+ internalCluster().wipeIndices("idx2");
+ }
+
+ // Indexes numHours docs into "idx2", starting at the given date and stepping by
+ // stepSizeHours per doc; doc ids start at idxIdStart so successive calls don't collide.
+ private void prepareIndex(DateTime date, int numHours, int stepSizeHours, int idxIdStart) throws IOException, InterruptedException, ExecutionException {
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours];
+ for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) {
+ reqs[i - idxIdStart] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(stepSizeHours);
+ }
+ indexRandom(true, reqs);
+ }
+
+ // 5 hourly docs from midnight 2014-03-11 with a +2h offset: day buckets start at 02:00,
+ // so the first 2 docs land in the 2014-03-10 bucket and the remaining 3 in 2014-03-11.
+ @Test
+ public void singleValue_WithPositiveOffset() throws Exception {
+ prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, 1, 0);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .offset("2h")
+ .format(DATE_FORMAT)
+ .interval(DateHistogramInterval.DAY))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ Histogram histo = response.getAggregations().get("date_histo");
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 2, 0, DateTimeZone.UTC), 2l);
+ checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 2, 0, DateTimeZone.UTC), 3l);
+ }
+
+ // 5 docs stepping BACKWARDS one hour from midnight 2014-03-11 with a -2h offset:
+ // day buckets start at 22:00, splitting the docs 2 / 3 across 03-09 and 03-10 buckets.
+ @Test
+ public void singleValue_WithNegativeOffset() throws Exception {
+ prepareIndex(date("2014-03-11T00:00:00+00:00"), 5, -1, 0);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .offset("-2h")
+ .format(DATE_FORMAT)
+ .interval(DateHistogramInterval.DAY))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ Histogram histo = response.getAggregations().get("date_histo");
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ checkBucketFor(buckets.get(0), new DateTime(2014, 3, 9, 22, 0, DateTimeZone.UTC), 2l);
+ checkBucketFor(buckets.get(1), new DateTime(2014, 3, 10, 22, 0, DateTimeZone.UTC), 3l);
+ }
+
+ /**
+ * Sets a 6h offset so day buckets start at 6am, and indexes the first 12 hours of two days
+ * separated by a one-day gap. With minDocCount(0) the empty bucket in the gap must still be
+ * returned, giving 5 buckets in total (6, 6, 0, 6, 6 docs).
+ */
+ @Test
+ public void singleValue_WithOffset_MinDocCount() throws Exception {
+ prepareIndex(date("2014-03-11T00:00:00+00:00"), 12, 1, 0);
+ prepareIndex(date("2014-03-14T00:00:00+00:00"), 12, 1, 13);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .offset("6h")
+ .minDocCount(0)
+ .format(DATE_FORMAT)
+ .interval(DateHistogramInterval.DAY))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(24l));
+
+ Histogram histo = response.getAggregations().get("date_histo");
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(5));
+
+ checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 6, 0, DateTimeZone.UTC), 6L);
+ checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 6, 0, DateTimeZone.UTC), 6L);
+ // The empty bucket for the gap day is only present because minDocCount(0) was requested.
+ checkBucketFor(buckets.get(2), new DateTime(2014, 3, 12, 6, 0, DateTimeZone.UTC), 0L);
+ checkBucketFor(buckets.get(3), new DateTime(2014, 3, 13, 6, 0, DateTimeZone.UTC), 6L);
+ checkBucketFor(buckets.get(4), new DateTime(2014, 3, 14, 6, 0, DateTimeZone.UTC), 6L);
+ }
+
+ /**
+ * Asserts a histogram bucket's key (both formatted and as a DateTime) and doc count.
+ *
+ * @param bucket the bucket to check assertions for
+ * @param key the expected key
+ * @param expectedSize the expected size of the bucket
+ */
+ private static void checkBucketFor(Histogram.Bucket bucket, DateTime key, long expectedSize) {
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(key.toString(DATE_FORMAT)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(expectedSize));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
new file mode 100644
index 0000000000..28c6aa3859
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java
@@ -0,0 +1,1691 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import org.junit.After;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class DateHistogramTests extends ElasticsearchIntegrationTest {
+
+ private DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ private DateTime date(String date) {
+ return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date);
+ }
+
+ private static String format(DateTime date, String pattern) {
+ return DateTimeFormat.forPattern(pattern).print(date);
+ }
+
+ private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception {
+ return client().prepareIndex(idx, "type").setSource(jsonBuilder()
+ .startObject()
+ .field("date", date)
+ .field("value", value)
+ .startArray("dates").value(date).value(date.plusMonths(1).plusDays(1)).endArray()
+ .endObject());
+ }
+
+ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("date", date(month, day))
+ .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+ .endObject());
+ }
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ // TODO: would be nice to have more random data here
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ builders.addAll(Arrays.asList(
+ indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3
+ indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3
+ indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16
+ indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3
+ indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16
+ indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @After
+ public void afterEachTest() throws IOException {
+ internalCluster().wipeIndices("idx2");
+ }
+
+ private static String getBucketKeyAsString(DateTime key) {
+ return getBucketKeyAsString(key, DateTimeZone.UTC);
+ }
+
+ private static String getBucketKeyAsString(DateTime key, DateTimeZone tz) {
+ return Joda.forPattern(DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format()).printer().withZone(tz).print(key);
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void singleValuedField_WithTimeZone() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(1).timeZone("+01:00")).execute()
+ .actionGet();
+ DateTimeZone tz = DateTimeZone.forID("+01:00");
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(6));
+
+ DateTime key = new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(4);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(5);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+ .order(Histogram.Order.KEY_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ int i = 0;
+ for (Histogram.Bucket bucket : buckets) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+.order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+.order(Histogram.Order.COUNT_ASC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+.order(Histogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH)
+ .subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Object[] propertiesKeys = (Object[]) histo.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) histo.getProperty("_count");
+ Object[] propertiesCounts = (Object[]) histo.getProperty("sum.value");
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(1.0));
+ assertThat((DateTime) propertiesKeys[0], equalTo(key));
+ assertThat((long) propertiesDocCounts[0], equalTo(1l));
+ assertThat((double) propertiesCounts[0], equalTo(1.0));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(5.0));
+ assertThat((DateTime) propertiesKeys[1], equalTo(key));
+ assertThat((long) propertiesDocCounts[1], equalTo(2l));
+ assertThat((double) propertiesCounts[1], equalTo(5.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(15.0));
+ assertThat((DateTime) propertiesKeys[2], equalTo(key));
+ assertThat((long) propertiesDocCounts[2], equalTo(3l));
+ assertThat((double) propertiesCounts[2], equalTo(15.0));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+ .order(Histogram.Order.aggregation("sum", true))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+ .order(Histogram.Order.aggregation("sum", false))
+ .subAggregation(max("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+ .order(Histogram.Order.aggregation("stats", "sum", true))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 0;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.MONTH)
+ .order(Histogram.Order.aggregation("stats", "sum", false))
+ .subAggregation(stats("stats").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(3));
+
+ int i = 2;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC)));
+ i--;
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+.script(new Script("new DateTime(_value).plusMonths(1).getMillis()"))
+ .interval(DateHistogramInterval.MONTH)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void multiValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .interval(DateHistogramInterval.MONTH)
+.order(Histogram.Order.COUNT_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ /**
+ * The script will change to document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
+ */
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script(new Script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()"))
+ .interval(DateHistogramInterval.MONTH)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /**
+ * The script will change to document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3]
+ * doc 2: [ Mar 2, Apr 3]
+ * doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3]
+ * doc 5: [ Apr 15, May 16]
+ * doc 6: [ Apr 23, May 24]
+ *
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+ .field("dates")
+ .script(new Script("new DateTime((long)_value, DateTimeZone.UTC).plusMonths(1).getMillis()"))
+ .interval(DateHistogramInterval.MONTH).subAggregation(max("max"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ /**
+ * Jan 2
+ * Feb 2
+ * Feb 15
+ * Mar 2
+ * Mar 15
+ * Mar 23
+ */
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script(new Script("doc['date'].value")).interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+.script(new Script("doc['date'].value")).interval(DateHistogramInterval.MONTH)
+ .subAggregation(max("max"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script(new Script("doc['dates'].values")).interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+
+
+ /*
+ [ Jan 2, Feb 3]
+ [ Feb 2, Mar 3]
+ [ Feb 15, Mar 16]
+ [ Mar 2, Apr 3]
+ [ Mar 15, Apr 16]
+ [ Mar 23, Apr 24]
+ */
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo")
+.script(new Script("doc['dates'].values")).interval(DateHistogramInterval.MONTH)
+ .subAggregation(max("max"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 2, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateHistogram("date_histo").interval(1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Histogram.Bucket bucket = buckets.get(1);
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo("1"));
+
+ Histogram dateHisto = bucket.getAggregations().get("date_histo");
+ assertThat(dateHisto, Matchers.notNullValue());
+ assertThat(dateHisto.getName(), equalTo("date_histo"));
+ assertThat(dateHisto.getBuckets().isEmpty(), is(true));
+
+ }
+
+ @Test
+ public void singleValue_WithTimeZone() throws Exception {
+ prepareCreate("idx2").addMapping("type", "date", "type=date").execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ DateTime date = date("2014-03-11T00:00:00+00:00");
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", date).endObject());
+ date = date.plusHours(1);
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .timeZone("-02:00")
+ .interval(DateHistogramInterval.DAY)
+ .format("yyyy-MM-dd:HH-mm-ssZZ"))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ Histogram histo = response.getAggregations().get("date_histo");
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(2));
+
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo("2014-03-10:00-00-00-02:00"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo("2014-03-11:00-00-00-02:00"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ @Test
+ public void singleValueField_WithExtendedBounds() throws Exception {
+
+ String pattern = "yyyy-MM-dd";
+ // we're testing on days, so the base must be rounded to a day
+ int interval = randomIntBetween(1, 2); // in days
+ long intervalMillis = interval * 24 * 60 * 60 * 1000;
+ DateTime base = new DateTime(DateTimeZone.UTC).dayOfMonth().roundFloorCopy();
+ DateTime baseKey = new DateTime(intervalMillis * (base.getMillis() / intervalMillis), DateTimeZone.UTC);
+
+ prepareCreate("idx2")
+ .setSettings(
+ Settings.builder().put(indexSettings()).put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)).execute().actionGet();
+ int numOfBuckets = randomIntBetween(3, 6);
+ int emptyBucketIndex = randomIntBetween(1, numOfBuckets - 2); // should be in the middle
+
+ long[] docCounts = new long[numOfBuckets];
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numOfBuckets; i++) {
+ if (i == emptyBucketIndex) {
+ docCounts[i] = 0;
+ } else {
+ int docCount = randomIntBetween(1, 3);
+ for (int j = 0; j < docCount; j++) {
+ DateTime date = baseKey.plusDays(i * interval + randomIntBetween(0, interval - 1));
+ builders.add(indexDoc("idx2", date, j));
+ }
+ docCounts[i] = docCount;
+ }
+ }
+ indexRandom(true, builders);
+ ensureSearchable("idx2");
+
+ DateTime lastDataBucketKey = baseKey.plusDays((numOfBuckets - 1) * interval);
+
+ // randomizing the number of buckets on the min bound
+ // (can sometimes fall within the data range, but more frequently will fall before the data range)
+ int addedBucketsLeft = randomIntBetween(0, numOfBuckets);
+ DateTime boundsMinKey;
+ if (frequently()) {
+ boundsMinKey = baseKey.minusDays(addedBucketsLeft * interval);
+ } else {
+ boundsMinKey = baseKey.plusDays(addedBucketsLeft * interval);
+ addedBucketsLeft = 0;
+ }
+ DateTime boundsMin = boundsMinKey.plusDays(randomIntBetween(0, interval - 1));
+
+ // randomizing the number of buckets on the max bound
+ // (can sometimes fall within the data range, but more frequently will fall after the data range)
+ int addedBucketsRight = randomIntBetween(0, numOfBuckets);
+ int boundsMaxKeyDelta = addedBucketsRight * interval;
+ if (rarely()) {
+ addedBucketsRight = 0;
+ boundsMaxKeyDelta = -boundsMaxKeyDelta;
+ }
+ DateTime boundsMaxKey = lastDataBucketKey.plusDays(boundsMaxKeyDelta);
+ DateTime boundsMax = boundsMaxKey.plusDays(randomIntBetween(0, interval - 1));
+
+ // it could be that the random bounds.min we chose ended up greater than
+ // bounds.max - this should
+ // trigger an error
+ boolean invalidBoundsError = boundsMin.isAfter(boundsMax);
+
+ // constructing the newly expected bucket list
+ int bucketsCount = numOfBuckets + addedBucketsLeft + addedBucketsRight;
+ long[] extendedValueCounts = new long[bucketsCount];
+ System.arraycopy(docCounts, 0, extendedValueCounts, addedBucketsLeft, docCounts.length);
+
+ SearchResponse response = null;
+ try {
+ response = client().prepareSearch("idx2")
+ .addAggregation(dateHistogram("histo")
+ .field("date")
+ .interval(DateHistogramInterval.days(interval))
+ .minDocCount(0)
+ // when explicitly specifying a format, the extended bounds should be defined by the same format
+ .extendedBounds(format(boundsMin, pattern), format(boundsMax, pattern))
+ .format(pattern))
+ .execute().actionGet();
+
+ if (invalidBoundsError) {
+ fail("Expected an exception to be thrown when bounds.min is greater than bounds.max");
+ return;
+ }
+
+ } catch (Exception e) {
+ if (invalidBoundsError) {
+ // expected
+ return;
+ } else {
+ throw e;
+ }
+ }
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(bucketsCount));
+
+ DateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey;
+ for (int i = 0; i < bucketsCount; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern)));
+ assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i]));
+ key = key.plusDays(interval);
+ }
+ }
+
+ @Test
+ public void singleValue_WithMultipleDateFormatsFromMapping() throws Exception {
+
+ String mappingJson = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("date").field("type", "date").field("format", "dateOptionalTime||dd-MM-yyyy").endObject().endObject().endObject().endObject().string();
+ prepareCreate("idx2").addMapping("type", mappingJson).execute().actionGet();
+ IndexRequestBuilder[] reqs = new IndexRequestBuilder[5];
+ for (int i = 0; i < reqs.length; i++) {
+ reqs[i] = client().prepareIndex("idx2", "type", "" + i).setSource(jsonBuilder().startObject().field("date", "10-03-2014").endObject());
+ }
+ indexRandom(true, reqs);
+
+ SearchResponse response = client().prepareSearch("idx2")
+ .setQuery(matchAllQuery())
+ .addAggregation(dateHistogram("date_histo")
+ .field("date")
+ .interval(DateHistogramInterval.DAY))
+ .execute().actionGet();
+
+ assertThat(response.getHits().getTotalHits(), equalTo(5l));
+
+ Histogram histo = response.getAggregations().get("date_histo");
+ List<? extends Histogram.Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(1));
+
+ DateTime key = new DateTime(2014, 3, 10, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ public void testIssue6965() {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").timeZone("+01:00").interval(DateHistogramInterval.MONTH).minDocCount(0))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ DateTimeZone tz = DateTimeZone.forID("+01:00");
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2011, 12, 31, 23, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 1, 31, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 2, 29, 23, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionException {
+ assertAcked(client().admin().indices().prepareCreate("test9491").addMapping("type", "d", "type=date").get());
+ indexRandom(true, client().prepareIndex("test9491", "type").setSource("d", "2014-10-08T13:00:00Z"),
+ client().prepareIndex("test9491", "type").setSource("d", "2014-11-08T13:00:00Z"));
+ ensureSearchable("test9491");
+ SearchResponse response = client().prepareSearch("test9491")
+ .addAggregation(dateHistogram("histo").field("d").interval(DateHistogramInterval.YEAR).timeZone("Asia/Jerusalem"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(1));
+ assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00"));
+ }
+
+ public void testIssue8209() throws InterruptedException, ExecutionException {
+ assertAcked(client().admin().indices().prepareCreate("test8209").addMapping("type", "d", "type=date").get());
+ indexRandom(true,
+ client().prepareIndex("test8209", "type").setSource("d", "2014-01-01T0:00:00Z"),
+ client().prepareIndex("test8209", "type").setSource("d", "2014-04-01T0:00:00Z"),
+ client().prepareIndex("test8209", "type").setSource("d", "2014-04-30T0:00:00Z"));
+ ensureSearchable("test8209");
+ SearchResponse response = client().prepareSearch("test8209")
+ .addAggregation(dateHistogram("histo").field("d").interval(DateHistogramInterval.MONTH).timeZone("CET")
+ .minDocCount(0))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+01:00"));
+ assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L));
+ assertThat(histo.getBuckets().get(1).getKeyAsString(), equalTo("2014-02-01T00:00:00.000+01:00"));
+ assertThat(histo.getBuckets().get(1).getDocCount(), equalTo(0L));
+ assertThat(histo.getBuckets().get(2).getKeyAsString(), equalTo("2014-03-01T00:00:00.000+01:00"));
+ assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L));
+ assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00"));
+ assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L));
+ }
+
+ /**
+ * see issue #9634, negative interval in date_histogram should raise exception
+ */
+ public void testExceptionOnNegativeInterval() {
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").field("date").interval(-TimeUnit.DAYS.toMillis(1)).minDocCount(0)).execute()
+ .actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("ElasticsearchParseException"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("date")
+ .script("new DateTime(_value).plusMonths(1).getMillis()")
+ .interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ /**
+ * The script will change the document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3] doc 2: [ Mar 2, Apr 3] doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3] doc 5: [ Apr 15, May 16] doc 6: [ Apr 23, May 24]
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("dates")
+ .script("new DateTime(_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ /**
+ * The script will change the document date values to the following:
+ *
+ * doc 1: [ Feb 2, Mar 3] doc 2: [ Mar 2, Apr 3] doc 3: [ Mar 15, Apr 16]
+ * doc 4: [ Apr 2, May 3] doc 5: [ Apr 15, May 16] doc 6: [ Apr 23, May 24]
+ *
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("dates")
+ .script("new DateTime((long)_value, DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .interval(DateHistogramInterval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat((long) max.getValue(), equalTo(new DateTime(2012, 5, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ /**
+ * Jan 2 Feb 2 Feb 15 Mar 2 Mar 15 Mar 23
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateHistogram("histo").script("doc['date'].value").interval(DateHistogramInterval.MONTH))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo")
+ .script("doc['date'].value")
+ .interval(DateHistogramInterval.MONTH)
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 1, 2, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 2, 15, 0, 0, DateTimeZone.UTC).getMillis()));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+ assertThat(((DateTime) bucket.getKey()), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) new DateTime(2012, 3, 23, 0, 0, DateTimeZone.UTC).getMillis()));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    @Test
+    public void script_MultiValuedOldScriptAPI() throws Exception {
+        // Date histogram keyed by the deprecated string-script API over the multi-valued
+        // 'dates' field. Each doc holds two dates (see indexDoc), so a doc can land in
+        // more than one monthly bucket.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateHistogram("histo").script("doc['dates'].values").interval(DateHistogramInterval.MONTH))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        // Values span Jan..Apr 2012, hence four monthly buckets.
+        assertThat(buckets.size(), equalTo(4));
+
+        DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+        Histogram.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(1l));
+
+        key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(3l));
+
+        key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(5l));
+
+        key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(3);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(3l));
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    @Test
+    public void script_MultiValued_WithAggregatorInheritedOldScriptAPI() throws Exception {
+        // Same deprecated-string-script multi-valued histogram as above, plus a 'max'
+        // sub-aggregation that inherits the script's values; each bucket's max is the
+        // latest date that fell into that bucket.
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        dateHistogram("histo")
+                                .script("doc['dates'].values")
+                                .interval(DateHistogramInterval.MONTH)
+                                .subAggregation(max("max")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        assertThat(buckets.size(), equalTo(4));
+
+        DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+        Histogram.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(1l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        // NOTE: the doc counted in the Jan bucket also carries its second date (Feb 3),
+        // which the inherited sub-aggregation sees — hence the Feb value here.
+        assertThat((long) max.getValue(), equalTo(new DateTime(2012, 2, 3, 0, 0, DateTimeZone.UTC).getMillis()));
+
+        key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(3l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat((long) max.getValue(), equalTo(new DateTime(2012, 3, 16, 0, 0, DateTimeZone.UTC).getMillis()));
+
+        key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(5l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+
+        key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+        bucket = buckets.get(3);
+        assertThat(bucket, notNullValue());
+        assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key)));
+        assertThat(((DateTime) bucket.getKey()), equalTo(key));
+        assertThat(bucket.getDocCount(), equalTo(3l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat((long) max.getValue(), equalTo(new DateTime(2012, 4, 24, 0, 0, DateTimeZone.UTC).getMillis()));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
new file mode 100644
index 0000000000..589b2b39bc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java
@@ -0,0 +1,1445 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class DateRangeTests extends ElasticsearchIntegrationTest {
+
+    // Builds one 'idx' doc for 2012: a scalar 'value', a single 'date' field, and a
+    // two-element 'dates' array holding [date, date + 1 month + 1 day]. The second
+    // array element is what makes the multi-valued tests below multi-count docs.
+    private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+        return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+                .startObject()
+                .field("value", value)
+                .field("date", date(month, day))
+                .startArray("dates").value(date(month, day)).value(date(month + 1, day + 1)).endArray()
+                .endObject());
+    }
+
+    // Midnight UTC on the given day of 2012 — all fixture dates live in 2012.
+    private static DateTime date(int month, int day) {
+        return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+    }
+
+    // Total docs indexed into 'idx': 6 fixed fixture docs plus random dummy docs.
+    private static int numDocs;
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        // Suite-scoped fixtures shared by every test in this class:
+        // 'idx' holds the dated docs, 'idx_unmapped' has no 'date' mapping,
+        // 'empty_bucket_idx' has only an integer 'value' field.
+        createIndex("idx");
+        createIndex("idx_unmapped");
+
+        numDocs = randomIntBetween(7, 20);
+
+        List<IndexRequestBuilder> docs = new ArrayList<>();
+        // Six fixed docs — every exact doc-count assertion below derives from these.
+        docs.addAll(Arrays.asList(
+                indexDoc(1, 2, 1), // Jan 2
+                indexDoc(2, 2, 2), // Feb 2
+                indexDoc(2, 15, 3), // Feb 15
+                indexDoc(3, 2, 4), // Mar 2
+                indexDoc(3, 15, 5), // Mar 15
+                indexDoc(3, 23, 6))); // Mar 23
+
+        // dummy docs
+        // Random filler docs dated Jun..Oct so they only ever hit the open-ended
+        // "from" buckets; counts for those buckets are expressed via numDocs.
+        for (int i = docs.size(); i < numDocs; ++i) {
+            docs.add(indexDoc(randomIntBetween(6, 10), randomIntBetween(1, 20), randomInt(100)));
+        }
+        assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer"));
+        for (int i = 0; i < 2; i++) {
+            docs.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+                    .startObject()
+                    .field("value", i*2)
+                    .endObject()));
+        }
+        indexRandom(true, docs);
+        ensureSearchable();
+    }
+
+    @Test
+    public void dateMath() throws Exception {
+        // Range bounds given as date-math expressions ("now-50y", "now-1y").
+        // Randomly exercises the field-based and script-based value source —
+        // both must resolve date math the same way.
+        DateRangeBuilder rangeBuilder = dateRange("range");
+        if (randomBoolean()) {
+            rangeBuilder.field("date");
+        } else {
+            rangeBuilder.script(new Script("doc['date'].value"));
+        }
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        rangeBuilder.addUnboundedTo("a long time ago", "now-50y").addRange("recently", "now-50y", "now-1y")
+                                .addUnboundedFrom("last year", "now-1y")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+
+        // All fixture docs are dated 2012, so every doc falls in "recently".
+        Range.Bucket bucket = buckets.get(0);
+        assertThat((String) bucket.getKey(), equalTo("a long time ago"));
+        assertThat(bucket.getKeyAsString(), equalTo("a long time ago"));
+        assertThat(bucket.getDocCount(), equalTo(0L));
+
+        bucket = buckets.get(1);
+        assertThat((String) bucket.getKey(), equalTo("recently"));
+        assertThat(bucket.getKeyAsString(), equalTo("recently"));
+        assertThat(bucket.getDocCount(), equalTo((long) numDocs));
+
+        bucket = buckets.get(2);
+        assertThat((String) bucket.getKey(), equalTo("last year"));
+        assertThat(bucket.getKeyAsString(), equalTo("last year"));
+        assertThat(bucket.getDocCount(), equalTo(0L));
+    }
+
+    @Test
+    public void singleValueField() throws Exception {
+        // Three ranges over the single-valued 'date' field with DateTime bounds.
+        // Bucket membership shows 'from' is inclusive and 'to' exclusive
+        // (the Feb 15 doc lands in the middle bucket, not the first).
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo(date(2, 15))
+                        .addRange(date(2, 15), date(3, 15))
+                        .addUnboundedFrom(date(3, 15)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        // Jan 2 + Feb 2
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        // Feb 15 + Mar 2
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        // Mar 15 + Mar 23 + all dummy docs (Jun..Oct) => numDocs - 4
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+    }
+
+    @Test
+    public void singleValueField_WithStringDates() throws Exception {
+        // Same ranges as singleValueField, but bounds given as "yyyy-MM-dd" strings;
+        // parsed bounds must equal the DateTime values and render in the default
+        // ISO format in the bucket keys.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo("2012-02-15")
+                        .addRange("2012-02-15", "2012-03-15")
+                        .addUnboundedFrom("2012-03-15"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+    }
+
+    @Test
+    public void singleValueField_WithStringDates_WithCustomFormat() throws Exception {
+        // Same string bounds as above, but with an explicit "yyyy-MM-dd" output
+        // format: bucket keys and from/to strings must use the custom pattern
+        // instead of the default ISO rendering. Doc counts are unchanged.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .format("yyyy-MM-dd")
+                        .addUnboundedTo("2012-02-15")
+                        .addRange("2012-02-15", "2012-03-15")
+                        .addUnboundedFrom("2012-03-15"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+    }
+
+    @Test
+    public void singleValueField_WithDateMath() throws Exception {
+        // Bounds that mix a literal date with anchored date math
+        // ("2012-02-15||+1M"); the math must resolve to 2012-03-15 so the
+        // resulting buckets are identical to singleValueField's.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo("2012-02-15")
+                        .addRange("2012-02-15", "2012-02-15||+1M")
+                        .addUnboundedFrom("2012-02-15||+1M"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+    }
+
+    @Test
+    public void singleValueField_WithCustomKey() throws Exception {
+        // Explicit bucket keys ("r1".."r3") must replace the auto-generated
+        // "from-to" keys, while from/to values and doc counts stay the same as
+        // in singleValueField.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo("r1", date(2, 15))
+                        .addRange("r2", date(2, 15), date(3, 15))
+                        .addUnboundedFrom("r3", date(3, 15)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r1"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r2"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r3"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+    }
+
+ /*
+ Jan 2, 1
+ Feb 2, 2
+ Feb 15, 3
+ Mar 2, 4
+ Mar 15, 5
+ Mar 23, 6
+ */
+
+    @Test
+    public void singleValuedField_WithSubAggregation() throws Exception {
+        // Custom-keyed ranges with a 'sum' sub-aggregation over 'value', also
+        // verifying getProperty() path access (_key, _count, sum.value).
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo("r1", date(2, 15))
+                        .addRange("r2", date(2, 15), date(3, 15))
+                        .addUnboundedFrom("r3", date(3, 15))
+                        .subAggregation(sum("sum").field("value")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+        Object[] propertiesKeys = (Object[]) range.getProperty("_key");
+        Object[] propertiesDocCounts = (Object[]) range.getProperty("_count");
+        Object[] propertiesCounts = (Object[]) range.getProperty("sum.value");
+
+        // r1: Jan 2 (value 1) + Feb 2 (value 2)
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r1"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+        Sum sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) 1 + 2));
+        assertThat((String) propertiesKeys[0], equalTo("r1"));
+        assertThat((long) propertiesDocCounts[0], equalTo(2l));
+        assertThat((double) propertiesCounts[0], equalTo((double) 1 + 2));
+
+        // r2: Feb 15 (value 3) + Mar 2 (value 4)
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r2"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+        sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) 3 + 4));
+        assertThat((String) propertiesKeys[1], equalTo("r2"));
+        assertThat((long) propertiesDocCounts[1], equalTo(2l));
+        assertThat((double) propertiesCounts[1], equalTo((double) 3 + 4));
+
+        // r3: the sum value is not asserted here — presumably because the dummy
+        // docs that land in this bucket carry random values; TODO confirm.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r3"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+        sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat((String) propertiesKeys[2], equalTo("r3"));
+        assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 4l));
+    }
+
+    @Test
+    public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+        // 'min' sub-aggregation with no field of its own: it must inherit the
+        // range's 'date' value source, so each bucket's min is the earliest
+        // date in that bucket.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("date")
+                        .addUnboundedTo("r1", date(2, 15))
+                        .addRange("r2", date(2, 15), date(3, 15))
+                        .addUnboundedFrom("r3", date(3, 15))
+                        .subAggregation(min("min")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r1"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+        Min min = bucket.getAggregations().get("min");
+        assertThat(min, notNullValue());
+        assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r2"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+        min = bucket.getAggregations().get("min");
+        assertThat(min, notNullValue());
+        assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+
+        // r3's min is deterministic (Mar 15) because all dummy docs are dated later.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r3"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+        min = bucket.getAggregations().get("min");
+        assertThat(min, notNullValue());
+        assertThat(min.getValue(), equalTo((double) date(3, 15).getMillis()));
+    }
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16, 5
+ Mar 23, Apr 24, 6
+ */
+
+    @Test
+    public void multiValuedField() throws Exception {
+        // Ranges over the two-valued 'dates' array: a doc counts once in every
+        // range that any of its values falls into (never twice in the same range),
+        // so per-bucket counts can sum to more than the number of docs.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("dates")
+                        .addUnboundedTo(date(2, 15))
+                        .addRange(date(2, 15), date(3, 15))
+                        .addUnboundedFrom(date(3, 15)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(3l));
+
+        // Only the Jan 2 and Feb 2 docs stay entirely below Mar 15; everything
+        // else has at least one value at or after it => numDocs - 2.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+    }
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
+
+
+    @Test
+    public void multiValuedField_WithValueScript() throws Exception {
+        // Value script shifts every 'dates' value forward one month before
+        // bucketing (see the shifted-date table in the comment above), so the
+        // distribution moves one range "later" compared to multiValuedField.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("dates")
+                        .script(new Script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()"))
+                        .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).execute()
+                .actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(1l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+
+        // After the +1 month shift only the Jan 2 doc stays entirely below Mar 15.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+    }
+
+
+
+ /*
+ Feb 2, Mar 3, 1
+ Mar 2, Apr 3, 2
+ Mar 15, Apr 16, 3
+ Apr 2, May 3, 4
+ Apr 15, May 16, 5
+ Apr 23, May 24, 6
+ */
+
+    @Test
+    public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+        // Same one-month value-script shift as above, with a 'max' sub-aggregation
+        // that inherits the scripted values; bucket maxima are therefore taken from
+        // the shifted dates.
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(dateRange("range")
+                        .field("dates")
+                        .script(new Script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()"))
+                        .addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))
+                        .subAggregation(max("max"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), nullValue());
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(1l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        // The Jan 2 doc's shifted second value (Mar 3) is visible to the inherited max.
+        assertThat(max.getValue(), equalTo((double) date(3, 3).getMillis()));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+        assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+        assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getDocCount(), equalTo(2l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) date(4, 3).getMillis()));
+
+        // Last bucket's max value is not asserted — presumably because the dummy
+        // docs (random dates) dominate it and make it non-deterministic; TODO confirm.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+        assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+        assertThat(((DateTime) bucket.getTo()), nullValue());
+        assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+    }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+.script(new Script("doc['date'].value"))
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range").script(new Script("doc['date'].value")).addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15)).subAggregation(max("max"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 2).getMillis()));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+
+
+ /*
+ Jan 2, Feb 3, 1
+ Feb 2, Mar 3, 2
+ Feb 15, Mar 16, 3
+ Mar 2, Apr 3, 4
+ Mar 15, Apr 16 5
+ Mar 23, Apr 24 6
+ */
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range").script(new Script("doc['dates'].values")).addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(dateRange("range")
+.script(new Script("doc['dates'].values")).addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15)).addUnboundedFrom(date(3, 15)).subAggregation(min("min"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void unmapped_WithStringDates() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo("2012-02-15")
+ .addRange("2012-02-15", "2012-03-15")
+ .addUnboundedFrom("2012-03-15"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(dateRange("range")
+ .field("date")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(dateRange("date_range").addRange("0-1", 0, 1)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Range dateRange = bucket.getAggregations().get("date_range");
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(dateRange.getBuckets());
+ assertThat(dateRange, Matchers.notNullValue());
+ assertThat(dateRange.getName(), equalTo("date_range"));
+ assertThat(buckets.size(), is(1));
+ assertThat((String) buckets.get(0).getKey(), equalTo("0-1"));
+ assertThat(((DateTime) buckets.get(0).getFrom()).getMillis(), equalTo(0l));
+ assertThat(((DateTime) buckets.get(0).getTo()).getMillis(), equalTo(1l));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true));
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void dateMathOldScriptAPI() throws Exception {
+ DateRangeBuilder rangeBuilder = dateRange("range");
+ if (randomBoolean()) {
+ rangeBuilder.field("date");
+ } else {
+ rangeBuilder.script("doc['date'].value");
+ }
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(rangeBuilder
+ .addUnboundedTo("a long time ago", "now-50y")
+ .addRange("recently", "now-50y", "now-1y")
+ .addUnboundedFrom("last year", "now-1y"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat((String) bucket.getKey(), equalTo("a long time ago"));
+ assertThat(bucket.getKeyAsString(), equalTo("a long time ago"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+
+ bucket = buckets.get(1);
+ assertThat((String) bucket.getKey(), equalTo("recently"));
+ assertThat(bucket.getKeyAsString(), equalTo("recently"));
+ assertThat(bucket.getDocCount(), equalTo((long) numDocs));
+
+ bucket = buckets.get(2);
+ assertThat((String) bucket.getKey(), equalTo("last year"));
+ assertThat(bucket.getKeyAsString(), equalTo("last year"));
+ assertThat(bucket.getDocCount(), equalTo(0L));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range").field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range").field("dates")
+ .script("new DateTime(_value.longValue(), DateTimeZone.UTC).plusMonths(1).getMillis()")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 3).getMillis()));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(4, 3).getMillis()));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 1l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range").script("doc['date'].value").addUnboundedTo(date(2, 15)).addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range")
+ .script("doc['date'].value")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo((double) date(3, 2).getMillis()));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateRange("range")
+ .script("doc['dates'].values")
+ .addUnboundedTo(date(2, 15))
+ .addRange(date(2, 15), date(3, 15))
+ .addUnboundedFrom(date(3, 15))
+ .subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), nullValue());
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15)));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(1, 2).getMillis()));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15)));
+ assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15)));
+ assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 2).getMillis()));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*"));
+ assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15)));
+ assertThat(((DateTime) bucket.getTo()), nullValue());
+ assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getValue(), equalTo((double) date(2, 15).getMillis()));
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DedicatedAggregationTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DedicatedAggregationTests.java
new file mode 100644
index 0000000000..aeba973a7d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DedicatedAggregationTests.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+public class DedicatedAggregationTests extends ElasticsearchIntegrationTest {
+
+    // Regression test: an empty bool query inside a filter aggregation must behave
+    // like match_all rather than matching nothing.
+    // https://github.com/elasticsearch/elasticsearch/issues/7240
+    @Test
+    public void testEmptyBoolIsMatchAll() throws IOException {
+        // The search source is a classpath fixture containing a filter agg named
+        // "issue7240" whose filter is an empty bool, with a string terms sub-agg.
+        String query = copyToStringFromClasspath("/org/elasticsearch/search/aggregations/bucket/agg-filter-with-empty-bool.json");
+        createIndex("testidx");
+        index("testidx", "apache", "1", "field", "text");
+        index("testidx", "nginx", "2", "field", "text");
+        refresh();
+        ensureGreen("testidx");
+        // Sanity check: both documents are visible to a plain match_all search.
+        SearchResponse searchResponse = client().prepareSearch("testidx").setQuery(matchAllQuery()).get();
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+        searchResponse = client().prepareSearch("testidx").setSource(query).get();
+        assertSearchResponse(searchResponse);
+        // The filter agg must exist and its terms sub-agg bucket holds one doc,
+        // proving the empty bool filtered nothing out.
+        assertThat(searchResponse.getAggregations().getAsMap().get("issue7240"), instanceOf(Filter.class));
+        Filter filterAgg = (Filter) searchResponse.getAggregations().getAsMap().get("issue7240");
+        assertThat(filterAgg.getAggregations().getAsMap().get("terms"), instanceOf(StringTerms.class));
+        assertThat(((StringTerms) filterAgg.getAggregations().getAsMap().get("terms")).getBuckets().get(0).getDocCount(), equalTo(1l));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
new file mode 100644
index 0000000000..69f3cff44e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsTests.java
@@ -0,0 +1,1593 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class DoubleTermsTests extends AbstractTermsTests {
+
+ private static final int NUM_DOCS = 5; // TODO: randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "d_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "d_values";
+ private static HashMap<Double, Map<String, Object>> expectedMultiSortBuckets;
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        // "idx"/"type": NUM_DOCS docs with single-valued double i and a
+        // multi-valued array [i, i+1]; "num_tag" feeds order-by-sub-agg tests.
+        createIndex("idx");
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        for (int i = 0; i < NUM_DOCS; i++) {
+            builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+                    .field("num_tag", i < NUM_DOCS/2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg
+                    .startArray(MULTI_VALUED_FIELD_NAME).value((double) i).value(i + 1d).endArray()
+                    .endObject()));
+
+        }
+        // "idx"/"high_card_type": 100 distinct values for tests that exercise
+        // larger term cardinality (sizeIsZero, singleValueField_WithMaxSize).
+        for (int i = 0; i < 100; i++) {
+            builders.add(client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, (double) i)
+                    .startArray(MULTI_VALUED_FIELD_NAME).value((double)i).value(i + 1d).endArray()
+                    .endObject()));
+        }
+
+        createIndex("idx_unmapped");
+        // "empty_bucket_idx": docs at 0 and 2 so a histogram with interval 1 and
+        // minDocCount(0) yields an empty middle bucket (see emptyAggregation()).
+        assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+        for (int i = 0; i < 2; i++) {
+            builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, i*2)
+                    .endObject()));
+        }
+
+        getMultiSortDocs(builders);
+
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    // Builds the expectations and data for multi-criteria sort tests:
+    // expectedMultiSortBuckets maps each term (1d..7d) to its expected _count and
+    // the expected values of the avg_l / sum_d sub-aggregations, then indexes
+    // matching docs (fields "l" and "d") into "sort_idx"/"multi_sort_type".
+    private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOException {
+        expectedMultiSortBuckets = new HashMap<>();
+        Map<String, Object> bucketProps = new HashMap<>();
+        bucketProps.put("_term", 1d);
+        bucketProps.put("_count", 3l);
+        bucketProps.put("avg_l", 1d);
+        bucketProps.put("sum_d", 6d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 2d);
+        bucketProps.put("_count", 3l);
+        bucketProps.put("avg_l", 2d);
+        bucketProps.put("sum_d", 6d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 3d);
+        bucketProps.put("_count", 2l);
+        bucketProps.put("avg_l", 3d);
+        bucketProps.put("sum_d", 3d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 4d);
+        bucketProps.put("_count", 2l);
+        bucketProps.put("avg_l", 3d);
+        bucketProps.put("sum_d", 4d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 5d);
+        bucketProps.put("_count", 2l);
+        bucketProps.put("avg_l", 5d);
+        bucketProps.put("sum_d", 3d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 6d);
+        bucketProps.put("_count", 1l);
+        bucketProps.put("avg_l", 5d);
+        bucketProps.put("sum_d", 1d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+        bucketProps = new HashMap<>();
+        bucketProps.put("_term", 7d);
+        bucketProps.put("_count", 1l);
+        bucketProps.put("avg_l", 5d);
+        bucketProps.put("sum_d", 1d);
+        expectedMultiSortBuckets.put((Double) bucketProps.get("_term"), bucketProps);
+
+        // Index the docs that produce exactly the expectations above.
+        assertAcked(prepareCreate("sort_idx").addMapping("multi_sort_type", SINGLE_VALUED_FIELD_NAME, "type=double"));
+        // Terms 1 and 2: three docs each, d = 1..3.
+        for (int i = 1; i <= 3; i++) {
+            builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, 1)
+                    .field("l", 1)
+                    .field("d", i)
+                    .endObject()));
+            builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, 2)
+                    .field("l", 2)
+                    .field("d", i)
+                    .endObject()));
+        }
+        // Terms 3..5: two docs each; terms 6 and 7: one doc each.
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 3)
+                .field("l", 3)
+                .field("d", 1)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 3)
+                .field("l", 3)
+                .field("d", 2)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 4)
+                .field("l", 3)
+                .field("d", 1)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 4)
+                .field("l", 3)
+                .field("d", 3)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 5)
+                .field("l", 5)
+                .field("d", 1)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 5)
+                .field("l", 5)
+                .field("d", 2)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 6)
+                .field("l", 5)
+                .field("d", 1)
+                .endObject()));
+        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
+                .startObject()
+                .field(SINGLE_VALUED_FIELD_NAME, 7)
+                .field("l", 5)
+                .field("d", 1)
+                .endObject()));
+    }
+
+    // Shorthand: a bucket's key rendered as a string (doubles print as e.g. "1.0").
+    private String key(Terms.Bucket bucket) {
+        return bucket.getKeyAsString();
+    }
+
+    @Test
+    // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard
+    public void sizeIsZero() {
+        // size(0) requests all terms: all 100 high-cardinality values must return.
+        SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .minDocCount(randomInt(1))
+                        .size(0)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(100));
+    }
+
+    // Plain terms agg on the single-valued field: one one-doc bucket per value 0.0..4.0.
+    @Test
+    public void singleValueField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double)i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+    // size(20) over 100 high-cardinality terms: exactly the 20 smallest values
+    // come back (term order makes the expected set deterministic).
+    @Test
+    public void singleValueField_WithMaxSize() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .size(20)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(20));
+
+        for (int i = 0; i < 20; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+    // include/exclude filtering on double terms: exclude wins over include when
+    // both are given; an empty include means "everything".
+    @Test
+    public void singleValueFieldWithFiltering() throws Exception {
+        double includes[] = { 1, 2, 3, 98.2 };
+        double excludes[] = { 2, 4, 99 };
+        double empty[] = {};
+        testIncludeExcludeResults(includes, empty, new double[] { 1, 2, 3 });
+        testIncludeExcludeResults(includes, excludes, new double[] { 1, 3 });
+        testIncludeExcludeResults(empty, excludes, new double[] { 0, 1, 3 });
+    }
+
+    // Runs a terms agg with the given include/exclude double sets and asserts the
+    // surviving buckets are exactly `expecteds`, each holding a single document.
+    private void testIncludeExcludeResults(double[] includes, double[] excludes, double[] expecteds) {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .include(includes)
+                        .exclude(excludes)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+        assertSearchResponse(response);
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(expecteds.length));
+
+        for (int i = 0; i < expecteds.length; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + expecteds[i]);
+            assertThat(bucket, notNullValue());
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+
+    // Ascending term order: iterating the buckets yields 0.0, 1.0, ... 4.0.
+    @Test
+    public void singleValueField_OrderedByTermAsc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(Terms.Order.term(true)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        int i = 0;
+        for (Terms.Bucket bucket : terms.getBuckets()) {
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double)i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            i++;
+        }
+    }
+
+    // Descending term order: iterating the buckets yields 4.0, 3.0, ... 0.0.
+    @Test
+    public void singleValueField_OrderedByTermDesc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .order(Terms.Order.term(false)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        int i = 4;
+        for (Terms.Bucket bucket : terms.getBuckets()) {
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            i--;
+        }
+    }
+
+    // Terms agg with a sum sub-agg over the multi-valued field [i, i+1], so each
+    // bucket's sum is i + (i+1) = 2i+1. Also verifies the getProperty() path
+    // (_key, _count, sum.value) returns the same values as the bucket API.
+    @Test
+    public void singleValuedField_WithSubAggregation() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+        Object[] propertiesKeys = (Object[]) terms.getProperty("_key");
+        Object[] propertiesDocCounts = (Object[]) terms.getProperty("_count");
+        Object[] propertiesCounts = (Object[]) terms.getProperty("sum.value");
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            assertThat((long) sum.getValue(), equalTo(i+i+1l));
+            assertThat((double) propertiesKeys[i], equalTo((double) i));
+            assertThat((long) propertiesDocCounts[i], equalTo(1l));
+            assertThat((double) propertiesCounts[i], equalTo((double) i + i + 1l));
+        }
+    }
+
+    // sum("sum") specifies no field, so it inherits the parent's value source
+    // (the single-valued field): each one-doc bucket sums to its own key i.
+    @Test
+    public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(sum("sum")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            assertThat(sum.getValue(), equalTo((double) i));
+        }
+    }
+
+    // A value script shifts every term by one: buckets are keyed 1.0..5.0.
+    @Test
+    public void singleValuedField_WithValueScript() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .script(new Script("_value + 1")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (i+1d)));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+    // Multi-valued field [i, i+1] for i in 0..4 yields terms 0..6 where the
+    // endpoints (0 and 5) appear once and every interior value appears twice.
+    @Test
+    public void multiValuedField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(MULTI_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(6));
+
+        for (int i = 0; i < 6; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            if (i == 0 || i == 5) {
+                assertThat(bucket.getDocCount(), equalTo(1l));
+            } else {
+                assertThat(bucket.getDocCount(), equalTo(2l));
+            }
+        }
+    }
+
+    // Same distribution as multiValuedField, shifted by the "_value + 1" script:
+    // keys 1.0..6.0, endpoints once, interior values twice.
+    @Test
+    public void multiValuedField_WithValueScript() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(MULTI_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .script(new Script("_value + 1")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(6));
+
+        for (int i = 0; i < 6; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (i+1d)));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+            if (i == 0 || i == 5) {
+                assertThat(bucket.getDocCount(), equalTo(1l));
+            } else {
+                assertThat(bucket.getDocCount(), equalTo(2l));
+            }
+        }
+    }
+
+    // The script collapses every value to 1 ("(long) _value / 1000 + 1"), so all
+    // five docs land in one bucket — duplicate per-doc values must be deduplicated.
+    @Test
+    public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(MULTI_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .script(new Script("(long) _value / 1000 + 1")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(1));
+
+        Terms.Bucket bucket = terms.getBucketByKey("1.0");
+        assertThat(bucket, notNullValue());
+        assertThat(key(bucket), equalTo("1.0"));
+        assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+        assertThat(bucket.getDocCount(), equalTo(5l));
+    }
+
+    /*
+
+    [1, 2]
+    [2, 3]
+    [3, 4]
+    [4, 5]
+    [5, 6]
+
+    1 - count: 1 - sum: 1
+    2 - count: 2 - sum: 4
+    3 - count: 2 - sum: 6
+    4 - count: 2 - sum: 8
+    5 - count: 2 - sum: 10
+    6 - count: 1 - sum: 6
+
+    */
+
+    // The value script ("_value + 1") also feeds the inherited sum sub-agg: for
+    // bucket i+1, the inner loop recomputes the sum of the scripted values
+    // (j+1 and j+1+1) of every doc j that contributes to the bucket.
+    @Test
+    public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(MULTI_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .script(new Script("_value + 1"))
+                        .subAggregation(sum("sum")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(6));
+
+        for (int i = 0; i < 6; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (i+1d)));
+            assertThat(bucket.getKeyAsNumber().doubleValue(), equalTo(i + 1d));
+            final long count = i == 0 || i == 5 ? 1 : 2;
+            double s = 0;
+            for (int j = 0; j < NUM_DOCS; ++j) {
+                if (i == j || i == j+1) {
+                    s += j + 1;
+                    s += j+1 + 1;
+                }
+            }
+            assertThat(bucket.getDocCount(), equalTo(count));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            assertThat(sum.getValue(), equalTo(s));
+        }
+    }
+
+    // Terms are produced by a script rather than a field. NOTE(review): despite
+    // the test name, the script reads the multi-valued field and takes `.value`
+    // (its first entry, which equals i for doc i) — hence 5 single-doc buckets.
+    @Test
+    public void script_SingleValue() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .setTypes("type")
+                .addAggregation(
+                        terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+                                new Script("doc['" + MULTI_VALUED_FIELD_NAME + "'].value"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+    // NOTE(review): despite the name, no script is applied here — the agg is a
+    // plain field-based terms with an inherited sum sub-agg, duplicating
+    // singleValuedField_WithSubAggregation_Inherited. Presumably a .script(...)
+    // call was intended; confirm against the test's purpose.
+    @Test
+    public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .subAggregation(sum("sum")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            assertThat(sum.getValue(), equalTo((double) i));
+        }
+    }
+
+    // Script returns the whole multi-valued field, so the bucket distribution
+    // matches multiValuedField(): endpoints once, interior values twice.
+    @Test
+    public void script_MultiValued() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .setTypes("type")
+                .addAggregation(
+                        terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+                                new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(6));
+
+        for (int i = 0; i < 6; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            if (i == 0 || i == 5) {
+                assertThat(bucket.getDocCount(), equalTo(1l));
+            } else {
+                assertThat(bucket.getDocCount(), equalTo(2l));
+            }
+        }
+    }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {
+
+ // since no type is explicitly defined, es will assume all values returned by the script to be strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operation on numeric values.
+
+ try {
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+
+ }
+
+    // Same script as the NoExplicitType test, but valueType(DOUBLE) lets the
+    // inherited sum sub-agg work: for bucket i the inner loop re-derives the sum
+    // of values j and j+1 from every doc j contributing to the bucket.
+    @Test
+    public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
+        SearchResponse response = client().prepareSearch("idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .collectMode(randomFrom(SubAggCollectionMode.values()))
+                        .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))
+                        .valueType(Terms.ValueType.DOUBLE)
+                        .subAggregation(sum("sum")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(6));
+
+        for (int i = 0; i < 6; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + i + ".0");
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + i + ".0"));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            final long count = i == 0 || i == 5 ? 1 : 2;
+            double s = 0;
+            for (int j = 0; j < NUM_DOCS; ++j) {
+                if (i == j || i == j+1) {
+                    s += j;
+                    s += j+1;
+                }
+            }
+            assertThat(bucket.getDocCount(), equalTo(count));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            assertThat(sum.getValue(), equalTo(s));
+        }
+    }
+
+    // Aggregating on a field that is unmapped in "idx_unmapped" must succeed
+    // and return an empty bucket list, not fail.
+    @Test
+    public void unmapped() throws Exception {
+        SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .size(randomInt(5))
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(0));
+    }
+
+    // Searching a mapped and an unmapped index together: results must match the
+    // mapped index alone (the unmapped shard contributes nothing).
+    @Test
+    public void partiallyUnmapped() throws Exception {
+        SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+                .addAggregation(terms("terms")
+                        .field(SINGLE_VALUED_FIELD_NAME)
+                        .collectMode(randomFrom(SubAggCollectionMode.values())))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().size(), equalTo(5));
+
+        for (int i = 0; i < 5; i++) {
+            Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+            assertThat(bucket, notNullValue());
+            assertThat(key(bucket), equalTo("" + (double) i));
+            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+            assertThat(bucket.getDocCount(), equalTo(1l));
+        }
+    }
+
+    // "empty_bucket_idx" holds docs at 0 and 2; a histogram with interval 1 and
+    // minDocCount(0) therefore materializes an empty bucket at key 1, whose terms
+    // sub-aggregation must still be present — just with no buckets.
+    @Test
+    public void emptyAggregation() throws Exception {
+        SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+                .setQuery(matchAllQuery())
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+                        .subAggregation(terms("terms")))
+                .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+        Histogram histo = searchResponse.getAggregations().get("histo");
+        assertThat(histo, Matchers.notNullValue());
+        Histogram.Bucket bucket = histo.getBuckets().get(1);
+        assertThat(bucket, Matchers.notNullValue());
+
+        Terms terms = bucket.getAggregations().get("terms");
+        assertThat(terms, Matchers.notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        assertThat(terms.getBuckets().isEmpty(), is(true));
+    }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscWithSubTermsAgg() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(
+ terms("subTerms").field(MULTI_VALUED_FIELD_NAME).collectMode(
+ randomFrom(SubAggCollectionMode.values())))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+
+ Terms subTermsAgg = bucket.getAggregations().get("subTerms");
+ assertThat(subTermsAgg, notNullValue());
+ assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
+ double j = i;
+ for (Terms.Bucket subBucket : subTermsAgg.getBuckets()) {
+ assertThat(subBucket, notNullValue());
+ assertThat(key(subBucket), equalTo(String.valueOf(j)));
+ assertThat(subBucket.getDocCount(), equalTo(1l));
+ j++;
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleBucketSubAggregationAsc() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("num_tags").field("num_tag").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter", asc))
+ .subAggregation(filter("filter").filter(QueryBuilders.matchAllQuery()))).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("num_tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("num_tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "0" : "1"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ Filter filter = tag.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getDocCount(), equalTo(asc ? 2l : 3l));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "1" : "0"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ filter = tag.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getDocCount(), equalTo(asc ? 3l : 2l));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc_MultiHierarchyLevels() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("tags")
+ .field("num_tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter1>filter2>max", asc))
+ .subAggregation(
+ filter("filter1").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ filter("filter2").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ max("max").field(SINGLE_VALUED_FIELD_NAME))))).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ // the max for "1" is 2
+ // the max for "0" is 4
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "1" : "0"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 3l : 2l));
+ Max max = filter2.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(asc ? 2.0 : 4.0));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "0" : "1"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 2l : 3l));
+ max = filter2.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(asc ? 4.0 : 2.0));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("num_tags", true))
+ .subAggregation(
+ terms("num_tags").field("num_tags").collectMode(randomFrom(SubAggCollectionMode.values()))))
+ .execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME + "2").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", asc)).subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void script_Score() {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).add(
+ ScoreFunctionBuilders.scriptFunction(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))))
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+ new Script("ceil(_score.doubleValue()/3)"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 0; i < 3; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double)i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception {
+ double[] expectedKeys = new double[] { 1, 2, 4, 3, 7, 6, 5 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception {
+ double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception {
+ double[] expectedKeys = new double[] { 5, 6, 7, 3, 4, 2, 1 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception {
+ double[] expectedKeys = new double[] { 6, 7, 3, 4, 5, 1, 2 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception {
+ double[] expectedKeys = new double[] { 6, 7, 3, 5, 4, 1, 2 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByThreeCriteria() throws Exception {
+ double[] expectedKeys = new double[] { 2, 1, 4, 5, 3, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(false), Terms.Order.aggregation("sum_d", false), Terms.Order.aggregation("avg_l", false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAsCompound() throws Exception {
+ double[] expectedKeys = new double[] { 1, 2, 3, 4, 5, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true));
+ }
+
+ private void assertMultiSortResponse(double[] expectedKeys, Terms.Order... order) {
+ SearchResponse response = client()
+ .prepareSearch("sort_idx")
+ .setTypes("multi_sort_type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.compound(order)).subAggregation(avg("avg_l").field("l"))
+ .subAggregation(sum("sum_d").field("d"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo(String.valueOf(expectedKeys[i])));
+ assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+ Avg avg = bucket.getAggregations().get("avg_l");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+ Sum sum = bucket.getAggregations().get("sum_d");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+ i++;
+ }
+ }
+
+ @Test
+ public void otherDocCount() {
+ testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value + 1")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i + 1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value + 1")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i + 1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_NotUniqueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("(long) _value / 1000 + 1")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+ *
+ * [1, 2] [2, 3] [3, 4] [4, 5] [5, 6]
+ *
+ * 1 - count: 1 - sum: 1 2 - count: 2 - sum: 4 3 - count: 2 - sum: 6 4 -
+ * count: 2 - sum: 8 5 - count: 2 - sum: 10 6 - count: 1 - sum: 6
+ */
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value + 1").subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i + 1d)));
+ assertThat(bucket.getKeyAsNumber().doubleValue(), equalTo(i + 1d));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ s += j + 1;
+ s += j + 1 + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+ "doc['" + MULTI_VALUED_FIELD_NAME + "'].value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+ "doc['" + MULTI_VALUED_FIELD_NAME + "']")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitTypeOldScriptAPI() throws Exception {
+
+ // since no type is explicitly defined, es will assume all values returned by the script to be strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operation on numeric values.
+
+ try {
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']").subAggregation(sum("sum"))).execute().actionGet();
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitTypeOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']").valueType(Terms.ValueType.DOUBLE)
+ .subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i + ".0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i + ".0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ s += j;
+ s += j + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_ScoreOldScriptAPI() {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .setQuery(
+ functionScoreQuery(matchAllQuery()).add(
+ ScoreFunctionBuilders.scriptFunction(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))))
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script("ceil(_score.doubleValue()/3)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 0; i < 3; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (double) i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (double) i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(i == 1 ? 3L : 1L));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
new file mode 100644
index 0000000000..032504b1a0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterTests.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.AndQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class FilterTests extends ElasticsearchIntegrationTest {
+
+ static int numDocs, numTag1Docs;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx2");
+ numDocs = randomIntBetween(5, 20);
+ numTag1Docs = randomIntBetween(1, numDocs - 1);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numTag1Docs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject()));
+ }
+ for (int i = numTag1Docs; i < numDocs; i++) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .field("tag", "tag2")
+ .field("name", "name" + i)
+ .endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source));
+ if (randomBoolean()) {
+ // randomly index the document twice so that we have deleted docs that match the filter
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source));
+ }
+ }
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1").filter(termQuery("tag", "tag1")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+ }
+
+ // See NullPointer issue when filters are empty:
+ // https://github.com/elasticsearch/elasticsearch/issues/8438
+ @Test
+ public void emptyFilterDeclarations() throws Exception {
+ QueryBuilder emptyFilter = new AndQueryBuilder();
+ SearchResponse response = client().prepareSearch("idx").addAggregation(filter("tag1").filter(emptyFilter)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getDocCount(), equalTo((long) numDocs));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termQuery("tag", "tag1"))
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Filter filter = response.getAggregations().get("tag1");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("tag1"));
+ assertThat(filter.getDocCount(), equalTo((long) numTag1Docs));
+ assertThat((long) filter.getProperty("_count"), equalTo((long) numTag1Docs));
+
+ long sum = 0;
+ for (int i = 0; i < numTag1Docs; ++i) {
+ sum += i + 1;
+ }
+ assertThat(filter.getAggregations().asList().isEmpty(), is(false));
+ Avg avgValue = filter.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
+ assertThat((double) filter.getProperty("avg_value.value"), equalTo((double) sum / numTag1Docs));
+ }
+
+ @Test
+ public void withContextBasedSubAggregation() throws Exception {
+
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(filter("tag1")
+ .filter(termQuery("tag", "tag1"))
+ .subAggregation(avg("avg_value")))
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" +
+ "context which the sub-aggregation can inherit");
+
+ } catch (ElasticsearchException ese) {
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(filter("filter").filter(matchAllQuery())))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Filter filter = bucket.getAggregations().get("filter");
+ assertThat(filter, Matchers.notNullValue());
+ assertThat(filter.getName(), equalTo("filter"));
+ assertThat(filter.getDocCount(), is(0l));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java
new file mode 100644
index 0000000000..4aaa9f2b35
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FiltersTests.java
@@ -0,0 +1,275 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.AndQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.filters.Filters;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; // NOTE(review): duplicate of the matchAllQuery import two lines above — drop one (and decrement the hunk line count) when respinning this patch
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filters;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class FiltersTests extends ElasticsearchIntegrationTest {
+
+ static int numDocs, numTag1Docs, numTag2Docs;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception { // indexes a random tag1/tag2 split into "idx" and two docs into "empty_bucket_idx"
+ createIndex("idx");
+ createIndex("idx2"); // NOTE(review): not queried by any test below — confirm it is still needed
+ numDocs = randomIntBetween(5, 20);
+ numTag1Docs = randomIntBetween(1, numDocs - 1); // guarantees at least one doc on each side of the split
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numTag1Docs; i++) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .field("tag", "tag1")
+ .endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source));
+ if (randomBoolean()) {
+ // randomly index the document twice so that we have deleted docs that match the filter
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source));
+ }
+ }
+ for (int i = numTag1Docs; i < numDocs; i++) {
+ numTag2Docs++;
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .field("tag", "tag2")
+ .field("name", "name" + i)
+ .endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source));
+ if (randomBoolean()) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(source)); // same id: an update, so expected doc counts stay exact
+ }
+ }
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception { // keyed filters: each named filter becomes a bucket whose doc count matches the indexed split
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ filters("tags")
+ .filter("tag1", termQuery("tag", "tag1"))
+ .filter("tag2", termQuery("tag", "tag2")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filters filters = response.getAggregations().get("tags");
+ assertThat(filters, notNullValue());
+ assertThat(filters.getName(), equalTo("tags"));
+
+ assertThat(filters.getBuckets().size(), equalTo(2));
+
+ Filters.Bucket bucket = filters.getBucketByKey("tag1");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs));
+
+ bucket = filters.getBucketByKey("tag2");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs));
+ }
+
+ // See NullPointer issue when filters are empty:
+ // https://github.com/elasticsearch/elasticsearch/issues/8438
+ @Test
+ public void emptyFilterDeclarations() throws Exception { // regression test: an empty AND filter must match all docs instead of throwing NPE
+ QueryBuilder emptyFilter = new AndQueryBuilder(); // deliberately has no inner clauses
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(filters("tags").filter("all", emptyFilter).filter("tag1", termQuery("tag", "tag1"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Filters filters = response.getAggregations().get("tags");
+ assertThat(filters, notNullValue());
+ Filters.Bucket allBucket = filters.getBucketByKey("all");
+ assertThat(allBucket.getDocCount(), equalTo((long) numDocs));
+
+ Filters.Bucket bucket = filters.getBucketByKey("tag1");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception { // avg sub-aggregation per filter bucket, also read back through the getProperty API
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ filters("tags")
+ .filter("tag1", termQuery("tag", "tag1"))
+ .filter("tag2", termQuery("tag", "tag2"))
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filters filters = response.getAggregations().get("tags");
+ assertThat(filters, notNullValue());
+ assertThat(filters.getName(), equalTo("tags"));
+
+ assertThat(filters.getBuckets().size(), equalTo(2));
+ Object[] propertiesKeys = (Object[]) filters.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) filters.getProperty("_count");
+ Object[] propertiesCounts = (Object[]) filters.getProperty("avg_value.value");
+
+ Filters.Bucket bucket = filters.getBucketByKey("tag1");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs));
+ long sum = 0;
+ for (int i = 0; i < numTag1Docs; ++i) {
+ sum += i + 1; // tag1 docs were indexed with value i + 1
+ }
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Avg avgValue = bucket.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / numTag1Docs));
+ assertThat((String) propertiesKeys[0], equalTo("tag1"));
+ assertThat((long) propertiesDocCounts[0], equalTo((long) numTag1Docs));
+ assertThat((double) propertiesCounts[0], equalTo((double) sum / numTag1Docs));
+
+ bucket = filters.getBucketByKey("tag2");
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs));
+ sum = 0;
+ for (int i = numTag1Docs; i < numDocs; ++i) {
+ sum += i; // tag2 docs were indexed with value i
+ }
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ avgValue = bucket.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / numTag2Docs));
+ assertThat((String) propertiesKeys[1], equalTo("tag2"));
+ assertThat((long) propertiesDocCounts[1], equalTo((long) numTag2Docs));
+ assertThat((double) propertiesCounts[1], equalTo((double) sum / numTag2Docs));
+ }
+
+ @Test
+ public void withContextBasedSubAggregation() throws Exception { // avg has no field and no value-source context to inherit, so the request must fail
+
+ try {
+ client().prepareSearch("idx")
+ .addAggregation(
+ filters("tags")
+ .filter("tag1", termQuery("tag", "tag1"))
+ .filter("tag2", termQuery("tag", "tag2"))
+ .subAggregation(avg("avg_value"))
+ )
+ .execute().actionGet();
+
+ fail("expected execution to fail - an attempt to have a context based numeric sub-aggregation, but there is not value source" +
+ "context which the sub-aggregation can inherit");
+
+ } catch (ElasticsearchException ese) { // expected failure path; exception details are intentionally not asserted
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception { // filters under a minDocCount(0) histogram must yield a zero-doc bucket, not fail
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(filters("filters").filter("all", matchAllQuery())))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l)); // setup indexed values 0 and 2, so histogram bucket 1 is empty
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Filters filters = bucket.getAggregations().get("filters");
+ assertThat(filters, notNullValue());
+ Filters.Bucket all = filters.getBucketByKey("all");
+ assertThat(all, Matchers.notNullValue());
+ assertThat(all.getKeyAsString(), equalTo("all"));
+ assertThat(all.getDocCount(), is(0l));
+ }
+
+ @Test
+ public void simple_nonKeyed() throws Exception { // anonymous filters: buckets come back in declaration order rather than by key
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ filters("tags")
+ .filter(termQuery("tag", "tag1"))
+ .filter(termQuery("tag", "tag2")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filters filters = response.getAggregations().get("tags");
+ assertThat(filters, notNullValue());
+ assertThat(filters.getName(), equalTo("tags"));
+
+ assertThat(filters.getBuckets().size(), equalTo(2));
+
+ Collection<? extends Filters.Bucket> buckets = filters.getBuckets();
+ Iterator<? extends Filters.Bucket> itr = buckets.iterator();
+
+ Filters.Bucket bucket = itr.next(); // first declared filter: tag1
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag1Docs));
+
+ bucket = itr.next(); // second declared filter: tag2
+ assertThat(bucket, Matchers.notNullValue());
+ assertThat(bucket.getDocCount(), equalTo((long) numTag2Docs));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
new file mode 100644
index 0000000000..028b053c64
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceTests.java
@@ -0,0 +1,492 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geoDistance;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception { // builds an index request with 0..n "lat, lon" strings under "location"
+ XContentBuilder source = jsonBuilder().startObject().field("city", name);
+ source.startArray("location"); // array is emitted even when latLons is empty (city with no location)
+ for (int i = 0; i < latLons.length; i++) {
+ source.value(latLons[i]);
+ }
+ source.endArray();
+ source = source.endObject();
+ return client().prepareIndex(idx, "type").setSource(source);
+ }
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception { // seeds "idx", "idx-multi" (multi-valued locations), "idx_unmapped" and "empty_bucket_idx"
+ prepareCreate("idx")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ prepareCreate("idx-multi")
+ .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed")
+ .execute().actionGet();
+
+ createIndex("idx_unmapped"); // no geo_point mapping on purpose
+
+ List<IndexRequestBuilder> cities = new ArrayList<>();
+ cities.addAll(Arrays.asList(
+ // below 500km
+ indexCity("idx", "utrecht", "52.0945, 5.116"),
+ indexCity("idx", "haarlem", "52.3890, 4.637"),
+ // above 500km, below 1000km
+ indexCity("idx", "berlin", "52.540, 13.409"),
+ indexCity("idx", "prague", "50.097679, 14.441314"),
+ // above 1000km
+ indexCity("idx", "tel-aviv", "32.0741, 34.777")));
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+ if (randomBoolean()) {
+ cities.add(indexCity("idx", cityName));
+ }
+ }
+ indexRandom(true, cities);
+
+ cities.clear();
+ cities.addAll(Arrays.asList(
+ indexCity("idx-multi", "city1", "52.3890, 4.637", "50.097679,14.441314"), // first point is within the ~17.5km, the second is ~710km
+ indexCity("idx-multi", "city2", "52.540, 13.409", "52.0945, 5.116"), // first point is ~576km, the second is within the ~35km
+ indexCity("idx-multi", "city3", "32.0741, 34.777"))); // above 1000km
+
+ // random cities with no location
+ for (String cityName : Arrays.asList("london", "singapour", "tokyo", "milan")) {
+ if (randomBoolean()) { // was "randomBoolean() || true": always true, which made the random call dead code and contradicted the comment above
+ cities.add(indexCity("idx-multi", cityName));
+ }
+ }
+ indexRandom(true, cities);
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "location", "type=geo_point").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i * 2)
+ .field("location", "52.0945, 5.116")
+ .endObject()));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception { // three distance rings around Amsterdam; doc counts follow the fixed cities indexed in setup
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-500.0")); // was a redundant double "(String) (String)" cast
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l)); // utrecht + haarlem
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("500.0-1000.0")); // was a redundant double "(String) (String)" cast
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l)); // berlin + prague
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l)); // tel-aviv
+ }
+
+ @Test
+ public void simple_WithCustomKeys() throws Exception { // same rings as simple(), but with caller-supplied bucket keys ring1..ring3
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo("ring1", 500)
+ .addRange("ring2", 500, 1000)
+ .addUnboundedFrom("ring3", 1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("ring1"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("ring2"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("ring3"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ @Test
+ public void unmapped() throws Exception { // no geo_point mapping: buckets are still returned, all with doc count 0
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-500.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception { // mapped + unmapped index together: counts must equal the mapped-only case
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-500.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+
+ @Test
+ public void withSubAggregation() throws Exception { // terms("cities") nested in each ring; also exercises the getProperty API
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000)
+ .subAggregation(terms("cities").field("city")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+ Object[] propertiesKeys = (Object[]) geoDist.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) geoDist.getProperty("_count");
+ Object[] propertiesCities = (Object[]) geoDist.getProperty("cities");
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-500.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Terms cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ Set<String> names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKeyAsString());
+ }
+ assertThat(names.contains("utrecht") && names.contains("haarlem"), is(true)); // both sub-500km cities from setup are present
+ assertThat((String) propertiesKeys[0], equalTo("*-500.0"));
+ assertThat((long) propertiesDocCounts[0], equalTo(2l));
+ assertThat((Terms) propertiesCities[0], sameInstance(cities));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKeyAsString());
+ }
+ assertThat(names.contains("berlin") && names.contains("prague"), is(true));
+ assertThat((String) propertiesKeys[1], equalTo("500.0-1000.0"));
+ assertThat((long) propertiesDocCounts[1], equalTo(2l));
+ assertThat((Terms) propertiesCities[1], sameInstance(cities));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ cities = bucket.getAggregations().get("cities");
+ assertThat(cities, Matchers.notNullValue());
+ names = Sets.newHashSet();
+ for (Terms.Bucket city : cities.getBuckets()) {
+ names.add(city.getKeyAsString());
+ }
+ assertThat(names.contains("tel-aviv"), is(true));
+ assertThat((String) propertiesKeys[2], equalTo("1000.0-*"));
+ assertThat((long) propertiesDocCounts[2], equalTo(1l));
+ assertThat((Terms) propertiesCities[2], sameInstance(cities));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception { // geo_distance under a minDocCount(0) histogram must yield a zero-doc range bucket
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(geoDistance("geo_dist").field("location").point("52.3760, 4.894").addRange("0-100", 0.0, 100.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l)); // setup indexed values 0 and 2, so histogram bucket 1 is empty
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Range geoDistance = bucket.getAggregations().get("geo_dist");
+ assertThat(geoDistance, Matchers.notNullValue()); // null-check first — was previously asserted only after dereferencing getBuckets()
+ assertThat(geoDistance.getName(), equalTo("geo_dist"));
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(geoDistance.getBuckets());
+ assertThat(buckets.size(), is(1));
+ assertThat((String) buckets.get(0).getKey(), equalTo("0-100"));
+ assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(100.0));
+ assertThat(buckets.get(0).getFromAsString(), equalTo("0.0"));
+ assertThat(buckets.get(0).getToAsString(), equalTo("100.0"));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+ }
+
+ @Test
+ public void multiValues() throws Exception { // docs with multiple points: each doc is bucketed by its closest point, so counts stay 2/2/1
+ SearchResponse response = client().prepareSearch("idx-multi")
+ .addAggregation(geoDistance("amsterdam_rings")
+ .field("location")
+ .unit(DistanceUnit.KILOMETERS)
+ .distanceType(org.elasticsearch.common.geo.GeoDistance.ARC)
+ .point("52.3760, 4.894") // coords of amsterdam
+ .addUnboundedTo(500)
+ .addRange(500, 1000)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range geoDist = response.getAggregations().get("amsterdam_rings");
+ assertThat(geoDist, notNullValue());
+ assertThat(geoDist.getName(), equalTo("amsterdam_rings"));
+ List<? extends Bucket> buckets = geoDist.getBuckets();
+ assertThat(geoDist.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-500.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(500.0));
+ assertThat(bucket.getFromAsString(), equalTo("0.0"));
+ assertThat(bucket.getToAsString(), equalTo("500.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l)); // city1 and city2 each have one point inside 500km
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("500.0-1000.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(500.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(1000.0));
+ assertThat(bucket.getFromAsString(), equalTo("500.0"));
+ assertThat(bucket.getToAsString(), equalTo("1000.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l)); // city1/city2 second points fall in the middle ring
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(1l)); // city3
+ }
+
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
new file mode 100644
index 0000000000..dac686ea4a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java
@@ -0,0 +1,322 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.ObjectIntHashMap;
+import com.carrotsearch.hppc.ObjectIntMap;
+import com.carrotsearch.hppc.cursors.ObjectIntCursor;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid.Bucket;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GeoHashGridTests extends ElasticsearchIntegrationTest {
+
+    static ObjectIntMap<String> expectedDocCountsForGeoHash = null; // geohash -> expected doc count for "idx", filled for every precision in setup
+    static ObjectIntMap<String> multiValuedExpectedDocCountsForGeoHash = null; // geohash -> expected doc count for "multi_valued_idx"
+    static int highestPrecisionGeohash = 12; // finest geohash precision exercised by every test loop
+    static int numDocs = 100;
+
+    static String smallestGeoHash = null; // coarsest (shortest) geohash seen while indexing; used by filtered()
+
+    private static IndexRequestBuilder indexCity(String index, String name, List<String> latLon) throws Exception { // latLon may be null -> doc without a location
+        XContentBuilder source = jsonBuilder().startObject().field("city", name);
+        if (latLon != null) {
+            source = source.field("location", latLon);
+        }
+        source = source.endObject();
+        return client().prepareIndex(index, "type").setSource(source);
+    }
+
+    private static IndexRequestBuilder indexCity(String index, String name, String latLon) throws Exception { // single-point convenience overload
+        return indexCity(index, name, Arrays.<String>asList(latLon));
+    }
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx_unmapped");
+
+        assertAcked(prepareCreate("idx")
+                .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed"));
+
+        List<IndexRequestBuilder> cities = new ArrayList<>();
+        Random random = getRandom();
+        expectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2);
+        for (int i = 0; i < numDocs; i++) {
+            //generate random point
+            double lat = (180d * random.nextDouble()) - 90d;
+            double lng = (360d * random.nextDouble()) - 180d;
+            String randomGeoHash = GeoHashUtils.encode(lat, lng, highestPrecisionGeohash);
+            //Index at the highest resolution
+            cities.add(indexCity("idx", randomGeoHash, lat + ", " + lng));
+            expectedDocCountsForGeoHash.put(randomGeoHash, expectedDocCountsForGeoHash.getOrDefault(randomGeoHash, 0) + 1);
+            //Update expected doc counts for all resolutions..
+            for (int precision = highestPrecisionGeohash - 1; precision > 0; precision--) {
+                String hash = GeoHashUtils.encode(lat, lng, precision);
+                if ((smallestGeoHash == null) || (hash.length() < smallestGeoHash.length())) {
+                    smallestGeoHash = hash;
+                }
+                expectedDocCountsForGeoHash.put(hash, expectedDocCountsForGeoHash.getOrDefault(hash, 0) + 1);
+            }
+        }
+        indexRandom(true, cities);
+
+        assertAcked(prepareCreate("multi_valued_idx")
+                .addMapping("type", "location", "type=geo_point", "city", "type=string,index=not_analyzed"));
+
+        cities = new ArrayList<>();
+        multiValuedExpectedDocCountsForGeoHash = new ObjectIntHashMap<>(numDocs * 2);
+        for (int i = 0; i < numDocs; i++) {
+            final int numPoints = random.nextInt(4); // 0..3 points, so a doc may have no location at all
+            List<String> points = new ArrayList<>();
+            Set<String> geoHashes = new HashSet<>(); // de-duplicate hashes so each doc is counted once per cell
+            for (int j = 0; j < numPoints; ++j) {
+                double lat = (180d * random.nextDouble()) - 90d;
+                double lng = (360d * random.nextDouble()) - 180d;
+                points.add(lat + "," + lng);
+                // Update expected doc counts for all resolutions..
+                for (int precision = highestPrecisionGeohash; precision > 0; precision--) {
+                    final String geoHash = GeoHashUtils.encode(lat, lng, precision);
+                    geoHashes.add(geoHash);
+                }
+            }
+            cities.add(indexCity("multi_valued_idx", Integer.toString(i), points));
+            for (String hash : geoHashes) {
+                multiValuedExpectedDocCountsForGeoHash.put(hash, multiValuedExpectedDocCountsForGeoHash.getOrDefault(hash, 0) + 1);
+            }
+        }
+        indexRandom(true, cities);
+
+        ensureSearchable();
+    }
+
+
+    @Test
+    public void simple() throws Exception {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("idx")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            List<Bucket> buckets = geoGrid.getBuckets();
+            Object[] propertiesKeys = (Object[]) geoGrid.getProperty("_key");
+            Object[] propertiesDocCounts = (Object[]) geoGrid.getProperty("_count");
+            for (int i = 0; i < buckets.size(); i++) {
+                GeoHashGrid.Bucket cell = buckets.get(i);
+                String geohash = cell.getKeyAsString();
+
+                long bucketCount = cell.getDocCount();
+                int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertNotEquals(0, bucketCount); // was assertNotSame(bucketCount, 0): boxed Long vs Integer identity, could never fail
+                assertEquals("Geohash " + geohash + " has wrong doc count ",
+                        expectedBucketCount, bucketCount);
+                GeoPoint geoPoint = (GeoPoint) propertiesKeys[i];
+                assertThat(GeoHashUtils.encode(geoPoint.lat(), geoPoint.lon(), precision), equalTo(geohash));
+                assertThat((long) propertiesDocCounts[i], equalTo(bucketCount));
+            }
+        }
+    }
+
+    @Test
+    public void multivalued() throws Exception {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("multi_valued_idx")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+                String geohash = cell.getKeyAsString();
+
+                long bucketCount = cell.getDocCount();
+                int expectedBucketCount = multiValuedExpectedDocCountsForGeoHash.get(geohash);
+                assertNotEquals(0, bucketCount); // was assertNotSame(bucketCount, 0): no-op identity check, see simple()
+                assertEquals("Geohash " + geohash + " has wrong doc count ",
+                        expectedBucketCount, bucketCount);
+            }
+        }
+    }
+
+    @Test
+    public void filtered() throws Exception {
+        GeoBoundingBoxQueryBuilder bbox = new GeoBoundingBoxQueryBuilder("location");
+        bbox.topLeft(smallestGeoHash).bottomRight(smallestGeoHash).queryName("bbox"); // degenerate box: both corners on the coarsest observed cell
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("idx")
+                    .addAggregation(
+                            AggregationBuilders.filter("filtered").filter(bbox)
+                                    .subAggregation(
+                                            geohashGrid("geohashgrid")
+                                                    .field("location")
+                                                    .precision(precision)
+                                    )
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            Filter filter = response.getAggregations().get("filtered");
+
+            GeoHashGrid geoGrid = filter.getAggregations().get("geohashgrid");
+            for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+                String geohash = cell.getKeyAsString();
+                long bucketCount = cell.getDocCount();
+                int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertNotEquals(0, bucketCount); // was assertNotSame(bucketCount, 0): no-op identity check, see simple()
+                assertTrue("Buckets must be filtered", geohash.startsWith(smallestGeoHash));
+                assertEquals("Geohash " + geohash + " has wrong doc count ",
+                        expectedBucketCount, bucketCount);
+
+            }
+        }
+    }
+
+    @Test
+    public void unmapped() throws Exception {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("idx_unmapped")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            assertThat(geoGrid.getBuckets().size(), equalTo(0)); // unmapped field -> empty grid, never a failure
+        }
+
+    }
+
+    @Test
+    public void partiallyUnmapped() throws Exception {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+                String geohash = cell.getKeyAsString();
+
+                long bucketCount = cell.getDocCount();
+                int expectedBucketCount = expectedDocCountsForGeoHash.get(geohash);
+                assertNotEquals(0, bucketCount); // was assertNotSame(bucketCount, 0): no-op identity check, see simple()
+                assertEquals("Geohash " + geohash + " has wrong doc count ",
+                        expectedBucketCount, bucketCount);
+            }
+        }
+    }
+
+    @Test
+    public void testTopMatch() throws Exception {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            SearchResponse response = client().prepareSearch("idx")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .size(1)
+                            .shardSize(100)
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            //Check we only have one bucket with the best match for that resolution
+            assertThat(geoGrid.getBuckets().size(), equalTo(1));
+            for (GeoHashGrid.Bucket cell : geoGrid.getBuckets()) {
+                String geohash = cell.getKeyAsString();
+                long bucketCount = cell.getDocCount();
+                int expectedBucketCount = 0;
+                for (ObjectIntCursor<String> cursor : expectedDocCountsForGeoHash) { // scan all hashes at this precision for the max count
+                    if (cursor.key.length() == precision) {
+                        expectedBucketCount = Math.max(expectedBucketCount, cursor.value);
+                    }
+                }
+                assertNotEquals(0, bucketCount); // was assertNotSame(bucketCount, 0): no-op identity check, see simple()
+                assertEquals("Geohash " + geohash + " has wrong doc count ",
+                        expectedBucketCount, bucketCount);
+            }
+        }
+    }
+
+    @Test
+    // making sure this doesn't run into an OOME
+    public void sizeIsZero() {
+        for (int precision = 1; precision <= highestPrecisionGeohash; precision++) {
+            final int size = randomBoolean() ? 0 : randomIntBetween(1, Integer.MAX_VALUE);
+            final int shardSize = randomBoolean() ? -1 : 0;
+            SearchResponse response = client().prepareSearch("idx")
+                    .addAggregation(geohashGrid("geohashgrid")
+                            .field("location")
+                            .size(size)
+                            .shardSize(shardSize)
+                            .precision(precision)
+                    )
+                    .execute().actionGet();
+
+            assertSearchResponse(response);
+            GeoHashGrid geoGrid = response.getAggregations().get("geohashgrid");
+            assertThat(geoGrid.getBuckets().size(), greaterThanOrEqualTo(1));
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
new file mode 100644
index 0000000000..a3b5a985e3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GlobalTests.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GlobalTests extends ElasticsearchIntegrationTest {
+
+    static int numDocs;  // randomIntBetween(3, 20); documents hold values 1..numDocs
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        createIndex("idx2");  // NOTE(review): "idx2" is created but never indexed into or queried by these tests
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        numDocs = randomIntBetween(3, 20);
+        for (int i = 0; i < numDocs / 2; i++) {
+            builders.add(client().prepareIndex("idx", "type", "" + (i + 1)).setSource(jsonBuilder()  // was ""+i+1, which concatenated into ids like "01", "11", ... instead of "1", "2", ...
+                    .startObject()
+                    .field("value", i + 1)
+                    .field("tag", "tag1")
+                    .endObject()));
+        }
+        for (int i = numDocs / 2; i < numDocs; i++) {
+            builders.add(client().prepareIndex("idx", "type", "" + (i + 1)).setSource(jsonBuilder()  // same precedence fix as above
+                    .startObject()
+                    .field("value", i + 1)
+                    .field("tag", "tag2")
+                    .field("name", "name" + (i + 1))  // was "name" + i+1, which produced e.g. "name31" instead of "name4"
+                    .endObject()));
+        }
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    @Test
+    public void withStatsSubAggregator() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+                .addAggregation(global("global")
+                        .subAggregation(stats("value_stats").field("value")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Global global = response.getAggregations().get("global");
+        assertThat(global, notNullValue());
+        assertThat(global.getName(), equalTo("global"));
+        assertThat(global.getDocCount(), equalTo((long) numDocs));  // global ignores the query, so every doc is counted
+        assertThat((long) global.getProperty("_count"), equalTo((long) numDocs));
+        assertThat(global.getAggregations().asList().isEmpty(), is(false));
+
+        Stats stats = global.getAggregations().get("value_stats");
+        assertThat((Stats) global.getProperty("value_stats"), sameInstance(stats));
+        assertThat(stats, notNullValue());
+        assertThat(stats.getName(), equalTo("value_stats"));
+        long sum = 0;
+        for (int i = 0; i < numDocs; ++i) {
+            sum += i + 1;  // values are 1..numDocs
+        }
+        assertThat(stats.getAvg(), equalTo((double) sum / numDocs));
+        assertThat(stats.getMin(), equalTo(1.0));
+        assertThat(stats.getMax(), equalTo((double) numDocs));
+        assertThat(stats.getCount(), equalTo((long) numDocs));
+        assertThat(stats.getSum(), equalTo((double) sum));
+    }
+
+    @Test
+    public void nonTopLevel() throws Exception {
+
+        try {
+
+            client().prepareSearch("idx")
+                    .setQuery(QueryBuilders.termQuery("tag", "tag1"))
+                    .addAggregation(global("global")
+                            .subAggregation(global("inner_global")))
+                    .execute().actionGet();
+
+            fail("expected to fail executing non-top-level global aggregator. global aggregations are only allowed as top level " +  // added missing space: message previously read "top levelaggregations"
+                    "aggregations");
+
+        } catch (ElasticsearchException ese) {  // expected: global is only legal as a top-level aggregation
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
new file mode 100644
index 0000000000..2d1012f2b5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java
@@ -0,0 +1,1272 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongHashSet;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class HistogramTests extends ElasticsearchIntegrationTest {
+
+    private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+    private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+    static int numDocs;      // randomIntBetween(6, 20); docs hold values 1..numDocs
+    static int interval;     // histogram interval, randomIntBetween(2, 5)
+    static int numValueBuckets, numValuesBuckets; // expected bucket counts for the single-/multi-valued field
+    static long[] valueCounts, valuesCounts;      // expected per-bucket doc counts, indexed by bucket ordinal
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        createIndex("idx_unmapped");
+
+        numDocs = randomIntBetween(6, 20);
+        interval = randomIntBetween(2, 5);
+
+        numValueBuckets = numDocs / interval + 1;
+        valueCounts = new long[numValueBuckets];
+        for (int i = 0; i < numDocs; i++) {
+            final int bucket = (i + 1) / interval;  // doc i carries value i+1, so it lands in bucket (i+1)/interval
+            valueCounts[bucket]++;
+        }
+
+        numValuesBuckets = (numDocs + 1) / interval + 1;  // multi-valued field also holds i+2, extending the range by one
+        valuesCounts = new long[numValuesBuckets];
+        for (int i = 0; i < numDocs; i++) {
+            final int bucket1 = (i + 1) / interval;
+            final int bucket2 = (i + 2) / interval;
+            valuesCounts[bucket1]++;
+            if (bucket1 != bucket2) {  // count the doc once per distinct bucket, not once per value
+                valuesCounts[bucket2]++;
+            }
+        }
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+
+
+        for (int i = 0; i < numDocs; i++) {
+            builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, i + 1)
+                    .startArray(MULTI_VALUED_FIELD_NAME).value(i + 1).value(i + 2).endArray()
+                    .field("tag", "tag" + i)
+                    .endObject()));
+        }
+
+        assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+        for (int i = 0; i < 2; i++) {
+            builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+                    .startObject()
+                    .field(SINGLE_VALUED_FIELD_NAME, i * 2)  // values 0 and 2, so a histogram over this index can contain an empty middle bucket
+                    .endObject()));
+        }
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    @Test
+    public void singleValuedField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        assertThat(buckets.size(), equalTo(numValueBuckets));
+
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(i);
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));  // keys are bucket lower bounds
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));  // counts precomputed in setupSuiteScopeCluster()
+        }
+    }
+
+    public void singleValuedField_withOffset() throws Exception {  // NOTE(review): no @Test annotation, so this never runs; the flagged inconsistencies below need fixing before enabling it
+        int interval1 = 10;
+        int offset = 5;
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval1).offset(offset))
+                .execute().actionGet();
+
+        // from setup we have between 6 and 20 documents, each with value 1 in test field
+        int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1;  // NOTE(review): mixes the class-level random 'interval' with the local interval1=10 - TODO confirm intent
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets));
+
+        // first bucket should start at -5, contain 4 documents
+        Histogram.Bucket bucket = histo.getBuckets().get(0);
+        assertThat(bucket, notNullValue());
+        assertThat(((Number) bucket.getKey()).longValue(), equalTo(-5L));
+        assertThat(bucket.getDocCount(), equalTo(4L));
+
+        // last bucket should have (numDocs % interval + 1) docs
+        bucket = histo.getBuckets().get(0);  // NOTE(review): re-reads bucket 0 although the comment above says "last bucket" - presumably should index the last bucket
+        assertThat(bucket, notNullValue());
+        assertThat(((Number) bucket.getKey()).longValue(), equalTo(numDocs%interval1 + 5L));  // NOTE(review): expected key/count formulas look wrong for interval1=10 - verify before adding @Test
+        assertThat(bucket.getDocCount(), equalTo((numDocs % interval) + 1L));
+    }
+
+    /**
+     * Shift buckets by random offset between [2..interval]. From setup we have 1 doc per values from 1..numdocs.
+     * Special care needs to be taken for expectations on counts in first and last bucket.
+     */
+    @Test
+    public void singleValuedField_withRandomOffset() throws Exception {
+        int offset = randomIntBetween(2, interval);
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).offset(offset))
+                .execute().actionGet();
+        assertSearchResponse(response);
+        // shifting by offset>2 creates new extra bucket [0,offset-1]
+        // if offset is >= number of values in original last bucket, that effect is canceled
+        int expectedNumberOfBuckets = (offset >= (numDocs % interval + 1)) ? numValueBuckets : numValueBuckets + 1;
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(expectedNumberOfBuckets));
+
+        int docsCounted = 0;  // running total so the last bucket can absorb the remainder
+        for (int i = 0; i < expectedNumberOfBuckets; ++i) {
+            Histogram.Bucket bucket = histo.getBuckets().get(i);
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) ((i-1) * interval + offset)));  // first key is offset - interval
+            if (i==0) {
+                // first bucket
+                long expectedFirstBucketCount = offset-1;  // values 1..offset-1
+                assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount));
+                docsCounted += expectedFirstBucketCount;
+            } else if(i<expectedNumberOfBuckets-1) {
+                assertThat(bucket.getDocCount(), equalTo((long) interval));  // interior buckets are always full
+                docsCounted += interval;
+            } else {
+                assertThat(bucket.getDocCount(), equalTo((long) numDocs - docsCounted));  // last bucket takes whatever is left
+            }
+        }
+    }
+
+    @Test
+    public void singleValuedField_OrderedByKeyAsc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_ASC))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(i);  // ascending key order: bucket i has key i*interval
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+        }
+    }
+
+    @Test
+    public void singleValuedField_OrderedByKeyDesc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(numValueBuckets - i - 1);  // descending key order: walk the list from the end
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+        }
+    }
+
+    @Test
+    public void singleValuedField_OrderedByCountAsc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_ASC))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+        LongHashSet buckets = new LongHashSet();  // tracks keys already seen, to assert each appears exactly once
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        long previousCount = Long.MIN_VALUE;
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = histoBuckets.get(i);
+            assertThat(bucket, notNullValue());
+            long key = ((Number) bucket.getKey()).longValue();
+            assertEquals(0, key % interval);  // keys must fall on interval boundaries
+            assertTrue(buckets.add(key));     // no duplicate keys
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+            assertThat(bucket.getDocCount(), greaterThanOrEqualTo(previousCount));  // counts are non-decreasing
+            previousCount = bucket.getDocCount();
+        }
+    }
+
+    @Test
+    public void singleValuedField_OrderedByCountDesc() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.COUNT_DESC))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+        LongHashSet buckets = new LongHashSet();  // tracks keys already seen, to assert each appears exactly once
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> histoBuckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        long previousCount = Long.MAX_VALUE;
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = histoBuckets.get(i);
+            assertThat(bucket, notNullValue());
+            long key = ((Number) bucket.getKey()).longValue();
+            assertEquals(0, key % interval);  // keys must fall on interval boundaries
+            assertTrue(buckets.add(key));     // no duplicate keys
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[(int) (key / interval)]));
+            assertThat(bucket.getDocCount(), lessThanOrEqualTo(previousCount));  // counts are non-increasing
+            previousCount = bucket.getDocCount();
+        }
+    }
+
+    @Test
+    public void singleValuedField_WithSubAggregation() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                        .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+        Object[] propertiesKeys = (Object[]) histo.getProperty("_key");
+        Object[] propertiesDocCounts = (Object[]) histo.getProperty("_count");
+        Object[] propertiesCounts = (Object[]) histo.getProperty("sum.value");
+
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(i);
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+            assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            long s = 0;
+            for (int j = 0; j < numDocs; ++j) {  // recompute expected sum of all values that hash into bucket i
+                if ((j + 1) / interval == i) {
+                    s += j + 1;
+                }
+            }
+            assertThat(sum.getValue(), equalTo((double) s));
+            assertThat((long) propertiesKeys[i], equalTo((long) i * interval));   // property arrays mirror the bucket list
+            assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i]));
+            assertThat((double) propertiesCounts[i], equalTo((double) s));
+        }
+    }
+
+    @Test
+    public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                        .subAggregation(sum("sum")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(i);
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+            assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+            Sum sum = bucket.getAggregations().get("sum");  // sum has no field; it inherits the histogram's value source
+            assertThat(sum, notNullValue());
+            long s = 0;
+            for (int j = 0; j < numDocs; ++j) {  // recompute expected sum of all values that hash into bucket i
+                if ((j + 1) / interval == i) {
+                    s += j + 1;
+                }
+            }
+            assertThat(sum.getValue(), equalTo((double) s));
+        }
+    }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", true))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongHashSet visited = new LongHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = ((Number) bucket.getKey()).longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("sum", false))
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongHashSet visited = new LongHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = ((Number) bucket.getKey()).longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ assertThat(sum.getValue(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationAsc_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", true))
+ .subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongHashSet visited = new LongHashSet();
+ double previousSum = Double.NEGATIVE_INFINITY;
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = ((Number) bucket.getKey()).longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), greaterThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregationDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("stats.sum", false))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongHashSet visited = new LongHashSet();
+ double previousSum = Double.POSITIVE_INFINITY;
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = ((Number) bucket.getKey()).longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == b) {
+ s += j + 1;
+ }
+ }
+ assertThat(stats.getSum(), equalTo((double) s));
+ assertThat(stats.getSum(), lessThanOrEqualTo(previousSum));
+ previousSum = s;
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationDesc_DeepOrderPath() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.aggregation("filter>max", asc))
+ .subAggregation(filter("filter").filter(matchAllQuery())
+ .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ LongHashSet visited = new LongHashSet();
+ double prevMax = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ long key = ((Number) bucket.getKey()).longValue();
+ assertTrue(visited.add(key));
+ int b = (int) (key / interval);
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[b]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Filter filter = bucket.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(filter.getDocCount()));
+ Max max = filter.getAggregations().get("max");
+ assertThat(max, Matchers.notNullValue());
+ assertThat(max.getValue(), asc ? greaterThanOrEqualTo(prevMax) : lessThanOrEqualTo(prevMax));
+ prevMax = max.getValue();
+ }
+ }
+
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).script(new Script("_value + 1")).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 1) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ ++counts[(i + 2) / interval];
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ }
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_OrderedByKeyDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).interval(interval).order(Histogram.Order.KEY_DESC))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValuesBuckets));
+
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(numValuesBuckets - i - 1);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script(new Script("_value + 1")).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ }
+ }
+
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(
+ histogram("histo")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .script(new Script("_value + 1"))
+ .interval(interval)
+ .subAggregation(
+ terms(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values())).order(
+ Terms.Order.term(true)))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ Terms terms = bucket.getAggregations().get(MULTI_VALUED_FIELD_NAME);
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo(MULTI_VALUED_FIELD_NAME));
+ int minTerm = Math.max(2, key - 1);
+ int maxTerm = Math.min(numDocs + 2, (key / interval + 1) * interval);
+ assertThat(terms.getBuckets().size(), equalTo(maxTerm - minTerm + 1));
+ Iterator<Terms.Bucket> iter = terms.getBuckets().iterator();
+ for (int j = minTerm; j <= maxTerm; ++j) {
+ assertThat(iter.next().getKeyAsNumber().longValue(), equalTo((long) j));
+ }
+ }
+ }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).interval(interval)
+ .subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']")).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']")).interval(interval)
+ .subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i || (j + 2) / interval == i) {
+ s += j + 1;
+ s += j + 2;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(histogram("sub_histo").interval(1l)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ List<? extends Bucket> buckets = histo.getBuckets();
+ Histogram.Bucket bucket = buckets.get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ histo = bucket.getAggregations().get("sub_histo");
+ assertThat(histo, Matchers.notNullValue());
+ assertThat(histo.getName(), equalTo("sub_histo"));
+ assertThat(histo.getBuckets().isEmpty(), is(true));
+ }
+
+ @Test
+ public void singleValuedField_WithExtendedBounds() throws Exception {
+ int lastDataBucketKey = (numValueBuckets - 1) * interval;
+
+ // randomizing the number of buckets on the min bound
+ // (can sometimes fall within the data range, but more frequently will fall before the data range)
+ int addedBucketsLeft = randomIntBetween(0, numValueBuckets);
+ long boundsMinKey = addedBucketsLeft * interval;
+ if (frequently()) {
+ boundsMinKey = -boundsMinKey;
+ } else {
+ addedBucketsLeft = 0;
+ }
+ long boundsMin = boundsMinKey + randomIntBetween(0, interval - 1);
+
+ // randomizing the number of buckets on the max bound
+ // (can sometimes fall within the data range, but more frequently will fall after the data range)
+ int addedBucketsRight = randomIntBetween(0, numValueBuckets);
+ long boundsMaxKeyDelta = addedBucketsRight * interval;
+ if (rarely()) {
+ addedBucketsRight = 0;
+ boundsMaxKeyDelta = -boundsMaxKeyDelta;
+ }
+ long boundsMaxKey = lastDataBucketKey + boundsMaxKeyDelta;
+ long boundsMax = boundsMaxKey + randomIntBetween(0, interval - 1);
+
+
+ // it could be that the random bounds.min we chose ended up greater than bounds.max - this should cause an
+ // error
+ boolean invalidBoundsError = boundsMin > boundsMax;
+
+ // constructing the newly expected bucket list
+ int bucketsCount = numValueBuckets + addedBucketsLeft + addedBucketsRight;
+ long[] extendedValueCounts = new long[bucketsCount];
+ System.arraycopy(valueCounts, 0, extendedValueCounts, addedBucketsLeft, valueCounts.length);
+
+ SearchResponse response = null;
+ try {
+ response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .interval(interval)
+ .minDocCount(0)
+ .extendedBounds(boundsMin, boundsMax))
+ .execute().actionGet();
+
+ if (invalidBoundsError) {
+ fail("Expected an exception to be thrown when bounds.min is greater than bounds.max");
+ return;
+ }
+
+ } catch (Exception e) {
+ if (invalidBoundsError) {
+ // expected
+ return;
+ } else {
+ throw e;
+ }
+ }
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(bucketsCount));
+
+ long key = Math.min(boundsMinKey, 0);
+ for (int i = 0; i < bucketsCount; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i]));
+ key += interval;
+ }
+ }
+
+ /**
+ * see issue #9634, negative interval in histogram should raise exception
+ */
+ public void testExeptionOnNegativerInterval() {
+ try {
+ client().prepareSearch("empty_bucket_idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("Missing required field [interval]"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 1) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 1) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ ++counts[(i + 2) / interval];
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(histogram("histo").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").interval(interval)
+ .subAggregation(terms(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ final int numBuckets = (numDocs + 2) / interval - 2 / interval + 1;
+ final long[] counts = new long[(numDocs + 2) / interval + 1];
+ for (int i = 0; i < numDocs; ++i) {
+ final int bucket1 = (i + 2) / interval;
+ final int bucket2 = (i + 3) / interval;
+ ++counts[bucket1];
+ if (bucket1 != bucket2) {
+ ++counts[bucket2];
+ }
+ }
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets));
+
+ for (int i = 0; i < numBuckets; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ int key = ((2 / interval) + i) * interval;
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) key));
+ assertThat(bucket.getDocCount(), equalTo(counts[key / interval]));
+ Terms terms = bucket.getAggregations().get(MULTI_VALUED_FIELD_NAME);
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo(MULTI_VALUED_FIELD_NAME));
+ int minTerm = Math.max(2, key - 1);
+ int maxTerm = Math.min(numDocs + 2, (key / interval + 1) * interval);
+ assertThat(terms.getBuckets().size(), equalTo(maxTerm - minTerm + 1));
+ Iterator<Terms.Bucket> iter = terms.getBuckets().iterator();
+ for (int j = minTerm; j <= maxTerm; ++j) {
+ assertThat(iter.next().getKeyAsNumber().longValue(), equalTo((long) j));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ assertThat(histo.getBuckets().size(), equalTo(numValueBuckets));
+
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Histogram.Bucket> buckets = new ArrayList<Histogram.Bucket>(histo.getBuckets());
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i) {
+ s += j + 1;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "']").interval(interval))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInheritedOldScriptAPi() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").script("doc['" + MULTI_VALUED_FIELD_NAME + "']").interval(interval)
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValuesBuckets));
+
+ for (int i = 0; i < numValuesBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valuesCounts[i]));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long s = 0;
+ for (int j = 0; j < numDocs; ++j) {
+ if ((j + 1) / interval == i || (j + 2) / interval == i) {
+ s += j + 1;
+ s += j + 2;
+ }
+ }
+ assertThat(sum.getValue(), equalTo((double) s));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
new file mode 100644
index 0000000000..59674eac1c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/IPv4RangeTests.java
@@ -0,0 +1,1228 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.mapper.ip.IpFieldMapper;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.ipRange;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class IPv4RangeTests extends ElasticsearchIntegrationTest {
+
+    /**
+     * Seeds the suite-scope cluster used by every test in this class:
+     * "idx" holds 255 docs where doc i has ip = "10.0.0.i",
+     * ips = ["10.0.0.i", "10.0.0.(i+1)"] and value = 1 for i &lt; 100,
+     * 2 for i &lt; 200, 3 otherwise (100 1's, 100 2's, 55 3's);
+     * "idx_unmapped" is created empty (no ip mapping);
+     * "empty_bucket_idx" holds 2 docs, both with ip "10.0.0.5".
+     */
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        {
+            assertAcked(prepareCreate("idx")
+                    .addMapping("type", "ip", "type=ip", "ips", "type=ip"));
+            IndexRequestBuilder[] builders = new IndexRequestBuilder[255]; // TODO randomize the size?
+            // TODO randomize the values in the docs?
+            for (int i = 0; i < builders.length; i++) {
+                builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+                        .startObject()
+                        .field("ip", "10.0.0." + (i))
+                        .startArray("ips").value("10.0.0." + i).value("10.0.0." + (i + 1)).endArray()
+                        .field("value", (i < 100 ? 1 : i < 200 ? 2 : 3)) // 100 1's, 100 2's, and 55 3's
+                        .endObject());
+            }
+            indexRandom(true, builders);
+            createIndex("idx_unmapped");
+        }
+        {
+            assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "ip", "type=ip"));
+            List<IndexRequestBuilder> builders = new ArrayList<>();
+            for (int i = 0; i < 2; i++) {
+                builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+                        .startObject()
+                        .field("value", i * 2)
+                        .field("ip", "10.0.0.5")
+                        .endObject()));
+            }
+            indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+        }
+        ensureSearchable();
+    }
+
+    /**
+     * ip_range over the single-valued "ip" field with an open lower range, a
+     * bounded middle range and an open upper range. Docs carry ips
+     * 10.0.0.0 .. 10.0.0.254, so the buckets hold 100 / 100 / 55 docs
+     * ("to" is exclusive). Also verifies the numeric from/to endpoints and
+     * their string representations (null on the unbounded side).
+     */
+    @Test
+    public void singleValueField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3));
+
+        // *-10.0.0.100: unbounded lower end -> -Infinity, no string form.
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        // 10.0.0.200-*: unbounded upper end -> +Infinity, no string form.
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /**
+     * ip_range using CIDR mask ranges that split 10.0.0.0/24 into two /25
+     * halves: 10.0.0.0/25 covers .0-.127 (128 docs) and 10.0.0.128/25 covers
+     * .128-.254 of the indexed docs (127 docs).
+     */
+    @Test
+    public void singleValueField_WithMaskRange() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addMaskRange("10.0.0.0/25")
+                        .addMaskRange("10.0.0.128/25"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(buckets.size(), equalTo(3 - 1)); // two mask ranges -> two buckets
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.0/25"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.0")));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.0"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.128")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.128"));
+        assertThat(bucket.getDocCount(), equalTo(128l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.128/25"));
+        // Compare endpoints as longs directly instead of round-tripping the
+        // double value through a (long) cast.
+        assertThat(((Number) bucket.getFrom()).longValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.128")));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.128"));
+        assertThat(((Number) bucket.getTo()).longValue(), equalTo(IpFieldMapper.ipToLong("10.0.1.0"))); // range is exclusive on the to side
+        assertThat(bucket.getToAsString(), equalTo("10.0.1.0"));
+        assertThat(bucket.getDocCount(), equalTo(127l)); // include 10.0.0.128
+    }
+
+    /**
+     * Same three ranges as {@code singleValueField} but with user-supplied
+     * bucket keys r1/r2/r3 instead of the generated "from-to" keys; endpoint
+     * values and doc counts (100 / 100 / 55) are unchanged.
+     */
+    @Test
+    public void singleValueField_WithCustomKey() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("r1", "10.0.0.100")
+                        .addRange("r2", "10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("r3", "10.0.0.200"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r1"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r2"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("r3"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /**
+     * Adds a "sum" sub-aggregation over the "value" field. Per the suite
+     * setup value is 1 / 2 / 3 for the three ip ranges, so the expected sums
+     * are 100*1, 100*2 and 55*3. Also verifies the getProperty() projections
+     * (_key, _count, sum.value) mirror the per-bucket assertions.
+     */
+    @Test
+    public void singleValuedField_WithSubAggregation() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200")
+                        .subAggregation(sum("sum").field("value")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+        Object[] propertiesKeys = (Object[]) range.getProperty("_key");
+        Object[] propertiesDocCounts = (Object[]) range.getProperty("_count");
+        Object[] propertiesCounts = (Object[]) range.getProperty("sum.value");
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Sum sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) 100)); // 100 docs * value 1
+        assertThat((String) propertiesKeys[0], equalTo("*-10.0.0.100"));
+        assertThat((long) propertiesDocCounts[0], equalTo(100l));
+        assertThat((double) propertiesCounts[0], equalTo((double) 100));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) 200)); // 100 docs * value 2
+        assertThat((String) propertiesKeys[1], equalTo("10.0.0.100-10.0.0.200"));
+        assertThat((long) propertiesDocCounts[1], equalTo(100l));
+        assertThat((double) propertiesCounts[1], equalTo((double) 200));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+        sum = bucket.getAggregations().get("sum");
+        assertThat(sum, notNullValue());
+        assertThat(sum.getValue(), equalTo((double) 55*3)); // 55 docs * value 3
+        assertThat((String) propertiesKeys[2], equalTo("10.0.0.200-*"));
+        assertThat((long) propertiesDocCounts[2], equalTo(55l));
+        assertThat((double) propertiesCounts[2], equalTo((double) 55 * 3));
+    }
+
+    /**
+     * "max" sub-aggregation with no field of its own, so it inherits the
+     * parent's "ip" field. The expected maxima are the largest ip inside each
+     * bucket: .99, .199 and .254 (the highest indexed address).
+     */
+    @Test
+    public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200")
+                        .subAggregation(max("max")))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+    }
+
+    /**
+     * Applies the identity value script "_value" on top of the "ip" field;
+     * results must be identical to the plain singleValueField case
+     * (buckets of 100 / 100 / 55 docs).
+     */
+    @Test
+    public void singleValuedField_WithValueScript() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ip").script(new Script("_value")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /*
+     Per-doc "ips" pairs (doc i holds [10.0.0.i, 10.0.0.(i+1)]), which is why
+     multi-valued tests count one extra doc in each range whose lower boundary
+     a pair straddles:
+       [0, 1]
+       [1, 2]
+       [2, 3]
+       ...
+       [99, 100]
+       [100, 101]
+       [101, 102]
+       ...
+       [199, 200]
+       [200, 201]
+       [201, 202]
+       ...
+       [254, 255]
+       [255, 256]
+     */
+
+    /**
+     * Same three ranges over the multi-valued "ips" field. Docs 99 and 199
+     * carry pairs that straddle a range boundary, so the middle and last
+     * buckets each gain one doc: 100 / 101 / 56.
+     */
+    @Test
+    public void multiValuedField() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(ipRange("range")
+                        .field("ips")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l)); // doc 99 also matches via ips=[99, 100]
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l)); // doc 199 also matches via ips=[199, 200]
+    }
+
+    /**
+     * Identity value script "_value" on the multi-valued "ips" field; counts
+     * must match the plain multiValuedField case (100 / 101 / 56).
+     */
+    @Test
+    public void multiValuedField_WithValueScript() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ips").script(new Script("_value")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+    }
+
+    /**
+     * Multi-valued "ips" with identity value script and an inherited "max"
+     * sub-aggregation. Because each doc i carries [i, i+1], the per-bucket
+     * maxima are .100, .200 and .255 (one past the largest indexed "ip").
+     */
+    @Test
+    public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ips").script(new Script("_value")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute()
+                .actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+    }
+
+    /**
+     * Script-sourced aggregation ("doc['ip'].value" instead of a field);
+     * results must be identical to the field-based singleValueField case
+     * (100 / 100 / 55 docs).
+     */
+    @Test
+    public void script_SingleValue() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script(new Script("doc['ip'].value")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /**
+     * Script-sourced ("doc['ip'].value") aggregation with an inherited "max"
+     * sub-aggregation; maxima per bucket are .99, .199 and .254, matching the
+     * field-based singleValuedField_WithSubAggregation_Inherited case.
+     */
+    @Test
+    public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script(new Script("doc['ip'].value")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute()
+                .actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+    }
+
+    /**
+     * Script returning the whole multi-valued list ("doc['ips'].values");
+     * counts match the field-based multiValuedField case (100 / 101 / 56).
+     */
+    @Test
+    public void script_MultiValued() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script(new Script("doc['ips'].values")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+    }
+
+    /**
+     * Same multi-valued script ip_range as {@code script_MultiValued}, but with a
+     * {@code max} sub-aggregation that inherits the script's values; each bucket's
+     * max is asserted to be the highest IP (as a long) falling in that bucket.
+     */
+    @Test
+    public void script_MultiValued_WithAggregatorInherited() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script(new Script("doc['ips'].values")).addUnboundedTo("10.0.0.100")
+                                .addRange("10.0.0.100", "10.0.0.200").addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute()
+                .actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        // NOTE(review): max values (.100/.200/.255) are fixture-dependent — the
+        // multi-valued field appears to include the bucket's upper edge; confirm.
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+    }
+
+    /**
+     * ip_range against an index where the field is unmapped: all three buckets are
+     * still produced with correct keys and bounds, but every doc count is zero.
+     */
+    @Test
+    public void unmapped() throws Exception {
+        SearchResponse response = client().prepareSearch("idx_unmapped")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(0l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(0l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(0l));
+    }
+
+    /**
+     * ip_range across a mapped and an unmapped index: the unmapped index
+     * contributes nothing, so the counts equal the single-valued mapped case
+     * (100 / 100 / 55).
+     */
+    @Test
+    public void partiallyUnmapped() throws Exception {
+        SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+                .addAggregation(ipRange("range")
+                        .field("ip")
+                        .addUnboundedTo("10.0.0.100")
+                        .addRange("10.0.0.100", "10.0.0.200")
+                        .addUnboundedFrom("10.0.0.200"))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /**
+     * A keyed ip_range nested under a histogram with {@code minDocCount(0)}:
+     * the empty histogram bucket must still carry the sub-aggregation, with the
+     * keyed range bucket "r1" present and an explicit doc count of 0.
+     */
+    @Test
+    public void emptyAggregation() throws Exception {
+        SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+                .setQuery(matchAllQuery())
+                .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+                        .subAggregation(ipRange("ip_range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")))
+                .execute().actionGet();
+
+        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+        Histogram histo = searchResponse.getAggregations().get("histo");
+        assertThat(histo, Matchers.notNullValue());
+        // Bucket 1 is the empty histogram bucket (kept alive by minDocCount(0)).
+        Histogram.Bucket bucket = histo.getBuckets().get(1);
+        assertThat(bucket, Matchers.notNullValue());
+
+        Range range = bucket.getAggregations().get("ip_range");
+        // TODO: use diamond once JI-9019884 is fixed
+        List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+        assertThat(range, Matchers.notNullValue());
+        assertThat(range.getName(), equalTo("ip_range"));
+        assertThat(buckets.size(), is(1));
+        assertThat((String) buckets.get(0).getKey(), equalTo("r1"));
+        assertThat(buckets.get(0).getFromAsString(), equalTo("10.0.0.1"));
+        assertThat(buckets.get(0).getToAsString(), equalTo("10.0.0.10"));
+        assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+    }
+
+    /*
+     * Deprecated string-based script API variant of the single-valued
+     * value-script test ("_value" on field "ip"); same expected buckets
+     * (100 / 100 / 55). TODO Remove in 2.0
+     */
+    @Test
+    public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ip").script("_value").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /*
+     * Deprecated string-based script API variant of the multi-valued
+     * value-script test ("_value" on field "ips"); expected counts
+     * 100 / 101 / 56 since a doc can fall in more than one bucket.
+     * TODO Remove in 2.0
+     */
+    @Test
+    public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ips").script("_value").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+    }
+
+    /*
+     * Deprecated string-based script API variant: multi-valued field with a
+     * "_value" script and an inherited max sub-aggregation per bucket.
+     * TODO Remove in 2.0
+     */
+    @Test
+    public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").field("ips").script("_value").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        // NOTE(review): expected max values depend on the index fixture — confirm.
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+    }
+
+    /*
+     * Deprecated string-based script API variant of the single-valued script
+     * test ("doc['ip'].value"); expected counts 100 / 100 / 55.
+     * TODO Remove in 2.0
+     */
+    @Test
+    public void script_SingleValueOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script("doc['ip'].value").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+    }
+
+    /*
+     * Deprecated string-based script API variant: single-valued script
+     * ("doc['ip'].value") with an inherited max sub-aggregation; each bucket's
+     * max is the highest single IP inside that bucket (.99 / .199 / .254).
+     * TODO Remove in 2.0
+     */
+    @Test
+    public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script("doc['ip'].value").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.99")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.199")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(55l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, notNullValue());
+        assertThat(max.getValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.254")));
+    }
+
+    /*
+     * Deprecated string-based script API variant of the multi-valued script
+     * test ("doc['ips'].values"); expected counts 100 / 101 / 56.
+     * TODO Remove in 2.0
+     */
+    @Test
+    public void script_MultiValuedOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script("doc['ips'].values").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+    }
+
+    /*
+     * Deprecated string-based script API variant: multi-valued script
+     * ("doc['ips'].values") with an inherited max sub-aggregation
+     * (.100 / .200 / .255 as bucket maxima). TODO Remove in 2.0
+     */
+    @Test
+    public void script_MultiValued_WithAggregatorInheritedOldScriptAPI() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        ipRange("range").script("doc['ips'].values").addUnboundedTo("10.0.0.100").addRange("10.0.0.100", "10.0.0.200")
+                                .addUnboundedFrom("10.0.0.200").subAggregation(max("max"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Range range = response.getAggregations().get("range");
+        assertThat(range, notNullValue());
+        assertThat(range.getName(), equalTo("range"));
+        List<? extends Bucket> buckets = range.getBuckets();
+        assertThat(range.getBuckets().size(), equalTo(3));
+
+        Range.Bucket bucket = buckets.get(0);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("*-10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+        assertThat(bucket.getFromAsString(), nullValue());
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getDocCount(), equalTo(100l));
+        Max max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.100")));
+
+        bucket = buckets.get(1);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.100-10.0.0.200"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.100"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.100")));
+        assertThat(bucket.getToAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(bucket.getDocCount(), equalTo(101l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.200")));
+
+        bucket = buckets.get(2);
+        assertThat(bucket, notNullValue());
+        assertThat((String) bucket.getKey(), equalTo("10.0.0.200-*"));
+        assertThat(bucket.getFromAsString(), equalTo("10.0.0.200"));
+        assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo((double) IpFieldMapper.ipToLong("10.0.0.200")));
+        assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(bucket.getToAsString(), nullValue());
+        assertThat(bucket.getDocCount(), equalTo(56l));
+        max = bucket.getAggregations().get("max");
+        assertThat(max, Matchers.notNullValue());
+        assertThat((long) max.getValue(), equalTo(IpFieldMapper.ipToLong("10.0.0.255")));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
new file mode 100644
index 0000000000..b825259ee3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsTests.java
@@ -0,0 +1,1527 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class LongTermsTests extends AbstractTermsTests {
+
+ private static final int NUM_DOCS = 5; // TODO randomize the size?
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+ private static HashMap<Long, Map<String, Object>> expectedMultiSortBuckets;
+
    /**
     * Builds the suite-scoped fixture indices:
     * <ul>
     *   <li>{@code idx/type}: NUM_DOCS docs with {@code l_value=i}, {@code l_values=[i, i+1]}
     *       and a {@code num_tag} used by the order-by-single-bucket-sub-agg tests</li>
     *   <li>{@code idx/high_card_type}: 100 docs with the same field layout</li>
     *   <li>{@code idx_unmapped}: empty index, the terms field is unmapped</li>
     *   <li>{@code empty_bucket_idx}: docs at values 0 and 2, so a histogram with interval 1
     *       contains an empty middle bucket</li>
     *   <li>{@code sort_idx}: docs added via {@link #getMultiSortDocs(List)}</li>
     * </ul>
     */
    @Override
    public void setupSuiteScopeCluster() throws Exception {
        createIndex("idx");
        IndexRequestBuilder[] lowCardBuilders = new IndexRequestBuilder[NUM_DOCS];
        for (int i = 0; i < lowCardBuilders.length; i++) {
            lowCardBuilders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
                    .startObject()
                    .field(SINGLE_VALUED_FIELD_NAME, i)
                    .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
                    .field("num_tag", i < lowCardBuilders.length / 2 + 1 ? 1 : 0) // used to test order by single-bucket sub agg
                    .endObject());
        }
        indexRandom(randomBoolean(), lowCardBuilders);
        IndexRequestBuilder[] highCardBuilders = new IndexRequestBuilder[100]; // TODO randomize the size?
        for (int i = 0; i < highCardBuilders.length; i++) {
            highCardBuilders[i] = client().prepareIndex("idx", "high_card_type").setSource(jsonBuilder()
                    .startObject()
                    .field(SINGLE_VALUED_FIELD_NAME, i)
                    .startArray(MULTI_VALUED_FIELD_NAME).value(i).value(i + 1).endArray()
                    .endObject());

        }
        indexRandom(true, highCardBuilders);
        createIndex("idx_unmapped");

        // explicit integer mapping so the empty-bucket histogram test has a numeric field
        assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
        List<IndexRequestBuilder> builders = new ArrayList<>();
        for (int i = 0; i < 2; i++) {
            builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
                    .startObject()
                    .field(SINGLE_VALUED_FIELD_NAME, i * 2)
                    .endObject()));
        }

        getMultiSortDocs(builders);

        indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
        ensureSearchable();
    }
+
    /**
     * Populates {@code expectedMultiSortBuckets} with the per-term expectations
     * (_count, avg of "l", sum of "d") and appends the matching documents for the
     * "sort_idx" index to {@code builders}. The doc set is crafted so that several
     * terms tie on one sort key and are broken by another, which the multi-criteria
     * order tests rely on.
     */
    private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOException {
        expectedMultiSortBuckets = new HashMap<>();
        Map<String, Object> bucketProps = new HashMap<>();
        bucketProps.put("_term", 1l);
        bucketProps.put("_count", 3l);
        bucketProps.put("avg_l", 1d);
        bucketProps.put("sum_d", 6d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 2l);
        bucketProps.put("_count", 3l);
        bucketProps.put("avg_l", 2d);
        bucketProps.put("sum_d", 6d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 3l);
        bucketProps.put("_count", 2l);
        bucketProps.put("avg_l", 3d);
        bucketProps.put("sum_d", 3d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 4l);
        bucketProps.put("_count", 2l);
        bucketProps.put("avg_l", 3d);
        bucketProps.put("sum_d", 4d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 5l);
        bucketProps.put("_count", 2l);
        bucketProps.put("avg_l", 5d);
        bucketProps.put("sum_d", 3d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 6l);
        bucketProps.put("_count", 1l);
        bucketProps.put("avg_l", 5d);
        bucketProps.put("sum_d", 1d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);
        bucketProps = new HashMap<>();
        bucketProps.put("_term", 7l);
        bucketProps.put("_count", 1l);
        bucketProps.put("avg_l", 5d);
        bucketProps.put("sum_d", 1d);
        expectedMultiSortBuckets.put((Long) bucketProps.get("_term"), bucketProps);

        // documents matching the expectations above
        createIndex("sort_idx");
        for (int i = 1; i <= 3; i++) {
            builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                    .startObject()
                    .field(SINGLE_VALUED_FIELD_NAME, 1)
                    .field("l", 1)
                    .field("d", i)
                    .endObject()));
            builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                    .startObject()
                    .field(SINGLE_VALUED_FIELD_NAME, 2)
                    .field("l", 2)
                    .field("d", i)
                    .endObject()));
        }
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 3)
                .field("l", 3)
                .field("d", 1)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 3)
                .field("l", 3)
                .field("d", 2)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 4)
                .field("l", 3)
                .field("d", 1)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 4)
                .field("l", 3)
                .field("d", 3)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 5)
                .field("l", 5)
                .field("d", 1)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 5)
                .field("l", 5)
                .field("d", 2)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 6)
                .field("l", 5)
                .field("d", 1)
                .endObject()));
        builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(jsonBuilder()
                .startObject()
                .field(SINGLE_VALUED_FIELD_NAME, 7)
                .field("l", 5)
                .field("d", 1)
                .endObject()));
    }
+
+ private String key(Terms.Bucket bucket) {
+ return bucket.getKeyAsString();
+ }
+
    @Test
    // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard
    public void sizeIsZero() {
        // size(0) means "unbounded"; with 100 distinct terms we expect all of them back
        SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .minDocCount(randomInt(1))
                        .size(0))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(100));
    }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueFieldWithFiltering() throws Exception {
+ long includes[] = { 1, 2, 3, 98 };
+ long excludes[] = { -1, 2, 4 };
+ long empty[] = {};
+ testIncludeExcludeResults(includes, empty, new long[] { 1, 2, 3 });
+ testIncludeExcludeResults(includes, excludes, new long[] { 1, 3 });
+ testIncludeExcludeResults(empty, excludes, new long[] { 0, 1, 3 });
+ }
+
    /**
     * Runs a terms aggregation with the given include/exclude lists and asserts that
     * exactly the {@code expecteds} terms come back, each with a single document.
     */
    private void testIncludeExcludeResults(long[] includes, long[] excludes, long[] expecteds) {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .include(includes)
                        .exclude(excludes)
                        .collectMode(randomFrom(SubAggCollectionMode.values())))
                .execute().actionGet();
        assertSearchResponse(response);
        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(expecteds.length));

        for (int i = 0; i < expecteds.length; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + expecteds[i]);
            assertThat(bucket, notNullValue());
            assertThat(bucket.getDocCount(), equalTo(1l));
        }
    }
+
    /** With 100 distinct terms and size(20), exactly the 20 lowest terms are returned. */
    @Test
    public void singleValueField_WithMaxSize() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("high_card_type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .size(20)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(20));

        for (int i = 0; i < 20; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
            assertThat(bucket.getDocCount(), equalTo(1l));
        }
    }
+
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.term(true)))
+ .execute().actionGet();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.term(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
    /**
     * Each term bucket carries a sum over the multi-valued field of its single doc:
     * doc i has values [i, i+1], so the sum is 2i+1. Also verifies the aggregation
     * property paths (_key, _count, sum.value).
     */
    @Test
    public void singleValuedField_WithSubAggregation() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .subAggregation(sum("sum").field(MULTI_VALUED_FIELD_NAME)))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));
        Object[] propertiesKeys = (Object[]) terms.getProperty("_key");
        Object[] propertiesDocCounts = (Object[]) terms.getProperty("_count");
        Object[] propertiesCounts = (Object[]) terms.getProperty("sum.value");

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
            assertThat(bucket.getDocCount(), equalTo(1l));
            Sum sum = bucket.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            assertThat((long) sum.getValue(), equalTo(i+i+1l));
            assertThat((long) propertiesKeys[i], equalTo((long) i));
            assertThat((long) propertiesDocCounts[i], equalTo(1l));
            assertThat((double) propertiesCounts[i], equalTo((double) i + i + 1l));
        }
    }
+
    /**
     * The sum sub-aggregation declares no field, so it inherits the terms source
     * (the single-valued field); each bucket's sum is therefore the term itself.
     */
    @Test
    public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .subAggregation(sum("sum")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
            assertThat(bucket.getDocCount(), equalTo(1l));
            Sum sum = bucket.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            assertThat(sum.getValue(), equalTo((double) i));
        }
    }
+
    /**
     * A value script shifts every term by +1; because the script produces doubles,
     * the bucket keys are formatted as "1.0" .. "5.0".
     */
    @Test
    public void singleValuedField_WithValueScript() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("_value + 1")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + (i+1d)));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
            assertThat(bucket.getDocCount(), equalTo(1l));
        }
    }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
    /**
     * Value script shifts every multi-field value by -1, producing double keys
     * "-1.0" .. "4.0"; end values occur once, inner values twice.
     */
    @Test
    public void multiValuedField_WithValueScript() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(MULTI_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("_value - 1")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(6));

        for (int i = 0; i < 6; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + (i - 1d));
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + (i-1d)));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i-1));
            if (i == 0 || i == 5) {
                assertThat(bucket.getDocCount(), equalTo(1l));
            } else {
                assertThat(bucket.getDocCount(), equalTo(2l));
            }
        }
    }
+
    /**
     * The script collapses every value (all < 1000) to 1.0, so duplicated
     * per-document values must be deduplicated: one bucket, five docs.
     */
    @Test
    public void multiValuedField_WithValueScript_NotUnique() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(MULTI_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("floor(_value / 1000 + 1)")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(1));

        Terms.Bucket bucket = terms.getBucketByKey("1.0");
        assertThat(bucket, notNullValue());
        assertThat(key(bucket), equalTo("1.0"));
        assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
        assertThat(bucket.getDocCount(), equalTo(5l));
    }
+
+ /*
+
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+
+ 1 - count: 1 - sum: 1
+ 2 - count: 2 - sum: 4
+ 3 - count: 2 - sum: 6
+ 4 - count: 2 - sum: 8
+ 5 - count: 2 - sum: 10
+ 6 - count: 1 - sum: 6
+
+ */
+
    /**
     * Values are shifted by +1 (double keys "1.0".."6.0") and the inherited sum
     * sub-aggregation sums the shifted values of every doc in the bucket: doc j
     * holds raw values j and j+1, and contributes to bucket i+1 when i == j or
     * i == j+1, adding (j+1) + (j+2) to that bucket's sum.
     */
    @Test
    public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(MULTI_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("_value + 1"))
                        .subAggregation(sum("sum")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(6));

        for (int i = 0; i < 6; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + (i+1d)));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i+1));
            final long count = i == 0 || i == 5 ? 1 : 2;
            double s = 0;
            for (int j = 0; j < NUM_DOCS; ++j) {
                if (i == j || i == j+1) {
                    // both of doc j's values, after the +1 shift
                    s += j + 1;
                    s += j+1 + 1;
                }
            }
            assertThat(bucket.getDocCount(), equalTo(count));
            Sum sum = bucket.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            assertThat(sum.getValue(), equalTo(s));
        }
    }
+
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+.script(
+ new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo((double) i));
+ }
+ }
+
    /**
     * Script returns the whole multi-valued doc field; buckets mirror
     * {@link #multiValuedField()}: values 0 and 5 once, 1..4 twice.
     */
    @Test
    public void script_MultiValued() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(6));

        for (int i = 0; i < 6; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
            if (i == 0 || i == 5) {
                assertThat(bucket.getDocCount(), equalTo(1l));
            } else {
                assertThat(bucket.getDocCount(), equalTo(2l));
            }
        }
    }
+
    @Test
    public void script_MultiValued_WithAggregatorInherited_NoExplicitType() throws Exception {

        // since no type is explicitly defined, es will assume all values returned by the script to be strings (bytes),
        // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.

        try {

            client().prepareSearch("idx").setTypes("type")
                    .addAggregation(terms("terms")
                            .collectMode(randomFrom(SubAggCollectionMode.values()))
                            .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))
                            .subAggregation(sum("sum")))
                    .execute().actionGet();

            fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");

        } catch (Exception e) {
            // expected failure; the exact exception type is not pinned down here
        }
    }
+
    /**
     * Same script source as the test above, but with an explicit LONG value type so
     * the inherited sum sub-aggregation is valid. Doc j holds values j and j+1 and
     * contributes to bucket i when i == j or i == j+1, adding j + (j+1) to the sum.
     */
    @Test
    public void script_MultiValued_WithAggregatorInherited_WithExplicitType() throws Exception {
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))
                        .valueType(Terms.ValueType.LONG)
                        .subAggregation(sum("sum")))
                .execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(6));

        for (int i = 0; i < 6; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
            final long count = i == 0 || i == 5 ? 1 : 2;
            double s = 0;
            for (int j = 0; j < NUM_DOCS; ++j) {
                if (i == j || i == j+1) {
                    s += j;
                    s += j+1;
                }
            }
            assertThat(bucket.getDocCount(), equalTo(count));
            Sum sum = bucket.getAggregations().get("sum");
            assertThat(sum, notNullValue());
            assertThat(sum.getValue(), equalTo(s));
        }
    }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .size(randomInt(5))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
    /**
     * "empty_bucket_idx" has docs at values 0 and 2; a histogram with interval 1 and
     * minDocCount 0 therefore contains an empty middle bucket (index 1, key 1), whose
     * nested terms aggregation must exist but hold no buckets.
     */
    @Test
    public void emptyAggregation() throws Exception {
        SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
                        .subAggregation(terms("terms")))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
        Histogram histo = searchResponse.getAggregations().get("histo");
        assertThat(histo, Matchers.notNullValue());
        Histogram.Bucket bucket = histo.getBuckets().get(1);
        assertThat(bucket, Matchers.notNullValue());

        Terms terms = bucket.getAggregations().get("terms");
        assertThat(terms, Matchers.notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().isEmpty(), is(true));
    }
+
    /**
     * Orders buckets by the avg_i sub-aggregation; since each bucket holds a single
     * doc, avg_i equals the term itself and ascending agg order matches term order.
     */
    @Test
    public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
        boolean asc = true;
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .order(Terms.Order.aggregation("avg_i", asc))
                        .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
                ).execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getDocCount(), equalTo(1l));
            Avg avg = bucket.getAggregations().get("avg_i");
            assertThat(avg, notNullValue());
            assertThat(avg.getValue(), equalTo((double) i));
        }
    }
+
    /**
     * Same agg-ordered terms as above, plus a terms sub-aggregation on the
     * multi-valued field: bucket i's doc holds values [i, i+1], so the sub-terms
     * aggregation always has exactly two single-doc buckets, i and i+1.
     */
    @Test
    public void singleValuedField_OrderedBySingleValueSubAggregationAscWithTermsSubAgg() throws Exception {
        boolean asc = true;
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("terms")
                        .field(SINGLE_VALUED_FIELD_NAME)
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .order(Terms.Order.aggregation("avg_i", asc))
                        .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME)).subAggregation(terms("subTerms").field(MULTI_VALUED_FIELD_NAME)
                                .collectMode(randomFrom(SubAggCollectionMode.values())))
                ).execute().actionGet();

        assertSearchResponse(response);

        Terms terms = response.getAggregations().get("terms");
        assertThat(terms, notNullValue());
        assertThat(terms.getName(), equalTo("terms"));
        assertThat(terms.getBuckets().size(), equalTo(5));

        for (int i = 0; i < 5; i++) {
            Terms.Bucket bucket = terms.getBucketByKey("" + i);
            assertThat(bucket, notNullValue());
            assertThat(key(bucket), equalTo("" + i));
            assertThat(bucket.getDocCount(), equalTo(1l));

            Avg avg = bucket.getAggregations().get("avg_i");
            assertThat(avg, notNullValue());
            assertThat(avg.getValue(), equalTo((double) i));

            Terms subTermsAgg = bucket.getAggregations().get("subTerms");
            assertThat(subTermsAgg, notNullValue());
            assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
            int j = i;
            for (Terms.Bucket subBucket : subTermsAgg.getBuckets()) {
                assertThat(subBucket, notNullValue());
                assertThat(key(subBucket), equalTo(String.valueOf(j)));
                assertThat(subBucket.getDocCount(), equalTo(1l));
                j++;
            }
        }
    }
+
    /**
     * Orders by a single-bucket (filter) sub-aggregation whose doc count mirrors
     * the bucket's: num_tag 1 is set on 3 docs and num_tag 0 on 2 (see the fixture),
     * so ascending order yields tag "0" (2 docs) then "1" (3 docs), and vice versa.
     */
    @Test
    public void singleValuedField_OrderedBySingleBucketSubAggregationAsc() throws Exception {
        boolean asc = randomBoolean();
        SearchResponse response = client().prepareSearch("idx").setTypes("type")
                .addAggregation(terms("num_tags")
                        .field("num_tag")
                        .collectMode(randomFrom(SubAggCollectionMode.values()))
                        .order(Terms.Order.aggregation("filter", asc))
                        .subAggregation(filter("filter").filter(QueryBuilders.matchAllQuery()))
                ).execute().actionGet();

        assertSearchResponse(response);

        Terms tags = response.getAggregations().get("num_tags");
        assertThat(tags, notNullValue());
        assertThat(tags.getName(), equalTo("num_tags"));
        assertThat(tags.getBuckets().size(), equalTo(2));

        Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();

        Terms.Bucket tag = iters.next();
        assertThat(tag, notNullValue());
        assertThat(key(tag), equalTo(asc ? "0" : "1"));
        assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
        Filter filter = tag.getAggregations().get("filter");
        assertThat(filter, notNullValue());
        assertThat(filter.getDocCount(), equalTo(asc ? 2l : 3l));

        tag = iters.next();
        assertThat(tag, notNullValue());
        assertThat(key(tag), equalTo(asc ? "1" : "0"));
        assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
        filter = tag.getAggregations().get("filter");
        assertThat(filter, notNullValue());
        assertThat(filter.getDocCount(), equalTo(asc ? 3l : 2l));
    }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc_MultiHierarchyLevels() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("tags")
+ .field("num_tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter1>filter2>max", asc))
+ .subAggregation(filter("filter1").filter(QueryBuilders.matchAllQuery())
+ .subAggregation(filter("filter2").filter(QueryBuilders.matchAllQuery())
+ .subAggregation(max("max").field(SINGLE_VALUED_FIELD_NAME))))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ // the max for "1" is 2
+ // the max for "0" is 4
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "1" : "0"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 3l : 2l));
+ Max max = filter2.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(asc ? 2.0 : 4.0));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "0" : "1"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 2l : 3l));
+ max = filter2.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(asc ? 4.0 : 2.0));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index).setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", true))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index).setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("num_tags", true))
+ .subAggregation(terms("num_tags").field("num_tags")
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index).setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.foo", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "with an unknown specified metric to order by");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index).setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats", true))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation " +
+ "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 4; i >= 0; i--) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.variance", asc))
+ .subAggregation(extendedStats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception {
+ long[] expectedKeys = new long[] { 1, 2, 4, 3, 7, 6, 5 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception {
+ long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception {
+ long[] expectedKeys = new long[] { 5, 6, 7, 3, 4, 2, 1 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception {
+ long[] expectedKeys = new long[] { 6, 7, 3, 4, 5, 1, 2 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception {
+ long[] expectedKeys = new long[] { 6, 7, 3, 5, 4, 1, 2 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByThreeCriteria() throws Exception {
+ long[] expectedKeys = new long[] { 2, 1, 4, 5, 3, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(false), Terms.Order.aggregation("sum_d", false), Terms.Order.aggregation("avg_l", false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAsCompound() throws Exception {
+ long[] expectedKeys = new long[] { 1, 2, 3, 4, 5, 6, 7 };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true));
+ }
+
+ private void assertMultiSortResponse(long[] expectedKeys, Terms.Order... order) {
+ SearchResponse response = client().prepareSearch("sort_idx").setTypes("multi_sort_type")
+ .addAggregation(terms("terms")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.compound(order))
+ .subAggregation(avg("avg_l").field("l"))
+ .subAggregation(sum("sum_d").field("d"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo(String.valueOf(expectedKeys[i])));
+ assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+ Avg avg = bucket.getAggregations().get("avg_l");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+ Sum sum = bucket.getAggregations().get("sum_d");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+ i++;
+ }
+ }
+
+ @Test
+ public void otherDocCount() {
+ testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value + 1")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i + 1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value - 1")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i - 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i - 1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i - 1));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_NotUniqueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("floor(_value / 1000 + 1)")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("1.0");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("1.0"));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(1));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+ *
+ * [1, 2] [2, 3] [3, 4] [4, 5] [5, 6]
+ *
+ * 1 - count: 1 - sum: 1;  2 - count: 2 - sum: 4;  3 - count: 2 - sum: 6;
+ * 4 - count: 2 - sum: 8;  5 - count: 2 - sum: 10; 6 - count: 1 - sum: 6
+ */
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("_value + 1").subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + (i + 1d));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + (i + 1d)));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i + 1));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ s += j + 1;
+ s += j + 1 + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+ "doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).script(
+ "doc['" + MULTI_VALUED_FIELD_NAME + "']")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_NoExplicitTypeOldScriptAPI() throws Exception {
+
+ // since no type is explicitly defined, es will assume all values returned by the script to be strings (bytes),
+ // so the aggregation should fail, since the "sum" aggregation can only operate on numeric values.
+
+ try {
+
+ client().prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']").subAggregation(sum("sum"))).execute().actionGet();
+
+ fail("expected to fail as sub-aggregation sum requires a numeric value source context, but there is none");
+
+ } catch (Exception e) {
+ // expected
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInherited_WithExplicitTypeOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values()))
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']").valueType(Terms.ValueType.LONG)
+ .subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("" + i));
+ assertThat(bucket.getKeyAsNumber().intValue(), equalTo(i));
+ final long count = i == 0 || i == 5 ? 1 : 2;
+ double s = 0;
+ for (int j = 0; j < NUM_DOCS; ++j) {
+ if (i == j || i == j + 1) {
+ s += j;
+ s += j + 1;
+ }
+ }
+ assertThat(bucket.getDocCount(), equalTo(count));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(s));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
new file mode 100644
index 0000000000..394cc57511
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountTests.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongSet;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormat;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MinDocCountTests extends AbstractTermsTests {
+
+ private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true);
+
+ private static int cardinality;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+
+ cardinality = randomIntBetween(8, 30);
+ final List<IndexRequestBuilder> indexRequests = new ArrayList<>();
+ final Set<String> stringTerms = new HashSet<>();
+ final LongSet longTerms = new LongHashSet();
+ final Set<String> dateTerms = new HashSet<>();
+ for (int i = 0; i < cardinality; ++i) {
+ String stringTerm;
+ do {
+ stringTerm = RandomStrings.randomAsciiOfLength(getRandom(), 8);
+ } while (!stringTerms.add(stringTerm));
+ long longTerm;
+ do {
+ longTerm = randomInt(cardinality * 2);
+ } while (!longTerms.add(longTerm));
+ double doubleTerm = longTerm * Math.PI;
+ String dateTerm = DateTimeFormat.forPattern("yyyy-MM-dd").print(new DateTime(2014, 1, ((int) longTerm % 20) + 1, 0, 0));
+ final int frequency = randomBoolean() ? 1 : randomIntBetween(2, 20);
+ for (int j = 0; j < frequency; ++j) {
+ indexRequests.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("s", stringTerm)
+ .field("l", longTerm)
+ .field("d", doubleTerm)
+ .field("date", dateTerm)
+ .field("match", randomBoolean())
+ .endObject()));
+ }
+ }
+ cardinality = stringTerms.size();
+
+ indexRandom(true, indexRequests);
+ ensureSearchable();
+ }
+
+ private enum Script {
+ NO {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.field(field);
+ }
+ },
+ YES {
+ @Override
+ TermsBuilder apply(TermsBuilder builder, String field) {
+ return builder.script(new org.elasticsearch.script.Script("doc['" + field + "'].values"));
+ }
+ };
+ abstract TermsBuilder apply(TermsBuilder builder, String field);
+ }
+
+ // check that terms2 is a subset of terms1
+ private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size, String include) {
+ final Matcher matcher = include == null ? null : Pattern.compile(include).matcher("");;
+ final Iterator<Terms.Bucket> it1 = terms1.getBuckets().iterator();
+ final Iterator<Terms.Bucket> it2 = terms2.getBuckets().iterator();
+ int size2 = 0;
+ while (it1.hasNext()) {
+ final Terms.Bucket bucket1 = it1.next();
+ if (bucket1.getDocCount() >= minDocCount && (matcher == null || matcher.reset(bucket1.getKeyAsString()).matches())) {
+ if (size2++ == size) {
+ break;
+ }
+ assertTrue(it2.hasNext());
+ final Terms.Bucket bucket2 = it2.next();
+ assertEquals(bucket1.getDocCount(), bucket2.getDocCount());
+ }
+ }
+ assertFalse(it2.hasNext());
+ }
+
+ private void assertSubset(Histogram histo1, Histogram histo2, long minDocCount) {
+ final Iterator<? extends Histogram.Bucket> it2 = histo2.getBuckets().iterator();
+ for (Histogram.Bucket b1 : histo1.getBuckets()) {
+ if (b1.getDocCount() >= minDocCount) {
+ final Histogram.Bucket b2 = it2.next();
+ assertEquals(b1.getKey(), b2.getKey());
+ assertEquals(b1.getDocCount(), b2.getDocCount());
+ }
+ }
+ }
+
+ public void testStringTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testStringScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testStringTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testStringScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testStringCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testStringScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testStringCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testStringScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testStringCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*", true);
+ }
+
+ public void testStringScriptCountAscWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*", true);
+ }
+
+ public void testStringCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*", true);
+ }
+
+ public void testStringScriptCountDescWithInclude() throws Exception {
+ testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*", true);
+ }
+
+ public void testLongTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testLongScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testLongTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testLongScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testLongCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testLongScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testLongCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testLongScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("l", Script.YES, Terms.Order.count(false));
+ }
+
+ public void testDoubleTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(true));
+ }
+
+ public void testDoubleScriptTermAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(true));
+ }
+
+ public void testDoubleTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.term(false));
+ }
+
+ public void testDoubleScriptTermDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.term(false));
+ }
+
+ public void testDoubleCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(true));
+ }
+
+ public void testDoubleScriptCountAsc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(true));
+ }
+
+ public void testDoubleCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.NO, Terms.Order.count(false));
+ }
+
+ public void testDoubleScriptCountDesc() throws Exception {
+ testMinDocCountOnTerms("d", Script.YES, Terms.Order.count(false));
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception {
+ testMinDocCountOnTerms(field, script, order, null, true);
+ }
+
+ private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include, boolean retryOnFailure) throws Exception {
+ // all terms
+ final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .executionHint(randomExecutionHint())
+ .order(order)
+ .size(cardinality + randomInt(10))
+ .minDocCount(0))
+ .execute().actionGet();
+ assertAllSuccessful(allTermsResponse);
+
+ final Terms allTerms = allTermsResponse.getAggregations().get("terms");
+ assertEquals(cardinality, allTerms.getBuckets().size());
+
+ for (long minDocCount = 0; minDocCount < 20; ++minDocCount) {
+ final int size = randomIntBetween(1, cardinality + 2);
+ final SearchRequest request = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(script.apply(terms("terms"), field)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .executionHint(randomExecutionHint())
+ .order(order)
+ .size(size)
+ .include(include)
+ .shardSize(cardinality + randomInt(10))
+ .minDocCount(minDocCount)).request();
+ final SearchResponse response = client().search(request).get();
+ try {
+ assertAllSuccessful(response);
+ assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
+ } catch (AssertionError ae) {
+ if (!retryOnFailure) {
+ throw ae;
+ }
+ logger.info("test failed. trying to see if it recovers after 1m.", ae);
+ try {
+ Thread.sleep(60000);
+ logger.debug("1m passed. retrying.");
+ testMinDocCountOnTerms(field, script, order, include, false);
+ } catch (Throwable secondFailure) {
+ logger.error("exception on retry (will re-throw the original in a sec)", secondFailure);
+ } finally {
+ throw ae;
+ }
+ }
+ }
+
+ }
+
+ public void testHistogramCountAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testHistogramCountDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testHistogramKeyAsc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testHistogramKeyDesc() throws Exception {
+ testMinDocCountOnHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ public void testDateHistogramCountAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_ASC);
+ }
+
+ public void testDateHistogramCountDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.COUNT_DESC);
+ }
+
+ public void testDateHistogramKeyAsc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_ASC);
+ }
+
+ public void testDateHistogramKeyDesc() throws Exception {
+ testMinDocCountOnDateHistogram(Histogram.Order.KEY_DESC);
+ }
+
+ private void testMinDocCountOnHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final Histogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(histogram("histo").field("d").interval(interval).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+ private void testMinDocCountOnDateHistogram(Histogram.Order order) throws Exception {
+ final int interval = randomIntBetween(1, 3);
+ final SearchResponse allResponse = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).order(order).minDocCount(0))
+ .execute().actionGet();
+
+ final Histogram allHisto = allResponse.getAggregations().get("histo");
+
+ for (long minDocCount = 0; minDocCount < 50; ++minDocCount) {
+ final SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setSize(0)
+ .setQuery(QUERY)
+ .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).order(order).minDocCount(minDocCount))
+ .execute().actionGet();
+ assertSubset(allHisto, (Histogram) response.getAggregations().get("histo"), minDocCount);
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
new file mode 100644
index 0000000000..b263187920
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MissingTests.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MissingTests extends ElasticsearchIntegrationTest {
+
+ static int numDocs, numDocsMissing, numDocsUnmapped;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ numDocs = randomIntBetween(5, 20);
+ numDocsMissing = randomIntBetween(1, numDocs - 1);
+ for (int i = 0; i < numDocsMissing; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+ for (int i = numDocsMissing; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("tag", "tag1")
+ .endObject()));
+ }
+
+ createIndex("unmapped_idx");
+ numDocsUnmapped = randomIntBetween(2, 5);
+ for (int i = 0; i < numDocsUnmapped; i++) {
+ builders.add(client().prepareIndex("unmapped_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i)
+ .endObject()));
+ }
+
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsUnmapped));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(missing("missing_tag").field("tag"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing));
+ }
+
+ @Test
+ public void withSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("missing_tag").field("tag")
+ .subAggregation(avg("avg_value").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+
+ Missing missing = response.getAggregations().get("missing_tag");
+ assertThat(missing, notNullValue());
+ assertThat(missing.getName(), equalTo("missing_tag"));
+ assertThat(missing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat((long) missing.getProperty("_count"), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(missing.getAggregations().asList().isEmpty(), is(false));
+
+ long sum = 0;
+ for (int i = 0; i < numDocsMissing; ++i) {
+ sum += i;
+ }
+ for (int i = 0; i < numDocsUnmapped; ++i) {
+ sum += i;
+ }
+ Avg avgValue = missing.getAggregations().get("avg_value");
+ assertThat(avgValue, notNullValue());
+ assertThat(avgValue.getName(), equalTo("avg_value"));
+ assertThat(avgValue.getValue(), equalTo((double) sum / (numDocsMissing + numDocsUnmapped)));
+ assertThat((double) missing.getProperty("avg_value.value"), equalTo((double) sum / (numDocsMissing + numDocsUnmapped)));
+ }
+
+ @Test
+ public void withInheritedSubMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx", "unmapped_idx")
+ .addAggregation(missing("top_missing").field("tag")
+ .subAggregation(missing("sub_missing")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Missing topMissing = response.getAggregations().get("top_missing");
+ assertThat(topMissing, notNullValue());
+ assertThat(topMissing.getName(), equalTo("top_missing"));
+ assertThat(topMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ assertThat(topMissing.getAggregations().asList().isEmpty(), is(false));
+
+ Missing subMissing = topMissing.getAggregations().get("sub_missing");
+ assertThat(subMissing, notNullValue());
+ assertThat(subMissing.getName(), equalTo("sub_missing"));
+ assertThat(subMissing.getDocCount(), equalTo((long) numDocsMissing + numDocsUnmapped));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(missing("missing")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Missing missing = bucket.getAggregations().get("missing");
+ assertThat(missing, Matchers.notNullValue());
+ assertThat(missing.getName(), equalTo("missing"));
+ assertThat(missing.getDocCount(), is(0l));
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
new file mode 100644
index 0000000000..a8acf62ac7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NaNSortingTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.util.Comparators;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.MetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class NaNSortingTests extends ElasticsearchIntegrationTest {
+
+ private enum SubAggregation {
+ AVG("avg") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return avg(name).field("numeric_field");
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((Avg) aggregation).getValue();
+ }
+ },
+ VARIANCE("variance") {
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".variance";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getVariance();
+ }
+ },
+ STD_DEVIATION("std_deviation"){
+ @Override
+ public MetricsAggregationBuilder<?> builder() {
+ return extendedStats(name).field("numeric_field");
+ }
+ @Override
+ public String sortKey() {
+ return name + ".std_deviation";
+ }
+ @Override
+ public double getValue(Aggregation aggregation) {
+ return ((ExtendedStats) aggregation).getStdDeviation();
+ }
+ };
+
+ SubAggregation(String name) {
+ this.name = name;
+ }
+
+ public String name;
+
+ public abstract MetricsAggregationBuilder<?> builder();
+
+ public String sortKey() {
+ return name;
+ }
+
+ public abstract double getValue(Aggregation aggregation);
+ }
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ final int numDocs = randomIntBetween(2, 10);
+ for (int i = 0; i < numDocs; ++i) {
+ final long value = randomInt(5);
+ XContentBuilder source = jsonBuilder().startObject().field("long_value", value).field("double_value", value + 0.05).field("string_value", "str_" + value);
+ if (randomBoolean()) {
+ source.field("numeric_value", randomDouble());
+ }
+ client().prepareIndex("idx", "type").setSource(source.endObject()).execute().actionGet();
+ }
+ refresh();
+ ensureSearchable();
+ }
+
+ private void assertCorrectlySorted(Terms terms, boolean asc, SubAggregation agg) {
+ assertThat(terms, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+ private void assertCorrectlySorted(Histogram histo, boolean asc, SubAggregation agg) {
+ assertThat(histo, notNullValue());
+ double previousValue = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ Aggregation sub = bucket.getAggregations().get(agg.name);
+ double value = agg.getValue(sub);
+ assertTrue(Comparators.compareDiscardNaN(previousValue, value, asc) <= 0);
+ previousValue = value;
+ }
+ }
+
+ public void testTerms(String fieldName) {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(fieldName).collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(agg.builder()).order(Terms.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+ final Terms terms = response.getAggregations().get("terms");
+ assertCorrectlySorted(terms, asc, agg);
+ }
+
+ @Test
+ public void stringTerms() {
+ testTerms("string_value");
+ }
+
+ @Test
+ public void longTerms() {
+ testTerms("long_value");
+ }
+
+ @Test
+ public void doubleTerms() {
+ testTerms("double_value");
+ }
+
+ @Test
+ public void longHistogram() {
+ final boolean asc = randomBoolean();
+ SubAggregation agg = randomFrom(SubAggregation.values());
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo")
+ .field("long_value").interval(randomIntBetween(1, 2)).subAggregation(agg.builder()).order(Histogram.Order.aggregation(agg.sortKey(), asc)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+ final Histogram histo = response.getAggregations().get("histo");
+ assertCorrectlySorted(histo, asc, agg);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
new file mode 100644
index 0000000000..9b4a5bb78b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java
@@ -0,0 +1,557 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.nested;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class NestedTests extends ElasticsearchIntegrationTest {
+
+ static int numParents;
+ static int[] numChildren;
+ static SubAggCollectionMode aggCollectionMode;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+
+ assertAcked(prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested", "incorrect", "type=object"));
+ ensureGreen("idx");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ numParents = randomIntBetween(3, 10);
+ numChildren = new int[numParents];
+ aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+ logger.info("AGG COLLECTION MODE: " + aggCollectionMode);
+ int totalChildren = 0;
+ for (int i = 0; i < numParents; ++i) {
+ if (i == numParents - 1 && totalChildren == 0) {
+ // we need at least one child overall
+ numChildren[i] = randomIntBetween(1, 5);
+ } else {
+ numChildren[i] = randomInt(5);
+ }
+ totalChildren += numChildren[i];
+ }
+ assertTrue(totalChildren > 0);
+
+ for (int i = 0; i < numParents; i++) {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .field("value", i + 1)
+ .startArray("nested");
+ for (int j = 0; j < numChildren[i]; ++j) {
+ source = source.startObject().field("value", i + 1 + j).endObject();
+ }
+ source = source.endArray().endObject();
+ builders.add(client().prepareIndex("idx", "type", ""+i+1).setSource(source));
+ }
+
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer", "nested", "type=nested").execute().actionGet();
+ ensureGreen("empty_bucket_idx");
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .startArray("nested")
+ .startObject().field("value", i + 1).endObject()
+ .startObject().field("value", i + 2).endObject()
+ .startObject().field("value", i + 3).endObject()
+ .startObject().field("value", i + 4).endObject()
+ .startObject().field("value", i + 5).endObject()
+ .endArray()
+ .endObject()));
+ }
+
+ assertAcked(prepareCreate("idx_nested_nested_aggs")
+ .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("nested1")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("nested2")
+ .field("type", "nested")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen("idx_nested_nested_aggs");
+
+ builders.add(
+ client().prepareIndex("idx_nested_nested_aggs", "type", "1")
+ .setSource(jsonBuilder().startObject()
+ .startArray("nested1")
+ .startObject()
+ .field("a", "a")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("a", "b")
+ .startArray("nested2")
+ .startObject()
+ .field("b", 2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject())
+ );
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void simple() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ double min = Double.POSITIVE_INFINITY;
+ double max = Double.NEGATIVE_INFINITY;
+ long sum = 0;
+ long count = 0;
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final long value = i + 1 + j;
+ min = Math.min(min, value);
+ max = Math.max(max, value);
+ sum += value;
+ ++count;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(count));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ Stats stats = nested.getAggregations().get("nested_value_stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMin(), equalTo(min));
+ assertThat(stats.getMax(), equalTo(max));
+ assertThat(stats.getCount(), equalTo(count));
+ assertThat(stats.getSum(), equalTo((double) sum));
+ assertThat(stats.getAvg(), equalTo((double) sum / count));
+ }
+
+ @Test
+ public void nonExistingNestedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("value")
+ .subAggregation(stats("nested_value_stats").field("nested.value")))
+ .execute().actionGet();
+
+ Nested nested = searchResponse.getAggregations().get("nested");
+ assertThat(nested, Matchers.notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), is(0l));
+ }
+
+ @Test
+ public void nestedWithSubTermsAgg() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(terms("values").field("nested.value").size(100)
+ .collectMode(aggCollectionMode)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ long docCount = 0;
+ long[] counts = new long[numParents + 6];
+ for (int i = 0; i < numParents; ++i) {
+ for (int j = 0; j < numChildren[i]; ++j) {
+ final int value = i + 1 + j;
+ ++counts[value];
+ ++docCount;
+ }
+ }
+ int uniqueValues = 0;
+ for (long count : counts) {
+ if (count > 0) {
+ ++uniqueValues;
+ }
+ }
+
+ Nested nested = response.getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), equalTo(docCount));
+ assertThat((long) nested.getProperty("_count"), equalTo(docCount));
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ LongTerms values = nested.getAggregations().get("values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(uniqueValues));
+ for (int i = 0; i < counts.length; ++i) {
+ final String key = Long.toString(i);
+ if (counts[i] == 0) {
+ assertNull(values.getBucketByKey(key));
+ } else {
+ Bucket bucket = values.getBucketByKey(key);
+ assertNotNull(bucket);
+ assertEquals(counts[i], bucket.getDocCount());
+ }
+ }
+ assertThat((LongTerms) nested.getProperty("values"), sameInstance(values));
+ }
+
+ @Test
+ public void nestedAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("top_values").field("value").size(100)
+ .collectMode(aggCollectionMode)
+ .subAggregation(nested("nested").path("nested")
+ .subAggregation(max("max_value").field("nested.value"))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ LongTerms values = response.getAggregations().get("top_values");
+ assertThat(values, notNullValue());
+ assertThat(values.getName(), equalTo("top_values"));
+ assertThat(values.getBuckets(), notNullValue());
+ assertThat(values.getBuckets().size(), equalTo(numParents));
+
+ for (int i = 0; i < numParents; i++) {
+ String topValue = "" + (i + 1);
+ assertThat(values.getBucketByKey(topValue), notNullValue());
+ Nested nested = values.getBucketByKey(topValue).getAggregations().get("nested");
+ assertThat(nested, notNullValue());
+ Max max = nested.getAggregations().get("max_value");
+ assertThat(max, notNullValue());
+ assertThat(max.getValue(), equalTo(numChildren[i] == 0 ? Double.NEGATIVE_INFINITY : (double) i + numChildren[i]));
+ }
+ }
+
+ @Test
+ public void nestNestedAggs() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_nested_nested_aggs")
+ .addAggregation(nested("level1").path("nested1")
+ .subAggregation(terms("a").field("nested1.a")
+ .collectMode(aggCollectionMode)
+ .subAggregation(nested("level2").path("nested1.nested2")
+ .subAggregation(sum("sum").field("nested1.nested2.b")))))
+ .get();
+ assertSearchResponse(response);
+
+
+ Nested level1 = response.getAggregations().get("level1");
+ assertThat(level1, notNullValue());
+ assertThat(level1.getName(), equalTo("level1"));
+ assertThat(level1.getDocCount(), equalTo(2l));
+
+ StringTerms a = level1.getAggregations().get("a");
+ Terms.Bucket bBucket = a.getBucketByKey("a");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ Nested level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ Sum sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+
+ a = level1.getAggregations().get("a");
+ bBucket = a.getBucketByKey("b");
+ assertThat(bBucket.getDocCount(), equalTo(1l));
+
+ level2 = bBucket.getAggregations().get("level2");
+ assertThat(level2.getDocCount(), equalTo(1l));
+ sum = level2.getAggregations().get("sum");
+ assertThat(sum.getValue(), equalTo(2d));
+ }
+
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(nested("nested").path("nested")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Nested nested = bucket.getAggregations().get("nested");
+ assertThat(nested, Matchers.notNullValue());
+ assertThat(nested.getName(), equalTo("nested"));
+ assertThat(nested.getDocCount(), is(0l));
+ }
+
+ @Test
+ public void nestedOnObjectField() throws Exception {
+ try {
+ client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(nested("object_field").path("incorrect"))
+ .execute().actionGet();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[nested] nested path [incorrect] is not nested"));
+ }
+ }
+
+ @Test
+ // Test based on: https://github.com/elasticsearch/elasticsearch/issues/9280
+ public void testParentFilterResolvedCorrectly() throws Exception {
+ XContentBuilder mapping = jsonBuilder().startObject().startObject("provider").startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("cid").field("type", "long").endObject()
+ .startObject("identifier").field("type", "string").field("index", "not_analyzed").endObject()
+ .startObject("tags")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("tid").field("type", "long").endObject()
+ .startObject("name").field("type", "string").field("index", "not_analyzed").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("dates")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("day").field("type", "date").field("format", "dateOptionalTime").endObject()
+ .startObject("month")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("end").field("type", "date").field("format", "dateOptionalTime").endObject()
+ .startObject("start").field("type", "date").field("format", "dateOptionalTime").endObject()
+ .startObject("label").field("type", "string").field("index", "not_analyzed").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject();
+ assertAcked(prepareCreate("idx2")
+ .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("provider", mapping));
+ ensureGreen("idx2");
+
+ List<IndexRequestBuilder> indexRequests = new ArrayList<>(2);
+ indexRequests.add(client().prepareIndex("idx2", "provider", "1").setSource("{\"dates\": {\"month\": {\"label\": \"2014-11\", \"end\": \"2014-11-30\", \"start\": \"2014-11-01\"}, \"day\": \"2014-11-30\"}, \"comments\": [{\"cid\": 3,\"identifier\": \"29111\"}, {\"cid\": 4,\"tags\": [{\"tid\" :44,\"name\": \"Roles\"}], \"identifier\": \"29101\"}]}"));
+ indexRequests.add(client().prepareIndex("idx2", "provider", "2").setSource("{\"dates\": {\"month\": {\"label\": \"2014-12\", \"end\": \"2014-12-31\", \"start\": \"2014-12-01\"}, \"day\": \"2014-12-03\"}, \"comments\": [{\"cid\": 1, \"identifier\": \"29111\"}, {\"cid\": 2,\"tags\": [{\"tid\" : 22, \"name\": \"DataChannels\"}], \"identifier\": \"29101\"}]}"));
+ indexRandom(true, indexRequests);
+
+ SearchResponse response = client().prepareSearch("idx2").setTypes("provider")
+ .addAggregation(
+ terms("startDate").field("dates.month.start").subAggregation(
+ terms("endDate").field("dates.month.end").subAggregation(
+ terms("period").field("dates.month.label").subAggregation(
+ nested("ctxt_idfier_nested").path("comments").subAggregation(
+ filter("comment_filter").filter(termQuery("comments.identifier", "29111")).subAggregation(
+ nested("nested_tags").path("comments.tags").subAggregation(
+ terms("tag").field("comments.tags.name")
+ )
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 2);
+
+ Terms startDate = response.getAggregations().get("startDate");
+ assertThat(startDate.getBuckets().size(), equalTo(2));
+ Terms.Bucket bucket = startDate.getBucketByKey("1414800000000"); // 2014-11-01T00:00:00.000Z
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Terms endDate = bucket.getAggregations().get("endDate");
+ bucket = endDate.getBucketByKey("1417305600000"); // 2014-11-30T00:00:00.000Z
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Terms period = bucket.getAggregations().get("period");
+ bucket = period.getBucketByKey("2014-11");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Nested comments = bucket.getAggregations().get("ctxt_idfier_nested");
+ assertThat(comments.getDocCount(), equalTo(2l));
+ Filter filter = comments.getAggregations().get("comment_filter");
+ assertThat(filter.getDocCount(), equalTo(1l));
+ Nested nestedTags = filter.getAggregations().get("nested_tags");
+ assertThat(nestedTags.getDocCount(), equalTo(0l)); // This must be 0
+ Terms tags = nestedTags.getAggregations().get("tag");
+ assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty
+
+ bucket = startDate.getBucketByKey("1417392000000"); // 2014-12-01T00:00:00.000Z
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ endDate = bucket.getAggregations().get("endDate");
+ bucket = endDate.getBucketByKey("1419984000000"); // 2014-12-31T00:00:00.000Z
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ period = bucket.getAggregations().get("period");
+ bucket = period.getBucketByKey("2014-12");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ comments = bucket.getAggregations().get("ctxt_idfier_nested");
+ assertThat(comments.getDocCount(), equalTo(2l));
+ filter = comments.getAggregations().get("comment_filter");
+ assertThat(filter.getDocCount(), equalTo(1l));
+ nestedTags = filter.getAggregations().get("nested_tags");
+ assertThat(nestedTags.getDocCount(), equalTo(0l)); // This must be 0
+ tags = nestedTags.getAggregations().get("tag");
+ assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty
+ }
+
+ @Test
+ public void nestedSameDocIdProcessedMultipleTime() throws Exception {
+ assertAcked(
+ prepareCreate("idx4")
+ .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("product", "categories", "type=string", "name", "type=string", "property", "type=nested")
+ );
+ ensureGreen("idx4");
+
+ client().prepareIndex("idx4", "product", "1").setSource(jsonBuilder().startObject()
+ .field("name", "product1")
+ .field("categories", "1", "2", "3", "4")
+ .startArray("property")
+ .startObject().field("id", 1).endObject()
+ .startObject().field("id", 2).endObject()
+ .startObject().field("id", 3).endObject()
+ .endArray()
+ .endObject()).get();
+ client().prepareIndex("idx4", "product", "2").setSource(jsonBuilder().startObject()
+ .field("name", "product2")
+ .field("categories", "1", "2")
+ .startArray("property")
+ .startObject().field("id", 1).endObject()
+ .startObject().field("id", 5).endObject()
+ .startObject().field("id", 4).endObject()
+ .endArray()
+ .endObject()).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("idx4").setTypes("product")
+ .addAggregation(terms("category").field("categories").subAggregation(
+ nested("property").path("property").subAggregation(
+ terms("property_id").field("property.id")
+ )
+ ))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 2);
+
+ Terms category = response.getAggregations().get("category");
+ assertThat(category.getBuckets().size(), equalTo(4));
+
+ Terms.Bucket bucket = category.getBucketByKey("1");
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Nested property = bucket.getAggregations().get("property");
+ assertThat(property.getDocCount(), equalTo(6l));
+ Terms propertyId = property.getAggregations().get("property_id");
+ assertThat(propertyId.getBuckets().size(), equalTo(5));
+ assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2l));
+ assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1l));
+
+ bucket = category.getBucketByKey("2");
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ property = bucket.getAggregations().get("property");
+ assertThat(property.getDocCount(), equalTo(6l));
+ propertyId = property.getAggregations().get("property_id");
+ assertThat(propertyId.getBuckets().size(), equalTo(5));
+ assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(2l));
+ assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("4").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("5").getDocCount(), equalTo(1l));
+
+ bucket = category.getBucketByKey("3");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ property = bucket.getAggregations().get("property");
+ assertThat(property.getDocCount(), equalTo(3l));
+ propertyId = property.getAggregations().get("property_id");
+ assertThat(propertyId.getBuckets().size(), equalTo(3));
+ assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1l));
+
+ bucket = category.getBucketByKey("4");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ property = bucket.getAggregations().get("property");
+ assertThat(property.getDocCount(), equalTo(3l));
+ propertyId = property.getAggregations().get("property_id");
+ assertThat(propertyId.getBuckets().size(), equalTo(3));
+ assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1l));
+ assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1l));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggTests.java
new file mode 100644
index 0000000000..b3ae0733dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ParentIdAggTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+
+public class ParentIdAggTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testParentIdAggregation() throws IOException {
+ XContentBuilder mapping = jsonBuilder().startObject()
+ .startObject("childtype")
+ .startObject("_parent")
+ .field("type", "parenttype")
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate("testidx").addMapping("childtype", mapping));
+ client().prepareIndex("testidx", "childtype").setSource(jsonBuilder().startObject().field("num", 1).endObject()).setParent("p1").get();
+ client().prepareIndex("testidx", "childtype").setSource(jsonBuilder().startObject().field("num", 2).endObject()).setParent("p1").get();
+
+ refresh();
+ ensureGreen("testidx");
+ SearchResponse searchResponse = client().prepareSearch("testidx").setTypes("childtype").setQuery(matchAllQuery()).addAggregation(AggregationBuilders.terms("children").field("_parent")).get();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getAggregations().getAsMap().get("children"), instanceOf(Terms.class));
+ Terms terms = (Terms) searchResponse.getAggregations().getAsMap().get("children");
+ assertThat(terms.getBuckets().iterator().next().getDocCount(), equalTo(2l));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
new file mode 100644
index 0000000000..c53b763a0a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/RangeTests.java
@@ -0,0 +1,1495 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.range.Range.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class RangeTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "l_values";
+
+ static int numDocs;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ numDocs = randomIntBetween(10, 20);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field(SINGLE_VALUED_FIELD_NAME, i+1)
+ .startArray(MULTI_VALUED_FIELD_NAME).value(i+1).value(i+2).endArray()
+ .endObject()));
+ }
+ createIndex("idx_unmapped");
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(jsonBuilder()
+ .startObject()
+ // shift sequence by 1, to ensure we have negative values, and value 3 on the edge of the tested ranges
+ .field(SINGLE_VALUED_FIELD_NAME, i * 2 - 1)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void rangeAsSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field(MULTI_VALUED_FIELD_NAME).size(100)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getBuckets().size(), equalTo(numDocs + 1));
+ for (int i = 1; i < numDocs + 2; ++i) {
+ Terms.Bucket bucket = terms.getBucketByKey("" + i);
+ assertThat(bucket, notNullValue());
+ final long docCount = i == 1 || i == numDocs + 1 ? 1 : 2;
+ assertThat(bucket.getDocCount(), equalTo(docCount));
+ Range range = bucket.getAggregations().get("range");
+ List<? extends Bucket> buckets = range.getBuckets();
+ Range.Bucket rangeBucket = buckets.get(0);
+ assertThat((String) rangeBucket.getKey(), equalTo("*-3.0"));
+ assertThat(rangeBucket.getKeyAsString(), equalTo("*-3.0"));
+ assertThat(rangeBucket, notNullValue());
+ assertThat(rangeBucket.getFromAsString(), nullValue());
+ assertThat(rangeBucket.getToAsString(), equalTo("3.0"));
+ if (i == 1 || i == 3) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 2) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = buckets.get(1);
+ assertThat((String) rangeBucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(rangeBucket.getKeyAsString(), equalTo("3.0-6.0"));
+ assertThat(rangeBucket, notNullValue());
+ assertThat(rangeBucket.getFromAsString(), equalTo("3.0"));
+ assertThat(rangeBucket.getToAsString(), equalTo("6.0"));
+ if (i == 3 || i == 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i == 4 || i == 5) {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ }
+ rangeBucket = buckets.get(2);
+ assertThat((String) rangeBucket.getKey(), equalTo("6.0-*"));
+ assertThat(rangeBucket.getKeyAsString(), equalTo("6.0-*"));
+ assertThat(rangeBucket, notNullValue());
+ assertThat(rangeBucket.getFromAsString(), equalTo("6.0"));
+ assertThat(rangeBucket.getToAsString(), nullValue());
+ if (i == 6 || i == numDocs + 1) {
+ assertThat(rangeBucket.getDocCount(), equalTo(1L));
+ } else if (i < 6) {
+ assertThat(rangeBucket.getDocCount(), equalTo(0L));
+ } else {
+ assertThat(rangeBucket.getDocCount(), equalTo(2L));
+ }
+ }
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) ((Number) bucket.getFrom())).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) ((Number) bucket.getTo())).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValueField_WithFormat() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME).addUnboundedTo(3).addRange(3, 6).addUnboundedFrom(6).format("#"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3-6"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3"));
+ assertThat(bucket.getToAsString(), equalTo("6"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValueField_WithCustomKey() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r1"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r2"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r3"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+ Object[] propertiesKeys = (Object[]) range.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) range.getProperty("_count");
+ Object[] propertiesCounts = (Object[]) range.getProperty("sum.value");
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(3.0)); // 1 + 2
+ assertThat((String) propertiesKeys[0], equalTo("*-3.0"));
+ assertThat((long) propertiesDocCounts[0], equalTo(2l));
+ assertThat((double) propertiesCounts[0], equalTo(3.0));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(12.0)); // 3 + 4 + 5
+ assertThat((String) propertiesKeys[1], equalTo("3.0-6.0"));
+ assertThat((long) propertiesDocCounts[1], equalTo(3l));
+ assertThat((double) propertiesCounts[1], equalTo(12.0));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ assertThat((String) propertiesKeys[2], equalTo("6.0-*"));
+ assertThat((long) propertiesDocCounts[2], equalTo(numDocs - 5l));
+ assertThat((double) propertiesCounts[2], equalTo((double) total));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6)
+ .subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // (6 + 7 + 8 + 9 + 10) / 5
+ }
+
+ // Range agg on the single-valued field with a value script that shifts each
+ // value by +1; bucket doc counts must reflect the shifted values.
+ // (Assumes docs are indexed with values 1..numDocs — setup not shown here.)
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME).script(new Script("_value + 1")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): only the doc whose shifted value is 2.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1l)); // 2
+
+ // [3, 6): shifted values 3, 4, 5.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l)); // 3, 4, 5
+
+ // [6, +inf): everything else, hence numDocs - 4.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ */
+
+ // Plain range agg on the multi-valued field; per the data sketch above each
+ // doc carries two consecutive values, so boundary docs can fall in two buckets.
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): docs [1,2] and [2,3].
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ // [3, 6): docs [2,3], [3,4], [4,5], [5,6].
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ // [6, +inf): all docs from [5,6] onward, hence numDocs - 4.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
+ */
+
+ // Range agg on the multi-valued field with a +1 value script; the shifted
+ // value pairs are listed in the data sketch above this method.
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(MULTI_VALUED_FIELD_NAME).script(new Script("_value + 1")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): only shifted pair [2,3] starts below 3.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ // [3, 6): shifted pairs [2,3], [3,4], [4,5], [5,6].
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ // [6, +inf): everything from [5,6] onward, hence numDocs - 3.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3l));
+ }
+
+ /*
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+ [11, 12]
+
+ r1: 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+ */
+
+ // Same shifted multi-valued setup as above, but with an inherited sum
+ // sub-aggregator: the sum in each bucket covers all values of every doc that
+ // landed in the bucket (not only the values inside the bucket's range).
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(MULTI_VALUED_FIELD_NAME).script(new Script("_value + 1")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6).subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): doc [2,3] only; its sum covers both values, 2 + 3.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(2d+3d));
+
+ // [3, 6): docs [2,3], [3,4], [4,5], [5,6] — sum of all their values.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6));
+
+ // [6, +inf): remaining docs; expected sum recomputed from the shifted pairs.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 3; i < numDocs; ++i) {
+ // doc i holds shifted values (i+1)+1 and (i+1)+2.
+ total += ((i + 1) + 1) + ((i + 1) + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ // Range agg sourced purely from a script reading the single-valued field
+ // (no field() set); buckets must match the plain single-value case.
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): values 1 and 2.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ // [3, 6): values 3, 4, 5.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ // [6, +inf): the remaining numDocs - 5 docs.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ // Script-valued range agg with an inherited avg sub-aggregator; the avg in
+ // each bucket is over the script values of the docs in that bucket.
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6).subAggregation(avg("avg"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): values 1 and 2.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ // [3, 6): values 3, 4, 5.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ // [6, +inf): recompute the expected average over values 6..numDocs.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // (6 + 7 + 8 + 9 + 10) / 5
+ }
+
+ // Ranges chosen to match no documents at all: both buckets must still be
+ // returned, each with a doc count of 0.
+ @Test
+ public void emptyRange() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(-1)
+ .addUnboundedFrom(1000))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(2));
+
+ // (-inf, -1): empty — all indexed values are positive.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*--1.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(-1.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("-1.0"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ // [1000, +inf): empty — all indexed values are far below 1000.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("1000.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(1000d));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("1000.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ // Range agg sourced from a script returning the multi-valued field's values;
+ // results must match the plain multiValuedField test.
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")).addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): docs [1,2] and [2,3].
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ // [3, 6): docs [2,3], [3,4], [4,5], [5,6].
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ // [6, +inf): everything from [5,6] onward, hence numDocs - 4.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ [1, 2]
+ [2, 3]
+ [3, 4]
+ [4, 5]
+ [5, 6]
+ [6, 7]
+ [7, 8]
+ [8, 9]
+ [9, 10]
+ [10, 11]
+
+ r1: 1, 2, 2
+ r2: 3, 3, 4, 4, 5, 5
+ r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+ */
+
+ // Script-valued multi-value range agg with named ranges (r1/r2/r3) and an
+ // inherited sum; each bucket's sum covers all values of its member docs.
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values")).addUnboundedTo("r1", 3)
+ .addRange("r2", 3, 6).addUnboundedFrom("r3", 6).subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // r1 = (-inf, 3): docs [1,2] and [2,3]; sum over all their values.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r1"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3));
+
+ // r2 = [3, 6): docs [2,3], [3,4], [4,5], [5,6].
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r2"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6));
+
+ // r3 = [6, +inf): remaining docs; expected sum recomputed from the pairs.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r3"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 4; i < numDocs; ++i) {
+ // doc i holds values i+1 and i+2.
+ total += (i + 1) + (i + 2);
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ // Range agg against an index where the field is unmapped: all buckets must
+ // still be returned with their boundaries intact and zero doc counts.
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ }
+
+ // Search across a mapped and an unmapped index: the unmapped index must
+ // contribute nothing, so counts equal the single-index (mapped) case.
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ // Wait for the unmapped index to be at least yellow before searching it.
+ client().admin().cluster().prepareHealth("idx_unmapped").setWaitForYellowStatus().execute().actionGet();
+
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(range("range")
+ .field(SINGLE_VALUED_FIELD_NAME)
+ .addUnboundedTo(3)
+ .addRange(3, 6)
+ .addUnboundedFrom(6))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ // (-inf, 3): values 1 and 2 from the mapped index.
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ // [3, 6): values 3, 4, 5.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+
+ // [6, +inf): remaining numDocs - 5 docs.
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5l));
+ }
+
+ // Overlapping ranges on the multi-valued field: a document may be counted
+ // in several buckets; bucket order follows the order ranges were added.
+ @Test
+ public void overlappingRanges() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(range("range")
+ .field(MULTI_VALUED_FIELD_NAME)
+ .addUnboundedTo(5)
+ .addRange(3, 6)
+ .addRange(4, 5)
+ .addUnboundedFrom(4))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(4));
+
+ // (-inf, 5): docs [1,2], [2,3], [3,4], [4,5].
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-5.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("5.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ // [3, 6): docs [2,3], [3,4], [4,5], [5,6] — overlaps the first bucket.
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ // [4, 5): docs [3,4] and [4,5].
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("4.0-5.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(5.0));
+ assertThat(bucket.getFromAsString(), equalTo("4.0"));
+ assertThat(bucket.getToAsString(), equalTo("5.0"));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+
+ // [4, +inf): everything except the first two docs.
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("4.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(4.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("4.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 2l));
+ }
+
+ // A range nested under a histogram bucket that holds no docs: the range
+ // must still be materialized with its "0-2" bucket at doc count 0.
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0)
+ .subAggregation(range("range").addRange("0-2", 0.0, 2.0)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ // Bucket 1 is the empty histogram bucket (kept because minDocCount(0)).
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Range range = bucket.getAggregations().get("range");
+ // TODO: use diamond once JI-9019884 is fixed
+ List<Range.Bucket> buckets = new ArrayList<Range.Bucket>(range.getBuckets());
+ assertThat(range, Matchers.notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ assertThat(buckets.size(), is(1));
+ assertThat((String) buckets.get(0).getKey(), equalTo("0-2"));
+ assertThat(((Number) buckets.get(0).getFrom()).doubleValue(), equalTo(0.0));
+ assertThat(((Number) buckets.get(0).getTo()).doubleValue(), equalTo(2.0));
+ assertThat(buckets.get(0).getFromAsString(), equalTo("0.0"));
+ assertThat(buckets.get(0).getToAsString(), equalTo("2.0"));
+ assertThat(buckets.get(0).getDocCount(), equalTo(0l));
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Legacy-API twin of singleValuedField_WithValueScript: passes the script
+ // as a raw string instead of a Script object; expectations are identical.
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(SINGLE_VALUED_FIELD_NAME).script("_value + 1").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1l)); // 2
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3l)); // 3, 4, 5
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Legacy-API twin of multiValuedField_WithValueScript: raw string script
+ // instead of a Script object; expectations are identical.
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(range.getBuckets().size(), equalTo(3));
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4l));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3l));
+ }
+
+ /*
+ * [2, 3] [3, 4] [4, 5] [5, 6] [6, 7] [7, 8] [8, 9] [9, 10] [10, 11] [11,
+ * 12]
+ *
+ * r1: 2 r2: 3, 3, 4, 4, 5, 5 r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
+ */
+
+ /*
+ * Range agg over the multi-valued field with a "_value + 1" value script and an inherited sum sub-agg.
+ * Old (string) script API — TODO Remove in 2.0.
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").field(MULTI_VALUED_FIELD_NAME).script("_value + 1").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6).subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // reuse the local instead of re-fetching the bucket list
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(1L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(2d + 3d));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 3L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 3; i < numDocs; ++i) {
+ total += ((i + 1) + 1) + ((i + 1) + 2); // both field values of doc i, shifted by the value script
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+
+ /*
+ * Range agg keyed by a doc-value script on the single-valued field.
+ * Old (string) script API — TODO Remove in 2.0.
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // reuse the local instead of re-fetching the bucket list
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3L));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ }
+
+ /*
+ * Same script-keyed range as above, with an inherited avg sub-aggregation.
+ * Old (string) script API — TODO Remove in 2.0.
+ */
+ @Test
+ public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6).subAggregation(avg("avg"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // reuse the local instead of re-fetching the bucket list
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(1.5)); // (1 + 2) / 2
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(3L));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(4.0)); // (3 + 4 + 5) / 3
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 5L));
+ avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ long total = 0;
+ for (int i = 5; i < numDocs; ++i) {
+ total += i + 1;
+ }
+ assertThat(avg.getValue(), equalTo((double) total / (numDocs - 5))); // (6 + 7 + 8 + 9 + 10) / 5
+ }
+
+ /*
+ * Range agg keyed by a doc-values script on the multi-valued field.
+ * Old (string) script API — TODO Remove in 2.0.
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").addUnboundedTo(3).addRange(3, 6)
+ .addUnboundedFrom(6)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // reuse the local instead of re-fetching the bucket list
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("*-3.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("3.0-6.0"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4L));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("6.0-*"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+ }
+
+ /*
+ * [1, 2] [2, 3] [3, 4] [4, 5] [5, 6] [6, 7] [7, 8] [8, 9] [9, 10] [10, 11]
+ *
+ * r1: 1, 2, 2 r2: 3, 3, 4, 4, 5, 5 r3: 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11
+ */
+
+ /*
+ * Script-keyed range over the multi-valued field with named keys and an inherited sum sub-agg.
+ * Old (string) script API — TODO Remove in 2.0.
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInheritedOldScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ range("range").script("doc['" + MULTI_VALUED_FIELD_NAME + "'].values").addUnboundedTo("r1", 3).addRange("r2", 3, 6)
+ .addUnboundedFrom("r3", 6).subAggregation(sum("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ assertThat(range, notNullValue());
+ assertThat(range.getName(), equalTo("range"));
+ List<? extends Bucket> buckets = range.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // reuse the local instead of re-fetching the bucket list
+
+ Range.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r1"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(3.0));
+ assertThat(bucket.getFromAsString(), nullValue());
+ assertThat(bucket.getToAsString(), equalTo("3.0"));
+ assertThat(bucket.getDocCount(), equalTo(2L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1 + 2 + 2 + 3));
+
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r2"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(3.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(6.0));
+ assertThat(bucket.getFromAsString(), equalTo("3.0"));
+ assertThat(bucket.getToAsString(), equalTo("6.0"));
+ assertThat(bucket.getDocCount(), equalTo(4L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2 + 3 + 3 + 4 + 4 + 5 + 5 + 6));
+
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("r3"));
+ assertThat(((Number) bucket.getFrom()).doubleValue(), equalTo(6.0));
+ assertThat(((Number) bucket.getTo()).doubleValue(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(bucket.getFromAsString(), equalTo("6.0"));
+ assertThat(bucket.getToAsString(), nullValue());
+ assertThat(bucket.getDocCount(), equalTo(numDocs - 4L));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ long total = 0;
+ for (int i = 4; i < numDocs; ++i) {
+ total += (i + 1) + (i + 2); // both field values of doc i
+ }
+ assertThat(sum.getValue(), equalTo((double) total));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedTests.java
new file mode 100644
index 0000000000..01321634cd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ReverseNestedTests.java
@@ -0,0 +1,643 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.nested.ReverseNested;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.count;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.nested;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.reverseNested;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.sameInstance;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class ReverseNestedTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception { // builds the suite-scoped "idx" index with two mappings
+ assertAcked(prepareCreate("idx")
+ .addMapping(
+ "type1", // type1: flat "field1" plus one nested level ("nested1.field2")
+ jsonBuilder().startObject().startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("field2").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject().endObject()
+ )
+ .addMapping(
+ "type2", // type2: two nested levels ("nested1.field1" and "nested1.nested2.field2")
+ jsonBuilder().startObject().startObject("properties")
+ .startObject("nested1").field("type", "nested").startObject("properties")
+ .startObject("field1").field("type", "string").endObject()
+ .startObject("nested2").field("type", "nested").startObject("properties")
+ .startObject("field2").field("type", "string").endObject()
+ .endObject().endObject()
+ .endObject().endObject()
+ .endObject().endObject()
+ )
+ );
+
+ insertType1(Arrays.asList("a", "b", "c"), Arrays.asList("1", "2", "3", "4")); // args: field1 values, nested1.field2 values
+ insertType1(Arrays.asList("b", "c", "d"), Arrays.asList("4", "5", "6", "7"));
+ insertType1(Arrays.asList("c", "d", "e"), Arrays.asList("7", "8", "9", "1"));
+ refresh();
+ insertType1(Arrays.asList("a", "e"), Arrays.asList("7", "4", "1", "1"));
+ insertType1(Arrays.asList("a", "c"), Arrays.asList("2", "1"));
+ insertType1(Arrays.asList("a"), Arrays.asList("3", "4"));
+ refresh();
+ insertType1(Arrays.asList("x", "c"), Arrays.asList("1", "8"));
+ insertType1(Arrays.asList("y", "c"), Arrays.asList("6"));
+ insertType1(Arrays.asList("z"), Arrays.asList("5", "9"));
+ refresh();
+
+ insertType2(new String[][]{new String[]{"a", "0", "0", "1", "2"}, new String[]{"b", "0", "1", "1", "2"}, new String[]{"a", "0"}}); // each inner array: [0] = nested1.field1, [1..] = nested1.nested2.field2
+ insertType2(new String[][]{new String[]{"c", "1", "1", "2", "2"}, new String[]{"d", "3", "4"}});
+ refresh();
+
+ insertType2(new String[][]{new String[]{"a", "0", "0", "0", "0"}, new String[]{"b", "0", "0", "0", "0"}});
+ insertType2(new String[][]{new String[]{"e", "1", "2"}, new String[]{"f", "3", "4"}});
+ refresh();
+
+ ensureSearchable();
+ }
+
+ // Indexes one type1 doc: values1 -> the "field1" array, values2 -> one nested "nested1.field2" object each.
+ private void insertType1(List<String> values1, List<String> values2) throws Exception {
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .array("field1", values1.toArray())
+ .startArray("nested1");
+ for (String value2 : values2) { // renamed from 'value1': this loop iterates values2
+ source.startObject().field("field2", value2).endObject();
+ }
+ source.endArray().endObject();
+ indexRandom(false, client().prepareIndex("idx", "type1").setRouting("1").setSource(source)); // same routing value for every doc
+ }
+
+ private void insertType2(String[][] values) throws Exception { // one doc: each inner array becomes one nested1 object
+ XContentBuilder source = jsonBuilder()
+ .startObject()
+ .startArray("nested1");
+ for (String[] value : values) {
+ source.startObject().field("field1", value[0]).startArray("nested2"); // value[0] -> nested1.field1
+ for (int i = 1; i < value.length; i++) {
+ source.startObject().field("field2", value[i]).endObject(); // value[1..] -> nested1.nested2.field2
+ }
+ source.endArray().endObject();
+ }
+ source.endArray().endObject();
+ indexRandom(false, client().prepareIndex("idx", "type2").setRouting("1").setSource(source)); // same routing value for every doc
+ }
+
+ @Test
+ public void simple_reverseNestedToRoot() throws Exception { // nested -> terms(field2) -> reverseNested(to root) -> terms(field1)
+ SearchResponse response = client().prepareSearch("idx").setTypes("type1")
+ .addAggregation(nested("nested1").path("nested1")
+ .subAggregation(
+ terms("field2").field("nested1.field2")
+ .subAggregation(
+ reverseNested("nested1_to_field1")
+ .subAggregation(
+ terms("field1").field("field1")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ )
+ )
+ )
+ ).get();
+
+ assertSearchResponse(response);
+
+ Nested nested = response.getAggregations().get("nested1");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested1"));
+ assertThat(nested.getDocCount(), equalTo(25L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ Terms usernames = nested.getAggregations().get("field2");
+ assertThat(usernames, notNullValue());
+ assertThat(usernames.getBuckets().size(), equalTo(9));
+ List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets());
+
+ // nested.field2: 1
+ Terms.Bucket bucket = usernameBuckets.get(0);
+ assertThat(bucket.getKeyAsString(), equalTo("1"));
+ assertThat(bucket.getDocCount(), equalTo(6L));
+ ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat((long) reverseNested.getProperty("_count"), equalTo(5L));
+ Terms tags = reverseNested.getAggregations().get("field1");
+ assertThat((Terms) reverseNested.getProperty("field1"), sameInstance(tags));
+ List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(6));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(4L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(3L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(5).getKeyAsString(), equalTo("x"));
+ assertThat(tagsBuckets.get(5).getDocCount(), equalTo(1L));
+
+ // nested.field2: 4
+ bucket = usernameBuckets.get(1);
+ assertThat(bucket.getKeyAsString(), equalTo("4"));
+ assertThat(bucket.getDocCount(), equalTo(4L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(5));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L));
+
+ // nested.field2: 7
+ bucket = usernameBuckets.get(2);
+ assertThat(bucket.getKeyAsString(), equalTo("7"));
+ assertThat(bucket.getDocCount(), equalTo(3L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(5));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(4).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(4).getDocCount(), equalTo(1L));
+
+ // nested.field2: 2
+ bucket = usernameBuckets.get(3);
+ assertThat(bucket.getKeyAsString(), equalTo("2"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(3));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+
+ // nested.field2: 3
+ bucket = usernameBuckets.get(4);
+ assertThat(bucket.getKeyAsString(), equalTo("3"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(3));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+
+ // nested.field2: 5
+ bucket = usernameBuckets.get(5);
+ assertThat(bucket.getKeyAsString(), equalTo("5"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+ // nested.field2: 6
+ bucket = usernameBuckets.get(6);
+ assertThat(bucket.getKeyAsString(), equalTo("6"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("y"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+ // nested.field2: 8
+ bucket = usernameBuckets.get(7);
+ assertThat(bucket.getKeyAsString(), equalTo("8"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(2L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("x"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+ // nested.field2: 9
+ bucket = usernameBuckets.get(8);
+ assertThat(bucket.getKeyAsString(), equalTo("9"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("z"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+ }
+
+ @Test
+ public void simple_nested1ToRootToNested2() throws Exception { // nested -> reverseNested(to root) -> nested(deeper path)
+ SearchResponse response = client().prepareSearch("idx").setTypes("type2")
+ .addAggregation(nested("nested1").path("nested1")
+ .subAggregation(
+ reverseNested("nested1_to_root")
+ .subAggregation(nested("root_to_nested2").path("nested1.nested2"))
+ )
+ )
+ .get();
+
+ assertSearchResponse(response);
+ Nested nested = response.getAggregations().get("nested1");
+ assertThat(nested.getName(), equalTo("nested1"));
+ assertThat(nested.getDocCount(), equalTo(9L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ ReverseNested reverseNested = nested.getAggregations().get("nested1_to_root");
+ assertThat(reverseNested.getName(), equalTo("nested1_to_root"));
+ assertThat(reverseNested.getDocCount(), equalTo(4L));
+ nested = reverseNested.getAggregations().get("root_to_nested2");
+ assertThat(nested.getName(), equalTo("root_to_nested2"));
+ assertThat(nested.getDocCount(), equalTo(27L));
+ }
+
+ @Test
+ public void simple_reverseNestedToNested1() throws Exception { // reverseNested with explicit path to an intermediate nested level
+ SearchResponse response = client().prepareSearch("idx").setTypes("type2")
+ .addAggregation(nested("nested1").path("nested1.nested2")
+ .subAggregation(
+ terms("field2").field("nested1.nested2.field2").order(Terms.Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .size(0)
+ .subAggregation(
+ reverseNested("nested1_to_field1").path("nested1")
+ .subAggregation(
+ terms("field1").field("nested1.field1").order(Terms.Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ )
+ )
+ )
+ ).get();
+
+ assertSearchResponse(response);
+
+ Nested nested = response.getAggregations().get("nested1");
+ assertThat(nested, notNullValue());
+ assertThat(nested.getName(), equalTo("nested1"));
+ assertThat(nested.getDocCount(), equalTo(27L)); // 'L' suffix: lowercase 'l' reads like the digit '1'
+ assertThat(nested.getAggregations().asList().isEmpty(), is(false));
+
+ Terms usernames = nested.getAggregations().get("field2");
+ assertThat(usernames, notNullValue());
+ assertThat(usernames.getBuckets().size(), equalTo(5));
+ List<Terms.Bucket> usernameBuckets = new ArrayList<>(usernames.getBuckets());
+
+ Terms.Bucket bucket = usernameBuckets.get(0);
+ assertThat(bucket.getKeyAsString(), equalTo("0"));
+ assertThat(bucket.getDocCount(), equalTo(12L));
+ ReverseNested reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat(reverseNested.getDocCount(), equalTo(5L));
+ Terms tags = reverseNested.getAggregations().get("field1");
+ List<Terms.Bucket> tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(2));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(3L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(2L));
+
+ bucket = usernameBuckets.get(1);
+ assertThat(bucket.getKeyAsString(), equalTo("1"));
+ assertThat(bucket.getDocCount(), equalTo(6L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat(reverseNested.getDocCount(), equalTo(4L));
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+ bucket = usernameBuckets.get(2);
+ assertThat(bucket.getKeyAsString(), equalTo("2"));
+ assertThat(bucket.getDocCount(), equalTo(5L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat(reverseNested.getDocCount(), equalTo(4L));
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(4));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("a"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("b"));
+ assertThat(tagsBuckets.get(1).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(2).getKeyAsString(), equalTo("c"));
+ assertThat(tagsBuckets.get(2).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(3).getKeyAsString(), equalTo("e"));
+ assertThat(tagsBuckets.get(3).getDocCount(), equalTo(1L));
+
+ bucket = usernameBuckets.get(3);
+ assertThat(bucket.getKeyAsString(), equalTo("3"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat(reverseNested.getDocCount(), equalTo(2L));
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(2));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); // NOTE(review): no doc-count assertion for this bucket, unlike the others — intentional? confirm
+
+ bucket = usernameBuckets.get(4);
+ assertThat(bucket.getKeyAsString(), equalTo("4"));
+ assertThat(bucket.getDocCount(), equalTo(2L));
+ reverseNested = bucket.getAggregations().get("nested1_to_field1");
+ assertThat(reverseNested.getDocCount(), equalTo(2L));
+ tags = reverseNested.getAggregations().get("field1");
+ tagsBuckets = new ArrayList<>(tags.getBuckets());
+ assertThat(tagsBuckets.size(), equalTo(2));
+ assertThat(tagsBuckets.get(0).getKeyAsString(), equalTo("d"));
+ assertThat(tagsBuckets.get(0).getDocCount(), equalTo(1L));
+ assertThat(tagsBuckets.get(1).getKeyAsString(), equalTo("f")); // NOTE(review): no doc-count assertion here either — confirm
+ }
+
+ /**
+ * A reverse_nested aggregation is only valid underneath a nested
+ * aggregation. Here it is nested under a plain terms agg, so the whole
+ * request is expected to fail with a SearchPhaseExecutionException.
+ */
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testReverseNestedAggWithoutNestedAgg() throws Exception {
+ client().prepareSearch("idx")
+ .addAggregation(terms("field2").field("nested1.nested2.field2")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(
+ reverseNested("nested1_to_field1")
+ .subAggregation(
+ terms("field1").field("nested1.field1")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ )
+ )
+ ).get();
+ }
+
+ /**
+ * Pointing reverse_nested at a path that does not exist ("nested3") must
+ * not fail the request: the bucket is still returned, just empty.
+ */
+ @Test
+ public void nonExistingNestedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(nested("nested2").path("nested1.nested2").subAggregation(reverseNested("incorrect").path("nested3")))
+ .execute().actionGet();
+
+ Nested nested = searchResponse.getAggregations().get("nested2");
+ assertThat(nested, Matchers.notNullValue());
+ assertThat(nested.getName(), equalTo("nested2"));
+
+ // The "incorrect" reverse_nested bucket exists but matched no docs.
+ ReverseNested reverseNested = nested.getAggregations().get("incorrect");
+ assertThat(reverseNested.getDocCount(), is(0l));
+ }
+
+ /**
+ * One root document contributes to several terms buckets at once (it has
+ * three "category" entries). Verifies that reverse_nested correctly joins
+ * back to the single parent from each bucket, and — in the second search —
+ * that a reverse_nested with an explicit path ("sku") can step back up one
+ * nested level instead of all the way to the root.
+ */
+ @Test
+ public void testSameParentDocHavingMultipleBuckets() throws Exception {
+ // Strict mapping: nested "category", nested "sku" with a doubly-nested "sku.colors".
+ XContentBuilder mapping = jsonBuilder().startObject().startObject("product").field("dynamic", "strict").startObject("properties")
+ .startObject("id").field("type", "long").endObject()
+ .startObject("category")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .startObject("sku")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("sku_type").field("type", "string").endObject()
+ .startObject("colors")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject();
+ // Single shard, no replicas: keeps the doc counts below fully deterministic.
+ assertAcked(
+ prepareCreate("idx3")
+ .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("product", mapping)
+ );
+
+ // One product: 5 skus (3x bar1, 2x bar2), 8 colors under the bar1 skus, 3 categories.
+ client().prepareIndex("idx3", "product", "1").setRefresh(true).setSource(
+ jsonBuilder().startObject()
+ .startArray("sku")
+ .startObject()
+ .field("sku_type", "bar1")
+ .startArray("colors")
+ .startObject().field("name", "red").endObject()
+ .startObject().field("name", "green").endObject()
+ .startObject().field("name", "yellow").endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("sku_type", "bar1")
+ .startArray("colors")
+ .startObject().field("name", "red").endObject()
+ .startObject().field("name", "blue").endObject()
+ .startObject().field("name", "white").endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("sku_type", "bar1")
+ .startArray("colors")
+ .startObject().field("name", "black").endObject()
+ .startObject().field("name", "blue").endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("sku_type", "bar2")
+ .startArray("colors")
+ .startObject().field("name", "orange").endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("sku_type", "bar2")
+ .startArray("colors")
+ .startObject().field("name", "pink").endObject()
+ .endArray()
+ .endObject()
+ .endArray()
+ .startArray("category")
+ .startObject().field("name", "abc").endObject()
+ .startObject().field("name", "klm").endObject()
+ .startObject().field("name", "xyz").endObject()
+ .endArray()
+ .endObject()
+ ).get();
+
+ // First search: category -> reverse to root -> nested sku -> filter bar1 -> count.
+ SearchResponse response = client().prepareSearch("idx3")
+ .addAggregation(
+ nested("nested_0").path("category").subAggregation(
+ terms("group_by_category").field("category.name").subAggregation(
+ reverseNested("to_root").subAggregation(
+ nested("nested_1").path("sku").subAggregation(
+ filter("filter_by_sku").filter(termQuery("sku.sku_type", "bar1")).subAggregation(
+ count("sku_count").field("sku.sku_type")
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+
+ Nested nested0 = response.getAggregations().get("nested_0");
+ assertThat(nested0.getDocCount(), equalTo(3l));
+ Terms terms = nested0.getAggregations().get("group_by_category");
+ assertThat(terms.getBuckets().size(), equalTo(3));
+ // Every category bucket must join back to the same single root doc,
+ // then see all 5 skus, of which 3 are bar1.
+ for (String bucketName : new String[]{"abc", "klm", "xyz"}) {
+ logger.info("Checking results for bucket {}", bucketName);
+ Terms.Bucket bucket = terms.getBucketByKey(bucketName);
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ReverseNested toRoot = bucket.getAggregations().get("to_root");
+ assertThat(toRoot.getDocCount(), equalTo(1l));
+ Nested nested1 = toRoot.getAggregations().get("nested_1");
+ assertThat(nested1.getDocCount(), equalTo(5l));
+ Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
+ assertThat(filterByBar.getDocCount(), equalTo(3l));
+ ValueCount barCount = filterByBar.getAggregations().get("sku_count");
+ assertThat(barCount.getValue(), equalTo(3l));
+ }
+
+ // Second search: additionally descend into sku.colors and use
+ // reverse_nested with path("sku") to step back up only one level.
+ response = client().prepareSearch("idx3")
+ .addAggregation(
+ nested("nested_0").path("category").subAggregation(
+ terms("group_by_category").field("category.name").subAggregation(
+ reverseNested("to_root").subAggregation(
+ nested("nested_1").path("sku").subAggregation(
+ filter("filter_by_sku").filter(termQuery("sku.sku_type", "bar1")).subAggregation(
+ nested("nested_2").path("sku.colors").subAggregation(
+ filter("filter_sku_color").filter(termQuery("sku.colors.name", "red")).subAggregation(
+ reverseNested("reverse_to_sku").path("sku").subAggregation(
+ count("sku_count").field("sku.sku_type")
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+
+ nested0 = response.getAggregations().get("nested_0");
+ assertThat(nested0.getDocCount(), equalTo(3l));
+ terms = nested0.getAggregations().get("group_by_category");
+ assertThat(terms.getBuckets().size(), equalTo(3));
+ for (String bucketName : new String[]{"abc", "klm", "xyz"}) {
+ logger.info("Checking results for bucket {}", bucketName);
+ Terms.Bucket bucket = terms.getBucketByKey(bucketName);
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ReverseNested toRoot = bucket.getAggregations().get("to_root");
+ assertThat(toRoot.getDocCount(), equalTo(1l));
+ Nested nested1 = toRoot.getAggregations().get("nested_1");
+ assertThat(nested1.getDocCount(), equalTo(5l));
+ Filter filterByBar = nested1.getAggregations().get("filter_by_sku");
+ assertThat(filterByBar.getDocCount(), equalTo(3l));
+ // 8 colors hang off the three bar1 skus (3 + 3 + 2); two of them are "red".
+ Nested nested2 = filterByBar.getAggregations().get("nested_2");
+ assertThat(nested2.getDocCount(), equalTo(8l));
+ Filter filterBarColor = nested2.getAggregations().get("filter_sku_color");
+ assertThat(filterBarColor.getDocCount(), equalTo(2l));
+ // path("sku") joins back to the 2 sku docs owning a red color, not to the root.
+ ReverseNested reverseToBar = filterBarColor.getAggregations().get("reverse_to_sku");
+ assertThat(reverseToBar.getDocCount(), equalTo(2l));
+ ValueCount barCount = reverseToBar.getAggregations().get("sku_count");
+ assertThat(barCount.getValue(), equalTo(2l));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java
new file mode 100644
index 0000000000..d6ff926e4f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerTests.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.sampler.Sampler;
+import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Collection;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sampler;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Integration tests for the Sampler aggregation: plain sampling, diversified
+ * sampling (maxDocsPerValue on a field), nesting samplers under/over other
+ * aggregations, and behaviour against unmapped or partially mapped indices.
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class SamplerTests extends ElasticsearchIntegrationTest {
+
+ // Fixed shard count so per-shard sampling caps can be asserted as NUM_SHARDS * maxDocsPerValue.
+ public static final int NUM_SHARDS = 2;
+
+ /**
+ * Picks a random sampler execution hint, or null for the default, so the
+ * suite exercises every SamplerAggregator.ExecutionMode.
+ */
+ public String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString();
+ }
+
+
+ /**
+ * Indexes a small book catalog into "test"; also creates "idx_unmapped"
+ * (no mapping at all) and "idx_unmapped_author" (same docs but without the
+ * "author" field) for the unmapped-field scenarios below.
+ */
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0).addMapping(
+ "book", "author", "type=string,index=not_analyzed", "name", "type=string,index=analyzed", "genre",
+ "type=string,index=not_analyzed", "price", "type=float"));
+ createIndex("idx_unmapped");
+ // idx_unmapped_author is same as main index but missing author field
+ assertAcked(prepareCreate("idx_unmapped_author").setSettings(SETTING_NUMBER_OF_SHARDS, NUM_SHARDS, SETTING_NUMBER_OF_REPLICAS, 0)
+ .addMapping("book", "name", "type=string,index=analyzed", "genre", "type=string,index=not_analyzed", "price", "type=float"));
+
+ ensureGreen();
+ String data[] = {
+ // "id,cat,name,price,inStock,author_t,series_t,sequence_i,genre_s",
+ "0553573403,book,A Game of Thrones,7.99,true,George R.R. Martin,A Song of Ice and Fire,1,fantasy",
+ "0553579908,book,A Clash of Kings,7.99,true,George R.R. Martin,A Song of Ice and Fire,2,fantasy",
+ "055357342X,book,A Storm of Swords,7.99,true,George R.R. Martin,A Song of Ice and Fire,3,fantasy",
+ "0553293354,book,Foundation,17.99,true,Isaac Asimov,Foundation Novels,1,scifi",
+ "0812521390,book,The Black Company,6.99,false,Glen Cook,The Chronicles of The Black Company,1,fantasy",
+ "0812550706,book,Ender's Game,6.99,true,Orson Scott Card,Ender,1,scifi",
+ "0441385532,book,Jhereg,7.95,false,Steven Brust,Vlad Taltos,1,fantasy",
+ "0380014300,book,Nine Princes In Amber,6.99,true,Roger Zelazny,the Chronicles of Amber,1,fantasy",
+ "0805080481,book,The Book of Three,5.99,true,Lloyd Alexander,The Chronicles of Prydain,1,fantasy",
+ "080508049X,book,The Black Cauldron,5.99,true,Lloyd Alexander,The Chronicles of Prydain,2,fantasy"
+
+ };
+
+ // parts[5] = author, parts[2] = name, parts[8] = genre, parts[3] = price (see header row above).
+ for (int i = 0; i < data.length; i++) {
+ String[] parts = data[i].split(",");
+ client().prepareIndex("test", "book", "" + i).setSource("author", parts[5], "name", parts[2], "genre", parts[8], "price",Float.parseFloat(parts[3])).get();
+ client().prepareIndex("idx_unmapped_author", "book", "" + i).setSource("name", parts[2], "genre", parts[8],"price",Float.parseFloat(parts[3])).get();
+ }
+ client().admin().indices().refresh(new RefreshRequest("test")).get();
+ }
+
+ /**
+ * Regression test for issue 10719: ordering a terms agg by a metric nested
+ * inside a sampler ("sample>max_price.value") must work.
+ */
+ @Test
+ public void issue10719() throws Exception {
+ // Tests that we can refer to nested elements under a sample in a path
+ // statement
+ boolean asc = randomBoolean();
+ SearchResponse response = client().prepareSearch("test").setTypes("book").setSearchType(SearchType.QUERY_AND_FETCH)
+ .addAggregation(terms("genres")
+ .field("genre")
+ .order(Terms.Order.aggregation("sample>max_price.value", asc))
+ .subAggregation(sampler("sample").shardSize(100)
+ .subAggregation(max("max_price").field("price")))
+ ).execute().actionGet();
+ assertSearchResponse(response);
+ Terms genres = response.getAggregations().get("genres");
+ Collection<Bucket> genreBuckets = genres.getBuckets();
+ // For this test to be useful we need >1 genre bucket to compare
+ assertThat(genreBuckets.size(), greaterThan(1));
+ // NOTE(review): Double.MIN_VALUE is the smallest *positive* double, not the
+ // most negative; it works here because all prices are > 0, but
+ // -Double.MAX_VALUE would be the true lower bound.
+ double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE;
+ for (Terms.Bucket genreBucket : genres.getBuckets()) {
+ Sampler sample = genreBucket.getAggregations().get("sample");
+ Max maxPriceInGenre = sample.getAggregations().get("max_price");
+ double price = maxPriceInGenre.getValue();
+ if (asc) {
+ assertThat(price, greaterThanOrEqualTo(lastMaxPrice));
+ } else {
+ assertThat(price, lessThanOrEqualTo(lastMaxPrice));
+ }
+ lastMaxPrice = price;
+ }
+
+ }
+
+ /**
+ * Without a diversify field the sampler keeps all top docs: George R.R.
+ * Martin's 3 fantasy books all make it into the sample.
+ */
+ @Test
+ public void noDiversity() throws Exception {
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).execute().actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ Terms authors = sample.getAggregations().get("authors");
+ Collection<Bucket> testBuckets = authors.getBuckets();
+
+ long maxBooksPerAuthor = 0;
+ for (Terms.Bucket testBucket : testBuckets) {
+ maxBooksPerAuthor = Math.max(testBucket.getDocCount(), maxBooksPerAuthor);
+ }
+ assertThat(maxBooksPerAuthor, equalTo(3l));
+ }
+
+ /**
+ * Diversifying on "author" with maxDocsPerValue(1) caps each author to at
+ * most one doc per shard.
+ */
+ @Test
+ public void simpleDiversity() throws Exception {
+ int MAX_DOCS_PER_AUTHOR = 1;
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy"))
+ .setFrom(0).setSize(60)
+ .addAggregation(sampleAgg)
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ Terms authors = sample.getAggregations().get("authors");
+ Collection<Bucket> testBuckets = authors.getBuckets();
+
+ // The cap is per shard, so the global bound is NUM_SHARDS * MAX_DOCS_PER_AUTHOR.
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+ }
+ }
+
+ /**
+ * A diversified sampler under a terms agg: the per-shard author cap must
+ * hold independently inside every genre bucket.
+ */
+ @Test
+ public void nestedDiversity() throws Exception {
+ // Test multiple samples gathered under buckets made by a parent agg
+ int MAX_DOCS_PER_AUTHOR = 1;
+ TermsBuilder rootTerms = new TermsBuilder("genres").field("genre");
+
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+
+ rootTerms.subAggregation(sampleAgg);
+ SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_AND_FETCH)
+ .addAggregation(rootTerms).execute().actionGet();
+ assertSearchResponse(response);
+ Terms genres = response.getAggregations().get("genres");
+ Collection<Bucket> genreBuckets = genres.getBuckets();
+ for (Terms.Bucket genreBucket : genreBuckets) {
+ Sampler sample = genreBucket.getAggregations().get("sample");
+ Terms authors = sample.getAggregations().get("authors");
+ Collection<Bucket> testBuckets = authors.getBuckets();
+
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+ }
+ }
+ }
+
+ /**
+ * A diversified sampler nested inside another diversified sampler: both
+ * per-shard caps (genre and author) must be respected.
+ */
+ @Test
+ public void nestedSamples() throws Exception {
+ // Test samples nested under samples
+ int MAX_DOCS_PER_AUTHOR = 1;
+ int MAX_DOCS_PER_GENRE = 2;
+ SamplerAggregationBuilder rootSample = new SamplerAggregationBuilder("genreSample").shardSize(100).field("genre")
+ .maxDocsPerValue(MAX_DOCS_PER_GENRE);
+
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ sampleAgg.subAggregation(new TermsBuilder("genres").field("genre"));
+
+ rootSample.subAggregation(sampleAgg);
+ SearchResponse response = client().prepareSearch("test").setSearchType(SearchType.QUERY_AND_FETCH).addAggregation(rootSample)
+ .execute().actionGet();
+ assertSearchResponse(response);
+ Sampler genreSample = response.getAggregations().get("genreSample");
+ Sampler sample = genreSample.getAggregations().get("sample");
+
+ Terms genres = sample.getAggregations().get("genres");
+ Collection<Bucket> testBuckets = genres.getBuckets();
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE));
+ }
+
+ Terms authors = sample.getAggregations().get("authors");
+ testBuckets = authors.getBuckets();
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR));
+ }
+ }
+
+ /**
+ * Sampling a fully unmapped index must return an empty (doc count 0)
+ * sample with an empty child terms agg rather than fail.
+ */
+ @Test
+ public void unmappedChildAggNoDiversity() throws Exception {
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy"))
+ .setFrom(0).setSize(60)
+ .addAggregation(sampleAgg)
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ assertThat(sample.getDocCount(), equalTo(0l));
+ Terms authors = sample.getAggregations().get("authors");
+ assertThat(authors.getBuckets().size(), equalTo(0));
+ }
+
+
+
+ /**
+ * Searching an unmapped index together with the mapped one still yields a
+ * non-empty sample from the mapped index.
+ */
+ @Test
+ public void partiallyUnmappedChildAggNoDiversity() throws Exception {
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("idx_unmapped", "test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(sampleAgg)
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ assertThat(sample.getDocCount(), greaterThan(0l));
+ Terms authors = sample.getAggregations().get("authors");
+ assertThat(authors.getBuckets().size(), greaterThan(0));
+ }
+
+ /**
+ * Diversifying on a field that only one of the searched indices maps must
+ * still produce results from the index that has it.
+ */
+ @Test
+ public void partiallyUnmappedDiversifyField() throws Exception {
+ // One of the indexes is missing the "author" field used for
+ // diversifying results
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100).field("author").maxDocsPerValue(1);
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("idx_unmapped_author", "test").setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg)
+ .execute().actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ assertThat(sample.getDocCount(), greaterThan(0l));
+ Terms authors = sample.getAggregations().get("authors");
+ assertThat(authors.getBuckets().size(), greaterThan(0));
+ }
+
+ /**
+ * When no searched index maps the diversify field the sample is empty and
+ * the child terms agg is absent entirely (null).
+ */
+ @Test
+ public void whollyUnmappedDiversifyField() throws Exception {
+ //All of the indices are missing the "author" field used for diversifying results
+ int MAX_DOCS_PER_AUTHOR = 1;
+ SamplerAggregationBuilder sampleAgg = new SamplerAggregationBuilder("sample").shardSize(100);
+ sampleAgg.field("author").maxDocsPerValue(MAX_DOCS_PER_AUTHOR).executionHint(randomExecutionHint());
+ sampleAgg.subAggregation(new TermsBuilder("authors").field("author"));
+ SearchResponse response = client().prepareSearch("idx_unmapped", "idx_unmapped_author").setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("genre", "fantasy")).setFrom(0).setSize(60).addAggregation(sampleAgg).execute().actionGet();
+ assertSearchResponse(response);
+ Sampler sample = response.getAggregations().get("sample");
+ assertThat(sample.getDocCount(), equalTo(0l));
+ Terms authors = sample.getAggregations().get("authors");
+ assertNull(authors);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
new file mode 100644
index 0000000000..232f85f3cf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardReduceTests.java
@@ -0,0 +1,324 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.missing.Missing;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.range.Range;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateRange;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geohashGrid;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.ipRange;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.missing;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.nested;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests making sure that the reduce is propagated to all aggregations in the hierarchy when executing on a single shard
+ * These tests are based on the date histogram in combination of min_doc_count=0. In order for the date histogram to
+ * compute empty buckets, its {@code reduce()} method must be called. So by adding the date histogram under other buckets,
+ * we can make sure that the reduce is properly propagated by checking that empty buckets were created.
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class ShardReduceTests extends ElasticsearchIntegrationTest {
+
+ /**
+ * Builds one "idx" document carrying every field type the tests below
+ * aggregate on (numeric, ip, geo, date, long/double/string terms, and a
+ * nested object with its own date).
+ */
+ private IndexRequestBuilder indexDoc(String date, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("value", value)
+ .field("ip", "10.0.0." + value)
+ .field("location", GeoHashUtils.encode(52, 5, 12))
+ .field("date", date)
+ .field("term-l", 1)
+ .field("term-d", 1.5)
+ .field("term-s", "term")
+ .startObject("nested")
+ .field("date", date)
+ .endObject()
+ .endObject());
+ }
+
+ /**
+ * Indexes three docs on 2014-01-01, -02 and -04. A daily date histogram
+ * with min_doc_count=0 therefore yields 4 buckets (01..04) only if the
+ * empty 2014-01-03 bucket is created during reduce — which is exactly
+ * what each test asserts.
+ */
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ assertAcked(prepareCreate("idx")
+ .addMapping("type", "nested", "type=nested", "ip", "type=ip", "location", "type=geo_point"));
+
+ indexRandom(true,
+ indexDoc("2014-01-01", 1),
+ indexDoc("2014-01-02", 2),
+ indexDoc("2014-01-04", 3));
+ ensureSearchable();
+ }
+
+ /** Reduce must propagate through a global agg: 4 daily buckets incl. the empty one. */
+ @Test
+ public void testGlobal() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ Histogram histo = global.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /** Reduce must propagate through a filter agg: 4 daily buckets incl. the empty one. */
+ @Test
+ public void testFilter() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(filter("filter").filter(QueryBuilders.matchAllQuery())
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("filter");
+ Histogram histo = filter.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /**
+ * Reduce must propagate through a missing agg ("foobar" is absent from
+ * every doc, so all docs land in the missing bucket): 4 daily buckets.
+ */
+ @Test
+ public void testMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Missing missing = response.getAggregations().get("missing");
+ Histogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /** Reduce must propagate through a three-level global > filter > missing chain. */
+ @Test
+ public void testGlobalWithFilterWithMissing() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(global("global")
+ .subAggregation(filter("filter").filter(QueryBuilders.matchAllQuery())
+ .subAggregation(missing("missing").field("foobar")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Global global = response.getAggregations().get("global");
+ Filter filter = global.getAggregations().get("filter");
+ Missing missing = filter.getAggregations().get("missing");
+ Histogram histo = missing.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /** Reduce must propagate through a nested agg (histogram on nested.date). */
+ @Test
+ public void testNested() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(nested("nested").path("nested")
+ .subAggregation(dateHistogram("histo").field("nested.date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Nested nested = response.getAggregations().get("nested");
+ Histogram histo = nested.getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /** Reduce must propagate into a string-terms bucket (all docs share term "term"). */
+ @Test
+ public void testStringTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-s")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ Histogram histo = terms.getBucketByKey("term").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ /** Reduce must propagate into a long-terms bucket (all docs have term-l = 1). */
+ @Test
+ public void testLongTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-l")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ Histogram histo = terms.getBucketByKey("1").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDoubleTerms() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(terms("terms").field("term-d")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ Histogram histo = terms.getBucketByKey("1.5").getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(range("range").field("value").addRange("r1", 0, 10)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateRange("range").field("date").addRange("r1", "2014-01-01", "2014-01-10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testIpRange() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(ipRange("range").field("ip").addRange("r1", "10.0.0.1", "10.0.0.10")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Range range = response.getAggregations().get("range");
+ Histogram histo = range.getBuckets().get(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(histogram("topHisto").field("value").interval(5)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram topHisto = response.getAggregations().get("topHisto");
+ Histogram histo = topHisto.getBuckets().get(0).getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+ @Test
+ public void testDateHistogram() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(dateHistogram("topHisto").field("date").interval(DateHistogramInterval.MONTH)
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram topHisto = response.getAggregations().get("topHisto");
+ Histogram histo = topHisto.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+
+ }
+
+ @Test
+ public void testGeoHashGrid() throws Exception {
+
+ SearchResponse response = client().prepareSearch("idx")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addAggregation(geohashGrid("grid").field("location")
+ .subAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(0)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoHashGrid grid = response.getAggregations().get("grid");
+ Histogram histo = grid.getBuckets().iterator().next().getAggregations().get("histo");
+ assertThat(histo.getBuckets().size(), equalTo(4));
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
new file mode 100644
index 0000000000..df449ea7c2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsTests.java
@@ -0,0 +1,423 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.junit.Test;
+
+import java.util.Collection;
+import java.util.Map;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.hamcrest.Matchers.equalTo;
+
+public class ShardSizeTermsTests extends ShardSizeTests {
+
+ @Test
+ public void noShardSize_string() throws Exception {
+ createIdx("type=string,index=not_analyzed");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+ }
+ }
+
+ @Test
+ public void shardSizeEqualsSize_string() throws Exception {
+ createIdx("type=string,index=not_analyzed");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string() throws Exception {
+
+ createIdx("type=string,index=not_analyzed");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("3", 8l)
+ .put("2", 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+ }
+ }
+
+ @Test
+ public void withShardSize_string_singleShard() throws Exception {
+
+ createIdx("type=string,index=not_analyzed");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 5l)
+ .put("2", 4l)
+ .put("3", 3l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket: buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKey())));
+ }
+ }
+
+ @Test
+ public void noShardSizeTermOrder_string() throws Exception {
+ createIdx("type=string,index=not_analyzed");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<String, Long> expected = ImmutableMap.<String, Long>builder()
+ .put("1", 8l)
+ .put("2", 5l)
+ .put("3", 8l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString())));
+ }
+ }
+
+ @Test
+ public void noShardSize_long() throws Exception {
+
+ createIdx("type=long");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void shardSizeEqualsSize_long() throws Exception {
+
+ createIdx("type=long");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long() throws Exception {
+
+ createIdx("type=long");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_long_singleShard() throws Exception {
+
+ createIdx("type=long");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param)
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSizeTermOrder_long() throws Exception {
+
+ createIdx("type=long");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(2, 5l)
+ .put(3, 8l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSize_double() throws Exception {
+
+ createIdx("type=double");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void shardSizeEqualsSize_double() throws Exception {
+
+ createIdx("type=double");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3).shardSize(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 4l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double() throws Exception {
+
+ createIdx("type=double");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(3, 8l)
+ .put(2, 5l) // <-- count is now fixed
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void withShardSize_double_singleShard() throws Exception {
+
+ createIdx("type=double");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type").setRouting(routing1)
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).shardSize(5).order(Terms.Order.count(false)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 5l)
+ .put(2, 4l)
+ .put(3, 3l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+
+ @Test
+ public void noShardSizeTermOrder_double() throws Exception {
+
+ createIdx("type=double");
+
+ indexData();
+
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchAllQuery())
+ .addAggregation(terms("keys").field("key").size(3)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true)))
+ .execute().actionGet();
+
+ Terms terms = response.getAggregations().get("keys");
+ Collection<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Map<Integer, Long> expected = ImmutableMap.<Integer, Long>builder()
+ .put(1, 8l)
+ .put(2, 5l)
+ .put(3, 8l)
+ .build();
+ for (Terms.Bucket bucket : buckets) {
+ assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue())));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTests.java
new file mode 100644
index 0000000000..68da2bc384
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Ignore;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.is;
+
+@Ignore
+@ClusterScope(scope = SUITE)
+public abstract class ShardSizeTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ // we need at least 2
+ return randomIntBetween(2, DEFAULT_MAX_NUM_SHARDS);
+ }
+
+ protected void createIdx(String keyFieldMapping) {
+ assertAcked(prepareCreate("idx")
+ .addMapping("type", "key", keyFieldMapping));
+ }
+
+ protected static String routing1; // routing key to shard 1
+ protected static String routing2; // routing key to shard 2
+
+ protected void indexData() throws Exception {
+
+ /*
+
+
+ || || size = 3, shard_size = 5 || shard_size = size = 3 ||
+ ||==========||==================================================||===============================================||
+ || shard 1: || "1" - 5 | "2" - 4 | "3" - 3 | "4" - 2 | "5" - 1 || "1" - 5 | "3" - 3 | "2" - 4 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || shard 2: || "1" - 3 | "2" - 1 | "3" - 5 | "4" - 2 | "5" - 1 || "1" - 3 | "3" - 5 | "4" - 2 ||
+ ||----------||--------------------------------------------------||-----------------------------------------------||
+ || reduced: || "1" - 8 | "2" - 5 | "3" - 8 | "4" - 4 | "5" - 2 || ||
+ || || || "1" - 8, "3" - 8, "2" - 4 <= WRONG ||
+ || || "1" - 8 | "3" - 8 | "2" - 5 <= CORRECT || ||
+
+
+ */
+
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+
+ routing1 = routingKeyForShard("idx", "type", 0);
+ routing2 = routingKeyForShard("idx", "type", 1);
+
+ docs.addAll(indexDoc(routing1, "1", 5));
+ docs.addAll(indexDoc(routing1, "2", 4));
+ docs.addAll(indexDoc(routing1, "3", 3));
+ docs.addAll(indexDoc(routing1, "4", 2));
+ docs.addAll(indexDoc(routing1, "5", 1));
+
+ // total docs in shard "1" = 15
+
+ docs.addAll(indexDoc(routing2, "1", 3));
+ docs.addAll(indexDoc(routing2, "2", 1));
+ docs.addAll(indexDoc(routing2, "3", 5));
+ docs.addAll(indexDoc(routing2, "4", 2));
+ docs.addAll(indexDoc(routing2, "5", 1));
+
+ // total docs in shard "2" = 12
+
+ indexRandom(true, docs);
+
+ SearchResponse resp = client().prepareSearch("idx").setTypes("type").setRouting(routing1).setQuery(matchAllQuery()).execute().actionGet();
+ assertSearchResponse(resp);
+ long totalOnOne = resp.getHits().getTotalHits();
+ assertThat(totalOnOne, is(15l));
+ resp = client().prepareSearch("idx").setTypes("type").setRouting(routing2).setQuery(matchAllQuery()).execute().actionGet();
+ assertSearchResponse(resp);
+ long totalOnTwo = resp.getHits().getTotalHits();
+ assertThat(totalOnTwo, is(12l));
+ }
+
+ protected List<IndexRequestBuilder> indexDoc(String shard, String key, int times) throws Exception {
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[times];
+ for (int i = 0; i < times; i++) {
+ builders[i] = client().prepareIndex("idx", "type").setRouting(shard).setCreate(true).setSource(jsonBuilder()
+ .startObject()
+ .field("key", key)
+ .field("value", 1)
+ .endObject());
+ }
+ return Arrays.asList(builders);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsBackwardCompatibilityTests.java
new file mode 100644
index 0000000000..de24ea8d32
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsBackwardCompatibilityTests.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SignificantTermsBackwardCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ static final String INDEX_NAME = "testidx";
+ static final String DOC_TYPE = "doc";
+ static final String TEXT_FIELD = "text";
+ static final String CLASS_FIELD = "class";
+
+ /**
+ * Simple upgrade test for streaming significant terms buckets
+ */
+ @Test
+ public void testBucketStreaming() throws IOException, ExecutionException, InterruptedException {
+
+ logger.debug("testBucketStreaming: indexing documents");
+ String type = randomBoolean() ? "string" : "long";
+ String settings = "{\"index.number_of_shards\": 5, \"index.number_of_replicas\": 0}";
+ index01Docs(type, settings);
+
+ logClusterState();
+ boolean upgraded;
+ int upgradedNodesCounter = 1;
+ do {
+ logger.debug("testBucketStreaming: upgrading {}st node", upgradedNodesCounter++);
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureGreen();
+ logClusterState();
+ checkSignificantTermsAggregationCorrect();
+ } while (upgraded);
+ logger.debug("testBucketStreaming: done testing significant terms while upgrading");
+ }
+
+ private void index01Docs(String type, String settings) throws ExecutionException, InterruptedException {
+ String mappings = "{\"doc\": {\"properties\":{\"" + TEXT_FIELD + "\": {\"type\":\"" + type + "\"},\"" + CLASS_FIELD
+ + "\": {\"type\":\"string\"}}}}";
+ assertAcked(prepareCreate(INDEX_NAME).setSettings(settings).addMapping("doc", mappings));
+ String[] gb = {"0", "1"};
+ List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "1")
+ .setSource(TEXT_FIELD, "1", CLASS_FIELD, "1"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "2")
+ .setSource(TEXT_FIELD, "1", CLASS_FIELD, "1"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "3")
+ .setSource(TEXT_FIELD, "0", CLASS_FIELD, "0"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "4")
+ .setSource(TEXT_FIELD, "0", CLASS_FIELD, "0"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "5")
+ .setSource(TEXT_FIELD, gb, CLASS_FIELD, "1"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "6")
+ .setSource(TEXT_FIELD, gb, CLASS_FIELD, "0"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "7")
+ .setSource(TEXT_FIELD, "0", CLASS_FIELD, "0"));
+ indexRandom(true, indexRequestBuilderList);
+ }
+
+ private void checkSignificantTermsAggregationCorrect() {
+
+ SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+ .addAggregation(new TermsBuilder("class").field(CLASS_FIELD).subAggregation(
+ new SignificantTermsBuilder("sig_terms")
+ .field(TEXT_FIELD)))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ StringTerms classes = (StringTerms) response.getAggregations().get("class");
+ assertThat(classes.getBuckets().size(), equalTo(2));
+ for (Terms.Bucket classBucket : classes.getBuckets()) {
+ Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
+ assertTrue(aggs.containsKey("sig_terms"));
+ SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms");
+ assertThat(agg.getBuckets().size(), equalTo(1));
+ String term = agg.iterator().next().getKeyAsString();
+ String classTerm = classBucket.getKeyAsString();
+ assertTrue(term.equals(classTerm));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java
new file mode 100644
index 0000000000..26d06dfb86
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreTests.java
@@ -0,0 +1,747 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
+import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptNoParams;
+import org.elasticsearch.search.aggregations.bucket.script.NativeSignificanceScoreScriptWithParams;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificantTermsHeuristicModule;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.TransportSignificantTermsHeuristicModule;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class SignificantTermsSignificanceScoreTests extends ElasticsearchIntegrationTest {
+
+ static final String INDEX_NAME = "testidx";
+ static final String DOC_TYPE = "doc";
+ static final String TEXT_FIELD = "text";
+ static final String CLASS_FIELD = "class";
+
+    // Node-level test settings: register the custom significance-heuristic
+    // plugin on every node and point path.conf at the test "config" directory
+    // (presumably where the file-based significance scripts live — confirm).
+    @Override
+    public Settings nodeSettings(int nodeOrdinal) {
+        return settingsBuilder()
+                .put(super.nodeSettings(nodeOrdinal))
+                .put("plugin.types", CustomSignificanceHeuristicPlugin.class.getName())
+                .put("path.conf", this.getDataPath("config"))
+                .build();
+    }
+
+ public String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString();
+ }
+
+    /**
+     * End-to-end check that a significance heuristic contributed by a plugin
+     * ({@link SimpleHeuristic}) is picked up and produces its expected scores
+     * (2.0 for the term matching the class, 1.0 for the other term).
+     */
+    @Test
+    public void testPlugin() throws Exception {
+        String type = randomBoolean() ? "string" : "long";
+        String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
+        index01Docs(type, settings);
+        SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+                .addAggregation(new TermsBuilder("class")
+                        .field(CLASS_FIELD)
+                        .subAggregation((new SignificantTermsBuilder("sig_terms"))
+                                .field(TEXT_FIELD)
+                                .significanceHeuristic(new SimpleHeuristic.SimpleHeuristicBuilder())
+                                .minDocCount(1)
+                        )
+                )
+                .execute()
+                .actionGet();
+        assertSearchResponse(response);
+        assertSimpleHeuristicScores(response);
+
+        // we run the same test again but this time we do not call assertSearchResponse() before the assertions
+        // the reason is that this would trigger toXContent and we would like to check that this has no potential side effects
+        response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+                .addAggregation(new TermsBuilder("class")
+                        .field(CLASS_FIELD)
+                        .subAggregation((new SignificantTermsBuilder("sig_terms"))
+                                .field(TEXT_FIELD)
+                                .significanceHeuristic(new SimpleHeuristic.SimpleHeuristicBuilder())
+                                .minDocCount(1)
+                        )
+                )
+                .execute()
+                .actionGet();
+        assertSimpleHeuristicScores(response);
+    }
+
+    // Shared verification for testPlugin (was duplicated verbatim): each of the
+    // two class buckets must contain two significant-terms buckets, the first
+    // keyed like its class with score 2.0, the second with score 1.0 — the two
+    // values SimpleHeuristic.getScore() can produce.
+    private void assertSimpleHeuristicScores(SearchResponse response) {
+        StringTerms classes = (StringTerms) response.getAggregations().get("class");
+        assertThat(classes.getBuckets().size(), equalTo(2));
+        for (Terms.Bucket classBucket : classes.getBuckets()) {
+            Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
+            assertTrue(aggs.containsKey("sig_terms"));
+            SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms");
+            assertThat(agg.getBuckets().size(), equalTo(2));
+            Iterator<SignificantTerms.Bucket> bucketIterator = agg.iterator();
+            SignificantTerms.Bucket sigBucket = bucketIterator.next();
+            String term = sigBucket.getKeyAsString();
+            String classTerm = classBucket.getKeyAsString();
+            assertTrue(term.equals(classTerm));
+            assertThat(sigBucket.getSignificanceScore(), closeTo(2.0, 1.e-8));
+            sigBucket = bucketIterator.next();
+            assertThat(sigBucket.getSignificanceScore(), closeTo(1.0, 1.e-8));
+        }
+    }
+
+    // Test plugin that wires the SimpleHeuristic into both the parsing side
+    // (SignificantTermsHeuristicModule) and the transport/streaming side
+    // (TransportSignificantTermsHeuristicModule), and registers the two native
+    // significance-score scripts used by the script-heuristic tests below.
+    public static class CustomSignificanceHeuristicPlugin extends AbstractPlugin {
+
+        @Override
+        public String name() {
+            return "test-plugin-significance-heuristic";
+        }
+
+        @Override
+        public String description() {
+            return "Significance heuristic plugin";
+        }
+
+        // request-parsing registration
+        public void onModule(SignificantTermsHeuristicModule significanceModule) {
+            significanceModule.registerParser(SimpleHeuristic.SimpleHeuristicParser.class);
+        }
+
+        // wire-format (stream) registration
+        public void onModule(TransportSignificantTermsHeuristicModule significanceModule) {
+            significanceModule.registerStream(SimpleHeuristic.STREAM);
+        }
+        // native script registration for the ScriptHeuristic tests
+        public void onModule(ScriptModule module) {
+            module.registerScript(NativeSignificanceScoreScriptNoParams.NATIVE_SIGNIFICANCE_SCORE_SCRIPT_NO_PARAMS, NativeSignificanceScoreScriptNoParams.Factory.class);
+            module.registerScript(NativeSignificanceScoreScriptWithParams.NATIVE_SIGNIFICANCE_SCORE_SCRIPT_WITH_PARAMS, NativeSignificanceScoreScriptWithParams.Factory.class);
+        }
+    }
+
+    // Minimal custom heuristic used to verify plugin registration: it has no
+    // state, parses as "simple": {}, and scores every term either 2.0 or 1.0.
+    public static class SimpleHeuristic extends SignificanceHeuristic {
+
+        protected static final String[] NAMES = {"simple"};
+
+        // Stream used to deserialize the heuristic from the transport layer.
+        public static final SignificanceHeuristicStreams.Stream STREAM = new SignificanceHeuristicStreams.Stream() {
+            @Override
+            public SignificanceHeuristic readResult(StreamInput in) throws IOException {
+                return readFrom(in);
+            }
+
+            @Override
+            public String getName() {
+                return NAMES[0];
+            }
+        };
+
+        // Stateless: nothing to read from the stream.
+        public static SignificanceHeuristic readFrom(StreamInput in) throws IOException {
+            return new SimpleHeuristic();
+        }
+
+        /**
+         * @param subsetFreq   The frequency of the term in the selected sample
+         * @param subsetSize   The size of the selected sample (typically number of docs)
+         * @param supersetFreq The frequency of the term in the superset from which the sample was taken
+         * @param supersetSize The size of the superset from which the sample was taken (typically number of docs)
+         * @return a "significance" score
+         */
+        @Override
+        public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) {
+            // NOTE(review): this is LONG division — each ratio truncates to 0
+            // unless freq >= size. With the index01Docs data this yields exactly
+            // the 2.0/1.0 scores testPlugin asserts, so it appears deliberate,
+            // but confirm before reusing this heuristic elsewhere.
+            return subsetFreq / subsetSize > supersetFreq / supersetSize ? 2.0 : 1.0;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(STREAM.getName());
+        }
+
+        public static class SimpleHeuristicParser implements SignificanceHeuristicParser {
+
+            // No options to parse: consume the (empty) object token and return.
+            @Override
+            public SignificanceHeuristic parse(XContentParser parser) throws IOException, QueryParsingException {
+                parser.nextToken();
+                return new SimpleHeuristic();
+            }
+
+            @Override
+            public String[] getNames() {
+                return NAMES;
+            }
+        }
+
+        public static class SimpleHeuristicBuilder implements SignificanceHeuristicBuilder {
+
+            // Renders as "simple": {} in the request body.
+            @Override
+            public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                builder.startObject(STREAM.getName()).endObject();
+                return builder;
+            }
+        }
+    }
+
+
+    // Verifies both the aggregation result and its exact XContent rendering:
+    // the expected JSON strings below pin the wire format (keys, key_as_string
+    // for the "long" mapping, score and bg_count values) byte for byte.
+    @Test
+    public void testXContentResponse() throws Exception {
+
+        String type = randomBoolean() ? "string" : "long";
+        String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
+        index01Docs(type, settings);
+        SearchResponse response = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+                .addAggregation(new TermsBuilder("class").field(CLASS_FIELD).subAggregation(new SignificantTermsBuilder("sig_terms").field(TEXT_FIELD)))
+                .execute()
+                .actionGet();
+        assertSearchResponse(response);
+        StringTerms classes = (StringTerms) response.getAggregations().get("class");
+        assertThat(classes.getBuckets().size(), equalTo(2));
+        for (Terms.Bucket classBucket : classes.getBuckets()) {
+            Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
+            assertTrue(aggs.containsKey("sig_terms"));
+            SignificantTerms agg = (SignificantTerms) aggs.get("sig_terms");
+            assertThat(agg.getBuckets().size(), equalTo(1));
+            String term = agg.iterator().next().getKeyAsString();
+            String classTerm = classBucket.getKeyAsString();
+            assertTrue(term.equals(classTerm));
+        }
+
+        // render the terms agg with null params and compare against the exact
+        // expected JSON (only difference between types: numeric keys also carry
+        // a key_as_string field)
+        XContentBuilder responseBuilder = XContentFactory.jsonBuilder();
+        classes.toXContent(responseBuilder, null);
+        String result = null;
+        if (type.equals("long")) {
+            result = "\"class\"{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"sig_terms\":{\"doc_count\":4,\"buckets\":[{\"key\":0,\"key_as_string\":\"0\",\"doc_count\":4,\"score\":0.39999999999999997,\"bg_count\":5}]}},{\"key\":\"1\",\"doc_count\":3,\"sig_terms\":{\"doc_count\":3,\"buckets\":[{\"key\":1,\"key_as_string\":\"1\",\"doc_count\":3,\"score\":0.75,\"bg_count\":4}]}}]}";
+        } else {
+            result = "\"class\"{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"sig_terms\":{\"doc_count\":4,\"buckets\":[{\"key\":\"0\",\"doc_count\":4,\"score\":0.39999999999999997,\"bg_count\":5}]}},{\"key\":\"1\",\"doc_count\":3,\"sig_terms\":{\"doc_count\":3,\"buckets\":[{\"key\":\"1\",\"doc_count\":3,\"score\":0.75,\"bg_count\":4}]}}]}";
+        }
+        assertThat(responseBuilder.string(), equalTo(result));
+
+    }
+
+ @Test
+ public void testDeletesIssue7951() throws Exception {
+ String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
+ String mappings = "{\"doc\": {\"properties\":{\"text\": {\"type\":\"string\",\"index\":\"not_analyzed\"}}}}";
+ assertAcked(prepareCreate(INDEX_NAME).setSettings(settings).addMapping("doc", mappings));
+ String[] cat1v1 = {"constant", "one"};
+ String[] cat1v2 = {"constant", "uno"};
+ String[] cat2v1 = {"constant", "two"};
+ String[] cat2v2 = {"constant", "duo"};
+ List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "1")
+ .setSource(TEXT_FIELD, cat1v1, CLASS_FIELD, "1"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "2")
+ .setSource(TEXT_FIELD, cat1v2, CLASS_FIELD, "1"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "3")
+ .setSource(TEXT_FIELD, cat2v1, CLASS_FIELD, "2"));
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "4")
+ .setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2"));
+ indexRandom(true, false, indexRequestBuilderList);
+
+ // Now create some holes in the index with selective deletes caused by updates.
+ // This is the scenario that caused this issue https://github.com/elasticsearch/elasticsearch/issues/7951
+ // Scoring algorithms throw exceptions if term docFreqs exceed the reported size of the index
+ // from which they are taken so need to make sure this doesn't happen.
+ String[] text = cat1v1;
+ indexRequestBuilderList.clear();
+ for (int i = 0; i < 50; i++) {
+ text = text == cat1v2 ? cat1v1 : cat1v2;
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1"));
+ }
+ indexRandom(true, false, indexRequestBuilderList);
+
+ SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+ .addAggregation(new TermsBuilder("class")
+ .field(CLASS_FIELD)
+ .subAggregation(
+ new SignificantTermsBuilder("sig_terms")
+ .field(TEXT_FIELD)
+ .minDocCount(1)))
+ .execute()
+ .actionGet();
+ }
+
+    // For each heuristic family, scoring against the full-index background must
+    // equal scoring against an explicitly supplied separate background set
+    // (builder flag true = expects superset, false = expects separate sets).
+    @Test
+    public void testBackgroundVsSeparateSet() throws Exception {
+        String type = randomBoolean() ? "string" : "long";
+        String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
+        index01Docs(type, settings);
+        testBackgroundVsSeparateSet(new MutualInformation.MutualInformationBuilder(true, true), new MutualInformation.MutualInformationBuilder(true, false));
+        testBackgroundVsSeparateSet(new ChiSquare.ChiSquareBuilder(true, true), new ChiSquare.ChiSquareBuilder(true, false));
+        testBackgroundVsSeparateSet(new GND.GNDBuilder(true), new GND.GNDBuilder(false));
+    }
+
+ // compute significance score by
+ // 1. terms agg on class and significant terms
+ // 2. filter buckets and set the background to the other class and set is_background false
+ // both should yield exact same result
+    // compute significance score by
+    // 1. terms agg on class and significant terms
+    // 2. filter buckets and set the background to the other class and set is_background false
+    // both should yield exact same result
+    public void testBackgroundVsSeparateSet(SignificanceHeuristicBuilder significanceHeuristicExpectingSuperset, SignificanceHeuristicBuilder significanceHeuristicExpectingSeparateSets) throws Exception {
+
+        // approach 1: class buckets with the whole index as background
+        SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+                .addAggregation(new TermsBuilder("class")
+                        .field(CLASS_FIELD)
+                        .subAggregation(
+                                new SignificantTermsBuilder("sig_terms")
+                                        .field(TEXT_FIELD)
+                                        .minDocCount(1)
+                                        .significanceHeuristic(
+                                                significanceHeuristicExpectingSuperset)))
+                .execute()
+                .actionGet();
+        assertSearchResponse(response1);
+        // approach 2: one filter agg per class, each with the OTHER class as
+        // explicit backgroundFilter
+        SearchResponse response2 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
+                .addAggregation((new FilterAggregationBuilder("0"))
+                        .filter(QueryBuilders.termQuery(CLASS_FIELD, "0"))
+                        .subAggregation(new SignificantTermsBuilder("sig_terms")
+                                .field(TEXT_FIELD)
+                                .minDocCount(1)
+                                .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "1"))
+                                .significanceHeuristic(significanceHeuristicExpectingSeparateSets)))
+                .addAggregation((new FilterAggregationBuilder("1"))
+                        .filter(QueryBuilders.termQuery(CLASS_FIELD, "1"))
+                        .subAggregation(new SignificantTermsBuilder("sig_terms")
+                                .field(TEXT_FIELD)
+                                .minDocCount(1)
+                                .backgroundFilter(QueryBuilders.termQuery(CLASS_FIELD, "0"))
+                                .significanceHeuristic(significanceHeuristicExpectingSeparateSets)))
+                .execute()
+                .actionGet();
+
+        // scoreXY = score of term "Y" inside class bucket "X"
+        SignificantTerms sigTerms0 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get("class")).getBucketByKey("0").getAggregations().asMap().get("sig_terms")));
+        assertThat(sigTerms0.getBuckets().size(), equalTo(2));
+        double score00Background = sigTerms0.getBucketByKey("0").getSignificanceScore();
+        double score01Background = sigTerms0.getBucketByKey("1").getSignificanceScore();
+        SignificantTerms sigTerms1 = ((SignificantTerms) (((StringTerms) response1.getAggregations().get("class")).getBucketByKey("1").getAggregations().asMap().get("sig_terms")));
+        double score10Background = sigTerms1.getBucketByKey("0").getSignificanceScore();
+        double score11Background = sigTerms1.getBucketByKey("1").getSignificanceScore();
+
+        double score00SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("0")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("0").getSignificanceScore();
+        double score01SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("0")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("1").getSignificanceScore();
+        double score10SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("1")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("0").getSignificanceScore();
+        double score11SeparateSets = ((SignificantTerms) ((InternalFilter) response2.getAggregations().get("1")).getAggregations().getAsMap().get("sig_terms")).getBucketByKey("1").getSignificanceScore();
+
+        // both approaches must produce bit-identical scores
+        assertThat(score00Background, equalTo(score00SeparateSets));
+        assertThat(score01Background, equalTo(score01SeparateSets));
+        assertThat(score10Background, equalTo(score10SeparateSets));
+        assertThat(score11Background, equalTo(score11SeparateSets));
+    }
+
+    /**
+     * Indexes the fixed 7-doc "0/1" corpus used by several tests: docs 1-2 are
+     * ("1", class 1), docs 3-4 and 7 are ("0", class 0), docs 5-6 carry both
+     * terms (one in each class).
+     */
+    private void index01Docs(String type, String settings) throws ExecutionException, InterruptedException {
+        String mappings = "{\"doc\": {\"properties\":{\"text\": {\"type\":\"" + type + "\"}}}}";
+        assertAcked(prepareCreate(INDEX_NAME).setSettings(settings).addMapping("doc", mappings));
+        String[] gb = {"0", "1"};
+        // doc id i+1 gets text textValues[i] and class classValues[i]
+        Object[] textValues = {"1", "1", "0", "0", gb, gb, "0"};
+        String[] classValues = {"1", "1", "0", "0", "1", "0", "0"};
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        for (int i = 0; i < textValues.length; i++) {
+            builders.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, Integer.toString(i + 1))
+                    .setSource(TEXT_FIELD, textValues[i], CLASS_FIELD, classValues[i]));
+        }
+        indexRandom(true, false, builders);
+    }
+
+    // Heuristics built with includeNegatives=true must score mirror-image
+    // classes identically (class A vs B have symmetric term frequencies).
+    @Test
+    public void testScoresEqualForPositiveAndNegative() throws Exception {
+        indexEqualTestData();
+        testScoresEqualForPositiveAndNegative(new MutualInformation.MutualInformationBuilder(true, true));
+        testScoresEqualForPositiveAndNegative(new ChiSquare.ChiSquareBuilder(true, true));
+    }
+
+    public void testScoresEqualForPositiveAndNegative(SignificanceHeuristicBuilder heuristic) throws Exception {
+
+        //check that results for both classes are the same with exclude negatives = false and classes are routing ids
+        SearchResponse response = client().prepareSearch("test")
+                .addAggregation(new TermsBuilder("class").field("class").subAggregation(new SignificantTermsBuilder("mySignificantTerms")
+                        .field("text")
+                        .executionHint(randomExecutionHint())
+                        .significanceHeuristic(heuristic)
+                        .minDocCount(1).shardSize(1000).size(1000)))
+                .execute()
+                .actionGet();
+        assertSearchResponse(response);
+        StringTerms classes = (StringTerms) response.getAggregations().get("class");
+        assertThat(classes.getBuckets().size(), equalTo(2));
+        // walk the two class buckets in parallel: buckets are expected to pair
+        // up by key with (near-)equal significance scores
+        Iterator<Terms.Bucket> classBuckets = classes.getBuckets().iterator();
+        Collection<SignificantTerms.Bucket> classA = ((SignificantTerms) classBuckets.next().getAggregations().get("mySignificantTerms")).getBuckets();
+        Iterator<SignificantTerms.Bucket> classBBucketIterator = ((SignificantTerms) classBuckets.next().getAggregations().get("mySignificantTerms")).getBuckets().iterator();
+        assertThat(classA.size(), greaterThan(0));
+        for (SignificantTerms.Bucket classABucket : classA) {
+            SignificantTerms.Bucket classBBucket = classBBucketIterator.next();
+            assertThat(classABucket.getKey(), equalTo(classBBucket.getKey()));
+            assertThat(classABucket.getSignificanceScore(), closeTo(classBBucket.getSignificanceScore(), 1.e-5));
+        }
+    }
+
+    /**
+     * Indexes a corpus whose term frequencies are mirror images across classes
+     * A and B (a/b in A correspond to c/d in B), so symmetric heuristics must
+     * score both classes identically.
+     */
+    private void indexEqualTestData() throws ExecutionException, InterruptedException {
+        assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0).addMapping("doc",
+                "text", "type=string", "class", "type=string"));
+        createIndex("idx_unmapped");
+
+        ensureGreen();
+        // each row is "<class>\t<text>"
+        String data[] = {
+                "A\ta",
+                "A\ta",
+                "A\tb",
+                "A\tb",
+                "A\tb",
+                "B\tc",
+                "B\tc",
+                "B\tc",
+                "B\tc",
+                "B\td",
+                "B\td",
+                "B\td",
+                "B\td",
+                "B\td",
+                "A\tc d",
+                "B\ta b"
+        };
+
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        int docId = 0;
+        for (String row : data) {
+            int tab = row.indexOf('\t');
+            builders.add(client().prepareIndex("test", "doc", String.valueOf(docId++))
+                    .setSource("class", row.substring(0, tab), "text", row.substring(tab + 1)));
+        }
+        indexRandom(true, false, builders);
+    }
+
+    // A script heuristic computing subsetDf + subsetSize + supersetDf +
+    // supersetSize must reproduce exactly that sum for every returned bucket,
+    // regardless of script kind (inline / indexed / file / native — randomized).
+    @Test
+    public void testScriptScore() throws ExecutionException, InterruptedException, IOException {
+        indexRandomFrequencies01(randomBoolean() ? "string" : "long");
+        ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = getScriptSignificanceHeuristicBuilder();
+        ensureYellow();
+        SearchResponse response = client().prepareSearch(INDEX_NAME)
+                .addAggregation(new TermsBuilder("class").field(CLASS_FIELD).subAggregation(new SignificantTermsBuilder("mySignificantTerms")
+                        .field(TEXT_FIELD)
+                        .executionHint(randomExecutionHint())
+                        .significanceHeuristic(scriptHeuristicBuilder)
+                        .minDocCount(1).shardSize(2).size(2)))
+                .execute()
+                .actionGet();
+        assertSearchResponse(response);
+        for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) {
+            for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) {
+                assertThat(bucket.getSignificanceScore(), is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()));
+            }
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    /*
+     * Same check as testScriptScore but exercising the deprecated string-based
+     * script API. TODO Remove in 2.0
+     */
+    @Test
+    public void testScriptScoreOldScriptAPI() throws ExecutionException, InterruptedException, IOException {
+        indexRandomFrequencies01(randomBoolean() ? "string" : "long");
+        ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = getScriptSignificanceHeuristicBuilderOldScriptAPI();
+        ensureYellow();
+        SearchResponse response = client()
+                .prepareSearch(INDEX_NAME)
+                .addAggregation(
+                        new TermsBuilder("class").field(CLASS_FIELD).subAggregation(
+                                new SignificantTermsBuilder("mySignificantTerms").field(TEXT_FIELD).executionHint(randomExecutionHint())
+                                        .significanceHeuristic(scriptHeuristicBuilder).minDocCount(1).shardSize(2).size(2))).execute()
+                .actionGet();
+        assertSearchResponse(response);
+        for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("class")).getBuckets()) {
+            for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) {
+                assertThat(bucket.getSignificanceScore(),
+                        is((double) bucket.getSubsetDf() + bucket.getSubsetSize() + bucket.getSupersetDf() + bucket.getSupersetSize()));
+            }
+        }
+    }
+
+    // Regression-style check: a script heuristic using long counters in an
+    // arithmetic expression must evaluate with the default scripting engine
+    // and reproduce subsetDf / (supersetDf - subsetDf + 1) for every bucket.
+    @Test
+    public void testNoNumberFormatExceptionWithDefaultScriptingEngine() throws ExecutionException, InterruptedException, IOException {
+        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)));
+        index("test", "doc", "1", "{\"field\":\"a\"}");
+        index("test", "doc", "11", "{\"field\":\"a\"}");
+        index("test", "doc", "2", "{\"field\":\"b\"}");
+        index("test", "doc", "22", "{\"field\":\"b\"}");
+        index("test", "doc", "3", "{\"field\":\"a b\"}");
+        index("test", "doc", "33", "{\"field\":\"a b\"}");
+        ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = new ScriptHeuristic.ScriptHeuristicBuilder();
+        scriptHeuristicBuilder.setScript(new Script("_subset_freq/(_superset_freq - _subset_freq + 1)"));
+        ensureYellow();
+        refresh();
+        SearchResponse response = client()
+                .prepareSearch("test")
+                .addAggregation(
+                        new TermsBuilder("letters").field("field").subAggregation(
+                                new SignificantTermsBuilder("mySignificantTerms").field("field").executionHint(randomExecutionHint())
+                                        .significanceHeuristic(scriptHeuristicBuilder).minDocCount(1).shardSize(2).size(2))).execute()
+                .actionGet();
+        assertSearchResponse(response);
+        assertThat(((Terms) response.getAggregations().get("letters")).getBuckets().size(), equalTo(2));
+        for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("letters")).getBuckets()) {
+            assertThat(((SignificantStringTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets().size(), equalTo(2));
+            for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) {
+                assertThat(bucket.getSignificanceScore(),
+                        closeTo((double) bucket.getSubsetDf() / (bucket.getSupersetDf() - bucket.getSubsetDf() + 1), 1.e-6));
+            }
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Same check as above but via the deprecated string-based script API.
+    // TODO Remove in 2.0
+    @Test
+    public void testNoNumberFormatExceptionWithDefaultScriptingEngineOldScriptAPI() throws ExecutionException, InterruptedException,
+            IOException {
+        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1)));
+        index("test", "doc", "1", "{\"field\":\"a\"}");
+        index("test", "doc", "11", "{\"field\":\"a\"}");
+        index("test", "doc", "2", "{\"field\":\"b\"}");
+        index("test", "doc", "22", "{\"field\":\"b\"}");
+        index("test", "doc", "3", "{\"field\":\"a b\"}");
+        index("test", "doc", "33", "{\"field\":\"a b\"}");
+        ScriptHeuristic.ScriptHeuristicBuilder scriptHeuristicBuilder = new ScriptHeuristic.ScriptHeuristicBuilder();
+        scriptHeuristicBuilder.setScript("_subset_freq/(_superset_freq - _subset_freq + 1)");
+        ensureYellow();
+        refresh();
+        SearchResponse response = client().prepareSearch("test")
+                .addAggregation(new TermsBuilder("letters").field("field").subAggregation(new SignificantTermsBuilder("mySignificantTerms")
+                        .field("field")
+                        .executionHint(randomExecutionHint())
+                        .significanceHeuristic(scriptHeuristicBuilder)
+                        .minDocCount(1).shardSize(2).size(2)))
+                .execute()
+                .actionGet();
+        assertSearchResponse(response);
+        assertThat(((Terms) response.getAggregations().get("letters")).getBuckets().size(), equalTo(2));
+        for (Terms.Bucket classBucket : ((Terms) response.getAggregations().get("letters")).getBuckets()) {
+            assertThat(((SignificantStringTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets().size(), equalTo(2));
+            for (SignificantTerms.Bucket bucket : ((SignificantTerms) classBucket.getAggregations().get("mySignificantTerms")).getBuckets()) {
+                assertThat(bucket.getSignificanceScore(), closeTo((double)bucket.getSubsetDf() /(bucket.getSupersetDf() - bucket.getSubsetDf()+ 1), 1.e-6));
+            }
+        }
+    }
+
+    /**
+     * Builds a ScriptHeuristicBuilder with a randomly chosen script flavour
+     * (0 = inline, 1 = indexed, 2 = file, 3 = native) and optionally a "param"
+     * parameter. All variants compute the same score:
+     * subset_freq + subset_size + superset_freq + superset_size.
+     */
+    private ScriptHeuristic.ScriptHeuristicBuilder getScriptSignificanceHeuristicBuilder() throws IOException {
+        Map<String, Object> params = null;
+        Script script = null;
+        String lang = null;
+        if (randomBoolean()) {
+            params = new HashMap<>();
+            params.put("param", randomIntBetween(1, 100));
+        }
+        int randomScriptKind = randomIntBetween(0, 3);
+        if (randomBoolean()) {
+            lang = "groovy";
+        }
+        switch (randomScriptKind) {
+        case 0: {
+            // inline script; the params variant multiplies and divides by
+            // "param" so the score is unchanged but the parameter is exercised
+            if (params == null) {
+                script = new Script("return _subset_freq + _subset_size + _superset_freq + _superset_size");
+            } else {
+                script = new Script("return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param",
+                        ScriptType.INLINE, lang, params);
+            }
+            break;
+        }
+        case 1: {
+            // indexed script: store it in the script index first
+            String scriptString;
+            if (params == null) {
+                scriptString = "return _subset_freq + _subset_size + _superset_freq + _superset_size";
+            } else {
+                scriptString = "return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param";
+            }
+            client().prepareIndex().setIndex(ScriptService.SCRIPT_INDEX).setType(ScriptService.DEFAULT_LANG).setId("my_script")
+                    .setSource(XContentFactory.jsonBuilder().startObject().field("script", scriptString).endObject()).get();
+            refresh();
+            script = new Script("my_script", ScriptType.INDEXED, lang, params);
+            break;
+        }
+        case 2: {
+            // file scripts shipped via path.conf (see nodeSettings)
+            if (params == null) {
+                script = new Script("significance_script_no_params", ScriptType.FILE, lang, null);
+            } else {
+                script = new Script("significance_script_with_params", ScriptType.FILE, lang, params);
+            }
+            break;
+        }
+        case 3: {
+            // native scripts registered by CustomSignificanceHeuristicPlugin;
+            // removed a dead `lang = "native"` store and an empty
+            // `if (randomBoolean()) {}` block left over from editing
+            logger.info("NATIVE SCRIPT");
+            if (params == null) {
+                script = new Script("native_significance_score_script_no_params", ScriptType.INLINE, "native", null);
+            } else {
+                script = new Script("native_significance_score_script_with_params", ScriptType.INLINE, "native", params);
+            }
+            break;
+        }
+        }
+        ScriptHeuristic.ScriptHeuristicBuilder builder = new ScriptHeuristic.ScriptHeuristicBuilder().setScript(script);
+
+        return builder;
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    /*
+     * Deprecated string-based variant of getScriptSignificanceHeuristicBuilder:
+     * selects among inline / indexed / file / native flavours and configures
+     * the builder via setLang/setParams/setScriptId/setScriptFile.
+     * TODO Remove in 2.0
+     */
+    private ScriptHeuristic.ScriptHeuristicBuilder getScriptSignificanceHeuristicBuilderOldScriptAPI() throws IOException {
+        Map<String, Object> params = null;
+        String script = null;
+        String lang = null;
+        String scriptId = null;
+        String scriptFile = null;
+        if (randomBoolean()) {
+            params = new HashMap<>();
+            params.put("param", randomIntBetween(1, 100));
+        }
+        int randomScriptKind = randomIntBetween(0, 3);
+        if (randomBoolean()) {
+            lang = "groovy";
+        }
+        switch (randomScriptKind) {
+        case 0: {
+            // inline script (score unchanged by the param multiply/divide)
+            if (params == null) {
+                script = "return _subset_freq + _subset_size + _superset_freq + _superset_size";
+            } else {
+                script = "return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param";
+            }
+            break;
+        }
+        case 1: {
+            // indexed script: store it, then reference by id only
+            if (params == null) {
+                script = "return _subset_freq + _subset_size + _superset_freq + _superset_size";
+            } else {
+                script = "return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param";
+            }
+            client().prepareIndex().setIndex(ScriptService.SCRIPT_INDEX).setType(ScriptService.DEFAULT_LANG).setId("my_script")
+                    .setSource(XContentFactory.jsonBuilder().startObject()
+                            .field("script", script)
+                            .endObject()).get();
+            refresh();
+            scriptId = "my_script";
+            script = null;
+            break;
+        }
+        case 2: {
+            if (params == null) {
+                scriptFile = "significance_script_no_params";
+            } else {
+                scriptFile = "significance_script_with_params";
+            }
+            break;
+        }
+        case 3: {
+            // native scripts; removed an empty `if (randomBoolean()) {}` block.
+            // Unlike the new-API variant, `lang` IS consumed below via setLang().
+            logger.info("NATIVE SCRIPT");
+            if (params == null) {
+                script = "native_significance_score_script_no_params";
+            } else {
+                script = "native_significance_score_script_with_params";
+            }
+            lang = "native";
+            break;
+        }
+        }
+        ScriptHeuristic.ScriptHeuristicBuilder builder = new ScriptHeuristic.ScriptHeuristicBuilder().setScript(script).setLang(lang).setParams(params).setScriptId(scriptId).setScriptFile(scriptFile);
+
+        return builder;
+    }
+
+ private void indexRandomFrequencies01(String type) throws ExecutionException, InterruptedException {
+ String mappings = "{\"" + DOC_TYPE + "\": {\"properties\":{\"" + TEXT_FIELD + "\": {\"type\":\"" + type + "\"}}}}";
+ assertAcked(prepareCreate(INDEX_NAME).addMapping(DOC_TYPE, mappings));
+ String[] gb = {"0", "1"};
+ List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
+ for (int i = 0; i < randomInt(20); i++) {
+ int randNum = randomInt(2);
+ String[] text = new String[1];
+ if (randNum == 2) {
+ text = gb;
+ } else {
+ text[0] = gb[randNum];
+ }
+ indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE)
+ .setSource(TEXT_FIELD, text, CLASS_FIELD, randomBoolean() ? "one" : "zero"));
+ }
+ indexRandom(true, indexRequestBuilderList);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java
new file mode 100644
index 0000000000..8e5431d653
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java
@@ -0,0 +1,441 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms.Bucket;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.PercentageScore;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class SignificantTermsTests extends ElasticsearchIntegrationTest {
+
+ public String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString();
+ }
+
+ @Override
+ public Settings indexSettings() {
+ return Settings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", numberOfReplicas())
+ .build();
+ }
+
+ public static final int MUSIC_CATEGORY=1;
+ public static final int OTHER_CATEGORY=2;
+ public static final int SNOWBOARDING_CATEGORY=3;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 5, SETTING_NUMBER_OF_REPLICAS, 0).addMapping("fact",
+ "_routing", "required=true", "routing_id", "type=string,index=not_analyzed", "fact_category",
+ "type=integer,index=not_analyzed", "description", "type=string,index=analyzed"));
+ createIndex("idx_unmapped");
+
+ ensureGreen();
+ String data[] = {
+ "A\t1\tpaul weller was lead singer of the jam before the style council",
+ "B\t1\tpaul weller left the jam to form the style council",
+ "A\t2\tpaul smith is a designer in the fashion industry",
+ "B\t1\tthe stranglers are a group originally from guildford",
+ "A\t1\tafter disbanding the style council in 1985 paul weller became a solo artist",
+ "B\t1\tjean jaques burnel is a bass player in the stranglers and has a black belt in karate",
+ "A\t1\tmalcolm owen was the lead singer of the ruts",
+ "B\t1\tpaul weller has denied any possibility of a reunion of the jam",
+ "A\t1\tformer frontman of the jam paul weller became the father of twins",
+ "B\t2\tex-england football star paul gascoigne has re-emerged following recent disappearance",
+ "A\t2\tdavid smith has recently denied connections with the mafia",
+ "B\t1\tthe damned's new rose single was considered the first 'punk' single in the UK",
+ "A\t1\tthe sex pistols broke up after a few short years together",
+ "B\t1\tpaul gascoigne was a midfielder for england football team",
+ "A\t3\tcraig kelly became the first world champion snowboarder and has a memorial at baldface lodge",
+ "B\t3\tterje haakonsen has credited craig kelly as his snowboard mentor",
+ "A\t3\tterje haakonsen and craig kelly were some of the first snowboarders sponsored by burton snowboards",
+ "B\t3\tlike craig kelly before him terje won the mt baker banked slalom many times - once riding switch",
+ "A\t3\tterje haakonsen has been a team rider for burton snowboards for over 20 years"
+ };
+
+ for (int i = 0; i < data.length; i++) {
+ String[] parts = data[i].split("\t");
+ client().prepareIndex("test", "fact", "" + i)
+ .setRouting(parts[0])
+ .setSource("fact_category", parts[1], "description", parts[2]).get();
+ }
+ client().admin().indices().refresh(new RefreshRequest("test")).get();
+ }
+
+ @Test
+ public void structuredAnalysis() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("fact_category").executionHint(randomExecutionHint())
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ Number topCategory = (Number) topTerms.getBuckets().iterator().next().getKey();
+ assertTrue(topCategory.equals(new Long(SNOWBOARDING_CATEGORY)));
+ }
+
+ @Test
+ public void structuredAnalysisWithIncludeExclude() throws Exception {
+ long[] excludeTerms = { MUSIC_CATEGORY };
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "paul"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("fact_category").executionHint(randomExecutionHint())
+ .minDocCount(1).exclude(excludeTerms))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ Number topCategory = (Number) topTerms.getBuckets().iterator().next().getKey();
+ assertTrue(topCategory.equals(new Long(OTHER_CATEGORY)));
+ }
+
+ @Test
+ public void includeExclude() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(new TermQueryBuilder("_all", "weller"))
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .exclude("weller"))
+ .get();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ Set<String> terms = new HashSet<>();
+ for (Bucket topTerm : topTerms) {
+ terms.add(topTerm.getKeyAsString());
+ }
+ assertThat(terms, hasSize(6));
+ assertThat(terms.contains("jam"), is(true));
+ assertThat(terms.contains("council"), is(true));
+ assertThat(terms.contains("style"), is(true));
+ assertThat(terms.contains("paul"), is(true));
+ assertThat(terms.contains("of"), is(true));
+ assertThat(terms.contains("the"), is(true));
+
+ response = client().prepareSearch("test")
+ .setQuery(new TermQueryBuilder("_all", "weller"))
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .include("weller"))
+ .get();
+ assertSearchResponse(response);
+ topTerms = response.getAggregations().get("mySignificantTerms");
+ terms = new HashSet<>();
+ for (Bucket topTerm : topTerms) {
+ terms.add(topTerm.getKeyAsString());
+ }
+ assertThat(terms, hasSize(1));
+ assertThat(terms.contains("weller"), is(true));
+ }
+
+ @Test
+ public void includeExcludeExactValues() throws Exception {
+ String []incExcTerms={"weller","nosuchterm"};
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(new TermQueryBuilder("_all", "weller"))
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .exclude(incExcTerms))
+ .get();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ Set<String> terms = new HashSet<>();
+ for (Bucket topTerm : topTerms) {
+ terms.add(topTerm.getKeyAsString());
+ }
+ assertEquals(new HashSet<String>(Arrays.asList("jam", "council", "style", "paul", "of", "the")), terms);
+
+ response = client().prepareSearch("test")
+ .setQuery(new TermQueryBuilder("_all", "weller"))
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .include(incExcTerms))
+ .get();
+ assertSearchResponse(response);
+ topTerms = response.getAggregations().get("mySignificantTerms");
+ terms = new HashSet<>();
+ for (Bucket topTerm : topTerms) {
+ terms.add(topTerm.getKeyAsString());
+ }
+ assertThat(terms, hasSize(1));
+ assertThat(terms.contains("weller"), is(true));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("fact_category").executionHint(randomExecutionHint())
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ assertThat(topTerms.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void textAnalysis() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+ @Test
+ public void textAnalysisGND() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint()).significanceHeuristic(new GND.GNDBuilder(true))
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+ @Test
+ public void textAnalysisChiSquare() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint()).significanceHeuristic(new ChiSquare.ChiSquareBuilder(false,true))
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+ @Test
+ public void textAnalysisPercentageScore() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0)
+ .setSize(60)
+ .setExplain(true)
+ .addAggregation(
+ new SignificantTermsBuilder("mySignificantTerms").field("description").executionHint(randomExecutionHint())
+ .significanceHeuristic(new PercentageScore.PercentageScoreBuilder()).minDocCount(2)).execute().actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+ @Test
+ public void badFilteredAnalysis() throws Exception {
+ // Deliberately using a bad choice of filter here for the background context in order
+ // to test robustness.
+ // We search for the name of a snowboarder but use music-related content (fact_category:1)
+ // as the background source of term statistics.
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description")
+ .minDocCount(2).backgroundFilter(QueryBuilders.termQuery("fact_category", 1)))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ // We expect at least one of the significant terms to have been selected on the basis
+ // that it is present in the foreground selection but entirely missing from the filtered
+ // background used as context.
+ boolean hasMissingBackgroundTerms = false;
+ for (Bucket topTerm : topTerms) {
+ if (topTerm.getSupersetDf() == 0) {
+ hasMissingBackgroundTerms = true;
+ break;
+ }
+ }
+ assertTrue(hasMissingBackgroundTerms);
+ }
+
+
+ @Test
+ public void filteredAnalysis() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "weller"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description")
+ .minDocCount(1).backgroundFilter(QueryBuilders.termsQuery("description", "paul")))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ HashSet<String> topWords = new HashSet<String>();
+ for (Bucket topTerm : topTerms) {
+ topWords.add(topTerm.getKeyAsString());
+ }
+ //The word "paul" should be a constant of all docs in the background set and therefore not seen as significant
+ assertFalse(topWords.contains("paul"));
+ //"Weller" is the only Paul who was in The Jam and therefore this should be identified as a differentiator from the background of all other Pauls.
+ assertTrue(topWords.contains("jam"));
+ }
+
+ @Test
+ public void nestedAggs() throws Exception {
+ String[][] expectedKeywordsByCategory={
+ { "paul", "weller", "jam", "style", "council" },
+ { "paul", "smith" },
+ { "craig", "kelly", "terje", "haakonsen", "burton" }};
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .addAggregation(new TermsBuilder("myCategories").field("fact_category").minDocCount(2)
+ .subAggregation(
+ new SignificantTermsBuilder("mySignificantTerms").field("description")
+ .executionHint(randomExecutionHint())
+ .minDocCount(2)))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ Terms topCategoryTerms = response.getAggregations().get("myCategories");
+ for (org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket topCategory : topCategoryTerms.getBuckets()) {
+ SignificantTerms topTerms = topCategory.getAggregations().get("mySignificantTerms");
+ HashSet<String> foundTopWords = new HashSet<String>();
+ for (Bucket topTerm : topTerms) {
+ foundTopWords.add(topTerm.getKeyAsString());
+ }
+ String[] expectedKeywords = expectedKeywordsByCategory[Integer.parseInt(topCategory.getKeyAsString()) - 1];
+ for (String expectedKeyword : expectedKeywords) {
+ assertTrue(expectedKeyword + " missing from category keywords", foundTopWords.contains(expectedKeyword));
+ }
+ }
+ }
+
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped", "test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms").field("description")
+ .executionHint(randomExecutionHint())
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+
+ private void checkExpectedStringTermsFound(SignificantTerms topTerms) {
+ HashMap<String,Bucket>topWords=new HashMap<>();
+ for (Bucket topTerm : topTerms ){
+ topWords.put(topTerm.getKeyAsString(), topTerm);
+ }
+ assertTrue( topWords.containsKey("haakonsen"));
+ assertTrue( topWords.containsKey("craig"));
+ assertTrue( topWords.containsKey("kelly"));
+ assertTrue( topWords.containsKey("burton"));
+ assertTrue( topWords.containsKey("snowboards"));
+ Bucket kellyTerm=topWords.get("kelly");
+ assertEquals(3, kellyTerm.getSubsetDf());
+ assertEquals(4, kellyTerm.getSupersetDf());
+ }
+
+ public void testDefaultSignificanceHeuristic() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms")
+ .field("description")
+ .executionHint(randomExecutionHint())
+ .significanceHeuristic(new JLHScore.JLHScoreBuilder())
+ .minDocCount(2))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+
+ @Test
+ public void testMutualInformation() throws Exception {
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.QUERY_AND_FETCH)
+ .setQuery(new TermQueryBuilder("_all", "terje"))
+ .setFrom(0).setSize(60).setExplain(true)
+ .addAggregation(new SignificantTermsBuilder("mySignificantTerms")
+ .field("description")
+ .executionHint(randomExecutionHint())
+ .significanceHeuristic(new MutualInformation.MutualInformationBuilder(false, true))
+ .minDocCount(1))
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ SignificantTerms topTerms = response.getAggregations().get("mySignificantTerms");
+ checkExpectedStringTermsFound(topTerms);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
new file mode 100644
index 0000000000..7baf417c91
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsTests.java
@@ -0,0 +1,1905 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import com.google.common.base.Strings;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.count;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class StringTermsTests extends AbstractTermsTests {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "s_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "s_values";
+ private static Map<String, Map<String, Object>> expectedMultiSortBuckets;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 5; i++) {
+ builders.add(client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val" + i).field("i", i)
+ .field("tag", i < 5 / 2 + 1 ? "more" : "less").startArray(MULTI_VALUED_FIELD_NAME).value("val" + i)
+ .value("val" + (i + 1)).endArray().endObject()));
+ }
+
+ getMultiSortDocs(builders);
+
+ for (int i = 0; i < 100; i++) {
+ builders.add(client().prepareIndex("idx", "high_card_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val" + Strings.padStart(i + "", 3, '0'))
+ .startArray(MULTI_VALUED_FIELD_NAME).value("val" + Strings.padStart(i + "", 3, '0'))
+ .value("val" + Strings.padStart((i + 1) + "", 3, '0')).endArray().endObject()));
+ }
+ prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer").execute().actionGet();
+
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()));
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+ ensureSearchable();
+ }
+
+ private void getMultiSortDocs(List<IndexRequestBuilder> builders) throws IOException {
+ expectedMultiSortBuckets = new HashMap<>();
+ Map<String, Object> bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val1");
+ bucketProps.put("_count", 3l);
+ bucketProps.put("avg_l", 1d);
+ bucketProps.put("sum_d", 6d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val2");
+ bucketProps.put("_count", 3l);
+ bucketProps.put("avg_l", 2d);
+ bucketProps.put("sum_d", 6d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val3");
+ bucketProps.put("_count", 2l);
+ bucketProps.put("avg_l", 3d);
+ bucketProps.put("sum_d", 3d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val4");
+ bucketProps.put("_count", 2l);
+ bucketProps.put("avg_l", 3d);
+ bucketProps.put("sum_d", 4d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val5");
+ bucketProps.put("_count", 2l);
+ bucketProps.put("avg_l", 5d);
+ bucketProps.put("sum_d", 3d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val6");
+ bucketProps.put("_count", 1l);
+ bucketProps.put("avg_l", 5d);
+ bucketProps.put("sum_d", 1d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+ bucketProps = new HashMap<>();
+ bucketProps.put("_term", "val7");
+ bucketProps.put("_count", 1l);
+ bucketProps.put("avg_l", 5d);
+ bucketProps.put("sum_d", 1d);
+ expectedMultiSortBuckets.put((String) bucketProps.get("_term"), bucketProps);
+
+ createIndex("sort_idx");
+ for (int i = 1; i <= 3; i++) {
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val1").field("l", 1).field("d", i).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val2").field("l", 2).field("d", i).endObject()));
+ }
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 1).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val3").field("l", 3).field("d", 2).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 1).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val4").field("l", 3).field("d", 3).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 1).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val5").field("l", 5).field("d", 2).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val6").field("l", 5).field("d", 1).endObject()));
+ builders.add(client().prepareIndex("sort_idx", "multi_sort_type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, "val7").field("l", 5).field("d", 1).endObject()));
+ }
+
+ private String key(Terms.Bucket bucket) {
+ return bucket.getKeyAsString();
+ }
+
+ @Test
+ // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard
+ public void sizeIsZero() {
+ final int minDocCount = randomInt(1);
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).minDocCount(minDocCount).size(0)).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(minDocCount == 0 ? 105 : 100)); // 105 because of the other type
+ }
+
+ @Test
+ public void singleValueField() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+ Object[] propertiesKeys = (Object[]) terms.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) terms.getProperty("_count");
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat((String) propertiesKeys[i], equalTo("val" + i));
+ assertThat((long) propertiesDocCounts[i], equalTo(1l));
+ }
+ }
+
+ @Test
+ public void singleValueField_withGlobalOrdinals() throws Exception {
+ ExecutionMode[] executionModes = new ExecutionMode[] { null, ExecutionMode.GLOBAL_ORDINALS, ExecutionMode.GLOBAL_ORDINALS_HASH,
+ ExecutionMode.GLOBAL_ORDINALS_LOW_CARDINALITY };
+ for (ExecutionMode executionMode : executionModes) {
+ logger.info("Execution mode:" + executionMode);
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(executionMode == null ? null : executionMode.toString())
+ .field(SINGLE_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))).execute()
+ .actionGet();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+ }
+
+ // Regex-based include/exclude filtering on the high-cardinality index:
+ // include alone, include combined with exclude, and exclude alone.
+ @Test
+ public void singleValueField_WithRegexFiltering() throws Exception {
+
+ // include without exclude
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).include("val00.+")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).include("val00.+").exclude("(val000|val001)"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // exclude without include
+ // we should be left with: val000, val001, val002, val003, val004, val005, val006, val007, val008, val009
+
+ response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).exclude("val0[1-9]+.+")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+
+ for (int i = 0; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // Exact-term include/exclude sets (no regex). Where include and exclude
+ // overlap, the exclusions are expected to win.
+ @Test
+ public void singleValueField_WithExactTermFiltering() throws Exception {
+ // include without exclude
+ String incVals[] = { "val000", "val001", "val002", "val003", "val004", "val005", "val006", "val007", "val008", "val009" };
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).include(incVals)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(incVals.length));
+
+ for (String incVal : incVals) {
+ Terms.Bucket bucket = terms.getBucketByKey(incVal);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo(incVal));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // include and exclude
+ // Slightly illogical example with exact terms below as include and exclude sets
+ // are made to overlap but the exclude set should have priority over matches.
+ // we should be left with: val002, val003, val004, val005, val006, val007, val008, val009
+ String excVals[] = { "val000", "val001" };
+
+ response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).include(incVals).exclude(excVals)).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(8));
+
+ for (int i = 2; i < 10; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val00" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val00" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ // Check case with only exact term exclude clauses
+ response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).exclude(excVals)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(10));
+ for (String key : excVals) {
+ Terms.Bucket bucket = terms.getBucketByKey(key);
+ assertThat(bucket, nullValue());
+ }
+ // with val000/val001 excluded, the 10 returned buckets are val002..val011
+ // (keys are zero-padded to 3 digits, hence the NumberFormat)
+ NumberFormat nf = NumberFormat.getIntegerInstance(Locale.ENGLISH);
+ nf.setMinimumIntegerDigits(3);
+ for (int i = 2; i < 12; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + nf.format(i));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + nf.format(i)));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+
+ }
+
+ // size(20) on the high-cardinality index; ordering by term ascending makes
+ // the returned buckets deterministic: val000..val019, one doc each.
+ @Test
+ public void singleValueField_WithMaxSize() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("high_card_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME).size(20)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))) // we need to sort by terms cause we're checking the first 20 values
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(20));
+
+ for (int i = 0; i < 20; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + Strings.padStart(i + "", 3, '0'));
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + Strings.padStart(i + "", 3, '0')));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // Term-ascending order: iterating the buckets must yield val0..val4.
+ @Test
+ public void singleValueField_OrderedByTermAsc() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(true))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i++;
+ }
+ }
+
+ // Term-descending order: iterating the buckets must yield val4..val0.
+ @Test
+ public void singleValueField_OrderedByTermDesc() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.term(false))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ i--;
+ }
+ }
+
+ // value_count sub-aggregation over the multi-valued field (2 values per doc),
+ // plus coverage of getProperty() paths: "_key", "_count" and "count.value".
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(count("count").field(MULTI_VALUED_FIELD_NAME))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+ Object[] propertiesKeys = (Object[]) terms.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) terms.getProperty("_count");
+ Object[] propertiesCounts = (Object[]) terms.getProperty("count.value");
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ assertThat((String) propertiesKeys[i], equalTo("val" + i));
+ assertThat((long) propertiesDocCounts[i], equalTo(1l));
+ assertThat((double) propertiesCounts[i], equalTo(2.0));
+ }
+ }
+
+ // count("count") has no explicit field; per the test name it inherits the
+ // terms value source (the single-valued field), so each bucket counts
+ // exactly 1 value — TODO confirm inheritance semantics against the factory.
+ @Test
+ public void singleValuedField_WithSubAggregation_Inherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).subAggregation(count("count"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ // A value script prefixes each term with "foo_", so buckets are keyed
+ // foo_val0..foo_val4 instead of the raw field values.
+ @Test
+ public void singleValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script(new Script("'foo_' + _value"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // The value script truncates every term to its first 3 chars ("val"),
+ // collapsing all values into a single bucket containing all 5 docs.
+ @Test
+ public void multiValuedField_WithValueScript_NotUnique() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script(new Script("_value.substring(0,3)")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("val");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val"));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ // Multi-valued field: the edge terms val0 and val5 occur in a single doc,
+ // every interior term in two docs (adjacent docs share one value).
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ // Same expectations as multiValuedField, but the terms are produced by a
+ // script reading the multi-valued doc values rather than by field().
+ @Test
+ public void multiValuedScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ // Multi-valued field with a "foo_" prefixing value script: same doc-count
+ // pattern as multiValuedField, but bucket keys become foo_val0..foo_val5.
+ @Test
+ public void multiValuedField_WithValueScript() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script(new Script("'foo_' + _value"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * Scripted multi-values per document:
+ * [foo_val0, foo_val1] [foo_val1, foo_val2] [foo_val2, foo_val3]
+ * [foo_val3, foo_val4] [foo_val4, foo_val5]
+ *
+ * Expected per term:
+ * foo_val0 - doc_count: 1 - val_count: 2
+ * foo_val1 - doc_count: 2 - val_count: 4
+ * foo_val2 - doc_count: 2 - val_count: 4
+ * foo_val3 - doc_count: 2 - val_count: 4
+ * foo_val4 - doc_count: 2 - val_count: 4
+ * foo_val5 - doc_count: 1 - val_count: 2
+ */
+
+ // Per the value layout documented above: edge terms (foo_val0, foo_val5)
+ // occur in one doc and the inherited value_count sees 2 values; interior
+ // terms occur in two docs and the count sees 4 values.
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregator() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script(new Script("'foo_' + _value"))
+ .subAggregation(count("count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ // use short-circuit logical OR (was bitwise 'i == 0 | i == 5'),
+ // consistent with the equivalent checks in the sibling tests
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat("term[" + key(bucket) + "]", valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ // Terms produced entirely by a script reading the single-valued field:
+ // expects the same 5 buckets (val0..val4, 1 doc each) as the field() form.
+ @Test
+ public void script_SingleValue() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // NOTE(review): this body is identical to script_SingleValue — the
+ // "explicit single value" variant uses the same '.value' script; confirm
+ // whether a different script form was intended here.
+ @Test
+ public void script_SingleValue_ExplicitSingleValue() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // Scripted single-valued terms with a field-less count sub-aggregation:
+ // each of the 5 buckets holds 1 doc and the count reports 1 value.
+ @Test
+ public void script_SingleValue_WithSubAggregator_Inherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script(new Script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).subAggregation(count("count")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ // Scripted multi-valued terms: same expectations as multiValuedField —
+ // edge terms (val0, val5) in one doc, interior terms in two.
+ @Test
+ public void script_MultiValued() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ // Scripted multi-valued terms with an inherited count sub-aggregation:
+ // edge terms (val0, val5) occur in one doc (2 values counted), interior
+ // terms in two docs (4 values counted).
+ @Test
+ public void script_MultiValued_WithAggregatorInherited() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script(new Script("doc['" + MULTI_VALUED_FIELD_NAME + "']")).subAggregation(count("count"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ // use short-circuit logical OR (was bitwise 'i == 0 | i == 5'),
+ // consistent with the equivalent checks in the sibling tests
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ // Aggregating over an index where the field is unmapped must succeed and
+ // return an empty terms aggregation (zero buckets).
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx_unmapped")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).size(randomInt(5)).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(0));
+ }
+
+ // Searching a mapped and an unmapped index together: the unmapped index
+ // contributes nothing, so results equal the mapped-only case (5 buckets).
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx", "idx_unmapped")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ // Terms nested under a filter("val3") bucket: only docs containing val3
+ // remain, leaving terms val2..val4 with val3 matched by both such docs.
+ @Test
+ public void stringTermsNestedIntoPerBucketAggregator() throws Exception {
+ // no execution hint so that the logic that decides whether or not to use ordinals is executed
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ filter("filter").filter(termQuery(MULTI_VALUED_FIELD_NAME, "val3")).subAggregation(
+ terms("terms").field(MULTI_VALUED_FIELD_NAME).collectMode(randomFrom(SubAggCollectionMode.values()))))
+ .execute().actionGet();
+
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ Filter filter = response.getAggregations().get("filter");
+
+ Terms terms = filter.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (int i = 2; i <= 4; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(i == 3 ? 2L : 1L));
+ }
+ }
+
+ // With minDocCount(0) the histogram materializes empty buckets; the terms
+ // sub-aggregation inside such an empty bucket must exist but have no buckets.
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1l).minDocCount(0).subAggregation(terms("terms")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, Matchers.notNullValue());
+ // bucket at index 1 is the empty one under inspection
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, Matchers.notNullValue());
+
+ Terms terms = bucket.getAggregations().get("terms");
+ assertThat(terms, Matchers.notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().isEmpty(), is(true));
+ }
+
+ // Orders buckets by the avg_i metric ascending; since avg("i") per bucket
+ // equals the bucket index here, iteration order is val0..val4.
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i++;
+ }
+ }
+
+ // Orders the tag buckets by the doc count of a single-bucket (filter)
+ // sub-aggregation: "less" has 2 docs, "more" has 3, so asc puts "less" first.
+ @Test
+ public void singleValuedField_OrderedBySingleBucketSubAggregationAsc() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("tags").executionHint(randomExecutionHint()).field("tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("filter", asc))
+ .subAggregation(filter("filter").filter(QueryBuilders.matchAllQuery()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "less" : "more"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ Filter filter = tag.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getDocCount(), equalTo(asc ? 2l : 3l));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "more" : "less"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ filter = tag.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getDocCount(), equalTo(asc ? 3l : 2l));
+ }
+
+ // Orders tag buckets by a metric reached through a multi-level aggregation
+ // path ("filter1>filter2>stats.max"); the two match-all filters preserve
+ // each bucket's docs, so ordering is by stats.max of field "i".
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc_MultiHierarchyLevels() throws Exception {
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("tags")
+ .executionHint(randomExecutionHint())
+ .field("tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter1>filter2>stats.max", asc))
+ .subAggregation(
+ filter("filter1").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ filter("filter2").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ stats("stats").field("i"))))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ // the max for "more" is 2
+ // the max for "less" is 4
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "more" : "less"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 3l : 2l));
+ Stats stats = filter2.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "less" : "more"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter2 = filter1.getAggregations().get("filter2");
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 2l : 3l));
+ stats = filter2.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0));
+ }
+
+ // Same multi-level ordering as above, but with dots embedded in the
+ // aggregation names; the random name suffixes strip '[', ']' and '>'
+ // because those characters are meaningful in the order path syntax.
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc_MultiHierarchyLevels_specialChars() throws Exception {
+ StringBuilder filter2NameBuilder = new StringBuilder("filt.er2");
+ filter2NameBuilder.append(randomAsciiOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", ""));
+ String filter2Name = filter2NameBuilder.toString();
+ StringBuilder statsNameBuilder = new StringBuilder("st.ats");
+ statsNameBuilder.append(randomAsciiOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", ""));
+ String statsName = statsNameBuilder.toString();
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("tags")
+ .executionHint(randomExecutionHint())
+ .field("tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + ".max", asc))
+ .subAggregation(
+ filter("filter1").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ filter(filter2Name).filter(QueryBuilders.matchAllQuery()).subAggregation(
+ stats(statsName).field("i"))))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ // the max for "more" is 2
+ // the max for "less" is 4
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "more" : "less"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter2 = filter1.getAggregations().get(filter2Name);
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 3l : 2l));
+ Stats stats = filter2.getAggregations().get(statsName);
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "less" : "more"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter2 = filter1.getAggregations().get(filter2Name);
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 2l : 3l));
+ stats = filter2.getAggregations().get(statsName);
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySubAggregationAsc_MultiHierarchyLevels_specialCharsNoDotNotation() throws Exception {
+ StringBuilder filter2NameBuilder = new StringBuilder("filt.er2");
+ filter2NameBuilder.append(randomAsciiOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", ""));
+ String filter2Name = filter2NameBuilder.toString();
+ StringBuilder statsNameBuilder = new StringBuilder("st.ats");
+ statsNameBuilder.append(randomAsciiOfLengthBetween(3, 10).replace("[", "").replace("]", "").replace(">", ""));
+ String statsName = statsNameBuilder.toString();
+ boolean asc = randomBoolean();
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("tags")
+ .executionHint(randomExecutionHint())
+ .field("tag")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("filter1>" + filter2Name + ">" + statsName + "[max]", asc))
+ .subAggregation(
+ filter("filter1").filter(QueryBuilders.matchAllQuery()).subAggregation(
+ filter(filter2Name).filter(QueryBuilders.matchAllQuery()).subAggregation(
+ stats(statsName).field("i"))))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms tags = response.getAggregations().get("tags");
+ assertThat(tags, notNullValue());
+ assertThat(tags.getName(), equalTo("tags"));
+ assertThat(tags.getBuckets().size(), equalTo(2));
+
+ Iterator<Terms.Bucket> iters = tags.getBuckets().iterator();
+
+ // the max for "more" is 2
+ // the max for "less" is 4
+
+ Terms.Bucket tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "more" : "less"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 3l : 2l));
+ Filter filter2 = filter1.getAggregations().get(filter2Name);
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 3l : 2l));
+ Stats stats = filter2.getAggregations().get(statsName);
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 2.0 : 4.0));
+
+ tag = iters.next();
+ assertThat(tag, notNullValue());
+ assertThat(key(tag), equalTo(asc ? "less" : "more"));
+ assertThat(tag.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter1 = tag.getAggregations().get("filter1");
+ assertThat(filter1, notNullValue());
+ assertThat(filter1.getDocCount(), equalTo(asc ? 2l : 3l));
+ filter2 = filter1.getAggregations().get(filter2Name);
+ assertThat(filter2, notNullValue());
+ assertThat(filter2.getDocCount(), equalTo(asc ? 2l : 3l));
+ stats = filter2.getAggregations().get(statsName);
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo(asc ? 4.0 : 2.0));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMissingSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("avg_i", true))).execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation that doesn't exist");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByNonMetricsOrMultiBucketSubAggregation() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("values", true))
+ .subAggregation(terms("values").field("i").collectMode(randomFrom(SubAggCollectionMode.values()))))
+ .execute().actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by sug-aggregation which is not of a metrics or single-bucket type");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithUknownMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ SearchResponse response = client()
+ .prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.foo", true)).subAggregation(stats("stats").field("i")))
+ .execute().actionGet();
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation "
+ + "with an unknown specified metric to order by. response had " + response.getFailedShards() + " failed shards.");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValuedSubAggregation_WithoutMetric() throws Exception {
+ for (String index : Arrays.asList("idx", "idx_unmapped")) {
+ try {
+ client().prepareSearch(index)
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats", true)).subAggregation(stats("stats").field("i"))).execute()
+ .actionGet();
+
+ fail("Expected search to fail when trying to sort terms aggregation by multi-valued sug-aggregation "
+ + "where the metric name is not specified");
+
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+ }
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("avg_i", asc))
+ .subAggregation(avg("avg_i").field("i"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avg = bucket.getAggregations().get("avg_i");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueSubAggregationDesc() throws Exception {
+ boolean asc = false;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.aggregation("stats.avg", asc))
+ .subAggregation(stats("stats").field("i"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i--;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByMultiValueExtendedStatsAsc() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.sum_of_squares", asc))
+ .subAggregation(extendedStats("stats").field("i"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+ i++;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedByStatsAggAscWithTermsSubAgg() throws Exception {
+ boolean asc = true;
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .order(Terms.Order.aggregation("stats.sum_of_squares", asc))
+ .subAggregation(extendedStats("stats").field("i"))
+ .subAggregation(terms("subTerms").field("s_values").collectMode(randomFrom(SubAggCollectionMode.values()))))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ ExtendedStats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getMax(), equalTo((double) i));
+
+ Terms subTermsAgg = bucket.getAggregations().get("subTerms");
+ assertThat(subTermsAgg, notNullValue());
+ assertThat(subTermsAgg.getBuckets().size(), equalTo(2));
+ int j = i;
+ for (Terms.Bucket subBucket : subTermsAgg.getBuckets()) {
+ assertThat(subBucket, notNullValue());
+ assertThat(key(subBucket), equalTo("val" + j));
+ assertThat(subBucket.getDocCount(), equalTo(1l));
+ j++;
+ }
+ i++;
+ }
+
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsDesc() throws Exception {
+ String[] expectedKeys = new String[] { "val1", "val2", "val4", "val3", "val7", "val6", "val5" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAndTermsAsc() throws Exception {
+ String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationDescAndTermsAsc() throws Exception {
+ String[] expectedKeys = new String[] { "val5", "val6", "val7", "val3", "val4", "val2", "val1" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", false), Terms.Order.term(true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByCountAscAndSingleValueSubAggregationAsc() throws Exception {
+ String[] expectedKeys = new String[] { "val6", "val7", "val3", "val4", "val5", "val1", "val2" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscSingleValueSubAggregationAsc() throws Exception {
+ String[] expectedKeys = new String[] { "val6", "val7", "val3", "val5", "val4", "val1", "val2" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("sum_d", true), Terms.Order.aggregation("avg_l", true));
+ }
+
+ @Test
+ public void singleValuedField_OrderedByThreeCriteria() throws Exception {
+ String[] expectedKeys = new String[] { "val2", "val1", "val4", "val5", "val3", "val6", "val7" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.count(false), Terms.Order.aggregation("sum_d", false),
+ Terms.Order.aggregation("avg_l", false));
+ }
+
+ @Test
+ public void singleValuedField_OrderedBySingleValueSubAggregationAscAsCompound() throws Exception {
+ String[] expectedKeys = new String[] { "val1", "val2", "val3", "val4", "val5", "val6", "val7" };
+ assertMultiSortResponse(expectedKeys, Terms.Order.aggregation("avg_l", true));
+ }
+
+ private void assertMultiSortResponse(String[] expectedKeys, Terms.Order... order) {
+ SearchResponse response = client()
+ .prepareSearch("sort_idx")
+ .setTypes("multi_sort_type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).order(Terms.Order.compound(order))
+ .subAggregation(avg("avg_l").field("l")).subAggregation(sum("sum_d").field("d"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(expectedKeys.length));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo(expectedKeys[i]));
+ assertThat(bucket.getDocCount(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("_count")));
+ Avg avg = bucket.getAggregations().get("avg_l");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("avg_l")));
+ Sum sum = bucket.getAggregations().get("sum_d");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(expectedMultiSortBuckets.get(expectedKeys[i]).get("sum_d")));
+ i++;
+ }
+ }
+
+ @Test
+ public void indexMetaField() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx", "empty_bucket_idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .field(IndexFieldMapper.NAME)).execute().actionGet();
+
+ assertSearchResponse(response);
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(2));
+
+ int i = 0;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo(i == 0 ? "idx" : "empty_bucket_idx"));
+ assertThat(bucket.getDocCount(), equalTo(i == 0 ? 5L : 2L));
+ i++;
+ }
+
+ response = client().prepareSearch("idx", "empty_bucket_idx").setTypes("type")
+ .addAggregation(terms("terms").executionHint(randomExecutionHint()).field(FieldNamesFieldMapper.NAME)).execute()
+ .actionGet();
+ assertSearchResponse(response);
+ terms = response.getAggregations().get("terms");
+ assertEquals(5L, terms.getBucketByKey("i").getDocCount());
+ }
+
+ @Test
+ public void otherDocCount() {
+ testOtherDocCount(SINGLE_VALUED_FIELD_NAME, MULTI_VALUED_FIELD_NAME);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void singleValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script("'foo_' + _value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_NotUniqueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script("_value.substring(0,3)")).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(1));
+
+ Terms.Bucket bucket = terms.getBucketByKey("val");
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val"));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).script("doc['" + MULTI_VALUED_FIELD_NAME + "']")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScriptOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script("'foo_' + _value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+    /*
+     * The five docs hold these multi-value pairs (after the "'foo_' + _value" script):
+     *   [foo_val0, foo_val1] [foo_val1, foo_val2] [foo_val2, foo_val3]
+     *   [foo_val3, foo_val4] [foo_val4, foo_val5]
+     *
+     * Expected per-term counts (doc_count = docs containing the term,
+     * val_count = total values in those docs, counted by the inherited sub-agg):
+     *   foo_val0          - doc_count: 1 - val_count: 2
+     *   foo_val1..foo_val4 - doc_count: 2 - val_count: 4
+     *   foo_val5          - doc_count: 1 - val_count: 2
+     */
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void multiValuedField_WithValueScript_WithInheritedSubAggregatorOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").executionHint(randomExecutionHint()).field(MULTI_VALUED_FIELD_NAME)
+ .collectMode(randomFrom(SubAggCollectionMode.values())).script("'foo_' + _value")
+ .subAggregation(count("count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("foo_val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("foo_val" + i));
+ if (i == 0 | i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat("term[" + key(bucket) + "]", valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValue_ExplicitSingleValueOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_SingleValue_WithSubAggregator_InheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script("doc['" + SINGLE_VALUED_FIELD_NAME + "'].value").subAggregation(count("count"))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValuedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 || i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void script_MultiValued_WithAggregatorInheritedOldScriptAPI() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(
+ terms("terms").collectMode(randomFrom(SubAggCollectionMode.values())).executionHint(randomExecutionHint())
+ .script("doc['" + MULTI_VALUED_FIELD_NAME + "']").subAggregation(count("count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(6));
+
+ for (int i = 0; i < 6; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ if (i == 0 | i == 5) {
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(2l));
+ } else {
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ ValueCount valueCount = bucket.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getValue(), equalTo(4l));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorTests.java
new file mode 100644
index 0000000000..144cb4d4f9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorTests.java
@@ -0,0 +1,948 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class TermsDocCountErrorTests extends ElasticsearchIntegrationTest {
+
+ private static final String STRING_FIELD_NAME = "s_value";
+ private static final String LONG_FIELD_NAME = "l_value";
+ private static final String DOUBLE_FIELD_NAME = "d_value";
+
+ public static String randomExecutionHint() { // half the time no hint (null), otherwise a random terms-agg execution mode name
+ return randomBoolean() == false ? randomFrom(ExecutionMode.values()).toString() : null;
+ }
+
+ private static int numRoutingValues;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception { // seeds "idx" (default shards), single-shard "idx_single_shard", and routing-required "idx_with_routing" with random term values
+ createIndex("idx");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ int numDocs = between(10, 200);
+ int numUniqueTerms = between(2,numDocs/2);
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms))
+ .field(LONG_FIELD_NAME, randomInt(numUniqueTerms))
+ .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms))
+ .endObject()));
+ }
+ assertAcked(prepareCreate("idx_single_shard").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)));
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx_single_shard", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms))
+ .field(LONG_FIELD_NAME, randomInt(numUniqueTerms))
+ .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms))
+ .endObject()));
+ }
+ numRoutingValues = between(1,40); // routing keys drawn by the *_withRouting tests below
+ assertAcked(prepareCreate("idx_with_routing").addMapping("type", "{ \"type\" : { \"_routing\" : { \"required\" : true } } }"));
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx_with_routing", "type", "" + i) // FIX: was "idx_with_routing"'s docs going to "idx_single_shard" — that left idx_with_routing empty and overwrote idx_single_shard docs sharing the same ids
+ .setRouting(String.valueOf(randomInt(numRoutingValues)))
+ .setSource(jsonBuilder()
+ .startObject()
+ .field(STRING_FIELD_NAME, "val" + randomInt(numUniqueTerms))
+ .field(LONG_FIELD_NAME, randomInt(numUniqueTerms))
+ .field(DOUBLE_FIELD_NAME, 1.0 * randomInt(numUniqueTerms))
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ private void assertDocCountErrorWithinBounds(int size, SearchResponse accurateResponse, SearchResponse testResponse) {
+ Terms accurateTerms = accurateResponse.getAggregations().get("terms");
+ assertThat(accurateTerms, notNullValue());
+ assertThat(accurateTerms.getName(), equalTo("terms"));
+ assertThat(accurateTerms.getDocCountError(), equalTo(0l));
+
+ Terms testTerms = testResponse.getAggregations().get("terms");
+ assertThat(testTerms, notNullValue());
+ assertThat(testTerms.getName(), equalTo("terms"));
+ assertThat(testTerms.getDocCountError(), greaterThanOrEqualTo(0l));
+ Collection<Bucket> testBuckets = testTerms.getBuckets();
+ assertThat(testBuckets.size(), lessThanOrEqualTo(size));
+ assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size()));
+
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket, notNullValue());
+ Terms.Bucket accurateBucket = accurateTerms.getBucketByKey(testBucket.getKeyAsString());
+ assertThat(accurateBucket, notNullValue());
+ assertThat(accurateBucket.getDocCountError(), equalTo(0l));
+ assertThat(testBucket.getDocCountError(), lessThanOrEqualTo(testTerms.getDocCountError()));
+ assertThat(testBucket.getDocCount() + testBucket.getDocCountError(), greaterThanOrEqualTo(accurateBucket.getDocCount()));
+ assertThat(testBucket.getDocCount() - testBucket.getDocCountError(), lessThanOrEqualTo(accurateBucket.getDocCount()));
+ }
+
+ for (Terms.Bucket accurateBucket: accurateTerms.getBuckets()) {
+ assertThat(accurateBucket, notNullValue());
+ Terms.Bucket testBucket = accurateTerms.getBucketByKey(accurateBucket.getKeyAsString());
+ if (testBucket == null) {
+ assertThat(accurateBucket.getDocCount(), lessThanOrEqualTo(testTerms.getDocCountError()));
+ }
+
+ }
+ }
+
+ private void assertNoDocCountError(int size, SearchResponse accurateResponse, SearchResponse testResponse) { // for cases (single shard / term order) where counts are exact: every reported error must be exactly 0
+ Terms accurateTerms = accurateResponse.getAggregations().get("terms");
+ assertThat(accurateTerms, notNullValue());
+ assertThat(accurateTerms.getName(), equalTo("terms"));
+ assertThat(accurateTerms.getDocCountError(), equalTo(0l));
+
+ Terms testTerms = testResponse.getAggregations().get("terms");
+ assertThat(testTerms, notNullValue());
+ assertThat(testTerms.getName(), equalTo("terms"));
+ assertThat(testTerms.getDocCountError(), equalTo(0l)); // truncated response still reports zero aggregate error
+ Collection<Bucket> testBuckets = testTerms.getBuckets();
+ assertThat(testBuckets.size(), lessThanOrEqualTo(size)); // response honours the requested size
+ assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size()));
+
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket, notNullValue());
+ Terms.Bucket accurateBucket = accurateTerms.getBucketByKey(testBucket.getKeyAsString());
+ assertThat(accurateBucket, notNullValue()); // every returned term must exist in the exhaustive response
+ assertThat(accurateBucket.getDocCountError(), equalTo(0l));
+ assertThat(testBucket.getDocCountError(), equalTo(0l)); // per-bucket error is also exactly 0
+ }
+ }
+
+ private void assertNoDocCountErrorSingleResponse(int size, SearchResponse testResponse) { // variant of assertNoDocCountError for routed searches, where no exhaustive reference response is taken
+ Terms testTerms = testResponse.getAggregations().get("terms");
+ assertThat(testTerms, notNullValue());
+ assertThat(testTerms.getName(), equalTo("terms"));
+ assertThat(testTerms.getDocCountError(), equalTo(0l)); // a single routed shard yields exact counts: aggregate error must be 0
+ Collection<Bucket> testBuckets = testTerms.getBuckets();
+ assertThat(testBuckets.size(), lessThanOrEqualTo(size));
+
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket, notNullValue());
+ assertThat(testBucket.getDocCountError(), equalTo(0l)); // and so must every per-bucket error
+ }
+ }
+
+ private void assertUnboundedDocCountError(int size, SearchResponse accurateResponse, SearchResponse testResponse) { // for orderings (count asc / sub-agg) where the error cannot be bounded: expect -1 ("unbounded") or 0
+ Terms accurateTerms = accurateResponse.getAggregations().get("terms");
+ assertThat(accurateTerms, notNullValue());
+ assertThat(accurateTerms.getName(), equalTo("terms"));
+ assertThat(accurateTerms.getDocCountError(), equalTo(0l)); // exhaustive reference response is always exact
+
+ Terms testTerms = testResponse.getAggregations().get("terms");
+ assertThat(testTerms, notNullValue());
+ assertThat(testTerms.getName(), equalTo("terms"));
+ assertThat(testTerms.getDocCountError(),anyOf(equalTo(-1l), equalTo(0l))); // NOTE(review): -1 presumably marks "error not computable" for this sort order — confirm against Terms javadoc
+ Collection<Bucket> testBuckets = testTerms.getBuckets();
+ assertThat(testBuckets.size(), lessThanOrEqualTo(size));
+ assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size()));
+
+ for (Terms.Bucket testBucket : testBuckets) {
+ assertThat(testBucket, notNullValue());
+ Terms.Bucket accurateBucket = accurateTerms.getBucketByKey(testBucket.getKeyAsString());
+ assertThat(accurateBucket, notNullValue());
+ assertThat(accurateBucket.getDocCountError(), equalTo(0l));
+ assertThat(testBucket.getDocCountError(), anyOf(equalTo(-1l), equalTo(0l))); // per-bucket error mirrors the aggregate: unbounded or zero
+ }
+ }
+
+ @Test
+ public void stringValueField() throws Exception { // multi-shard "idx": errors are expected but must stay within the reported bounds; same pattern is repeated below per field type / index / order
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0) // NOTE(review): size(0)/shardSize(0) presumably means "return all terms" in this ES version, making this the exact reference — confirm
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize) // truncated request whose error bounds are under test
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertDocCountErrorWithinBounds(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_singleShard() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_withRouting() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+
+ SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type").setRouting(String.valueOf(between(1, numRoutingValues)))
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountErrorSingleResponse(size, testResponse);
+ }
+
+ @Test
+ public void stringValueField_docCountAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_termSortAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_termSortDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_subAggAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void stringValueField_subAggDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(STRING_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertDocCountErrorWithinBounds(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_singleShard() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_withRouting() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+
+ SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type").setRouting(String.valueOf(between(1, numRoutingValues)))
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountErrorSingleResponse(size, testResponse);
+ }
+
+ @Test
+ public void longValueField_docCountAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_termSortAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_termSortDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_subAggAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void longValueField_subAggDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(LONG_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(DOUBLE_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertDocCountErrorWithinBounds(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_singleShard() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_withRouting() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+
+ SearchResponse testResponse = client().prepareSearch("idx_with_routing").setTypes("type").setRouting(String.valueOf(between(1, numRoutingValues)))
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountErrorSingleResponse(size, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_docCountAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.count(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_termSortAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(true))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_termSortDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.term(false))
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertNoDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_subAggAsc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", true))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+ @Test
+ public void doubleValueField_subAggDesc() throws Exception {
+ int size = randomIntBetween(1, 20);
+ int shardSize = randomIntBetween(size, size * 2);
+ SearchResponse accurateResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(0)
+ .shardSize(0)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(accurateResponse);
+
+ SearchResponse testResponse = client().prepareSearch("idx_single_shard").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(DOUBLE_FIELD_NAME)
+ .showTermDocCountError(true)
+ .size(size)
+ .shardSize(shardSize)
+ .order(Order.aggregation("sortAgg", false))
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(sum("sortAgg").field(LONG_FIELD_NAME)))
+ .execute().actionGet();
+
+ assertSearchResponse(testResponse);
+
+ assertUnboundedDocCountError(size, accurateResponse, testResponse);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountTests.java
new file mode 100644
index 0000000000..4a7a49d2e9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountTests.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregatorFactory;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsBuilder;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class TermsShardMinDocCountTests extends ElasticsearchIntegrationTest {
+ private static final String index = "someindex";
+ private static final String type = "testtype";
+ public String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString();
+ }
+
+ // see https://github.com/elasticsearch/elasticsearch/issues/5998
+ @Test
+ public void shardMinDocCountSignificantTermsTest() throws Exception {
+
+ String termtype = "string";
+ if (randomBoolean()) {
+ termtype = "long";
+ }
+ assertAcked(prepareCreate(index).setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0).addMapping(type, "{\"properties\":{\"text\": {\"type\": \"" + termtype + "\"}}}"));
+ ensureYellow(index);
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+
+ addTermsDocs("1", 1, 0, indexBuilders);//high score but low doc freq
+ addTermsDocs("2", 1, 0, indexBuilders);
+ addTermsDocs("3", 1, 0, indexBuilders);
+ addTermsDocs("4", 1, 0, indexBuilders);
+ addTermsDocs("5", 3, 1, indexBuilders);//low score but high doc freq
+ addTermsDocs("6", 3, 1, indexBuilders);
+ addTermsDocs("7", 0, 3, indexBuilders);// make sure the terms all get score > 0 except for this one
+ indexRandom(true, false, indexBuilders);
+
+        // first, check that indeed, when not setting the shardMinDocCount parameter, 0 terms are returned
+ SearchResponse response = client().prepareSearch(index)
+ .addAggregation(
+ (new FilterAggregationBuilder("inclass").filter(QueryBuilders.termQuery("class", true)))
+ .subAggregation(new SignificantTermsBuilder("mySignificantTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()))
+ )
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ InternalFilter filteredBucket = response.getAggregations().get("inclass");
+ SignificantTerms sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
+ assertThat(sigterms.getBuckets().size(), equalTo(0));
+
+
+ response = client().prepareSearch(index)
+ .addAggregation(
+ (new FilterAggregationBuilder("inclass").filter(QueryBuilders.termQuery("class", true)))
+ .subAggregation(new SignificantTermsBuilder("mySignificantTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()))
+ )
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ filteredBucket = response.getAggregations().get("inclass");
+ sigterms = filteredBucket.getAggregations().get("mySignificantTerms");
+ assertThat(sigterms.getBuckets().size(), equalTo(2));
+
+ }
+
+ private void addTermsDocs(String term, int numInClass, int numNotInClass, List<IndexRequestBuilder> builders) {
+ String sourceClass = "{\"text\": \"" + term + "\", \"class\":" + "true" + "}";
+ String sourceNotClass = "{\"text\": \"" + term + "\", \"class\":" + "false" + "}";
+ for (int i = 0; i < numInClass; i++) {
+ builders.add(client().prepareIndex(index, type).setSource(sourceClass));
+ }
+ for (int i = 0; i < numNotInClass; i++) {
+ builders.add(client().prepareIndex(index, type).setSource(sourceNotClass));
+ }
+ }
+
+ // see https://github.com/elasticsearch/elasticsearch/issues/5998
+ @Test
+ public void shardMinDocCountTermsTest() throws Exception {
+ final String [] termTypes = {"string", "long", "integer", "float", "double"};
+ String termtype = termTypes[randomInt(termTypes.length - 1)];
+
+ assertAcked(prepareCreate(index).setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0).addMapping(type, "{\"properties\":{\"text\": {\"type\": \"" + termtype + "\"}}}"));
+ ensureYellow(index);
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+
+        addTermsDocs("1", 1, indexBuilders);//low doc freq
+ addTermsDocs("2", 1, indexBuilders);
+ addTermsDocs("3", 1, indexBuilders);
+ addTermsDocs("4", 1, indexBuilders);
+        addTermsDocs("5", 3, indexBuilders);//high doc freq
+ addTermsDocs("6", 3, indexBuilders);
+ indexRandom(true, false, indexBuilders);
+
+        // first, check that indeed, when not setting the shardMinDocCount parameter, 0 terms are returned
+ SearchResponse response = client().prepareSearch(index)
+ .addAggregation(
+ new TermsBuilder("myTerms").field("text").minDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true))
+ )
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ Terms sigterms = response.getAggregations().get("myTerms");
+ assertThat(sigterms.getBuckets().size(), equalTo(0));
+
+
+ response = client().prepareSearch(index)
+ .addAggregation(
+ new TermsBuilder("myTerms").field("text").minDocCount(2).shardMinDocCount(2).size(2).executionHint(randomExecutionHint()).order(Terms.Order.term(true))
+ )
+ .execute()
+ .actionGet();
+ assertSearchResponse(response);
+ sigterms = response.getAggregations().get("myTerms");
+ assertThat(sigterms.getBuckets().size(), equalTo(2));
+
+ }
+
+ private void addTermsDocs(String term, int numDocs, List<IndexRequestBuilder> builders) {
+ String sourceClass = "{\"text\": \"" + term + "\"}";
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex(index, type).setSource(sourceClass));
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java
new file mode 100644
index 0000000000..d5292e6786
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java
@@ -0,0 +1,1048 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.nested.Nested;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.elasticsearch.search.aggregations.metrics.tophits.TopHits;
+import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.highlight.HighlightField;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.nested;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.topHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.arrayContaining;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest()
+public class TopHitsTests extends ElasticsearchIntegrationTest {
+
+ private static final String TERMS_AGGS_FIELD = "terms";
+ private static final String SORT_FIELD = "sort";
+
+ public static String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString();
+ }
+
+ static int numArticles;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("empty");
+ assertAcked(prepareCreate("articles").addMapping("article", jsonBuilder().startObject().startObject("article").startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("date")
+ .field("type", "long")
+ .endObject()
+ .startObject("message")
+ .field("type", "string")
+ .field("store", true)
+ .field("term_vector", "with_positions_offsets")
+ .field("index_options", "offsets")
+ .endObject()
+ .startObject("reviewers")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen("idx", "empty", "articles");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 50; i++) {
+ builders.add(client().prepareIndex("idx", "type", Integer.toString(i)).setSource(jsonBuilder()
+ .startObject()
+ .field(TERMS_AGGS_FIELD, "val" + (i / 10))
+ .field(SORT_FIELD, i + 1)
+ .field("text", "some text to entertain")
+ .field("field1", 5)
+ .endObject()));
+ }
+
+ builders.add(client().prepareIndex("idx", "field-collapsing", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "a")
+ .field("text", "term x y z b")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "2").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "a")
+ .field("text", "term x y z n rare")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "3").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "b")
+ .field("text", "x y z term")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "4").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "b")
+ .field("text", "x y term")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "5").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "b")
+ .field("text", "x term")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "6").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "b")
+ .field("text", "term rare")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "7").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "c")
+ .field("text", "x y z term")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "8").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "c")
+ .field("text", "x y term b")
+ .endObject()));
+ builders.add(client().prepareIndex("idx", "field-collapsing", "9").setSource(jsonBuilder()
+ .startObject()
+ .field("group", "c")
+ .field("text", "rare x term")
+ .endObject()));
+
+ numArticles = scaledRandomIntBetween(10, 100);
+ numArticles -= (numArticles % 5);
+ for (int i = 0; i < numArticles; i++) {
+ XContentBuilder builder = randomFrom(jsonBuilder(), yamlBuilder(), smileBuilder());
+ builder.startObject().field("date", i).startArray("comments");
+ for (int j = 0; j < i; j++) {
+ String user = Integer.toString(j);
+ builder.startObject().field("id", j).field("user", user).field("message", "some text").endObject();
+ }
+ builder.endArray().endObject();
+
+ builders.add(
+ client().prepareIndex("articles", "article").setCreate(true).setSource(builder)
+ );
+ }
+
+ builders.add(
+ client().prepareIndex("articles", "article", "1")
+ .setSource(jsonBuilder().startObject().field("title", "title 1").field("body", "some text").startArray("comments")
+ .startObject()
+ .field("user", "a").field("date", 1l).field("message", "some comment")
+ .startArray("reviewers")
+ .startObject().field("name", "user a").endObject()
+ .startObject().field("name", "user b").endObject()
+ .startObject().field("name", "user c").endObject()
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("user", "b").field("date", 2l).field("message", "some other comment")
+ .startArray("reviewers")
+ .startObject().field("name", "user c").endObject()
+ .startObject().field("name", "user d").endObject()
+ .startObject().field("name", "user e").endObject()
+ .endArray()
+ .endObject()
+ .endArray().endObject())
+ );
+ builders.add(
+ client().prepareIndex("articles", "article", "2")
+ .setSource(jsonBuilder().startObject().field("title", "title 2").field("body", "some different text").startArray("comments")
+ .startObject()
+ .field("user", "b").field("date", 3l).field("message", "some comment")
+ .startArray("reviewers")
+ .startObject().field("name", "user f").endObject()
+ .endArray()
+ .endObject()
+ .startObject().field("user", "c").field("date", 4l).field("message", "some other comment").endObject()
+ .endArray().endObject())
+ );
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ private String key(Terms.Bucket bucket) {
+ return bucket.getKeyAsString();
+ }
+
+ @Test
+ public void testBasics() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))
+ )
+ )
+ .get();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ long higestSortValue = 0;
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(10l));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10l));
+ assertThat(hits.getHits().length, equalTo(3));
+ higestSortValue += 10;
+ assertThat((Long) hits.getAt(0).sortValues()[0], equalTo(higestSortValue));
+ assertThat((Long) hits.getAt(1).sortValues()[0], equalTo(higestSortValue - 1));
+ assertThat((Long) hits.getAt(2).sortValues()[0], equalTo(higestSortValue - 2));
+
+ assertThat(hits.getAt(0).sourceAsMap().size(), equalTo(4));
+ }
+ }
+
+ @Test
+ public void testIssue11119() throws Exception {
+ // Test that top_hits aggregation is fed scores if query results size=0
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("field-collapsing")
+ .setSize(0)
+ .setQuery(matchQuery("text", "x y z"))
+ .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group").subAggregation(topHits("hits")))
+ .get();
+
+ assertSearchResponse(response);
+
+ assertThat(response.getHits().getTotalHits(), equalTo(8l));
+ assertThat(response.getHits().hits().length, equalTo(0));
+ assertThat(response.getHits().maxScore(), equalTo(0f));
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(bucket, notNullValue());
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ float bestScore = Float.MAX_VALUE;
+ for (int h = 0; h < hits.getHits().length; h++) {
+ float score=hits.getAt(h).getScore();
+ assertThat(score, lessThanOrEqualTo(bestScore));
+ assertThat(score, greaterThan(0f));
+ bestScore = hits.getAt(h).getScore();
+ }
+ }
+
+ // Also check that min_score setting works when size=0
+ // (technically not a test of top_hits but implementation details are
+ // tied up with the need to feed scores into the agg tree even when
+ // users don't want ranked set of query results.)
+ response = client()
+ .prepareSearch("idx")
+ .setTypes("field-collapsing")
+ .setSize(0)
+ .setMinScore(0.0001f)
+ .setQuery(matchQuery("text", "x y z"))
+ .addAggregation(terms("terms").executionHint(randomExecutionHint()).field("group"))
+ .get();
+
+ assertSearchResponse(response);
+
+ assertThat(response.getHits().getTotalHits(), equalTo(8l));
+ assertThat(response.getHits().hits().length, equalTo(0));
+ assertThat(response.getHits().maxScore(), equalTo(0f));
+ terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+ }
+
+
+ @Test
+ public void testBreadthFirst() throws Exception {
+ // breadth_first will be ignored since we need scores
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .collectMode(SubAggCollectionMode.BREADTH_FIRST)
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(topHits("hits").setSize(3))
+ ).get();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (int i = 0; i < 5; i++) {
+ Terms.Bucket bucket = terms.getBucketByKey("val" + i);
+ assertThat(bucket, notNullValue());
+ assertThat(key(bucket), equalTo("val" + i));
+ assertThat(bucket.getDocCount(), equalTo(10l));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10l));
+ assertThat(hits.getHits().length, equalTo(3));
+
+ assertThat(hits.getAt(0).sourceAsMap().size(), equalTo(4));
+ }
+ }
+
+ @Test
+ public void testBasics_getProperty() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(topHits("hits"))).execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ TopHits topHits = global.getAggregations().get("hits");
+ assertThat(topHits, notNullValue());
+ assertThat(topHits.getName(), equalTo("hits"));
+ assertThat((TopHits) global.getProperty("hits"), sameInstance(topHits));
+
+ }
+
+ @Test
+ public void testPagination() throws Exception {
+ int size = randomIntBetween(1, 10);
+ int from = randomIntBetween(0, 10);
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC))
+ .setFrom(from)
+ .setSize(size)
+ )
+ )
+ .get();
+ assertSearchResponse(response);
+
+ SearchResponse control = client().prepareSearch("idx")
+ .setTypes("type")
+ .setFrom(from)
+ .setSize(size)
+ .setPostFilter(QueryBuilders.termQuery(TERMS_AGGS_FIELD, "val0"))
+ .addSort(SORT_FIELD, SortOrder.DESC)
+ .get();
+ assertSearchResponse(control);
+ SearchHits controlHits = control.getHits();
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ Terms.Bucket bucket = terms.getBucketByKey("val0");
+ assertThat(bucket, notNullValue());
+ assertThat(bucket.getDocCount(), equalTo(10l));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(controlHits.totalHits()));
+ assertThat(hits.getHits().length, equalTo(controlHits.getHits().length));
+ for (int i = 0; i < hits.getHits().length; i++) {
+ logger.info(i + ": top_hits: [" + hits.getAt(i).id() + "][" + hits.getAt(i).sortValues()[0] + "] control: [" + controlHits.getAt(i).id() + "][" + controlHits.getAt(i).sortValues()[0] + "]");
+ assertThat(hits.getAt(i).id(), equalTo(controlHits.getAt(i).id()));
+ assertThat(hits.getAt(i).sortValues()[0], equalTo(controlHits.getAt(i).sortValues()[0]));
+ }
+ }
+
+ @Test
+ public void testSortByBucket() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .order(Terms.Order.aggregation("max_sort", false))
+ .subAggregation(
+ topHits("hits").addSort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)).setTrackScores(true)
+ )
+ .subAggregation(
+ max("max_sort").field(SORT_FIELD)
+ )
+ )
+ .get();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ long higestSortValue = 50;
+ int currentBucket = 4;
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ assertThat(key(bucket), equalTo("val" + currentBucket--));
+ assertThat(bucket.getDocCount(), equalTo(10l));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10l));
+ assertThat(hits.getHits().length, equalTo(3));
+ assertThat((Long) hits.getAt(0).sortValues()[0], equalTo(higestSortValue));
+ assertThat((Long) hits.getAt(1).sortValues()[0], equalTo(higestSortValue - 1));
+ assertThat((Long) hits.getAt(2).sortValues()[0], equalTo(higestSortValue - 2));
+ Max max = bucket.getAggregations().get("max_sort");
+ assertThat(max.getValue(), equalTo(((Long) higestSortValue).doubleValue()));
+ higestSortValue -= 10;
+ }
+ }
+
+ @Test
+ public void testFieldCollapsing() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(matchQuery("text", "term rare"))
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field("group")
+ .order(Terms.Order.aggregation("max_score", false))
+ .subAggregation(
+ topHits("hits").setSize(1)
+ )
+ .subAggregation(
+ max("max_score").script("_score.doubleValue()")
+ )
+ )
+ .get();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ Iterator<Terms.Bucket> bucketIterator = terms.getBuckets().iterator();
+ Terms.Bucket bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("b"));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(4l));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).id(), equalTo("6"));
+
+ bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("c"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(3l));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).id(), equalTo("9"));
+
+ bucket = bucketIterator.next();
+ assertThat(key(bucket), equalTo("a"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(2l));
+ assertThat(hits.getHits().length, equalTo(1));
+ assertThat(hits.getAt(0).id(), equalTo("2"));
+ }
+
+ @Test
+ public void testFetchFeatures() {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .setQuery(matchQuery("text", "text").queryName("test"))
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").setSize(1)
+ .addHighlightedField("text")
+ .setExplain(true)
+ .addFieldDataField("field1")
+ .addScriptField("script", new Script("doc['field1'].value"))
+ .setFetchSource("text", null)
+ .setVersion(true)
+ )
+ )
+ .get();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10l));
+ assertThat(hits.getHits().length, equalTo(1));
+
+ SearchHit hit = hits.getAt(0);
+ HighlightField highlightField = hit.getHighlightFields().get("text");
+ assertThat(highlightField.getFragments().length, equalTo(1));
+ assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>text</em> to entertain"));
+
+ Explanation explanation = hit.explanation();
+ assertThat(explanation.toString(), containsString("text:text"));
+
+ long version = hit.version();
+ assertThat(version, equalTo(1l));
+
+ assertThat(hit.matchedQueries()[0], equalTo("test"));
+
+ SearchHitField field = hit.field("field1");
+ assertThat(field.getValue().toString(), equalTo("5"));
+
+ field = hit.field("script");
+ assertThat(field.getValue().toString(), equalTo("5"));
+
+ assertThat(hit.sourceAsMap().size(), equalTo(1));
+ assertThat(hit.sourceAsMap().get("text").toString(), equalTo("some text to entertain"));
+ }
+ }
+
+ @Test
+ public void testInvalidSortField() throws Exception {
+ try {
+ client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").addSort(SortBuilders.fieldSort("xyz").order(SortOrder.DESC))
+ )
+ ).get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("No mapping found for [xyz] in order to sort on"));
+ }
+ }
+
+ @Test
+ public void testFailWithSubAgg() throws Exception {
+ String source = "{\n" +
+ " \"aggs\": {\n" +
+ " \"top-tags\": {\n" +
+ " \"terms\": {\n" +
+ " \"field\": \"tags\"\n" +
+ " },\n" +
+ " \"aggs\": {\n" +
+ " \"top_tags_hits\": {\n" +
+ " \"top_hits\": {},\n" +
+ " \"aggs\": {\n" +
+ " \"max\": {\n" +
+ " \"max\": {\n" +
+ " \"field\": \"age\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ "}";
+ try {
+ client().prepareSearch("idx").setTypes("type")
+ .setSource(source)
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations"));
+ }
+ }
+
+ @Test
+ public void testEmptyIndex() throws Exception {
+ SearchResponse response = client().prepareSearch("empty").setTypes("type")
+ .addAggregation(topHits("hits"))
+ .get();
+ assertSearchResponse(response);
+
+ TopHits hits = response.getAggregations().get("hits");
+ assertThat(hits, notNullValue());
+ assertThat(hits.getName(), equalTo("hits"));
+ assertThat(hits.getHits().totalHits(), equalTo(0l));
+ }
+
+ @Test
+ public void testTrackScores() throws Exception {
+ boolean[] trackScores = new boolean[]{true, false};
+ for (boolean trackScore : trackScores) {
+ logger.info("Track score=" + trackScore);
+ SearchResponse response = client().prepareSearch("idx").setTypes("field-collapsing")
+ .setQuery(matchQuery("text", "term rare"))
+ .addAggregation(terms("terms")
+ .field("group")
+ .subAggregation(
+ topHits("hits")
+ .setTrackScores(trackScore)
+ .setSize(1)
+ .addSort("_id", SortOrder.DESC)
+ )
+ )
+ .get();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(3));
+
+ Terms.Bucket bucket = terms.getBucketByKey("a");
+ assertThat(key(bucket), equalTo("a"));
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+ assertThat(hits.getAt(0).score(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+
+ bucket = terms.getBucketByKey("b");
+ assertThat(key(bucket), equalTo("b"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+ assertThat(hits.getAt(0).score(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+
+ bucket = terms.getBucketByKey("c");
+ assertThat(key(bucket), equalTo("c"));
+ topHits = bucket.getAggregations().get("hits");
+ hits = topHits.getHits();
+ assertThat(hits.getMaxScore(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+ assertThat(hits.getAt(0).score(), trackScore ? not(equalTo(Float.NaN)) : equalTo(Float.NaN));
+ }
+ }
+
+ @Test
+ public void testTopHitsInNestedSimple() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("articles")
+ .setQuery(matchQuery("title", "title"))
+ .addAggregation(
+ nested("to-comments")
+ .path("comments")
+ .subAggregation(
+ terms("users")
+ .field("comments.user")
+ .subAggregation(
+ topHits("top-comments").addSort("comments.date", SortOrder.ASC)
+ )
+ )
+ )
+ .get();
+
+ Nested nested = searchResponse.getAggregations().get("to-comments");
+ assertThat(nested.getDocCount(), equalTo(4l));
+
+ Terms terms = nested.getAggregations().get("users");
+ Terms.Bucket bucket = terms.getBucketByKey("a");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ TopHits topHits = bucket.getAggregations().get("top-comments");
+ SearchHits searchHits = topHits.getHits();
+ assertThat(searchHits.totalHits(), equalTo(1l));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat((Integer) searchHits.getAt(0).getSource().get("date"), equalTo(1));
+
+ bucket = terms.getBucketByKey("b");
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ topHits = bucket.getAggregations().get("top-comments");
+ searchHits = topHits.getHits();
+ assertThat(searchHits.totalHits(), equalTo(2l));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat((Integer) searchHits.getAt(0).getSource().get("date"), equalTo(2));
+ assertThat(searchHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHits.getAt(1).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat((Integer) searchHits.getAt(1).getSource().get("date"), equalTo(3));
+
+ bucket = terms.getBucketByKey("c");
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ topHits = bucket.getAggregations().get("top-comments");
+ searchHits = topHits.getHits();
+ assertThat(searchHits.totalHits(), equalTo(1l));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHits.getAt(0).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat((Integer) searchHits.getAt(0).getSource().get("date"), equalTo(4));
+ }
+
+ @Test
+ public void testTopHitsInSecondLayerNested() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("articles")
+ .setQuery(matchQuery("title", "title"))
+ .addAggregation(
+ nested("to-comments")
+ .path("comments")
+ .subAggregation(
+ nested("to-reviewers").path("comments.reviewers").subAggregation(
+ // Also need to sort on _doc because there are two reviewers with the same name
+ topHits("top-reviewers").addSort("comments.reviewers.name", SortOrder.ASC).addSort("_doc", SortOrder.DESC).setSize(7)
+ )
+ )
+ .subAggregation(topHits("top-comments").addSort("comments.date", SortOrder.DESC).setSize(4))
+ ).get();
+ assertNoFailures(searchResponse);
+
+ Nested toComments = searchResponse.getAggregations().get("to-comments");
+ assertThat(toComments.getDocCount(), equalTo(4l));
+
+ TopHits topComments = toComments.getAggregations().get("top-comments");
+ assertThat(topComments.getHits().totalHits(), equalTo(4l));
+ assertThat(topComments.getHits().getHits().length, equalTo(4));
+
+ assertThat(topComments.getHits().getAt(0).getId(), equalTo("2"));
+ assertThat(topComments.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topComments.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(topComments.getHits().getAt(0).getNestedIdentity().getChild(), nullValue());
+
+ assertThat(topComments.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(topComments.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topComments.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(topComments.getHits().getAt(1).getNestedIdentity().getChild(), nullValue());
+
+ assertThat(topComments.getHits().getAt(2).getId(), equalTo("1"));
+ assertThat(topComments.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topComments.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(topComments.getHits().getAt(2).getNestedIdentity().getChild(), nullValue());
+
+ assertThat(topComments.getHits().getAt(3).getId(), equalTo("1"));
+ assertThat(topComments.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topComments.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(topComments.getHits().getAt(3).getNestedIdentity().getChild(), nullValue());
+
+ Nested toReviewers = toComments.getAggregations().get("to-reviewers");
+ assertThat(toReviewers.getDocCount(), equalTo(7l));
+
+ TopHits topReviewers = toReviewers.getAggregations().get("top-reviewers");
+ assertThat(topReviewers.getHits().totalHits(), equalTo(7l));
+ assertThat(topReviewers.getHits().getHits().length, equalTo(7));
+
+ assertThat(topReviewers.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(0).sourceAsMap().get("name"), equalTo("user a"));
+ assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));
+
+ assertThat(topReviewers.getHits().getAt(1).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(1).sourceAsMap().get("name"), equalTo("user b"));
+ assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(1).getNestedIdentity().getChild().getOffset(), equalTo(1));
+
+ assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(2).sourceAsMap().get("name"), equalTo("user c"));
+ assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(2));
+
+ assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(3).sourceAsMap().get("name"), equalTo("user c"));
+ assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(0));
+
+ assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(4).sourceAsMap().get("name"), equalTo("user d"));
+ assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(4).getNestedIdentity().getChild().getOffset(), equalTo(1));
+
+ assertThat(topReviewers.getHits().getAt(5).getId(), equalTo("1"));
+ assertThat((String) topReviewers.getHits().getAt(5).sourceAsMap().get("name"), equalTo("user e"));
+ assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+ assertThat(topReviewers.getHits().getAt(5).getNestedIdentity().getChild().getOffset(), equalTo(2));
+
+ assertThat(topReviewers.getHits().getAt(6).getId(), equalTo("2"));
+ assertThat((String) topReviewers.getHits().getAt(6).sourceAsMap().get("name"), equalTo("user f"));
+        assertThat(topReviewers.getHits().getAt(6).getNestedIdentity().getField().string(), equalTo("comments"));
+        assertThat(topReviewers.getHits().getAt(6).getNestedIdentity().getOffset(), equalTo(0));
+        assertThat(topReviewers.getHits().getAt(6).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
+        assertThat(topReviewers.getHits().getAt(6).getNestedIdentity().getChild().getOffset(), equalTo(0));
+ }
+
+ @Test
+ public void testNestedFetchFeatures() {
+ String hlType = randomFrom("plain", "fvh", "postings");
+ HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message")
+ .highlightQuery(matchQuery("comments.message", "comment"))
+ .forceSource(randomBoolean()) // randomly from stored field or _source
+ .highlighterType(hlType);
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test")))
+ .addAggregation(
+ nested("to-comments").path("comments").subAggregation(
+ topHits("top-comments").setSize(1).addHighlightedField(hlField).setExplain(true)
+ .addFieldDataField("comments.user")
+ .addScriptField("script", new Script("doc['comments.user'].value")).setFetchSource("message", null)
+ .setVersion(true).addSort("comments.date", SortOrder.ASC))).get();
+ assertHitCount(searchResponse, 2);
+ Nested nested = searchResponse.getAggregations().get("to-comments");
+ assertThat(nested.getDocCount(), equalTo(4l));
+
+ SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits();
+ assertThat(hits.totalHits(), equalTo(4l));
+ SearchHit searchHit = hits.getAt(0);
+ assertThat(searchHit.getId(), equalTo("1"));
+ assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0));
+
+ HighlightField highlightField = searchHit.getHighlightFields().get("comments.message");
+ assertThat(highlightField.getFragments().length, equalTo(1));
+ assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>comment</em>"));
+
+ // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not even have matched with the main query
+ // If top_hits would have a query option then we can explain that query
+ Explanation explanation = searchHit.explanation();
+ assertFalse(explanation.isMatch());
+
+ // Returns the version of the root document. Nested docs don't have a separate version
+ long version = searchHit.version();
+ assertThat(version, equalTo(1l));
+
+ // Can't use named queries for the same reason explain doesn't work:
+ assertThat(searchHit.matchedQueries(), arrayContaining("test"));
+
+ SearchHitField field = searchHit.field("comments.user");
+ assertThat(field.getValue().toString(), equalTo("a"));
+
+ field = searchHit.field("script");
+ assertThat(field.getValue().toString(), equalTo("a"));
+
+ assertThat(searchHit.sourceAsMap().size(), equalTo(1));
+ assertThat(searchHit.sourceAsMap().get("message").toString(), equalTo("some comment"));
+ }
+
+ @Test
+ public void testTopHitsInNested() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("articles")
+ .addAggregation(
+ histogram("dates")
+ .field("date")
+ .interval(5)
+ .order(Histogram.Order.aggregation("to-comments", true))
+ .subAggregation(
+ nested("to-comments")
+ .path("comments")
+ .subAggregation(topHits("comments")
+ .addHighlightedField(new HighlightBuilder.Field("comments.message").highlightQuery(matchQuery("comments.message", "text")))
+ .addSort("comments.id", SortOrder.ASC))
+ )
+ )
+ .get();
+
+ Histogram histogram = searchResponse.getAggregations().get("dates");
+ for (int i = 0; i < numArticles; i += 5) {
+ Histogram.Bucket bucket = histogram.getBuckets().get(i / 5);
+ assertThat(bucket.getDocCount(), equalTo(5l));
+
+ long numNestedDocs = 10 + (5 * i);
+ Nested nested = bucket.getAggregations().get("to-comments");
+ assertThat(nested.getDocCount(), equalTo(numNestedDocs));
+
+ TopHits hits = nested.getAggregations().get("comments");
+ SearchHits searchHits = hits.getHits();
+ assertThat(searchHits.totalHits(), equalTo(numNestedDocs));
+ for (int j = 0; j < 3; j++) {
+ assertThat(searchHits.getAt(j).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHits.getAt(j).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat((Integer) searchHits.getAt(j).sourceAsMap().get("id"), equalTo(0));
+
+ HighlightField highlightField = searchHits.getAt(j).getHighlightFields().get("comments.message");
+ assertThat(highlightField.getFragments().length, equalTo(1));
+ assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>text</em>"));
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testFetchFeaturesOldScriptAPI() {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setTypes("type")
+ .setQuery(matchQuery("text", "text").queryName("test"))
+ .addAggregation(
+ terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(TERMS_AGGS_FIELD)
+ .subAggregation(
+ topHits("hits").setSize(1).addHighlightedField("text").setExplain(true).addFieldDataField("field1")
+ .addScriptField("script", "doc['field1'].value").setFetchSource("text", null)
+ .setVersion(true))).get();
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ assertThat(terms.getBuckets().size(), equalTo(5));
+
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ TopHits topHits = bucket.getAggregations().get("hits");
+ SearchHits hits = topHits.getHits();
+ assertThat(hits.totalHits(), equalTo(10l));
+ assertThat(hits.getHits().length, equalTo(1));
+
+ SearchHit hit = hits.getAt(0);
+ HighlightField highlightField = hit.getHighlightFields().get("text");
+ assertThat(highlightField.getFragments().length, equalTo(1));
+ assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>text</em> to entertain"));
+
+ Explanation explanation = hit.explanation();
+ assertThat(explanation.toString(), containsString("text:text"));
+
+ long version = hit.version();
+ assertThat(version, equalTo(1l));
+
+ assertThat(hit.matchedQueries()[0], equalTo("test"));
+
+ SearchHitField field = hit.field("field1");
+ assertThat(field.getValue().toString(), equalTo("5"));
+
+ field = hit.field("script");
+ assertThat(field.getValue().toString(), equalTo("5"));
+
+ assertThat(hit.sourceAsMap().size(), equalTo(1));
+ assertThat(hit.sourceAsMap().get("text").toString(), equalTo("some text to entertain"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testNestedFetchFeaturesOldScriptAPI() {
+ String hlType = randomFrom("plain", "fvh", "postings");
+ HighlightBuilder.Field hlField = new HighlightBuilder.Field("comments.message")
+ .highlightQuery(matchQuery("comments.message", "comment")).forceSource(randomBoolean()) // randomly from stored field or _source
+ .highlighterType(hlType);
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "comment").queryName("test")))
+ .addAggregation(
+ nested("to-comments").path("comments").subAggregation(
+ topHits("top-comments").setSize(1).addHighlightedField(hlField).setExplain(true)
+ .addFieldDataField("comments.user").addScriptField("script", "doc['comments.user'].value")
+ .setFetchSource("message", null).setVersion(true).addSort("comments.date", SortOrder.ASC))).get();
+ assertHitCount(searchResponse, 2);
+ Nested nested = searchResponse.getAggregations().get("to-comments");
+ assertThat(nested.getDocCount(), equalTo(4l));
+
+ SearchHits hits = ((TopHits) nested.getAggregations().get("top-comments")).getHits();
+ assertThat(hits.totalHits(), equalTo(4l));
+ SearchHit searchHit = hits.getAt(0);
+ assertThat(searchHit.getId(), equalTo("1"));
+ assertThat(searchHit.getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(searchHit.getNestedIdentity().getOffset(), equalTo(0));
+
+ HighlightField highlightField = searchHit.getHighlightFields().get("comments.message");
+ assertThat(highlightField.getFragments().length, equalTo(1));
+ assertThat(highlightField.getFragments()[0].string(), equalTo("some <em>comment</em>"));
+
+ // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not even have matched with the main query
+ // If top_hits would have a query option then we can explain that query
+ Explanation explanation = searchHit.explanation();
+ assertFalse(explanation.isMatch());
+
+ // Returns the version of the root document. Nested docs don't have a separate version
+ long version = searchHit.version();
+ assertThat(version, equalTo(1l));
+
+ // Can't use named queries for the same reason explain doesn't work:
+ assertThat(searchHit.matchedQueries(), arrayContaining("test"));
+
+ SearchHitField field = searchHit.field("comments.user");
+ assertThat(field.getValue().toString(), equalTo("a"));
+
+ field = searchHit.field("script");
+ assertThat(field.getValue().toString(), equalTo("a"));
+
+ assertThat(searchHit.sourceAsMap().size(), equalTo(1));
+ assertThat(searchHit.sourceAsMap().get("message").toString(), equalTo("some comment"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/agg-filter-with-empty-bool.json b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/agg-filter-with-empty-bool.json
new file mode 100644
index 0000000000..f730b43c49
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/agg-filter-with-empty-bool.json
@@ -0,0 +1,33 @@
+{
+ "aggs": {
+ "issue7240": {
+ "aggs": {
+ "terms": {
+ "terms": {
+ "field": "field"
+ }
+ }
+ },
+ "filter": {
+ "fquery": {
+ "query": {
+ "filtered": {
+ "query": {
+ "bool": {}
+ },
+ "filter": {
+ "fquery": {
+ "query": {
+ "query_string": {
+ "query": "_type:apache"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java
new file mode 100644
index 0000000000..f07d7790ff
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.nested;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
+import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.lucene.search.Queries;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.search.aggregations.Aggregator;
+import org.elasticsearch.search.aggregations.AggregatorFactories;
+import org.elasticsearch.search.aggregations.BucketCollector;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.aggregations.support.AggregationContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class NestedAggregatorTest extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testResetRootDocId() throws Exception {
+ Directory directory = newDirectory();
+ IndexWriterConfig iwc = new IndexWriterConfig(null);
+ iwc.setMergePolicy(NoMergePolicy.INSTANCE);
+ RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);
+
+ List<Document> documents = new ArrayList<>();
+
+ // 1 segment with, 1 root document, with 3 nested sub docs
+ Document document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ indexWriter.addDocuments(documents);
+ indexWriter.commit();
+
+ documents.clear();
+ // 1 segment with:
+ // 1 document, with 1 nested subdoc
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ indexWriter.addDocuments(documents);
+ documents.clear();
+ // and 1 document, with 1 nested subdoc
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ document = new Document();
+ document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
+ document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
+ documents.add(document);
+ indexWriter.addDocuments(documents);
+
+ indexWriter.commit();
+ indexWriter.close();
+
+ DirectoryReader directoryReader = DirectoryReader.open(directory);
+ IndexSearcher searcher = new IndexSearcher(directoryReader);
+
+ IndexService indexService = createIndex("test");
+ indexService.mapperService().merge("test", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()), true);
+ SearchContext searchContext = createSearchContext(indexService);
+ AggregationContext context = new AggregationContext(searchContext);
+
+ AggregatorFactories.Builder builder = AggregatorFactories.builder();
+ builder.addAggregator(new NestedAggregator.Factory("test", "nested_field"));
+ AggregatorFactories factories = builder.build();
+ searchContext.aggregations(new SearchContextAggregations(factories));
+ Aggregator[] aggs = factories.createTopLevelAggregators(context);
+ BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
+ collector.preCollection();
+ // A regular search always exclude nested docs, so we use NonNestedDocsFilter.INSTANCE here (otherwise MatchAllDocsQuery would be sufficient)
+ // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because
+ // root doc type#3 and root doc type#1 have the same segment docid
+ BooleanQuery bq = new BooleanQuery();
+ bq.add(Queries.newNonNestedFilter(), Occur.MUST);
+ bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT);
+ searcher.search(new ConstantScoreQuery(bq), collector);
+ collector.postCollection();
+
+ Nested nested = (Nested) aggs[0].buildAggregation(0);
+ // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first segment are emitted as hits.
+ assertThat(nested.getDocCount(), equalTo(4l));
+
+ directoryReader.close();
+ directory.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
new file mode 100644
index 0000000000..a0ccd83f1c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Aggregations module
+ */
+package org.elasticsearch.search.aggregations.bucket;
+
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
new file mode 100644
index 0000000000..816c845675
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeSignificanceScoreScriptNoParams extends TestScript {
+
+ public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_NO_PARAMS = "native_significance_score_script_no_params";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeSignificanceScoreScriptNoParams();
+ }
+ }
+
+ private NativeSignificanceScoreScriptNoParams() {
+ }
+
+ @Override
+ public Object run() {
+ return _subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
new file mode 100644
index 0000000000..3568f8f687
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeSignificanceScoreScriptWithParams extends TestScript {
+
+ public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_WITH_PARAMS = "native_significance_score_script_with_params";
+ double factor = 0.0;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeSignificanceScoreScriptWithParams(params);
+ }
+ }
+
+ private NativeSignificanceScoreScriptWithParams(Map<String, Object> params) {
+ factor = ((Number) params.get("param")).doubleValue();
+ }
+
+ @Override
+ public Object run() {
+ return factor * (_subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue()) / factor;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
new file mode 100644
index 0000000000..3060d7af81
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic;
+
+public abstract class TestScript implements ExecutableScript {
+
+ ScriptHeuristic.LongAccessor _subset_freq;
+ ScriptHeuristic.LongAccessor _subset_size;
+ ScriptHeuristic.LongAccessor _superset_freq;
+ ScriptHeuristic.LongAccessor _superset_size;
+
+ protected TestScript() {
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ if (name.equals("_subset_freq")) {
+ _subset_freq = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_subset_size")) {
+ _subset_size = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_superset_freq")) {
+ _superset_freq = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_superset_size")) {
+ _superset_size = (ScriptHeuristic.LongAccessor)value;
+ }
+ }
+
+ @Override
+ public Double unwrap(Object value) {
+ return ((Number) value).doubleValue();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
new file mode 100644
index 0000000000..83705bf7d7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java
@@ -0,0 +1,413 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.bucket.significant;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.InputStreamStreamInput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.PercentageScore;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * Unit tests for the significance heuristics used by the significant_terms aggregation: stream serialization, builder/parser round-trips, and scoring properties.
+ */
+public class SignificanceHeuristicTests extends ElasticsearchTestCase {
+ static class SignificantTermsTestSearchContext extends TestSearchContext {
+ @Override
+ public int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return new SearchShardTarget("no node, this is a unit test", "no index, this is a unit test", 0);
+ }
+ }
+
+ // test that stream output can actually be read - does not replace bwc test
+ @Test
+ public void streamResponse() throws Exception {
+ SignificanceHeuristicStreams.registerStream(MutualInformation.STREAM, MutualInformation.STREAM.getName());
+ SignificanceHeuristicStreams.registerStream(JLHScore.STREAM, JLHScore.STREAM.getName());
+ SignificanceHeuristicStreams.registerStream(PercentageScore.STREAM, PercentageScore.STREAM.getName());
+ SignificanceHeuristicStreams.registerStream(GND.STREAM, GND.STREAM.getName());
+ SignificanceHeuristicStreams.registerStream(ChiSquare.STREAM, ChiSquare.STREAM.getName());
+ SignificanceHeuristicStreams.registerStream(ScriptHeuristic.STREAM, ScriptHeuristic.STREAM.getName());
+ Version version = randomVersion(random());
+ InternalSignificantTerms[] sigTerms = getRandomSignificantTerms(getRandomSignificanceheuristic());
+
+ // write
+ ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
+ OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
+ out.setVersion(version);
+
+ sigTerms[0].writeTo(out);
+
+ // read
+ ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
+ InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
+ in.setVersion(version);
+
+ sigTerms[1].readFrom(in);
+
+ assertTrue(sigTerms[1].significanceHeuristic.equals(sigTerms[0].significanceHeuristic));
+ }
+
+ InternalSignificantTerms[] getRandomSignificantTerms(SignificanceHeuristic heuristic) {
+ InternalSignificantTerms[] sTerms = new InternalSignificantTerms[2];
+ ArrayList<InternalSignificantTerms.Bucket> buckets = new ArrayList<>();
+ if (randomBoolean()) {
+ BytesRef term = new BytesRef("123.0");
+ buckets.add(new SignificantLongTerms.Bucket(1, 2, 3, 4, 123, InternalAggregations.EMPTY, null));
+ sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets,
+ (List<PipelineAggregator>) Collections.EMPTY_LIST, null);
+ sTerms[1] = new SignificantLongTerms();
+ } else {
+
+ BytesRef term = new BytesRef("someterm");
+ buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY));
+ sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, (List<PipelineAggregator>) Collections.EMPTY_LIST,
+ null);
+ sTerms[1] = new SignificantStringTerms();
+ }
+ return sTerms;
+ }
+
+ SignificanceHeuristic getRandomSignificanceheuristic() {
+ List<SignificanceHeuristic> heuristics = new ArrayList<>();
+ heuristics.add(JLHScore.INSTANCE);
+ heuristics.add(new MutualInformation(randomBoolean(), randomBoolean()));
+ heuristics.add(new GND(randomBoolean()));
+ heuristics.add(new ChiSquare(randomBoolean(), randomBoolean()));
+ return heuristics.get(randomInt(3));
+ }
+
+ // test that
+ // 1. The output of the builders can actually be parsed
+ // 2. The parser does not swallow parameters after a significance heuristic was defined
+ @Test
+ public void testBuilderAndParser() throws Exception {
+
+ Set<SignificanceHeuristicParser> parsers = new HashSet<>();
+ parsers.add(new JLHScore.JLHScoreParser());
+ parsers.add(new MutualInformation.MutualInformationParser());
+ parsers.add(new GND.GNDParser());
+ parsers.add(new ChiSquare.ChiSquareParser());
+ SignificanceHeuristicParserMapper heuristicParserMapper = new SignificanceHeuristicParserMapper(parsers);
+ SearchContext searchContext = new SignificantTermsTestSearchContext();
+
+ // test jlh with string
+ assertTrue(parseFromString(heuristicParserMapper, searchContext, "\"jlh\":{}") instanceof JLHScore);
+ // test gnd with string
+ assertTrue(parseFromString(heuristicParserMapper, searchContext, "\"gnd\":{}") instanceof GND);
+ // test mutual information with string
+ boolean includeNegatives = randomBoolean();
+ boolean backgroundIsSuperset = randomBoolean();
+ assertThat(parseFromString(heuristicParserMapper, searchContext, "\"mutual_information\":{\"include_negatives\": " + includeNegatives + ", \"background_is_superset\":" + backgroundIsSuperset + "}"), equalTo((SignificanceHeuristic) (new MutualInformation(includeNegatives, backgroundIsSuperset))));
+ assertThat(parseFromString(heuristicParserMapper, searchContext, "\"chi_square\":{\"include_negatives\": " + includeNegatives + ", \"background_is_superset\":" + backgroundIsSuperset + "}"), equalTo((SignificanceHeuristic) (new ChiSquare(includeNegatives, backgroundIsSuperset))));
+
+ // test with builders
+ assertTrue(parseFromBuilder(heuristicParserMapper, searchContext, new JLHScore.JLHScoreBuilder()) instanceof JLHScore);
+ assertTrue(parseFromBuilder(heuristicParserMapper, searchContext, new GND.GNDBuilder(backgroundIsSuperset)) instanceof GND);
+ assertThat(parseFromBuilder(heuristicParserMapper, searchContext, new MutualInformation.MutualInformationBuilder(includeNegatives, backgroundIsSuperset)), equalTo((SignificanceHeuristic) new MutualInformation(includeNegatives, backgroundIsSuperset)));
+ assertThat(parseFromBuilder(heuristicParserMapper, searchContext, new ChiSquare.ChiSquareBuilder(includeNegatives, backgroundIsSuperset)), equalTo((SignificanceHeuristic) new ChiSquare(includeNegatives, backgroundIsSuperset)));
+
+ // test exceptions
+ String faultyHeuristicdefinition = "\"mutual_information\":{\"include_negatives\": false, \"some_unknown_field\": false}";
+ String expectedError = "unknown for mutual_information";
+ checkParseException(heuristicParserMapper, searchContext, faultyHeuristicdefinition, expectedError);
+
+ faultyHeuristicdefinition = "\"chi_square\":{\"unknown_field\": true}";
+ expectedError = "unknown for chi_square";
+ checkParseException(heuristicParserMapper, searchContext, faultyHeuristicdefinition, expectedError);
+
+ faultyHeuristicdefinition = "\"jlh\":{\"unknown_field\": true}";
+ expectedError = "expected }, got ";
+ checkParseException(heuristicParserMapper, searchContext, faultyHeuristicdefinition, expectedError);
+
+ faultyHeuristicdefinition = "\"gnd\":{\"unknown_field\": true}";
+ expectedError = "unknown for gnd";
+ checkParseException(heuristicParserMapper, searchContext, faultyHeuristicdefinition, expectedError);
+ }
+
+ protected void checkParseException(SignificanceHeuristicParserMapper heuristicParserMapper, SearchContext searchContext, String faultyHeuristicDefinition, String expectedError) throws IOException {
+ try {
+ XContentParser stParser = JsonXContent.jsonXContent.createParser("{\"field\":\"text\", " + faultyHeuristicDefinition + ",\"min_doc_count\":200}");
+ stParser.nextToken();
+ new SignificantTermsParser(heuristicParserMapper).parse("testagg", stParser, searchContext);
+ fail();
+ } catch (ElasticsearchParseException e) {
+ assertTrue(e.getMessage().contains(expectedError));
+ }
+ }
+
+ protected SignificanceHeuristic parseFromBuilder(SignificanceHeuristicParserMapper heuristicParserMapper, SearchContext searchContext, SignificanceHeuristicBuilder significanceHeuristicBuilder) throws IOException {
+ SignificantTermsBuilder stBuilder = new SignificantTermsBuilder("testagg");
+ stBuilder.significanceHeuristic(significanceHeuristicBuilder).field("text").minDocCount(200);
+ XContentBuilder stXContentBuilder = XContentFactory.jsonBuilder();
+ stBuilder.internalXContent(stXContentBuilder, null);
+ XContentParser stParser = JsonXContent.jsonXContent.createParser(stXContentBuilder.string());
+ return parseSignificanceHeuristic(heuristicParserMapper, searchContext, stParser);
+ }
+
+ private SignificanceHeuristic parseSignificanceHeuristic(SignificanceHeuristicParserMapper heuristicParserMapper, SearchContext searchContext, XContentParser stParser) throws IOException {
+ stParser.nextToken();
+ SignificantTermsAggregatorFactory aggregatorFactory = (SignificantTermsAggregatorFactory) new SignificantTermsParser(heuristicParserMapper).parse("testagg", stParser, searchContext);
+ stParser.nextToken();
+ assertThat(aggregatorFactory.getBucketCountThresholds().getMinDocCount(), equalTo(200L));
+ assertThat(stParser.currentToken(), equalTo(null));
+ stParser.close();
+ return aggregatorFactory.getSignificanceHeuristic();
+ }
+
+ protected SignificanceHeuristic parseFromString(SignificanceHeuristicParserMapper heuristicParserMapper, SearchContext searchContext, String heuristicString) throws IOException {
+ XContentParser stParser = JsonXContent.jsonXContent.createParser("{\"field\":\"text\", " + heuristicString + ", \"min_doc_count\":200}");
+ return parseSignificanceHeuristic(heuristicParserMapper, searchContext, stParser);
+ }
+
+ void testBackgroundAssertions(SignificanceHeuristic heuristicIsSuperset, SignificanceHeuristic heuristicNotSuperset) {
+ try {
+ heuristicIsSuperset.getScore(2, 3, 1, 4);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > supersetFreq"));
+ }
+ try {
+ heuristicIsSuperset.getScore(1, 4, 2, 3);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("subsetSize > supersetSize"));
+ }
+ try {
+ heuristicIsSuperset.getScore(2, 1, 3, 4);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize"));
+ }
+ try {
+ heuristicIsSuperset.getScore(1, 2, 4, 3);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize"));
+ }
+ try {
+ heuristicIsSuperset.getScore(1, 3, 4, 4);
+ fail();
+ } catch (IllegalArgumentException assertionError) {
+ assertNotNull(assertionError.getMessage());
+ assertTrue(assertionError.getMessage().contains("supersetFreq - subsetFreq > supersetSize - subsetSize"));
+ }
+ try {
+ int idx = randomInt(3);
+ long[] values = {1, 2, 3, 4};
+ values[idx] *= -1;
+ heuristicIsSuperset.getScore(values[0], values[1], values[2], values[3]);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive"));
+ }
+ try {
+ heuristicNotSuperset.getScore(2, 1, 3, 4);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize"));
+ }
+ try {
+ heuristicNotSuperset.getScore(1, 2, 4, 3);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize"));
+ }
+ try {
+ int idx = randomInt(3);
+ long[] values = {1, 2, 3, 4};
+ values[idx] *= -1;
+ heuristicNotSuperset.getScore(values[0], values[1], values[2], values[3]);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive"));
+ }
+ }
+
+ void testAssertions(SignificanceHeuristic heuristic) {
+ try {
+ int idx = randomInt(3);
+ long[] values = {1, 2, 3, 4};
+ values[idx] *= -1;
+ heuristic.getScore(values[0], values[1], values[2], values[3]);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive"));
+ }
+ try {
+ heuristic.getScore(1, 2, 4, 3);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize"));
+ }
+ try {
+ heuristic.getScore(2, 1, 3, 4);
+ fail();
+ } catch (IllegalArgumentException illegalArgumentException) {
+ assertNotNull(illegalArgumentException.getMessage());
+ assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize"));
+ }
+ }
+
+ @Test
+ public void testAssertions() throws Exception {
+ testBackgroundAssertions(new MutualInformation(true, true), new MutualInformation(true, false));
+ testBackgroundAssertions(new ChiSquare(true, true), new ChiSquare(true, false));
+ testBackgroundAssertions(new GND(true), new GND(false));
+ testAssertions(PercentageScore.INSTANCE);
+ testAssertions(JLHScore.INSTANCE);
+ }
+
+ @Test
+ public void basicScoreProperties() {
+ basicScoreProperties(JLHScore.INSTANCE, true);
+ basicScoreProperties(new GND(true), true);
+ basicScoreProperties(PercentageScore.INSTANCE, true);
+ basicScoreProperties(new MutualInformation(true, true), false);
+ basicScoreProperties(new ChiSquare(true, true), false);
+ }
+
+ public void basicScoreProperties(SignificanceHeuristic heuristic, boolean test0) {
+
+ assertThat(heuristic.getScore(1, 1, 1, 3), greaterThan(0.0));
+ assertThat(heuristic.getScore(1, 1, 2, 3), lessThan(heuristic.getScore(1, 1, 1, 3)));
+ assertThat(heuristic.getScore(1, 1, 3, 4), lessThan(heuristic.getScore(1, 1, 2, 4)));
+ if (test0) {
+ assertThat(heuristic.getScore(0, 1, 2, 3), equalTo(0.0));
+ }
+
+ double score = 0.0;
+ try {
+ long a = randomLong();
+ long b = randomLong();
+ long c = randomLong();
+ long d = randomLong();
+ score = heuristic.getScore(a, b, c, d);
+ } catch (IllegalArgumentException e) {
+ }
+ assertThat(score, greaterThanOrEqualTo(0.0));
+ }
+
+ @Test
+ public void scoreMutual() throws Exception {
+ SignificanceHeuristic heuristic = new MutualInformation(true, true);
+ assertThat(heuristic.getScore(1, 1, 1, 3), greaterThan(0.0));
+ assertThat(heuristic.getScore(1, 1, 2, 3), lessThan(heuristic.getScore(1, 1, 1, 3)));
+ assertThat(heuristic.getScore(2, 2, 2, 4), equalTo(1.0));
+ assertThat(heuristic.getScore(0, 2, 2, 4), equalTo(1.0));
+ assertThat(heuristic.getScore(2, 2, 4, 4), equalTo(0.0));
+ assertThat(heuristic.getScore(1, 2, 2, 4), equalTo(0.0));
+ assertThat(heuristic.getScore(3, 6, 9, 18), equalTo(0.0));
+
+ double score = 0.0;
+ try {
+ long a = randomLong();
+ long b = randomLong();
+ long c = randomLong();
+ long d = randomLong();
+ score = heuristic.getScore(a, b, c, d);
+ } catch (IllegalArgumentException e) {
+ }
+ assertThat(score, lessThanOrEqualTo(1.0));
+ assertThat(score, greaterThanOrEqualTo(0.0));
+ heuristic = new MutualInformation(false, true);
+ assertThat(heuristic.getScore(0, 1, 2, 3), equalTo(Double.NEGATIVE_INFINITY));
+
+ heuristic = new MutualInformation(true, false);
+ score = heuristic.getScore(2, 3, 1, 4);
+ assertThat(score, greaterThanOrEqualTo(0.0));
+ assertThat(score, lessThanOrEqualTo(1.0));
+ score = heuristic.getScore(1, 4, 2, 3);
+ assertThat(score, greaterThanOrEqualTo(0.0));
+ assertThat(score, lessThanOrEqualTo(1.0));
+ score = heuristic.getScore(1, 3, 4, 4);
+ assertThat(score, greaterThanOrEqualTo(0.0));
+ assertThat(score, lessThanOrEqualTo(1.0));
+ }
+
+ @Test
+ public void testGNDCornerCases() throws Exception {
+ GND gnd = new GND(true);
+ // term is only in the subset, not at all in the other set, but that is because the other set is empty.
+ // this should actually not happen because only terms that are in the subset are considered now,
+ // however, in this case the score should be 0 because a term that does not exist cannot be relevant...
+ assertThat(gnd.getScore(0, randomIntBetween(1, 2), 0, randomIntBetween(2,3)), equalTo(0.0));
+ // the terms do not co-occur at all - should be 0
+ assertThat(gnd.getScore(0, randomIntBetween(1, 2), randomIntBetween(2, 3), randomIntBetween(5,6)), equalTo(0.0));
+ // comparison between two terms that do not exist - probably not relevant
+ assertThat(gnd.getScore(0, 0, 0, randomIntBetween(1,2)), equalTo(0.0));
+ // terms co-occur perfectly - should be 1
+ assertThat(gnd.getScore(1, 1, 1, 1), equalTo(1.0));
+ gnd = new GND(false);
+ assertThat(gnd.getScore(0, 0, 0, 0), equalTo(0.0));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
new file mode 100644
index 0000000000..f416b7df04
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+/**
+ * Base class for numeric metrics aggregation tests; sets up the "idx", "idx_unmapped" and "empty_bucket_idx" test indices.
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public abstract class AbstractNumericTests extends ElasticsearchIntegrationTest {
+
+ protected static long minValue, maxValue, minValues, maxValues;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ final int numDocs = 10;
+ for (int i = 0; i < numDocs; i++) { // TODO randomize the size and the params in here?
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject()));
+ }
+ minValue = 1;
+ minValues = 2;
+ maxValue = numDocs;
+ maxValues = numDocs + 2;
+ indexRandom(true, builders);
+
+ // creating an index to test the empty buckets functionality. The way it works is by indexing
+ // two docs {value: 0} and {value : 2}, then building a histogram agg with interval 1 and with empty
+ // buckets computed. The empty bucket is the one associated with key "1". Then each test will have
+ // to check that this bucket exists with the appropriate sub aggregations.
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ public abstract void testEmptyAggregation() throws Exception;
+
+ public abstract void testUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField() throws Exception;
+
+ public abstract void testSingleValuedField_getProperty() throws Exception;
+
+ public abstract void testSingleValuedField_PartiallyUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript() throws Exception;
+
+ public abstract void testSingleValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testMultiValuedField() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript() throws Exception;
+
+ public abstract void testMultiValuedField_WithValueScript_WithParams() throws Exception;
+
+ public abstract void testScript_SingleValued() throws Exception;
+
+ public abstract void testScript_SingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_ExplicitSingleValued_WithParams() throws Exception;
+
+ public abstract void testScript_MultiValued() throws Exception;
+
+ public abstract void testScript_ExplicitMultiValued() throws Exception;
+
+ public abstract void testScript_MultiValued_WithParams() throws Exception;
+
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
new file mode 100644
index 0000000000..27e6830f0a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgTests.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class AvgTests extends AbstractNumericTests {
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(avg("avg")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Avg avg = bucket.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(Double.isNaN(avg.getValue()), is(true));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo(Double.NaN));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(avg("avg").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Avg avg = global.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ double expectedAvgValue = (double) (1+2+3+4+5+6+7+8+9+10) / 10;
+ assertThat(avg.getValue(), equalTo(expectedAvgValue));
+ assertThat((Avg) global.getProperty("avg"), equalTo(avg));
+ assertThat((double) global.getProperty("avg.value"), equalTo(expectedAvgValue));
+ assertThat((double) avg.getProperty("value"), equalTo(expectedAvgValue));
+ }
+
+ @Override
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ public void testSingleValuedField_WithFormatter() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(avg("avg").format("#").field("value")).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10));
+ assertThat(avg.getValueAsString(), equalTo("6"));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12) / 20));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13) / 20));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("[ doc['value'].value, doc['value'].value + 1 ]"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("[ doc['value'].value, doc['value'].value + 1 ]"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(avg("avg").script("[ doc['value'].value, doc['value'].value + inc ]").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Avg avg = searchResponse.getAggregations().get("avg");
+ assertThat(avg, notNullValue());
+ assertThat(avg.getName(), equalTo("avg"));
+ assertThat(avg.getValue(), equalTo((double) (1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11) / 20));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java
new file mode 100644
index 0000000000..df5ee5c534
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java
@@ -0,0 +1,473 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.cardinality;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class CardinalityTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public Settings indexSettings() {
+ return Settings.builder()
+ .put("index.number_of_shards", numberOfShards())
+ .put("index.number_of_replicas", numberOfReplicas())
+ .build();
+ }
+
+ static long numDocs;
+ static long precisionThreshold;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+
+ prepareCreate("idx").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("str_value")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("str_values")
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("l_value")
+ .field("type", "long")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("l_values")
+ .field("type", "long")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("d_value")
+ .field("type", "double")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("d_values")
+ .field("type", "double")
+ .startObject("fields")
+ .startObject("hash")
+ .field("type", "murmur3")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+
+ numDocs = randomIntBetween(2, 100);
+ precisionThreshold = randomIntBetween(0, 1 << randomInt(20));
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[(int) numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("str_value", "s" + i)
+ .field("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
+ .field("l_value", i)
+ .field("l_values", new int[] {i * 2, i * 2 + 1})
+ .field("d_value", i)
+ .field("d_values", new double[]{i * 2, i * 2 + 1})
+ .endObject());
+ }
+ indexRandom(true, builders);
+ createIndex("idx_unmapped");
+
+ IndexRequestBuilder[] dummyDocsBuilder = new IndexRequestBuilder[10];
+ for (int i = 0; i < dummyDocsBuilder.length; i++) {
+ dummyDocsBuilder[i] = client().prepareIndex("idx", "type").setSource("a_field", "1");
+ }
+ indexRandom(true, dummyDocsBuilder);
+
+ ensureSearchable();
+ }
+
+ private void assertCount(Cardinality count, long value) {
+ if (value <= precisionThreshold) {
+ // linear counting should be picked, and should be accurate
+ assertEquals(value, count.getValue());
+ } else {
+ // error is not bound, so let's just make sure it is > 0
+ assertThat(count.getValue(), greaterThan(0L));
+ }
+ }
+ private String singleNumericField(boolean hash) {
+ return (randomBoolean() ? "l_value" : "d_value") + (hash ? ".hash" : "");
+ }
+
+ private String multiNumericField(boolean hash) {
+ return (randomBoolean() ? "l_values" : "d_values") + (hash ? ".hash" : "");
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, 0);
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void singleValuedString() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void singleValuedStringHashed() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value.hash"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void singleValuedNumeric() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void singleValuedNumeric_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(
+ global("global").subAggregation(
+ cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false))))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ // assertThat(global.getDocCount(), equalTo(numDocs));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Cardinality cardinality = global.getAggregations().get("cardinality");
+ assertThat(cardinality, notNullValue());
+ assertThat(cardinality.getName(), equalTo("cardinality"));
+ long expectedValue = numDocs;
+ assertCount(cardinality, expectedValue);
+ assertThat((Cardinality) global.getProperty("cardinality"), equalTo(cardinality));
+ assertThat((double) global.getProperty("cardinality.value"), equalTo((double) cardinality.getValue()));
+ assertThat((double) cardinality.getProperty("value"), equalTo((double) cardinality.getValue()));
+ }
+
+ @Test
+ public void singleValuedNumericHashed() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void multiValuedString() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void multiValuedStringHashed() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values.hash"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void multiValuedNumeric() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void multiValuedNumericHashed() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(true)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void singleValuedStringScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['str_value'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void multiValuedStringScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['str_values'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void singleValuedNumericScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['" + singleNumericField(false) + "'].value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void multiValuedNumericScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).script("doc['" + multiNumericField(false) + "'].values"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void singleValuedStringValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value").script("_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void multiValuedStringValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values").script("_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void singleValuedNumericValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField(false)).script("_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs);
+ }
+
+ @Test
+ public void multiValuedNumericValueScript() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(multiNumericField(false)).script("_value"))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Cardinality count = response.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, numDocs * 2);
+ }
+
+ @Test
+ public void asSubAgg() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms").field("str_value")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ Cardinality count = bucket.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, 2);
+ }
+ }
+
+ @Test
+ public void asSubAggHashed() throws Exception {
+ SearchResponse response = client().prepareSearch("idx").setTypes("type")
+ .addAggregation(terms("terms").field("str_value")
+ .collectMode(randomFrom(SubAggCollectionMode.values()))
+ .subAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_values.hash")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ for (Terms.Bucket bucket : terms.getBuckets()) {
+ Cardinality count = bucket.getAggregations().get("cardinality");
+ assertThat(count, notNullValue());
+ assertThat(count.getName(), equalTo("cardinality"));
+ assertCount(count, 2);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
new file mode 100644
index 0000000000..248b3633db
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsTests.java
@@ -0,0 +1,557 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStats;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.extendedStats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+public class ExtendedStatsTests extends AbstractNumericTests {
+
    /** Population standard deviation of {@code vals}: sqrt of {@link #variance(int...)}. */
    private static double stdDev(int... vals) {
        return Math.sqrt(variance(vals));
    }
+
+ private static double variance(int... vals) {
+ double sum = 0;
+ double sumOfSqrs = 0;
+ for (int val : vals) {
+ sum += val;
+ sumOfSqrs += val * val;
+ }
+ return (sumOfSqrs - ((sum * sum) / vals.length)) / vals.length;
+ }
+
    @Override
    @Test
    public void testEmptyAggregation() throws Exception {
        // A min_doc_count=0 histogram produces empty buckets; extended_stats
        // over such a bucket must report its identity values instead of failing.
        SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
                .setQuery(matchAllQuery())
                .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(extendedStats("stats")))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
        Histogram histo = searchResponse.getAggregations().get("histo");
        assertThat(histo, notNullValue());
        // bucket 1 is expected to be the empty one — TODO confirm against the empty_bucket_idx setup
        Histogram.Bucket bucket = histo.getBuckets().get(1);
        assertThat(bucket, notNullValue());

        ExtendedStats stats = bucket.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getSumOfSquares(), equalTo(0.0));
        assertThat(stats.getCount(), equalTo(0l));
        assertThat(stats.getSum(), equalTo(0.0));
        // min/max over no docs are the +/- infinity identity elements
        assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
        assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
        // avg, std-dev and the sigma bounds are 0/0 -> NaN
        assertThat(Double.isNaN(stats.getStdDeviation()), is(true));
        assertThat(Double.isNaN(stats.getAvg()), is(true));
        assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER)), is(true));
        assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER)), is(true));
    }
+
    @Override
    @Test
    public void testUnmapped() throws Exception {
        // Against an index where "value" is unmapped the aggregation still
        // exists in the response, holding the empty/identity statistics.
        SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value"))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo(Double.NaN));
        assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
        assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
        assertThat(stats.getSum(), equalTo(0.0));
        assertThat(stats.getCount(), equalTo(0l));
        assertThat(stats.getSumOfSquares(), equalTo(0.0));
        assertThat(stats.getVariance(), equalTo(Double.NaN));
        assertThat(stats.getStdDeviation(), equalTo(Double.NaN));
        assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER)), is(true));
        assertThat(Double.isNaN(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER)), is(true));
    }
+
    @Override
    @Test
    public void testSingleValuedField() throws Exception {
        // Stats over the single-valued "value" field; the assertions spell out
        // the expected aggregates for docs holding the values 1..10.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(10.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Test
    public void testSingleValuedFieldDefaultSigma() throws Exception {

        // Same as previous test, but uses a default value for sigma

        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value"))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(10.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        // 2 is the documented default sigma for extended_stats bounds
        checkUpperLowerBounds(stats, 2);
    }
+
+ public void testSingleValuedField_WithFormatter() throws Exception {
+ double sigma = randomDouble() * randomIntBetween(1, 10);
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(extendedStats("stats").format("0000.0").field("value").sigma(sigma)).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ExtendedStats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10));
+ assertThat(stats.getAvgAsString(), equalTo("0005.5"));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMinAsString(), equalTo("0001.0"));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getMaxAsString(), equalTo("0010.0"));
+ assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10));
+ assertThat(stats.getSumAsString(), equalTo("0055.0"));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getCountAsString(), equalTo("0010.0"));
+ assertThat(stats.getSumOfSquares(), equalTo((double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100));
+ assertThat(stats.getSumOfSquaresAsString(), equalTo("0385.0"));
+ assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ assertThat(stats.getVarianceAsString(), equalTo("0008.2"));
+ assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
+ assertThat(stats.getStdDeviationAsString(), equalTo("0002.9"));
+ checkUpperLowerBounds(stats, sigma);
+ }
+
    @Override
    @Test
    public void testSingleValuedField_getProperty() throws Exception {
        // Every statistic must also be reachable through the aggregation
        // property path API ("stats.avg", "stats.min", ...), and the object
        // returned via getProperty must be the very same instance.
        SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
                .addAggregation(global("global").subAggregation(extendedStats("stats").field("value"))).execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        Global global = searchResponse.getAggregations().get("global");
        assertThat(global, notNullValue());
        assertThat(global.getName(), equalTo("global"));
        assertThat(global.getDocCount(), equalTo(10l));
        assertThat(global.getAggregations(), notNullValue());
        assertThat(global.getAggregations().asMap().size(), equalTo(1));

        ExtendedStats stats = global.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        ExtendedStats statsFromProperty = (ExtendedStats) global.getProperty("stats");
        assertThat(statsFromProperty, notNullValue());
        assertThat(statsFromProperty, sameInstance(stats));
        double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10;
        assertThat(stats.getAvg(), equalTo(expectedAvgValue));
        assertThat((double) global.getProperty("stats.avg"), equalTo(expectedAvgValue));
        double expectedMinValue = 1.0;
        assertThat(stats.getMin(), equalTo(expectedMinValue));
        assertThat((double) global.getProperty("stats.min"), equalTo(expectedMinValue));
        double expectedMaxValue = 10.0;
        assertThat(stats.getMax(), equalTo(expectedMaxValue));
        assertThat((double) global.getProperty("stats.max"), equalTo(expectedMaxValue));
        double expectedSumValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10);
        assertThat(stats.getSum(), equalTo(expectedSumValue));
        assertThat((double) global.getProperty("stats.sum"), equalTo(expectedSumValue));
        long expectedCountValue = 10;
        assertThat(stats.getCount(), equalTo(expectedCountValue));
        // note: the property path yields the count as a double
        assertThat((double) global.getProperty("stats.count"), equalTo((double) expectedCountValue));
        double expectedSumOfSquaresValue = (double) 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 100;
        assertThat(stats.getSumOfSquares(), equalTo(expectedSumOfSquaresValue));
        assertThat((double) global.getProperty("stats.sum_of_squares"), equalTo(expectedSumOfSquaresValue));
        double expectedVarianceValue = variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        assertThat(stats.getVariance(), equalTo(expectedVarianceValue));
        assertThat((double) global.getProperty("stats.variance"), equalTo(expectedVarianceValue));
        double expectedStdDevValue = stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        assertThat(stats.getStdDeviation(), equalTo(expectedStdDevValue));
        assertThat((double) global.getProperty("stats.std_deviation"), equalTo(expectedStdDevValue));
    }
+
    @Override
    @Test
    public void testSingleValuedField_PartiallyUnmapped() throws Exception {
        // Searching a mapped and an unmapped index together: the unmapped
        // index contributes nothing, so the stats equal the mapped-only case.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(10.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testSingleValuedField_WithValueScript() throws Exception {
        // Value script shifts each field value by +1, so expected stats cover 2..11.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value").script("_value + 1").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
        // Same +1 shift as the previous test, but the increment is supplied
        // as a script parameter instead of a literal.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("value").script("_value + inc").param("inc", 1).sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testMultiValuedField() throws Exception {
        // Multi-valued "values" field: 20 values across 10 docs
        // (2..11 and 3..12), all folded into one set of statistics.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("values").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(12.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testMultiValuedField_WithValueScript() throws Exception {
        // Value script shifts every multi-field value by -1 (expected 1..10 and 2..11).
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("values").script("_value - 1").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
        // Same -1 shift as the previous test, with the decrement passed as a
        // script parameter.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").field("values").script("_value - dec").param("dec", 1).sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testScript_SingleValued() throws Exception {
        // Script-sourced values (no field): script reads doc['value'].value directly.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("doc['value'].value").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
        assertThat(stats.getMin(), equalTo(1.0));
        assertThat(stats.getMax(), equalTo(10.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testScript_SingleValued_WithParams() throws Exception {
        // Script-sourced values with a parameter: doc value + inc (inc = 1).
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1).sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testScript_ExplicitSingleValued_WithParams() throws Exception {
        // NOTE(review): this is byte-for-byte identical to
        // testScript_SingleValued_WithParams — presumably the "explicit"
        // variant was meant to configure the value type explicitly; verify
        // against the abstract base class contract.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("doc['value'].value + inc").param("inc", 1).sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(11.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
        assertThat(stats.getCount(), equalTo(10l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testScript_MultiValued() throws Exception {
        // Script returning the whole multi-valued list doc['values'].values.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("doc['values'].values").sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(12.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12)));
        checkUpperLowerBounds(stats, sigma);
    }
+
    @Override
    @Test
    public void testScript_ExplicitMultiValued() throws Exception {
        // Same aggregation as testScript_MultiValued; additionally the only
        // test in this class that asserts zero shard-level failures.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("doc['values'].values").sigma(sigma))
                .execute().actionGet();

        assertShardExecutionState(searchResponse, 0);
        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
        assertThat(stats.getMin(), equalTo(2.0));
        assertThat(stats.getMax(), equalTo(12.0));
        assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 4+9+16+25+36+49+64+81+100+121+9+16+25+36+49+64+81+100+121+144));
        assertThat(stats.getVariance(), equalTo(variance(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12)));
        checkUpperLowerBounds(stats, sigma);

    }
+
    @Override
    @Test
    public void testScript_MultiValued_WithParams() throws Exception {
        // Script builds a two-element list per doc: [value, value - dec],
        // i.e. 1..10 plus 0..9 for dec = 1.
        double sigma = randomDouble() * randomIntBetween(1, 10);
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(matchAllQuery())
                .addAggregation(extendedStats("stats").script("[ doc['value'].value, doc['value'].value - dec ]").param("dec", 1).sigma(sigma))
                .execute().actionGet();

        assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));

        ExtendedStats stats = searchResponse.getAggregations().get("stats");
        assertThat(stats, notNullValue());
        assertThat(stats.getName(), equalTo("stats"));
        assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
        assertThat(stats.getMin(), equalTo(0.0));
        assertThat(stats.getMax(), equalTo(10.0));
        assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
        assertThat(stats.getCount(), equalTo(20l));
        assertThat(stats.getSumOfSquares(), equalTo((double) 1+4+9+16+25+36+49+64+81+100+0+1+4+9+16+25+36+49+64+81));
        assertThat(stats.getVariance(), equalTo(variance(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9)));
        assertThat(stats.getStdDeviation(), equalTo(stdDev(1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8 ,9)));
        checkUpperLowerBounds(stats, sigma);
    }
+
+
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.reason(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+
    /**
     * Asserts the std-deviation bounds equal avg +/- sigma * stdDev,
     * mirroring how the server computes ExtendedStats.Bounds.
     */
    private void checkUpperLowerBounds(ExtendedStats stats, double sigma) {
        assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), equalTo(stats.getAvg() + (stats.getStdDeviation() * sigma)));
        assertThat(stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), equalTo(stats.getAvg() - (stats.getStdDeviation() * sigma)));
    }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java
new file mode 100644
index 0000000000..386ae1416e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsTests.java
@@ -0,0 +1,440 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArray;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
+import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBounds;
+import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregator;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.geoBounds;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GeoBoundsTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "geo_value";
+ private static final String MULTI_VALUED_FIELD_NAME = "geo_values";
+ private static final String NUMBER_FIELD_NAME = "l_values";
+
+ static int numDocs;
+ static int numUniqueGeoPoints;
+ static GeoPoint[] singleValues, multiValues;
+ static GeoPoint singleTopLeft, singleBottomRight, multiTopLeft, multiBottomRight, unmappedTopLeft, unmappedBottomRight;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ assertAcked(prepareCreate("idx")
+ .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed"));
+ createIndex("idx_unmapped");
+
+ unmappedTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ unmappedBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ singleTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ singleBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+ multiTopLeft = new GeoPoint(Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY);
+ multiBottomRight = new GeoPoint(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY);
+
+ numDocs = randomIntBetween(6, 20);
+ numUniqueGeoPoints = randomIntBetween(1, numDocs);
+
+ singleValues = new GeoPoint[numUniqueGeoPoints];
+ for (int i = 0 ; i < singleValues.length; i++)
+ {
+ singleValues[i] = randomGeoPoint();
+ updateBoundsTopLeft(singleValues[i], singleTopLeft);
+ updateBoundsBottomRight(singleValues[i], singleBottomRight);
+ }
+
+ multiValues = new GeoPoint[numUniqueGeoPoints];
+ for (int i = 0 ; i < multiValues.length; i++)
+ {
+ multiValues[i] = randomGeoPoint();
+ updateBoundsTopLeft(multiValues[i], multiTopLeft);
+ updateBoundsBottomRight(multiValues[i], multiBottomRight);
+ }
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .array(SINGLE_VALUED_FIELD_NAME, singleValues[i % numUniqueGeoPoints].lon(), singleValues[i % numUniqueGeoPoints].lat())
+ .startArray(MULTI_VALUED_FIELD_NAME)
+ .startArray().value(multiValues[i % numUniqueGeoPoints].lon()).value(multiValues[i % numUniqueGeoPoints].lat()).endArray()
+ .startArray().value(multiValues[(i+1) % numUniqueGeoPoints].lon()).value(multiValues[(i+1) % numUniqueGeoPoints].lat()).endArray()
+ .endArray()
+ .field(NUMBER_FIELD_NAME, i)
+ .field("tag", "tag" + i)
+ .endObject()));
+ }
+
+ assertAcked(prepareCreate("empty_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point"));
+
+ assertAcked(prepareCreate("idx_dateline")
+ .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed"));
+
+ GeoPoint[] geoValues = new GeoPoint[5];
+ geoValues[0] = new GeoPoint(38, 178);
+ geoValues[1] = new GeoPoint(12, -179);
+ geoValues[2] = new GeoPoint(-24, 170);
+ geoValues[3] = new GeoPoint(32, -175);
+ geoValues[4] = new GeoPoint(-11, 178);
+
+ for (int i = 0; i < 5; i++) {
+ builders.add(client().prepareIndex("idx_dateline", "type").setSource(jsonBuilder()
+ .startObject()
+ .array(SINGLE_VALUED_FIELD_NAME, geoValues[i].lon(), geoValues[i].lat())
+ .field(NUMBER_FIELD_NAME, i)
+ .field("tag", "tag" + i)
+ .endObject()));
+ }
+ assertAcked(prepareCreate("high_card_idx").setSettings(Settings.builder().put("number_of_shards", 2))
+ .addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed"));
+
+
+ for (int i = 0; i < 2000; i++) {
+ builders.add(client().prepareIndex("high_card_idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .array(SINGLE_VALUED_FIELD_NAME, singleValues[i % numUniqueGeoPoints].lon(), singleValues[i % numUniqueGeoPoints].lat())
+ .startArray(MULTI_VALUED_FIELD_NAME)
+ .startArray().value(multiValues[i % numUniqueGeoPoints].lon()).value(multiValues[i % numUniqueGeoPoints].lat()).endArray()
+ .startArray().value(multiValues[(i+1) % numUniqueGeoPoints].lon()).value(multiValues[(i+1) % numUniqueGeoPoints].lat()).endArray()
+ .endArray()
+ .field(NUMBER_FIELD_NAME, i)
+ .field("tag", "tag" + i)
+ .endObject()));
+ }
+
+ builders.add(client().prepareIndex("idx_zero", "type").setSource(
+ jsonBuilder().startObject().array(SINGLE_VALUED_FIELD_NAME, 0.0, 1.0).endObject()));
+ assertAcked(prepareCreate("idx_zero").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point"));
+
+ indexRandom(true, builders);
+ ensureSearchable();
+
+        // Added to debug a test failure where the terms aggregation seems to be reporting two documents with the same value for NUMBER_FIELD_NAME. This will check that after
+        // random indexing each document only has 1 value for NUMBER_FIELD_NAME and it is the correct value. Following this initial change it seems that this call was getting
+        // more than 2000 hits (actual value was 2059) so now it will also check to ensure all hits have the correct index and type
+ SearchResponse response = client().prepareSearch("high_card_idx").addField(NUMBER_FIELD_NAME).addSort(SortBuilders.fieldSort(NUMBER_FIELD_NAME).order(SortOrder.ASC)).setSize(5000).get();
+ assertSearchResponse(response);
+ long totalHits = response.getHits().totalHits();
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ response.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ logger.info("Full high_card_idx Response Content:\n{ {} }", builder.string());
+ for (int i = 0; i < totalHits; i++) {
+ SearchHit searchHit = response.getHits().getAt(i);
+ assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getIndex(), equalTo("high_card_idx"));
+ assertThat("Hit " + i + " with id: " + searchHit.getId(), searchHit.getType(), equalTo("type"));
+ SearchHitField hitField = searchHit.field(NUMBER_FIELD_NAME);
+
+ assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1));
+ Integer value = hitField.getValue();
+ assertThat("Hit " + i + " has wrong value", value, equalTo(i));
+ }
+ assertThat(totalHits, equalTo(2000l));
+ }
+
+ private void updateBoundsBottomRight(GeoPoint geoPoint, GeoPoint currentBound) {
+ if (geoPoint.lat() < currentBound.lat()) {
+ currentBound.resetLat(geoPoint.lat());
+ }
+ if (geoPoint.lon() > currentBound.lon()) {
+ currentBound.resetLon(geoPoint.lon());
+ }
+ }
+
+ private void updateBoundsTopLeft(GeoPoint geoPoint, GeoPoint currentBound) {
+ if (geoPoint.lat() > currentBound.lat()) {
+ currentBound.resetLat(geoPoint.lat());
+ }
+ if (geoPoint.lon() < currentBound.lon()) {
+ currentBound.resetLon(geoPoint.lon());
+ }
+ }
+
+ private GeoPoint randomGeoPoint() {
+ return new GeoPoint((randomDouble() * 180) - 90, (randomDouble() * 360) - 180);
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(singleTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(singleTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(singleBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(singleBottomRight.lon()));
+ }
+
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ global("global").subAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo((long) numDocs));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ GeoBounds geobounds = global.getAggregations().get("geoBounds");
+ assertThat(geobounds, notNullValue());
+ assertThat(geobounds.getName(), equalTo("geoBounds"));
+ assertThat((GeoBounds) global.getProperty("geoBounds"), sameInstance(geobounds));
+ GeoPoint topLeft = geobounds.topLeft();
+ GeoPoint bottomRight = geobounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(singleTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(singleTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(singleBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(singleBottomRight.lon()));
+ assertThat((double) global.getProperty("geoBounds.top"), equalTo(singleTopLeft.lat()));
+ assertThat((double) global.getProperty("geoBounds.left"), equalTo(singleTopLeft.lon()));
+ assertThat((double) global.getProperty("geoBounds.bottom"), equalTo(singleBottomRight.lat()));
+ assertThat((double) global.getProperty("geoBounds.right"), equalTo(singleBottomRight.lon()));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(geoBounds("geoBounds").field(MULTI_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(multiTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(multiTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(multiBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(multiBottomRight.lon()));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_unmapped")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft, equalTo(null));
+ assertThat(bottomRight, equalTo(null));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client().prepareSearch("idx", "idx_unmapped")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(singleTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(singleTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(singleBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(singleBottomRight.lon()));
+ }
+
+ @Test
+ public void emptyAggregation() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("empty_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+ GeoBounds geoBounds = searchResponse.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft, equalTo(null));
+ assertThat(bottomRight, equalTo(null));
+ }
+
+ @Test
+ public void singleValuedFieldNearDateLine() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_dateline")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoPoint geoValuesTopLeft = new GeoPoint(38, -179);
+ GeoPoint geoValuesBottomRight = new GeoPoint(-24, 178);
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(geoValuesTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(geoValuesTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(geoValuesBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(geoValuesBottomRight.lon()));
+ }
+
+ @Test
+ public void singleValuedFieldNearDateLineWrapLongitude() throws Exception {
+
+ GeoPoint geoValuesTopLeft = new GeoPoint(38, 170);
+ GeoPoint geoValuesBottomRight = new GeoPoint(-24, -175);
+
+ SearchResponse response = client().prepareSearch("idx_dateline")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(true))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(geoValuesTopLeft.lat()));
+ assertThat(topLeft.lon(), equalTo(geoValuesTopLeft.lon()));
+ assertThat(bottomRight.lat(), equalTo(geoValuesBottomRight.lat()));
+ assertThat(bottomRight.lon(), equalTo(geoValuesBottomRight.lon()));
+ }
+
+ /**
+ * This test forces the {@link GeoBoundsAggregator} to resize the {@link BigArray}s it uses to ensure they are resized correctly
+ */
+ @Test
+ public void singleValuedFieldAsSubAggToHighCardTermsAgg() {
+ SearchResponse response = client().prepareSearch("high_card_idx")
+ .addAggregation(terms("terms").field(NUMBER_FIELD_NAME).subAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME)
+ .wrapLongitude(false)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(10));
+ for (int i = 0; i < 10; i++) {
+ Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat("Bucket " + bucket.getKey() + " has wrong number of documents", bucket.getDocCount(), equalTo(1l));
+ GeoBounds geoBounds = bucket.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ assertThat(geoBounds.topLeft().getLat(), allOf(greaterThanOrEqualTo(-90.0), lessThanOrEqualTo(90.0)));
+ assertThat(geoBounds.topLeft().getLon(), allOf(greaterThanOrEqualTo(-180.0), lessThanOrEqualTo(180.0)));
+ assertThat(geoBounds.bottomRight().getLat(), allOf(greaterThanOrEqualTo(-90.0), lessThanOrEqualTo(90.0)));
+ assertThat(geoBounds.bottomRight().getLon(), allOf(greaterThanOrEqualTo(-180.0), lessThanOrEqualTo(180.0)));
+ }
+ }
+
+ @Test
+ public void singleValuedFieldWithZeroLon() throws Exception {
+ SearchResponse response = client().prepareSearch("idx_zero")
+ .addAggregation(geoBounds("geoBounds").field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ GeoBounds geoBounds = response.getAggregations().get("geoBounds");
+ assertThat(geoBounds, notNullValue());
+ assertThat(geoBounds.getName(), equalTo("geoBounds"));
+ GeoPoint topLeft = geoBounds.topLeft();
+ GeoPoint bottomRight = geoBounds.bottomRight();
+ assertThat(topLeft.lat(), equalTo(1.0));
+ assertThat(topLeft.lon(), equalTo(0.0));
+ assertThat(bottomRight.lat(), equalTo(1.0));
+ assertThat(bottomRight.lon(), equalTo(0.0));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
new file mode 100644
index 0000000000..45a9a3afec
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxTests.java
@@ -0,0 +1,325 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.max.Max;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MaxTests extends AbstractNumericTests {
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(max("max")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Max max = bucket.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(Double.NEGATIVE_INFINITY));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithFormatter() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(max("max").format("0000.0").field("value")).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ assertThat(max.getValueAsString(), equalTo("0010.0"));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(max("max").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Max max = global.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ double expectedMaxValue = 10.0;
+ assertThat(max.getValue(), equalTo(expectedMaxValue));
+ assertThat((Max) global.getProperty("max"), equalTo(max));
+ assertThat((double) global.getProperty("max.value"), equalTo(expectedMaxValue));
+ assertThat((double) max.getProperty("value"), equalTo(expectedMaxValue));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").field("values").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(13.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(10.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(12.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(max("max").script("[ doc['value'].value, doc['value'].value + inc ]").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Max max = searchResponse.getAggregations().get("max");
+ assertThat(max, notNullValue());
+ assertThat(max.getName(), equalTo("max"));
+ assertThat(max.getValue(), equalTo(11.0));
+ }
+
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
new file mode 100644
index 0000000000..1018ffdf63
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinTests.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.min.Min;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MinTests extends AbstractNumericTests {
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(min("min")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Min min = bucket.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(Double.POSITIVE_INFINITY));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testSingleValuedField_WithFormatter() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(min("min").format("0000.0").field("value")).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ assertThat(min.getValueAsString(), equalTo("0001.0"));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(min("min").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Min min = global.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ double expectedMinValue = 1.0;
+ assertThat(min.getValue(), equalTo(expectedMinValue));
+ assertThat((Min) global.getProperty("min"), equalTo(min));
+ assertThat((double) global.getProperty("min.value"), equalTo(expectedMinValue));
+ assertThat((double) min.getProperty("value"), equalTo(expectedMinValue));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("value").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_Reverse() throws Exception {
+ // test what happens when values arrive in reverse order since the min aggregator is optimized to work on sorted values
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value * -1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(-12d));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['value'].value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(2.0));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(min("min").script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Min min = searchResponse.getAggregations().get("min");
+ assertThat(min, notNullValue());
+ assertThat(min.getName(), equalTo("min"));
+ assertThat(min.getValue(), equalTo(1.0));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java
new file mode 100644
index 0000000000..2512a519df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksTests.java
@@ -0,0 +1,455 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
+import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanks;
+import org.elasticsearch.search.aggregations.metrics.percentiles.PercentileRanksBuilder;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.percentileRanks;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+public class PercentileRanksTests extends AbstractNumericTests {
+
+ private static double[] randomPercents(long minValue, long maxValue) {
+
+ final int length = randomIntBetween(1, 20);
+ final double[] percents = new double[length];
+ for (int i = 0; i < percents.length; ++i) {
+ switch (randomInt(20)) {
+ case 0:
+ percents[i] = minValue;
+ break;
+ case 1:
+ percents[i] = maxValue;
+ break;
+ default:
+ percents[i] = (randomDouble() * (maxValue - minValue)) + minValue;
+ break;
+ }
+ }
+ Arrays.sort(percents);
+ Loggers.getLogger(PercentileRanksTests.class).info("Using percentiles={}", Arrays.toString(percents));
+ return percents;
+ }
+
+ private static PercentileRanksBuilder randomCompression(PercentileRanksBuilder builder) {
+ if (randomBoolean()) {
+ builder.compression(randomIntBetween(20, 120) + randomDouble());
+ }
+ return builder;
+ }
+
+ private void assertConsistent(double[] pcts, PercentileRanks percentiles, long minValue, long maxValue) {
+ final List<Percentile> percentileList = Lists.newArrayList(percentiles);
+ assertEquals(pcts.length, percentileList.size());
+ for (int i = 0; i < pcts.length; ++i) {
+ final Percentile percentile = percentileList.get(i);
+ assertThat(percentile.getValue(), equalTo(pcts[i]));
+ assertThat(percentile.getPercent(), greaterThanOrEqualTo(0.0));
+ assertThat(percentile.getPercent(), lessThanOrEqualTo(100.0));
+
+ if (percentile.getPercent() == 0) {
+ assertThat(percentile.getValue(), lessThanOrEqualTo((double) minValue));
+ }
+ if (percentile.getPercent() == 100) {
+ assertThat(percentile.getValue(), greaterThanOrEqualTo((double) maxValue));
+ }
+ }
+
+ for (int i = 1; i < percentileList.size(); ++i) {
+ assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue()));
+ }
+ }
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .percentiles(10, 15)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ PercentileRanks reversePercentiles = bucket.getAggregations().get("percentile_ranks");
+ assertThat(reversePercentiles, notNullValue());
+ assertThat(reversePercentiles.getName(), equalTo("percentile_ranks"));
+ assertThat(reversePercentiles.percent(10), equalTo(Double.NaN));
+ assertThat(reversePercentiles.percent(15), equalTo(Double.NaN));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value")
+ .percentiles(0, 10, 15, 100))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ PercentileRanks reversePercentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertThat(reversePercentiles, notNullValue());
+ assertThat(reversePercentiles.getName(), equalTo("percentile_ranks"));
+ assertThat(reversePercentiles.percent(0), equalTo(Double.NaN));
+ assertThat(reversePercentiles.percent(10), equalTo(Double.NaN));
+ assertThat(reversePercentiles.percent(15), equalTo(Double.NaN));
+ assertThat(reversePercentiles.percent(100), equalTo(Double.NaN));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ final double[] pcts = randomPercents(minValue, maxValue);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+ final double[] pcts = randomPercents(minValue, maxValue);
+ SearchResponse searchResponse = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ global("global").subAggregation(
+ randomCompression(percentileRanks("percentile_ranks")).field("value").percentiles(pcts))).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ PercentileRanks percentiles = global.getAggregations().get("percentile_ranks");
+ assertThat(percentiles, notNullValue());
+ assertThat(percentiles.getName(), equalTo("percentile_ranks"));
+ assertThat((PercentileRanks) global.getProperty("percentile_ranks"), sameInstance(percentiles));
+
+ }
+
+ @Test
+ public void testSingleValuedFieldOutsideRange() throws Exception {
+ final double[] pcts = new double[] {minValue - 1, maxValue + 1};
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ final double[] pcts = randomPercents(minValue, maxValue);
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ final double[] pcts = randomPercents(minValue - 1, maxValue - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value").script("_value - 1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ final double[] pcts = randomPercents(minValue - 1, maxValue - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("value").script("_value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ final double[] pcts = randomPercents(minValues, maxValues);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ final double[] pcts = randomPercents(minValues - 1, maxValues - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("values").script("_value - 1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_Reverse() throws Exception {
+ final double[] pcts = randomPercents(-maxValues, -minValues);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("values").script("_value * -1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, -maxValues, -minValues);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ final double[] pcts = randomPercents(minValues - 1, maxValues - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .field("values").script("_value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ final double[] pcts = randomPercents(minValue, maxValue);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("doc['value'].value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ final double[] pcts = randomPercents(minValue - 1, maxValue - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("doc['value'].value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+        final double[] pcts = randomPercents(minValue - 1, maxValue - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("doc['value'].value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ final double[] pcts = randomPercents(minValues, maxValues);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("doc['values'].values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ final double[] pcts = randomPercents(minValues, maxValues);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("doc['values'].values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ final double[] pcts = randomPercents(minValues - 1, maxValues - 1);
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentileRanks("percentile_ranks"))
+ .script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final PercentileRanks percentiles = searchResponse.getAggregations().get("percentile_ranks");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Test
+ public void testOrderBySubAggregation() {
+ boolean asc = randomBoolean();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field("value").interval(2l)
+ .subAggregation(randomCompression(percentileRanks("percentile_ranks").percentiles(99)))
+ .order(Order.aggregation("percentile_ranks", "99", asc)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ PercentileRanks percentiles = bucket.getAggregations().get("percentile_ranks");
+ double p99 = percentiles.percent(99);
+ if (asc) {
+ assertThat(p99, greaterThanOrEqualTo(previous));
+ } else {
+ assertThat(p99, lessThanOrEqualTo(previous));
+ }
+ previous = p99;
+ }
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java
new file mode 100644
index 0000000000..63141f420f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/PercentilesTests.java
@@ -0,0 +1,438 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Order;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentile;
+import org.elasticsearch.search.aggregations.metrics.percentiles.Percentiles;
+import org.elasticsearch.search.aggregations.metrics.percentiles.PercentilesBuilder;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+public class PercentilesTests extends AbstractNumericTests {
+
+ private static double[] randomPercentiles() {
+ final int length = randomIntBetween(1, 20);
+ final double[] percentiles = new double[length];
+ for (int i = 0; i < percentiles.length; ++i) {
+ switch (randomInt(20)) {
+ case 0:
+ percentiles[i] = 0;
+ break;
+ case 1:
+ percentiles[i] = 100;
+ break;
+ default:
+ percentiles[i] = randomDouble() * 100;
+ break;
+ }
+ }
+ Arrays.sort(percentiles);
+ Loggers.getLogger(PercentilesTests.class).info("Using percentiles={}", Arrays.toString(percentiles));
+ return percentiles;
+ }
+
+ private static PercentilesBuilder randomCompression(PercentilesBuilder builder) {
+ if (randomBoolean()) {
+ builder.compression(randomIntBetween(20, 120) + randomDouble());
+ }
+ return builder;
+ }
+
+ private void assertConsistent(double[] pcts, Percentiles percentiles, long minValue, long maxValue) {
+ final List<Percentile> percentileList = Lists.newArrayList(percentiles);
+ assertEquals(pcts.length, percentileList.size());
+ for (int i = 0; i < pcts.length; ++i) {
+ final Percentile percentile = percentileList.get(i);
+ assertThat(percentile.getPercent(), equalTo(pcts[i]));
+ double value = percentile.getValue();
+ assertThat(value, greaterThanOrEqualTo((double) minValue));
+ assertThat(value, lessThanOrEqualTo((double) maxValue));
+
+ if (percentile.getPercent() == 0) {
+ assertThat(value, equalTo((double) minValue));
+ }
+ if (percentile.getPercent() == 100) {
+ assertThat(value, equalTo((double) maxValue));
+ }
+ }
+
+ for (int i = 1; i < percentileList.size(); ++i) {
+ assertThat(percentileList.get(i).getValue(), greaterThanOrEqualTo(percentileList.get(i - 1).getValue()));
+ }
+ }
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(randomCompression(percentiles("percentiles"))
+ .percentiles(10, 15)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Percentiles percentiles = bucket.getAggregations().get("percentiles");
+ assertThat(percentiles, notNullValue());
+ assertThat(percentiles.getName(), equalTo("percentiles"));
+ assertThat(percentiles.percentile(10), equalTo(Double.NaN));
+ assertThat(percentiles.percentile(15), equalTo(Double.NaN));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("value")
+ .percentiles(0, 10, 15, 100))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertThat(percentiles, notNullValue());
+ assertThat(percentiles.getName(), equalTo("percentiles"));
+ assertThat(percentiles.percentile(0), equalTo(Double.NaN));
+ assertThat(percentiles.percentile(10), equalTo(Double.NaN));
+ assertThat(percentiles.percentile(15), equalTo(Double.NaN));
+ assertThat(percentiles.percentile(100), equalTo(Double.NaN));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ global("global").subAggregation(randomCompression(percentiles("percentiles")).field("value").percentiles(pcts)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Percentiles percentiles = global.getAggregations().get("percentiles");
+ assertThat(percentiles, notNullValue());
+ assertThat(percentiles.getName(), equalTo("percentiles"));
+ assertThat((Percentiles) global.getProperty("percentiles"), sameInstance(percentiles));
+
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("value").script("_value - 1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("value").script("_value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("values").script("_value - 1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Test
+ public void testMultiValuedField_WithValueScript_Reverse() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("values").script("_value * -1")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, -maxValues, -minValues);
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .field("values").script("_value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("doc['value'].value")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue, maxValue);
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("doc['value'].value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("doc['value'].value - dec").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValue - 1, maxValue - 1);
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("doc['values'].values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("doc['values'].values")
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues, maxValues);
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ final double[] pcts = randomPercentiles();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(randomCompression(percentiles("percentiles"))
+ .script("List values = doc['values'].values; double[] res = new double[values.size()]; for (int i = 0; i < res.length; i++) { res[i] = values.get(i) - dec; }; return res;").param("dec", 1)
+ .percentiles(pcts))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ final Percentiles percentiles = searchResponse.getAggregations().get("percentiles");
+ assertConsistent(pcts, percentiles, minValues - 1, maxValues - 1);
+ }
+
+ @Test
+ public void testOrderBySubAggregation() {
+ boolean asc = randomBoolean();
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field("value").interval(2l)
+ .subAggregation(randomCompression(percentiles("percentiles").percentiles(99)))
+ .order(Order.aggregation("percentiles", "99", asc)))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ double previous = asc ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
+ for (Histogram.Bucket bucket : histo.getBuckets()) {
+ Percentiles percentiles = bucket.getAggregations().get("percentiles");
+ double p99 = percentiles.percentile(99);
+ if (asc) {
+ assertThat(p99, greaterThanOrEqualTo(previous));
+ } else {
+ assertThat(p99, lessThanOrEqualTo(previous));
+ }
+ previous = p99;
+ }
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java
new file mode 100644
index 0000000000..2fced92d8e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricTests.java
@@ -0,0 +1,1412 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetric;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.scriptedMetric;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+@ClusterScope(scope = Scope.SUITE)
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class ScriptedMetricTests extends ElasticsearchIntegrationTest {
+
+ private static long numDocs;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ numDocs = randomIntBetween(10, 100);
+ for (int i = 0; i < numDocs; i++) {
+ builders.add(client().prepareIndex("idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field("value", randomAsciiOfLengthBetween(5, 15))
+ .field("l_value", i).endObject()));
+ }
+ indexRandom(true, builders);
+
+        // Create an index to exercise the empty-buckets functionality. It
+        // works by indexing two docs, {value: 0} and {value: 2}, and then
+        // building a histogram aggregation with interval 1 and with empty
+        // buckets computed; the empty bucket is the one associated with
+        // key "1". Each test then has to check that this bucket exists
+        // with the appropriate sub-aggregations. The docs are indexed
+        // further below, together with the indexed scripts, via
+        // indexRandom.
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field("value", i * 2).endObject()));
+ }
+
+ PutIndexedScriptResponse indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "initScript_indexed", "{\"script\":\"vars.multiplier = 3\"}").get();
+ assertThat(indexScriptResponse.isCreated(), equalTo(true));
+ indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "mapScript_indexed", "{\"script\":\"_agg.add(vars.multiplier)\"}").get();
+ assertThat(indexScriptResponse.isCreated(), equalTo(true));
+ indexScriptResponse = client().preparePutIndexedScript(GroovyScriptEngineService.NAME, "combineScript_indexed",
+ "{\"script\":\"newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation\"}")
+ .get();
+ assertThat(indexScriptResponse.isCreated(), equalTo(true));
+ indexScriptResponse = client().preparePutIndexedScript(
+ "groovy",
+ "reduceScript_indexed",
+ "{\"script\":\"newaggregation = []; sum = 0;for (agg in _aggs) { for (a in agg) { sum += a} }; newaggregation.add(sum); return newaggregation\"}")
+ .get();
+ assertThat(indexScriptResponse.isCreated(), equalTo(true));
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ Settings settings = Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("path.conf", getDataPath("/org/elasticsearch/search/aggregations/metrics/scripted/conf"))
+ .build();
+ return settings;
+ }
+
+ @Test
+ public void testMap() {
+ SearchResponse response = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(scriptedMetric("scripted").mapScript(new Script("_agg['count'] = 1"))).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ int numShardsRun = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Map.class));
+ Map<String, Object> map = (Map<String, Object>) object;
+ assertThat(map.size(), lessThanOrEqualTo(1));
+ if (map.size() == 1) {
+ assertThat(map.get("count"), notNullValue());
+ assertThat(map.get("count"), instanceOf(Number.class));
+ assertThat((Number) map.get("count"), equalTo((Number) 1));
+ numShardsRun++;
+ }
+ }
+ // We don't know how many shards will have documents but we need to make
+ // sure that at least one shard ran the map script
+ assertThat(numShardsRun, greaterThan(0));
+ }
+
+ @Test
+ public void testMap_withParams() {
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+
+ SearchResponse response = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(scriptedMetric("scripted").params(params).mapScript(new Script("_agg.add(1)"))).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ assertThat(numberValue, equalTo((Number) 1));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs));
+ }
+
+ @Test
+ public void testInitMap_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params).initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ assertThat(numberValue, equalTo((Number) 3));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs * 3));
+ }
+
+ @Test
+ public void testMapCombine_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript(new Script("_agg.add(1)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ // A particular shard may not have any documents stored on it so
+ // we have to assume the lower bound may be 0. The check at the
+ // bottom of the test method will make sure the count is correct
+ assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0l), lessThanOrEqualTo(numDocs)));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs));
+ }
+
+ @Test
+ public void testInitMapCombine_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ // A particular shard may not have any documents stored on it so
+ // we have to assume the lower bound may be 0. The check at the
+ // bottom of the test method will make sure the count is correct
+ assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0l), lessThanOrEqualTo(numDocs * 3)));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs * 3));
+ }
+
+ // Full pipeline (init + map + combine + reduce) with user params: init sets
+ // vars.multiplier to 3, map emits it per doc, combine sums per shard, reduce
+ // sums across shards — so the single reduced value must equal numDocs * 3.
+ @Test
+ public void testInitMapCombineReduce_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ // After the reduce phase the result collapses to a single-element list.
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ // Same init/map/combine/reduce pipeline nested under a "global" aggregation;
+ // additionally verifies property-path access: global.getProperty("scripted"),
+ // "scripted.value", and getProperty("value") must all return the same instances.
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ @Test
+ public void testInitMapCombineReduce_getProperty() throws Exception {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+ SearchResponse searchResponse = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ global("global")
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocs));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(numDocs));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted");
+ assertThat(scriptedMetricAggregation, notNullValue());
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ // Property-path lookups must resolve to the very same objects (sameInstance),
+ // not equal copies.
+ assertThat((ScriptedMetric) global.getProperty("scripted"), sameInstance(scriptedMetricAggregation));
+ assertThat((List) global.getProperty("scripted.value"), sameInstance((List) aggregationList));
+ assertThat((List) scriptedMetricAggregation.getProperty("value"), sameInstance((List) aggregationList));
+
+ }
+
+ // Pipeline without an init script: vars.multiplier keeps its params value of 1,
+ // so the reduced total must equal numDocs (one per document).
+ @Test
+ public void testMapCombineReduce_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs));
+ }
+
+ // Pipeline without a combine script: the reduce script iterates the raw
+ // per-shard _agg lists directly and still produces numDocs * 3.
+ @Test
+ public void testInitMapReduce_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ // Minimal pipeline: map + reduce only (no init, no combine). With the
+ // multiplier left at 1, the reduced total equals numDocs.
+ @Test
+ public void testMapReduce_withParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs));
+ }
+
+ // Like the full pipeline, but the reduce Script carries its own params map
+ // (multiplier = 4), applied via "sum * multiplier" in the reduce phase:
+ // expected total is numDocs * 3 * 4 = numDocs * 12.
+ @Test
+ public void testInitMapCombineReduce_withParamsAndReduceParams() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+ Map<String, Object> reduceParams = new HashMap<>();
+ reduceParams.put("multiplier", 4);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum * multiplier); return newaggregation",
+ ScriptType.INLINE, null, reduceParams)))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 12));
+ }
+
+ // Full pipeline where all four scripts are referenced by id via
+ // ScriptType.INDEXED (stored scripts, presumably registered in test setup —
+ // not visible here). Expected result matches the inline variant: numDocs * 3.
+ @Test
+ public void testInitMapCombineReduce_withParams_Indexed() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params)
+ .initScript(new Script("initScript_indexed", ScriptType.INDEXED, null, null))
+ .mapScript(new Script("mapScript_indexed", ScriptType.INDEXED, null, null))
+ .combineScript(new Script("combineScript_indexed", ScriptType.INDEXED, null, null))
+ .reduceScript(new Script("reduceScript_indexed", ScriptType.INDEXED, null, null))).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ // Full pipeline where all four scripts are loaded by name via ScriptType.FILE
+ // (on-disk scripts, presumably provisioned by the test config — not visible
+ // here). Expected result matches the inline variant: numDocs * 3.
+ @Test
+ public void testInitMapCombineReduce_withParams_File() {
+
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params).initScript(new Script("init_script", ScriptType.FILE, null, null))
+ .mapScript(new Script("map_script", ScriptType.FILE, null, null))
+ .combineScript(new Script("combine_script", ScriptType.FILE, null, null))
+ .reduceScript(new Script("reduce_script", ScriptType.FILE, null, null))).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ // Full pipeline nested as a sub-aggregation of a histogram on "l_value":
+ // every histogram bucket is expected to hold exactly one doc, so each
+ // bucket's scripted metric reduces to the single value 3.
+ @Test
+ public void testInitMapCombineReduce_withParams_asSubAgg() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery()).setSize(1000)
+ .addAggregation(
+ histogram("histo")
+ .field("l_value")
+ .interval(1)
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+ Aggregation aggregation = response.getAggregations().get("histo");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(Histogram.class));
+ Histogram histoAgg = (Histogram) aggregation;
+ assertThat(histoAgg.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histoAgg.getBuckets();
+ assertThat(buckets, notNullValue());
+ // Each bucket is validated independently: one doc, one sub-agg, value 3.
+ for (Bucket b : buckets) {
+ assertThat(b, notNullValue());
+ assertThat(b.getDocCount(), equalTo(1l));
+ Aggregations subAggs = b.getAggregations();
+ assertThat(subAggs, notNullValue());
+ assertThat(subAggs.asList().size(), equalTo(1));
+ Aggregation subAgg = subAggs.get("scripted");
+ assertThat(subAgg, notNullValue());
+ assertThat(subAgg, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(3l));
+ }
+ }
+
+ // Runs the scripted metric under a minDocCount(0) histogram on the
+ // "empty_bucket_idx" index and checks the empty bucket case: the scripted
+ // metric exists in the empty bucket but its aggregation() value is null.
+ @Test
+ public void testEmptyAggregation() throws Exception {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0)
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript(new Script("vars.multiplier = 3"))
+ .mapScript(new Script("_agg.add(vars.multiplier)"))
+ .combineScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .reduceScript(
+ new Script(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ // Bucket at index 1 is the empty (zero-doc) bucket under inspection.
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted");
+ assertThat(scriptedMetric, notNullValue());
+ assertThat(scriptedMetric.getName(), equalTo("scripted"));
+ assertThat(scriptedMetric.aggregation(), nullValue());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based script API: map-only pipeline writing a per-shard
+ // Map ("count" -> 1). With no reduce, one result per primary shard is
+ // returned; at least one shard must have executed the map script.
+ @Test
+ public void testMapOldScriptAPI() {
+ SearchResponse response = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(scriptedMetric("scripted").mapScript("_agg['count'] = 1")).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ int numShardsRun = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Map.class));
+ // A shard with no documents yields an empty map; a populated one has count=1.
+ Map<String, Object> map = (Map<String, Object>) object;
+ assertThat(map.size(), lessThanOrEqualTo(1));
+ if (map.size() == 1) {
+ assertThat(map.get("count"), notNullValue());
+ assertThat(map.get("count"), instanceOf(Number.class));
+ assertThat((Number) map.get("count"), equalTo((Number) 1));
+ numShardsRun++;
+ }
+ }
+ // We don't know how many shards will have documents but we need to make
+ // sure that at least one shard ran the map script
+ assertThat(numShardsRun, greaterThan(0));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: map-only with a shared "_agg" list param.
+ // Each shard contributes a list of 1s; the grand total must equal numDocs.
+ @Test
+ public void testMap_withParamsOldScriptAPI() {
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+
+ SearchResponse response = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(scriptedMetric("scripted").params(params).mapScript("_agg.add(1)")).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ // Without a reduce script there is one entry per primary shard.
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ assertThat(numberValue, equalTo((Number) 1));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: init + map. Init raises vars.multiplier to 3,
+ // so every per-doc entry is 3 and the grand total is numDocs * 3.
+ @Test
+ public void testInitMap_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params).initScript("vars.multiplier = 3").mapScript("_agg.add(vars.multiplier)"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ // One per-shard list per primary shard (no combine/reduce).
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ assertThat(numberValue, equalTo((Number) 3));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: map + combine. Each shard combines its 1s
+ // into a single per-shard sum; totals across shards must equal numDocs.
+ @Test
+ public void testMapCombine_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript("_agg.add(1)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ // A particular shard may not have any documents stored on it so
+ // we have to assume the lower bound may be 0. The check at the
+ // bottom of the test method will make sure the count is correct
+ assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0l), lessThanOrEqualTo(numDocs)));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: init + map + combine. Per-shard combined
+ // sums of 3s; totals across shards must equal numDocs * 3.
+ @Test
+ public void testInitMapCombine_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(getNumShards("idx").numPrimaries));
+ long totalCount = 0;
+ for (Object object : aggregationList) {
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(List.class));
+ List<?> list = (List<?>) object;
+ for (Object o : list) {
+ assertThat(o, notNullValue());
+ assertThat(o, instanceOf(Number.class));
+ Number numberValue = (Number) o;
+ // A particular shard may not have any documents stored on it so
+ // we have to assume the lower bound may be 0. The check at the
+ // bottom of the test method will make sure the count is correct
+ assertThat(numberValue.longValue(), allOf(greaterThanOrEqualTo(0l), lessThanOrEqualTo(numDocs * 3)));
+ totalCount += numberValue.longValue();
+ }
+ }
+ assertThat(totalCount, equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: full init/map/combine/reduce pipeline.
+ // Mirrors testInitMapCombineReduce_withParams; expects a single reduced
+ // value of numDocs * 3.
+ @Test
+ public void testInitMapCombineReduce_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API twin of testInitMapCombineReduce_getProperty:
+ // full pipeline under a "global" agg, plus sameInstance checks on the
+ // getProperty path ("scripted", "scripted.value", "value").
+ @SuppressWarnings({ "unchecked", "rawtypes" })
+ @Test
+ public void testInitMapCombineReduce_getPropertyOldScriptAPI() throws Exception {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+ SearchResponse searchResponse = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ global("global")
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocs));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(numDocs));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ ScriptedMetric scriptedMetricAggregation = global.getAggregations().get("scripted");
+ assertThat(scriptedMetricAggregation, notNullValue());
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ // Property-path lookups must resolve to the identical objects.
+ assertThat((ScriptedMetric) global.getProperty("scripted"), sameInstance(scriptedMetricAggregation));
+ assertThat((List) global.getProperty("scripted.value"), sameInstance((List) aggregationList));
+ assertThat((List) scriptedMetricAggregation.getProperty("value"), sameInstance((List) aggregationList));
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Deprecated string-based API: map + combine + reduce with no init script,
+ // so vars.multiplier stays at its params value of 1 and the reduced total
+ // equals numDocs.
+ @Test
+ public void testMapCombineReduce_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testInitMapReduce_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testMapReduce_withParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .mapScript("_agg.add(vars.multiplier)")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testInitMapCombineReduce_withParamsAndReduceParamsOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+ Map<String, Object> reduceParams = new HashMap<>();
+ reduceParams.put("multiplier", 4);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .reduceParams(reduceParams)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum * multiplier); return newaggregation"))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 12));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testInitMapCombineReduce_withParams_IndexedOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params).initScriptId("initScript_indexed").mapScriptId("mapScript_indexed")
+ .combineScriptId("combineScript_indexed").reduceScriptId("reduceScript_indexed")).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testInitMapCombineReduce_withParams_FileOldScriptAPI() {
+
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ scriptedMetric("scripted").params(params).initScriptFile("init_script").mapScriptFile("map_script")
+ .combineScriptFile("combine_script").reduceScriptFile("reduce_script")).execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+
+ Aggregation aggregation = response.getAggregations().get("scripted");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) aggregation;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(numDocs * 3));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testInitMapCombineReduce_withParams_asSubAggOldScriptAPI() {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .setSize(1000)
+ .addAggregation(
+ histogram("histo")
+ .field("l_value")
+ .interval(1)
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
+ Aggregation aggregation = response.getAggregations().get("histo");
+ assertThat(aggregation, notNullValue());
+ assertThat(aggregation, instanceOf(Histogram.class));
+ Histogram histoAgg = (Histogram) aggregation;
+ assertThat(histoAgg.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histoAgg.getBuckets();
+ assertThat(buckets, notNullValue());
+ for (Bucket b : buckets) {
+ assertThat(b, notNullValue());
+ assertThat(b.getDocCount(), equalTo(1l));
+ Aggregations subAggs = b.getAggregations();
+ assertThat(subAggs, notNullValue());
+ assertThat(subAggs.asList().size(), equalTo(1));
+ Aggregation subAgg = subAggs.get("scripted");
+ assertThat(subAgg, notNullValue());
+ assertThat(subAgg, instanceOf(ScriptedMetric.class));
+ ScriptedMetric scriptedMetricAggregation = (ScriptedMetric) subAgg;
+ assertThat(scriptedMetricAggregation.getName(), equalTo("scripted"));
+ assertThat(scriptedMetricAggregation.aggregation(), notNullValue());
+ assertThat(scriptedMetricAggregation.aggregation(), instanceOf(ArrayList.class));
+ List<?> aggregationList = (List<?>) scriptedMetricAggregation.aggregation();
+ assertThat(aggregationList.size(), equalTo(1));
+ Object object = aggregationList.get(0);
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Number.class));
+ assertThat(((Number) object).longValue(), equalTo(3l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testEmptyAggregationOldScriptAPI() throws Exception {
+ Map<String, Object> varsMap = new HashMap<>();
+ varsMap.put("multiplier", 1);
+ Map<String, Object> params = new HashMap<>();
+ params.put("_agg", new ArrayList<>());
+ params.put("vars", varsMap);
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo")
+ .field("value")
+ .interval(1l)
+ .minDocCount(0)
+ .subAggregation(
+ scriptedMetric("scripted")
+ .params(params)
+ .initScript("vars.multiplier = 3")
+ .mapScript("_agg.add(vars.multiplier)")
+ .combineScript(
+ "newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation")
+ .reduceScript(
+ "newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted");
+ assertThat(scriptedMetric, notNullValue());
+ assertThat(scriptedMetric.getName(), equalTo("scripted"));
+ assertThat(scriptedMetric.aggregation(), nullValue());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
new file mode 100644
index 0000000000..5e81a80633
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsTests.java
@@ -0,0 +1,449 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.sameInstance;
+
+/**
+ *
+ */
+public class StatsTests extends AbstractNumericTests {
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(stats("stats")))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getCount(), equalTo(0l));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(Double.isNaN(stats.getAvg()), is(true));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo(Double.NaN));
+ assertThat(stats.getMin(), equalTo(Double.POSITIVE_INFINITY));
+ assertThat(stats.getMax(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(stats.getSum(), equalTo(0.0));
+ assertThat(stats.getCount(), equalTo(0l));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ public void testSingleValuedField_WithFormatter() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(stats("stats").format("0000.0").field("value")).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10));
+ assertThat(stats.getAvgAsString(), equalTo("0005.5"));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMinAsString(), equalTo("0001.0"));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getMaxAsString(), equalTo("0010.0"));
+ assertThat(stats.getSum(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10));
+ assertThat(stats.getSumAsString(), equalTo("0055.0"));
+ assertThat(stats.getCount(), equalTo(10l));
+ assertThat(stats.getCountAsString(), equalTo("0010.0"));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(stats("stats").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Stats stats = global.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ Stats statsFromProperty = (Stats) global.getProperty("stats");
+ assertThat(statsFromProperty, notNullValue());
+ assertThat(statsFromProperty, sameInstance(stats));
+ double expectedAvgValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10) / 10;
+ assertThat(stats.getAvg(), equalTo(expectedAvgValue));
+ assertThat((double) global.getProperty("stats.avg"), equalTo(expectedAvgValue));
+ double expectedMinValue = 1.0;
+ assertThat(stats.getMin(), equalTo(expectedMinValue));
+ assertThat((double) global.getProperty("stats.min"), equalTo(expectedMinValue));
+ double expectedMaxValue = 10.0;
+ assertThat(stats.getMax(), equalTo(expectedMaxValue));
+ assertThat((double) global.getProperty("stats.max"), equalTo(expectedMaxValue));
+ double expectedSumValue = (double) (1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10);
+ assertThat(stats.getSum(), equalTo(expectedSumValue));
+ assertThat((double) global.getProperty("stats.sum"), equalTo(expectedSumValue));
+ long expectedCountValue = 10;
+ assertThat(stats.getCount(), equalTo(expectedCountValue));
+ assertThat((double) global.getProperty("stats.count"), equalTo((double) expectedCountValue));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("value").script("_value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - 1"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").field("values").script("_value - dec").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11) / 20));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10) / 10));
+ assertThat(stats.getMin(), equalTo(1.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11) / 10));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(11.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ assertThat(stats.getCount(), equalTo(10l));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12) / 20));
+ assertThat(stats.getMin(), equalTo(2.0));
+ assertThat(stats.getMax(), equalTo(12.0));
+ assertThat(stats.getSum(), equalTo((double) 2+3+4+5+6+7+8+9+10+11+3+4+5+6+7+8+9+10+11+12));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(stats("stats").script("[ doc['value'].value, doc['value'].value - dec ]").param("dec", 1))
+ .execute().actionGet();
+
+ assertShardExecutionState(searchResponse, 0);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Stats stats = searchResponse.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ assertThat(stats.getName(), equalTo("stats"));
+ assertThat(stats.getAvg(), equalTo((double) (1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9) / 20));
+ assertThat(stats.getMin(), equalTo(0.0));
+ assertThat(stats.getMax(), equalTo(10.0));
+ assertThat(stats.getSum(), equalTo((double) 1+2+3+4+5+6+7+8+9+10+0+1+2+3+4+5+6+7+8+9));
+ assertThat(stats.getCount(), equalTo(20l));
+ }
+
+
+ private void assertShardExecutionState(SearchResponse response, int expectedFailures) throws Exception {
+ ShardSearchFailure[] failures = response.getShardFailures();
+ if (failures.length != expectedFailures) {
+ for (ShardSearchFailure failure : failures) {
+ logger.error("Shard Failure: {}", failure.reason(), failure.toString());
+ }
+ fail("Unexpected shard failures!");
+ }
+ assertThat("Not all shards are initialized", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
new file mode 100644
index 0000000000..89060a70cc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumTests.java
@@ -0,0 +1,326 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class SumTests extends AbstractNumericTests {
+
+ @Override
+ @Test
+ public void testEmptyAggregation() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(histogram("histo").field("value").interval(1l).minDocCount(0).subAggregation(sum("sum")))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ Histogram histo = searchResponse.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ Histogram.Bucket bucket = histo.getBuckets().get(1);
+ assertThat(bucket, notNullValue());
+
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo(0.0));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Test
+ public void testSingleValuedField_WithFormatter() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(sum("sum").format("0000.0").field("value")).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10));
+ assertThat(sum.getValueAsString(), equalTo("0055.0"));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(sum("sum").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ Sum sum = global.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ double expectedSumValue = (double) 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10;
+ assertThat(sum.getValue(), equalTo(expectedSumValue));
+ assertThat((Sum) global.getProperty("sum"), equalTo(sum));
+ assertThat((double) global.getProperty("sum.value"), equalTo(expectedSumValue));
+ assertThat((double) sum.getProperty("value"), equalTo(expectedSumValue));
+ }
+
+ @Override
+ public void testSingleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Override
+ @Test
+ public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("value").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+3+4+5+6+7+8+9+10));
+ }
+
+ @Override
+ @Test
+ public void testScript_SingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitSingleValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("doc['value'].value + inc").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+4+5+6+7+8+9+10+11));
+ }
+
+
+ @Override
+ @Test
+ public void testScript_MultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + 1 ]"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Override
+ @Test
+ public void testScript_ExplicitMultiValued() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + 1 ]"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Override
+ @Test
+ public void testScript_MultiValued_WithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").script("[ doc['value'].value, doc['value'].value + inc ]").param("inc", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 1+2+2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 2+3+3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + 1"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+
+ @Override
+ @Test
+ public void testMultiValuedField_WithValueScript_WithParams() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(sum("sum").field("values").script("_value + increment").param("increment", 1))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Sum sum = searchResponse.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getName(), equalTo("sum"));
+ assertThat(sum.getValue(), equalTo((double) 3+4+4+5+5+6+6+7+7+8+8+9+9+10+10+11+11+12+12+13));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
new file mode 100644
index 0000000000..acbd5b7459
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountTests.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCount;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.count;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.global;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class ValueCountTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject())
+ .execute().actionGet();
+ }
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ ensureSearchable();
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(0l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(0l));
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void singleValuedField_getProperty() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx").setQuery(matchAllQuery())
+ .addAggregation(global("global").subAggregation(count("count").field("value"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ assertThat(global, notNullValue());
+ assertThat(global.getName(), equalTo("global"));
+ assertThat(global.getDocCount(), equalTo(10l));
+ assertThat(global.getAggregations(), notNullValue());
+ assertThat(global.getAggregations().asMap().size(), equalTo(1));
+
+ ValueCount valueCount = global.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ assertThat((ValueCount) global.getProperty("count"), equalTo(valueCount));
+ assertThat((double) global.getProperty("count.value"), equalTo(10d));
+ assertThat((double) valueCount.getProperty("value"), equalTo(10d));
+ }
+
+ @Test
+ public void singleValuedField_PartiallyUnmapped() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx", "idx_unmapped")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").field("values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(20l));
+ }
+
+ @Test
+ public void singleValuedScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").script("doc['value'].value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void multiValuedScript() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").script("doc['values'].values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(20l));
+ }
+
+ @Test
+ public void singleValuedScriptWithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").script("doc[s].value").param("s", "value"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(10l));
+ }
+
+ @Test
+ public void multiValuedScriptWithParams() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(count("count").script("doc[s].values").param("s", "values"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+
+ ValueCount valueCount = searchResponse.getAggregations().get("count");
+ assertThat(valueCount, notNullValue());
+ assertThat(valueCount.getName(), equalTo("count"));
+ assertThat(valueCount.getValue(), equalTo(20l));
+ }
+
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java
new file mode 100644
index 0000000000..91ad947ddb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlusTests.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.metrics.cardinality;
+
+import com.carrotsearch.hppc.BitMixer;
+import com.carrotsearch.hppc.IntHashSet;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus.MAX_PRECISION;
+import static org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus.MIN_PRECISION;
+import static org.hamcrest.Matchers.closeTo;
+
+public class HyperLogLogPlusPlusTests extends ElasticsearchTestCase {
+
+ @Test
+ public void encodeDecode() {
+ final int iters = scaledRandomIntBetween(100000, 500000);
+ // random hashes
+ for (int i = 0; i < iters; ++i) {
+ final int p1 = randomIntBetween(4, 24);
+ final long hash = randomLong();
+ testEncodeDecode(p1, hash);
+ }
+ // special cases
+ for (int p1 = MIN_PRECISION; p1 <= MAX_PRECISION; ++p1) {
+ testEncodeDecode(p1, 0);
+ testEncodeDecode(p1, 1);
+ testEncodeDecode(p1, ~0L);
+ }
+ }
+
+ private void testEncodeDecode(int p1, long hash) {
+ final long index = HyperLogLogPlusPlus.index(hash, p1);
+ final int runLen = HyperLogLogPlusPlus.runLen(hash, p1);
+ final int encoded = HyperLogLogPlusPlus.encodeHash(hash, p1);
+ assertEquals(index, HyperLogLogPlusPlus.decodeIndex(encoded, p1));
+ assertEquals(runLen, HyperLogLogPlusPlus.decodeRunLen(encoded, p1));
+ }
+
+ @Test
+ public void accuracy() {
+ final long bucket = randomInt(20);
+ final int numValues = randomIntBetween(1, 100000);
+ final int maxValue = randomIntBetween(1, randomBoolean() ? 1000: 100000);
+ final int p = randomIntBetween(14, MAX_PRECISION);
+ IntHashSet set = new IntHashSet();
+ HyperLogLogPlusPlus e = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 1);
+ for (int i = 0; i < numValues; ++i) {
+ final int n = randomInt(maxValue);
+ set.add(n);
+ final long hash = BitMixer.mix64(n);
+ e.collect(bucket, hash);
+ if (randomInt(100) == 0) {
+ //System.out.println(e.cardinality(bucket) + " <> " + set.size());
+ assertThat((double) e.cardinality(bucket), closeTo(set.size(), 0.1 * set.size()));
+ }
+ }
+ assertThat((double) e.cardinality(bucket), closeTo(set.size(), 0.1 * set.size()));
+ }
+
+ @Test
+ public void merge() {
+ final int p = randomIntBetween(MIN_PRECISION, MAX_PRECISION);
+ final HyperLogLogPlusPlus single = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 0);
+ final HyperLogLogPlusPlus[] multi = new HyperLogLogPlusPlus[randomIntBetween(2, 100)];
+ final long[] bucketOrds = new long[multi.length];
+ for (int i = 0; i < multi.length; ++i) {
+ bucketOrds[i] = randomInt(20);
+ multi[i] = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 5);
+ }
+ final int numValues = randomIntBetween(1, 100000);
+ final int maxValue = randomIntBetween(1, randomBoolean() ? 1000: 1000000);
+ for (int i = 0; i < numValues; ++i) {
+ final int n = randomInt(maxValue);
+ final long hash = BitMixer.mix64(n);
+ single.collect(0, hash);
+ // use a gaussian so that all instances don't collect as many hashes
+ final int index = (int) (Math.pow(randomDouble(), 2));
+ multi[index].collect(bucketOrds[index], hash);
+ if (randomInt(100) == 0) {
+ HyperLogLogPlusPlus merged = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 0);
+ for (int j = 0; j < multi.length; ++j) {
+ merged.merge(0, multi[j], bucketOrds[j]);
+ }
+ assertEquals(single.cardinality(0), merged.cardinality(0));
+ }
+ }
+ }
+
+ @Test
+ public void fakeHashes() {
+ // hashes with lots of leading zeros trigger different paths in the code that we try to go through here
+ final int p = randomIntBetween(MIN_PRECISION, MAX_PRECISION);
+ final HyperLogLogPlusPlus counts = new HyperLogLogPlusPlus(p, BigArrays.NON_RECYCLING_INSTANCE, 0);
+
+ counts.collect(0, 0);
+ assertEquals(1, counts.cardinality(0));
+ if (randomBoolean()) {
+ counts.collect(0, 1);
+ assertEquals(2, counts.cardinality(0));
+ }
+ counts.upgradeToHll(0);
+ // all hashes felt into the same bucket so hll would expect a count of 1
+ assertEquals(1, counts.cardinality(0));
+ }
+
+ @Test
+ public void precisionFromThreshold() {
+ assertEquals(4, HyperLogLogPlusPlus.precisionFromThreshold(0));
+ assertEquals(6, HyperLogLogPlusPlus.precisionFromThreshold(10));
+ assertEquals(10, HyperLogLogPlusPlus.precisionFromThreshold(100));
+ assertEquals(13, HyperLogLogPlusPlus.precisionFromThreshold(1000));
+ assertEquals(16, HyperLogLogPlusPlus.precisionFromThreshold(10000));
+ assertEquals(18, HyperLogLogPlusPlus.precisionFromThreshold(100000));
+ assertEquals(18, HyperLogLogPlusPlus.precisionFromThreshold(1000000));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
new file mode 100644
index 0000000000..01d8741b8d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Aggregations module
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/package-info.java b/core/src/test/java/org/elasticsearch/search/aggregations/package-info.java
new file mode 100644
index 0000000000..b8e919f070
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/**
+ * Aggregations module
+ */
+package org.elasticsearch.search.aggregations;
+
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java
new file mode 100644
index 0000000000..abf8629ae2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketTests.java
@@ -0,0 +1,401 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.avgBucket;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class AvgBucketTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+
+ static int numDocs;
+ static int interval;
+ static int minRandomValue;
+ static int maxRandomValue;
+ static int numValueBuckets;
+ static long[] valueCounts;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(6, 20);
+ interval = randomIntBetween(2, 5);
+
+ minRandomValue = 0;
+ maxRandomValue = 20;
+
+ numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1;
+ valueCounts = new long[numValueBuckets];
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ for (int i = 0; i < numDocs; i++) {
+ int fieldValue = randomIntBetween(minRandomValue, maxRandomValue);
+ builders.add(client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval))
+ .endObject()));
+ final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1);
+ valueCounts[bucket]++;
+ }
+
+ assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void testDocCount_topLevel() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .addAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>_count")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ double sum = 0;
+ int count = 0;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ count++;
+ sum += bucket.getDocCount();
+ }
+
+ double avgValue = count == 0 ? Double.NaN : (sum / count);
+ InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgValue));
+ }
+
+ @Test
+ public void testDocCount_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ double sum = 0;
+ int count = 0;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ count++;
+ sum += bucket.getDocCount();
+ }
+
+ double avgValue = count == 0 ? Double.NaN : (sum / count);
+ InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgValue));
+ }
+ }
+
+ @Test
+ public void testMetric_topLevel() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .addAggregation(avgBucket("avg_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(interval));
+
+ double bucketSum = 0;
+ int count = 0;
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval)));
+ assertThat(bucket.getDocCount(), greaterThan(0l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ count++;
+ bucketSum += sum.value();
+ }
+
+ double avgValue = count == 0 ? Double.NaN : (bucketSum / count);
+ InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgValue));
+ }
+
+ @Test
+ public void testMetric_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ double bucketSum = 0;
+ int count = 0;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() != 0) {
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ count++;
+ bucketSum += sum.value();
+ }
+ }
+
+ double avgValue = count == 0 ? Double.NaN : (bucketSum / count);
+ InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgValue));
+ }
+ }
+
+ @Test
+ public void testMetric_asSubAggWithInsertZeros() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(avgBucket("avg_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ double bucketSum = 0;
+ int count = 0;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+
+ count++;
+ bucketSum += sum.value();
+ }
+
+ double avgValue = count == 0 ? Double.NaN : (bucketSum / count);
+ InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgValue));
+ }
+ }
+
+ @Test
+ public void testNoBuckets() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .addAggregation(avgBucket("avg_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(0));
+
+ InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(Double.NaN));
+ }
+
+ @Test
+ public void testNested() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .subAggregation(avgBucket("avg_histo_bucket").setBucketsPaths("histo>_count")))
+ .addAggregation(avgBucket("avg_terms_bucket").setBucketsPaths("terms>avg_histo_bucket")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ double aggTermsSum = 0;
+ int aggTermsCount = 0;
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ double aggHistoSum = 0;
+ int aggHistoCount = 0;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+
+ aggHistoCount++;
+ aggHistoSum += bucket.getDocCount();
+ }
+
+ double avgHistoValue = aggHistoCount == 0 ? Double.NaN : (aggHistoSum / aggHistoCount);
+ InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_histo_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_histo_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgHistoValue));
+
+ aggTermsCount++;
+ aggTermsSum += avgHistoValue;
+ }
+
+ double avgTermsValue = aggTermsCount == 0 ? Double.NaN : (aggTermsSum / aggTermsCount);
+ InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_terms_bucket");
+ assertThat(avgBucketValue, notNullValue());
+ assertThat(avgBucketValue.getName(), equalTo("avg_terms_bucket"));
+ assertThat(avgBucketValue.value(), equalTo(avgTermsValue));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeTests.java
new file mode 100644
index 0000000000..1bba222387
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeTests.java
@@ -0,0 +1,386 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.SimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative;
+import org.elasticsearch.search.aggregations.support.AggregationPath;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.DateTimeFormat;
+import org.junit.After;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class DateDerivativeTests extends ElasticsearchIntegrationTest {
+
+ private DateTime date(int month, int day) {
+ return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC);
+ }
+
+ private DateTime date(String date) {
+ return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date);
+ }
+
+ private static String format(DateTime date, String pattern) {
+ return DateTimeFormat.forPattern(pattern).print(date);
+ }
+
+ private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception {
+ return client().prepareIndex(idx, "type").setSource(
+ jsonBuilder().startObject().field("date", date).field("value", value).startArray("dates").value(date)
+ .value(date.plusMonths(1).plusDays(1)).endArray().endObject());
+ }
+
+ private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
+ return client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject().field("value", value).field("date", date(month, day)).startArray("dates")
+ .value(date(month, day)).value(date(month + 1, day + 1)).endArray().endObject());
+ }
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ // TODO: would be nice to have more random data here
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field("value", i * 2).endObject()));
+ }
+ builders.addAll(Arrays.asList(indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3
+ indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3
+ indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16
+ indexDoc(3, 2, 4), // date: Mar 2, dates: Mar 2, Apr 3
+ indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16
+ indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @After
+ public void afterEachTest() throws IOException {
+ internalCluster().wipeIndices("idx2");
+ }
+
+ @Test
+ public void singleValuedField() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, nullValue());
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(1d));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(1d));
+ }
+
+ @Test
+ public void singleValuedField_normalised() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count").unit(DateHistogramInterval.DAY))).execute()
+ .actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Derivative docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, nullValue());
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), closeTo(1d, 0.00001));
+ assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 31d, 0.00001));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), closeTo(1d, 0.00001));
+ assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 29d, 0.00001));
+ }
+
+ @Test
+ public void singleValuedField_WithSubAggregation() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("sum")).subAggregation(sum("sum").field("value")))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+ Object[] propertiesKeys = (Object[]) histo.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) histo.getProperty("_count");
+ Object[] propertiesCounts = (Object[]) histo.getProperty("sum.value");
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(1.0));
+ SimpleValue deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, nullValue());
+ assertThat((DateTime) propertiesKeys[0], equalTo(key));
+ assertThat((long) propertiesDocCounts[0], equalTo(1l));
+ assertThat((double) propertiesCounts[0], equalTo(1.0));
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(5.0));
+ deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.value(), equalTo(4.0));
+ assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0));
+ assertThat((DateTime) propertiesKeys[1], equalTo(key));
+ assertThat((long) propertiesDocCounts[1], equalTo(2l));
+ assertThat((double) propertiesCounts[1], equalTo(5.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ assertThat(sum.getValue(), equalTo(15.0));
+ deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.value(), equalTo(10.0));
+ assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0));
+ assertThat((DateTime) propertiesKeys[2], equalTo(key));
+ assertThat((long) propertiesDocCounts[2], equalTo(3l));
+ assertThat((double) propertiesCounts[2], equalTo(15.0));
+ }
+
+ @Test
+ public void multiValuedField() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(4));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(true));
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, nullValue());
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(2.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(5l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(2.0));
+
+ key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(3);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(-2.0));
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx_unmapped")
+ .addAggregation(
+ dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ assertThat(deriv.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx", "idx_unmapped")
+ .addAggregation(
+ dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(3));
+
+ DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC);
+ Histogram.Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(true));
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, nullValue());
+
+ key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(1);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(2l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(1.0));
+
+ key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC);
+ bucket = buckets.get(2);
+ assertThat(bucket, notNullValue());
+ assertThat((DateTime) bucket.getKey(), equalTo(key));
+ assertThat(bucket.getDocCount(), equalTo(3l));
+ assertThat(bucket.getAggregations().asList().isEmpty(), is(false));
+ docCountDeriv = bucket.getAggregations().get("deriv");
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo(1.0));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java
new file mode 100644
index 0000000000..a95364393a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/DerivativeTests.java
@@ -0,0 +1,605 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket;
+import org.elasticsearch.search.aggregations.metrics.stats.Stats;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.SimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative;
+import org.elasticsearch.search.aggregations.support.AggregationPath;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.stats;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class DerivativeTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+
+ private static int interval;
+ private static int numValueBuckets;
+ private static int numFirstDerivValueBuckets;
+ private static int numSecondDerivValueBuckets;
+ private static long[] valueCounts;
+ private static long[] firstDerivValueCounts;
+ private static long[] secondDerivValueCounts;
+
+ private static Long[] valueCounts_empty;
+ private static long numDocsEmptyIdx;
+ private static Double[] firstDerivValueCounts_empty;
+
+ // expected bucket values for random setup with gaps
+ private static int numBuckets_empty_rnd;
+ private static Long[] valueCounts_empty_rnd;
+ private static Double[] firstDerivValueCounts_empty_rnd;
+ private static long numDocsEmptyIdx_rnd;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ interval = 5;
+ numValueBuckets = randomIntBetween(6, 80);
+
+ valueCounts = new long[numValueBuckets];
+ for (int i = 0; i < numValueBuckets; i++) {
+ valueCounts[i] = randomIntBetween(1, 20);
+ }
+
+ numFirstDerivValueBuckets = numValueBuckets - 1;
+ firstDerivValueCounts = new long[numFirstDerivValueBuckets];
+ Long lastValueCount = null;
+ for (int i = 0; i < numValueBuckets; i++) {
+ long thisValue = valueCounts[i];
+ if (lastValueCount != null) {
+ long diff = thisValue - lastValueCount;
+ firstDerivValueCounts[i - 1] = diff;
+ }
+ lastValueCount = thisValue;
+ }
+
+ numSecondDerivValueBuckets = numFirstDerivValueBuckets - 1;
+ secondDerivValueCounts = new long[numSecondDerivValueBuckets];
+ Long lastFirstDerivativeValueCount = null;
+ for (int i = 0; i < numFirstDerivValueBuckets; i++) {
+ long thisFirstDerivativeValue = firstDerivValueCounts[i];
+ if (lastFirstDerivativeValueCount != null) {
+ long diff = thisFirstDerivativeValue - lastFirstDerivativeValueCount;
+ secondDerivValueCounts[i - 1] = diff;
+ }
+ lastFirstDerivativeValueCount = thisFirstDerivativeValue;
+ }
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numValueBuckets; i++) {
+ for (int docs = 0; docs < valueCounts[i]; docs++) {
+ builders.add(client().prepareIndex("idx", "type").setSource(newDocBuilder(i * interval)));
+ }
+ }
+
+ // setup for index with empty buckets
+ valueCounts_empty = new Long[] { 1l, 1l, 2l, 0l, 2l, 2l, 0l, 0l, 0l, 3l, 2l, 1l };
+ firstDerivValueCounts_empty = new Double[] { null, 0d, 1d, -2d, 2d, 0d, -2d, 0d, 0d, 3d, -1d, -1d };
+
+ assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+ for (int i = 0; i < valueCounts_empty.length; i++) {
+ for (int docs = 0; docs < valueCounts_empty[i]; docs++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type").setSource(newDocBuilder(i)));
+ numDocsEmptyIdx++;
+ }
+ }
+
+ // randomized setup for index with empty buckets
+ numBuckets_empty_rnd = randomIntBetween(20, 100);
+ valueCounts_empty_rnd = new Long[numBuckets_empty_rnd];
+ firstDerivValueCounts_empty_rnd = new Double[numBuckets_empty_rnd];
+ firstDerivValueCounts_empty_rnd[0] = null;
+
+ assertAcked(prepareCreate("empty_bucket_idx_rnd").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+ for (int i = 0; i < numBuckets_empty_rnd; i++) {
+ valueCounts_empty_rnd[i] = (long) randomIntBetween(1, 10);
+ // make approximately half of the buckets empty
+ if (randomBoolean())
+ valueCounts_empty_rnd[i] = 0l;
+ for (int docs = 0; docs < valueCounts_empty_rnd[i]; docs++) {
+ builders.add(client().prepareIndex("empty_bucket_idx_rnd", "type").setSource(newDocBuilder(i)));
+ numDocsEmptyIdx_rnd++;
+ }
+ if (i > 0) {
+ firstDerivValueCounts_empty_rnd[i] = (double) valueCounts_empty_rnd[i] - valueCounts_empty_rnd[i - 1];
+ }
+ }
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ private XContentBuilder newDocBuilder(int singleValueFieldValue) throws IOException {
+ return jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, singleValueFieldValue).endObject();
+ }
+
+ /**
+ * test first and second derivative on the single valued field
+ */
+ @Test
+ public void docCountDerivative() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))
+ .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]);
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ if (i > 0) {
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1]));
+ } else {
+ assertThat(docCountDeriv, nullValue());
+ }
+ SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv");
+ if (i > 1) {
+ assertThat(docCount2ndDeriv, notNullValue());
+ assertThat(docCount2ndDeriv.value(), equalTo((double) secondDerivValueCounts[i - 2]));
+ } else {
+ assertThat(docCount2ndDeriv, nullValue());
+ }
+ }
+ }
+
+ /**
+ * test normalized (per-unit) first and second derivative on the single valued field
+ */
+ @Test
+ public void singleValuedField_normalised() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).minDocCount(0)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count").unit("1ms"))
+ .subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv").unit("10ms"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]);
+ Derivative docCountDeriv = bucket.getAggregations().get("deriv");
+ if (i > 0) {
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), closeTo((double) (firstDerivValueCounts[i - 1]), 0.00001));
+ assertThat(docCountDeriv.normalizedValue(), closeTo((double) (firstDerivValueCounts[i - 1]) / 5, 0.00001));
+ } else {
+ assertThat(docCountDeriv, nullValue());
+ }
+ Derivative docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv");
+ if (i > 1) {
+ assertThat(docCount2ndDeriv, notNullValue());
+ assertThat(docCount2ndDeriv.value(), closeTo((double) (secondDerivValueCounts[i - 2]), 0.00001));
+ assertThat(docCount2ndDeriv.normalizedValue(), closeTo((double) (secondDerivValueCounts[i - 2]) * 2, 0.00001));
+ } else {
+ assertThat(docCount2ndDeriv, nullValue());
+ }
+ }
+ }
+
+ @Test
+ public void singleValueAggDerivative() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
+ Object[] propertiesKeys = (Object[]) deriv.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count");
+ Object[] propertiesSumCounts = (Object[]) deriv.getProperty("sum.value");
+
+ List<Bucket> buckets = new ArrayList<Bucket>(deriv.getBuckets());
+ Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets
+ // overwritten
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]);
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ long expectedSum = valueCounts[i] * (i * interval);
+ assertThat(sum.getValue(), equalTo((double) expectedSum));
+ SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
+ if (i > 0) {
+ assertThat(sumDeriv, notNullValue());
+ long sumDerivValue = expectedSum - expectedSumPreviousBucket;
+ assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
+ assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
+ equalTo((double) sumDerivValue));
+ } else {
+ assertThat(sumDeriv, nullValue());
+ }
+ expectedSumPreviousBucket = expectedSum;
+ assertThat((long) propertiesKeys[i], equalTo((long) i * interval));
+ assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i]));
+ assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum));
+ }
+ }
+
+ @Test
+ public void multiValueAggDerivative() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(derivative("deriv").setBucketsPaths("stats.sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
+ Object[] propertiesKeys = (Object[]) deriv.getProperty("_key");
+ Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count");
+ Object[] propertiesSumCounts = (Object[]) deriv.getProperty("stats.sum");
+
+ List<Bucket> buckets = new ArrayList<Bucket>(deriv.getBuckets());
+ Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets
+ // overwritten
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]);
+ Stats stats = bucket.getAggregations().get("stats");
+ assertThat(stats, notNullValue());
+ long expectedSum = valueCounts[i] * (i * interval);
+ assertThat(stats.getSum(), equalTo((double) expectedSum));
+ SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
+ if (i > 0) {
+ assertThat(sumDeriv, notNullValue());
+ long sumDerivValue = expectedSum - expectedSumPreviousBucket;
+ assertThat(sumDeriv.value(), equalTo((double) sumDerivValue));
+ assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()),
+ equalTo((double) sumDerivValue));
+ } else {
+ assertThat(sumDeriv, nullValue());
+ }
+ expectedSumPreviousBucket = expectedSum;
+ assertThat((long) propertiesKeys[i], equalTo((long) i * interval));
+ assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i]));
+ assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum));
+ }
+ }
+
+ @Test
+ public void unmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx_unmapped")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ assertThat(deriv.getBuckets().size(), equalTo(0));
+ }
+
+ @Test
+ public void partiallyUnmapped() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx", "idx_unmapped")
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> deriv = response.getAggregations().get("histo");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = deriv.getBuckets();
+ assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets));
+
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]);
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ if (i > 0) {
+ assertThat(docCountDeriv, notNullValue());
+ assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1]));
+ } else {
+ assertThat(docCountDeriv, nullValue());
+ }
+ }
+ }
+
+ @Test
+ public void docCountDerivativeWithGaps() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(valueCounts_empty.length));
+
+ for (int i = 0; i < valueCounts_empty.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]);
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ if (firstDerivValueCounts_empty[i] == null) {
+ assertThat(docCountDeriv, nullValue());
+ } else {
+ assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i]));
+ }
+ }
+ }
+
+ @Test
+ public void docCountDerivativeWithGaps_random() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx_rnd")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .extendedBounds(0l, (long) numBuckets_empty_rnd - 1)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(randomFrom(GapPolicy.values()))))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets_empty_rnd));
+
+ for (int i = 0; i < valueCounts_empty_rnd.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]);
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ if (firstDerivValueCounts_empty_rnd[i] == null) {
+ assertThat(docCountDeriv, nullValue());
+ } else {
+ assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty_rnd[i]));
+ }
+ }
+ }
+
+ @Test
+ public void docCountDerivativeWithGaps_insertZeros() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(valueCounts_empty.length));
+
+ for (int i = 0; i < valueCounts_empty.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i + ": ", bucket, i, valueCounts_empty[i]);
+ SimpleValue docCountDeriv = bucket.getAggregations().get("deriv");
+ if (firstDerivValueCounts_empty[i] == null) {
+ assertThat(docCountDeriv, nullValue());
+ } else {
+ assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i]));
+ }
+ }
+ }
+
+ @Test
+ public void singleValueAggDerivativeWithGaps() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(valueCounts_empty.length));
+
+ double lastSumValue = Double.NaN;
+ for (int i = 0; i < valueCounts_empty.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]);
+ Sum sum = bucket.getAggregations().get("sum");
+ double thisSumValue = sum.value();
+ if (bucket.getDocCount() == 0) {
+ thisSumValue = Double.NaN;
+ }
+ SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
+ if (i == 0) {
+ assertThat(sumDeriv, nullValue());
+ } else {
+ double expectedDerivative = thisSumValue - lastSumValue;
+ if (Double.isNaN(expectedDerivative)) {
+ assertThat(sumDeriv.value(), equalTo(expectedDerivative));
+ } else {
+ assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
+ }
+ }
+ lastSumValue = thisSumValue;
+ }
+ }
+
+ @Test
+ public void singleValueAggDerivativeWithGaps_insertZeros() throws Exception {
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(GapPolicy.INSERT_ZEROS))).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(valueCounts_empty.length));
+
+ double lastSumValue = Double.NaN;
+ for (int i = 0; i < valueCounts_empty.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]);
+ Sum sum = bucket.getAggregations().get("sum");
+ double thisSumValue = sum.value();
+ if (bucket.getDocCount() == 0) {
+ thisSumValue = 0;
+ }
+ SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
+ if (i == 0) {
+ assertThat(sumDeriv, nullValue());
+ } else {
+ double expectedDerivative = thisSumValue - lastSumValue;
+ assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
+ }
+ lastSumValue = thisSumValue;
+ }
+ }
+
+ @Test
+ public void singleValueAggDerivativeWithGaps_random() throws Exception {
+ GapPolicy gapPolicy = randomFrom(GapPolicy.values());
+ SearchResponse searchResponse = client()
+ .prepareSearch("empty_bucket_idx_rnd")
+ .setQuery(matchAllQuery())
+ .addAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1)
+ .extendedBounds(0l, (long) numBuckets_empty_rnd - 1)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
+ .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(gapPolicy))).execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd));
+
+ InternalHistogram<Bucket> deriv = searchResponse.getAggregations().get("histo");
+ assertThat(deriv, Matchers.notNullValue());
+ assertThat(deriv.getName(), equalTo("histo"));
+ List<Bucket> buckets = deriv.getBuckets();
+ assertThat(buckets.size(), equalTo(numBuckets_empty_rnd));
+
+ double lastSumValue = Double.NaN;
+ for (int i = 0; i < valueCounts_empty_rnd.length; i++) {
+ Histogram.Bucket bucket = buckets.get(i);
+ checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]);
+ Sum sum = bucket.getAggregations().get("sum");
+ double thisSumValue = sum.value();
+ if (bucket.getDocCount() == 0) {
+ thisSumValue = gapPolicy == GapPolicy.INSERT_ZEROS ? 0 : Double.NaN;
+ }
+ SimpleValue sumDeriv = bucket.getAggregations().get("deriv");
+ if (i == 0) {
+ assertThat(sumDeriv, nullValue());
+ } else {
+ double expectedDerivative = thisSumValue - lastSumValue;
+ if (Double.isNaN(expectedDerivative)) {
+ assertThat(sumDeriv.value(), equalTo(expectedDerivative));
+ } else {
+ assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001));
+ }
+ }
+ lastSumValue = thisSumValue;
+ }
+ }
+
+ private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey,
+ final long expectedDocCount) {
+ assertThat(msg, bucket, notNullValue());
+ assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey));
+ assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java
new file mode 100644
index 0000000000..3f12b81325
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketTests.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.maxBucket;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+/**
+ * Integration tests for the {@code max_bucket} pipeline aggregation: each test builds
+ * a sibling aggregation (histogram or terms), computes the expected maximum (and the
+ * keys of all tied buckets) on the client side, and asserts that the returned
+ * {@link InternalBucketMetricValue} agrees.
+ */
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MaxBucketTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+
+ // Randomized per-suite fixture parameters, filled in by setupSuiteScopeCluster().
+ static int numDocs;
+ static int interval;
+ static int minRandomValue;
+ static int maxRandomValue;
+ static int numValueBuckets;
+ // Expected doc count per histogram bucket, tallied while indexing.
+ static long[] valueCounts;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(6, 20);
+ interval = randomIntBetween(2, 5);
+
+ minRandomValue = 0;
+ maxRandomValue = 20;
+
+ // +1 so the bucket containing maxRandomValue itself is included.
+ numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1;
+ valueCounts = new long[numValueBuckets];
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ // Index numDocs docs with a random field value and a round-robin "tag" field;
+ // record the expected histogram bucket occupancy as we go.
+ for (int i = 0; i < numDocs; i++) {
+ int fieldValue = randomIntBetween(minRandomValue, maxRandomValue);
+ builders.add(client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval))
+ .endObject()));
+ final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1);
+ valueCounts[bucket]++;
+ }
+
+ // Sparse index (docs only at even values) used by gap-policy tests elsewhere in the suite.
+ assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ // max_bucket over a top-level histogram's _count path.
+ @Test
+ public void testDocCount_topLevel() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .addAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ // Recompute expected max doc count; ties accumulate all tied bucket keys.
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ if (bucket.getDocCount() > maxValue) {
+ maxValue = bucket.getDocCount();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (bucket.getDocCount() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+
+ // max_bucket nested under each terms bucket, over the inner histogram's _count.
+ @Test
+ public void testDocCount_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ // Order.term(true) makes tags arrive sorted: tag0, tag1, ...
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ // Expected max is recomputed independently per terms bucket.
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() > maxValue) {
+ maxValue = bucket.getDocCount();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (bucket.getDocCount() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+ }
+
+ // max_bucket over a metric sub-aggregation (terms>sum) at the top level.
+ @Test
+ public void testMetric_topLevel() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(interval));
+
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval)));
+ // Every tag was assigned round-robin, so no terms bucket can be empty.
+ assertThat(bucket.getDocCount(), greaterThan(0l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() > maxValue) {
+ maxValue = sum.value();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+
+ // max_bucket over histo>sum nested inside each terms bucket; empty histogram
+ // buckets are skipped (default gap policy), mirrored by the docCount != 0 guard below.
+ @Test
+ public void testMetric_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ // Only non-empty buckets contribute to the expected max (SKIP gap behavior).
+ if (bucket.getDocCount() != 0) {
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() > maxValue) {
+ maxValue = sum.value();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+ }
+
+ // Same as above but nested under a single-bucket (filter) aggregation.
+ @Test
+ public void testMetric_asSubAggOfSingleBucketAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ filter("filter")
+ .filter(termQuery("tag", "tag0"))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Filter filter = response.getAggregations().get("filter");
+ assertThat(filter, notNullValue());
+ assertThat(filter.getName(), equalTo("filter"));
+ Histogram histo = filter.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() != 0) {
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() > maxValue) {
+ maxValue = sum.value();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = filter.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+
+ // With INSERT_ZEROS, empty buckets count as sum == 0, so no docCount guard
+ // appears in the expectation loop below.
+ @Test
+ public void testMetric_asSubAggWithInsertZeros() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ List<String> maxKeys = new ArrayList<>();
+ double maxValue = Double.NEGATIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() > maxValue) {
+ maxValue = sum.value();
+ maxKeys = new ArrayList<>();
+ maxKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == maxValue) {
+ maxKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()])));
+ }
+ }
+
+ // exclude("tag.*") removes every bucket; max over no buckets is -Infinity with no keys.
+ @Test
+ public void testNoBuckets() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(0));
+
+ InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(Double.NEGATIVE_INFINITY));
+ assertThat(maxBucketValue.keys(), equalTo(new String[0]));
+ }
+
+ // Two-level pipeline: an inner max_bucket per terms bucket, then an outer
+ // max_bucket over those inner maxima (terms>max_histo_bucket).
+ @Test
+ public void testNested() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .subAggregation(maxBucket("max_histo_bucket").setBucketsPaths("histo>_count")))
+ .addAggregation(maxBucket("max_terms_bucket").setBucketsPaths("terms>max_histo_bucket")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ List<String> maxTermsKeys = new ArrayList<>();
+ double maxTermsValue = Double.NEGATIVE_INFINITY;
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ // Inner expectation: max doc count within this terms bucket's histogram.
+ List<String> maxHistoKeys = new ArrayList<>();
+ double maxHistoValue = Double.NEGATIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() > maxHistoValue) {
+ maxHistoValue = bucket.getDocCount();
+ maxHistoKeys = new ArrayList<>();
+ maxHistoKeys.add(bucket.getKeyAsString());
+ } else if (bucket.getDocCount() == maxHistoValue) {
+ maxHistoKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_histo_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_histo_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxHistoValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[maxHistoKeys.size()])));
+ // Outer expectation: fold this terms bucket's inner max into the global max.
+ if (maxHistoValue > maxTermsValue) {
+ maxTermsValue = maxHistoValue;
+ maxTermsKeys = new ArrayList<>();
+ maxTermsKeys.add(termsBucket.getKeyAsString());
+ } else if (maxHistoValue == maxTermsValue) {
+ maxTermsKeys.add(termsBucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_terms_bucket");
+ assertThat(maxBucketValue, notNullValue());
+ assertThat(maxBucketValue.getName(), equalTo("max_terms_bucket"));
+ assertThat(maxBucketValue.value(), equalTo(maxTermsValue));
+ assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[maxTermsKeys.size()])));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java
new file mode 100644
index 0000000000..e08138fce4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketTests.java
@@ -0,0 +1,433 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.minBucket;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MinBucketTests extends ElasticsearchIntegrationTest {
+
+ private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+
+ static int numDocs;
+ static int interval;
+ static int minRandomValue;
+ static int maxRandomValue;
+ static int numValueBuckets;
+ static long[] valueCounts;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ numDocs = randomIntBetween(6, 20);
+ interval = randomIntBetween(2, 5);
+
+ minRandomValue = 0;
+ maxRandomValue = 20;
+
+ numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1;
+ valueCounts = new long[numValueBuckets];
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ for (int i = 0; i < numDocs; i++) {
+ int fieldValue = randomIntBetween(minRandomValue, maxRandomValue);
+ builders.add(client().prepareIndex("idx", "type").setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval))
+ .endObject()));
+ final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1);
+ valueCounts[bucket]++;
+ }
+
+ assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+ jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ @Test
+ public void testDocCount_topLevel() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .addAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Histogram histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(numValueBuckets));
+
+ List<String> minKeys = new ArrayList<>();
+ double minValue = Double.POSITIVE_INFINITY;
+ for (int i = 0; i < numValueBuckets; ++i) {
+ Histogram.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+ assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+ if (bucket.getDocCount() < minValue) {
+ minValue = bucket.getDocCount();
+ minKeys = new ArrayList<>();
+ minKeys.add(bucket.getKeyAsString());
+ } else if (bucket.getDocCount() == minValue) {
+ minKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket");
+ assertThat(minBucketValue, notNullValue());
+ assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+ assertThat(minBucketValue.value(), equalTo(minValue));
+ assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
+ }
+
+ @Test
+ public void testDocCount_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+ .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ List<String> minKeys = new ArrayList<>();
+ double minValue = Double.POSITIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() < minValue) {
+ minValue = bucket.getDocCount();
+ minKeys = new ArrayList<>();
+ minKeys.add(bucket.getKeyAsString());
+ } else if (bucket.getDocCount() == minValue) {
+ minKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket");
+ assertThat(minBucketValue, notNullValue());
+ assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+ assertThat(minBucketValue.value(), equalTo(minValue));
+ assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
+ }
+ }
+
+ @Test
+ public void testMetric_topLevel() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> buckets = terms.getBuckets();
+ assertThat(buckets.size(), equalTo(interval));
+
+ List<String> minKeys = new ArrayList<>();
+ double minValue = Double.POSITIVE_INFINITY;
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval)));
+ assertThat(bucket.getDocCount(), greaterThan(0l));
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() < minValue) {
+ minValue = sum.value();
+ minKeys = new ArrayList<>();
+ minKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == minValue) {
+ minKeys.add(bucket.getKeyAsString());
+ }
+ }
+
+ InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket");
+ assertThat(minBucketValue, notNullValue());
+ assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+ assertThat(minBucketValue.value(), equalTo(minValue));
+ assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
+ }
+
+ @Test
+ public void testMetric_asSubAgg() throws Exception {
+ SearchResponse response = client()
+ .prepareSearch("idx")
+ .addAggregation(
+ terms("terms")
+ .field("tag")
+ .order(Order.term(true))
+ .subAggregation(
+ histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+ .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+ .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+ .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ Terms terms = response.getAggregations().get("terms");
+ assertThat(terms, notNullValue());
+ assertThat(terms.getName(), equalTo("terms"));
+ List<Terms.Bucket> termsBuckets = terms.getBuckets();
+ assertThat(termsBuckets.size(), equalTo(interval));
+
+ for (int i = 0; i < interval; ++i) {
+ Terms.Bucket termsBucket = termsBuckets.get(i);
+ assertThat(termsBucket, notNullValue());
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+ Histogram histo = termsBucket.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ List<String> minKeys = new ArrayList<>();
+ double minValue = Double.POSITIVE_INFINITY;
+ for (int j = 0; j < numValueBuckets; ++j) {
+ Histogram.Bucket bucket = buckets.get(j);
+ assertThat(bucket, notNullValue());
+ assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+ if (bucket.getDocCount() != 0) {
+ Sum sum = bucket.getAggregations().get("sum");
+ assertThat(sum, notNullValue());
+ if (sum.value() < minValue) {
+ minValue = sum.value();
+ minKeys = new ArrayList<>();
+ minKeys.add(bucket.getKeyAsString());
+ } else if (sum.value() == minValue) {
+ minKeys.add(bucket.getKeyAsString());
+ }
+ }
+ }
+
+ InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket");
+ assertThat(minBucketValue, notNullValue());
+ assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+ assertThat(minBucketValue.value(), equalTo(minValue));
+ assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
+ }
+ }
+
+    /**
+     * min_bucket with buckets_path "histo>sum" under a terms agg, using
+     * gap_policy=INSERT_ZEROS: empty histogram buckets are treated as
+     * sum == 0, so the expected minimum is computed over every bucket rather
+     * than only over buckets with a non-zero doc count.
+     */
+    @Test
+    public void testMetric_asSubAggWithInsertZeros() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+                                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                                .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            // Recompute the expected minimum independently. Unlike the default
+            // gap-policy test, there is no doc-count guard here: with INSERT_ZEROS
+            // every bucket contributes a sum value.
+            List<String> minKeys = new ArrayList<>();
+            double minValue = Double.POSITIVE_INFINITY;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+                Sum sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+                if (sum.value() < minValue) {
+                    minValue = sum.value();
+                    minKeys = new ArrayList<>();
+                    minKeys.add(bucket.getKeyAsString());
+                } else if (sum.value() == minValue) {
+                    // Ties: every bucket key sharing the minimum must be reported.
+                    minKeys.add(bucket.getKeyAsString());
+                }
+            }
+
+            InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket");
+            assertThat(minBucketValue, notNullValue());
+            assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+            assertThat(minBucketValue.value(), equalTo(minValue));
+            assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()])));
+        }
+    }
+
+    /**
+     * When the terms agg excludes every term there are no buckets to reduce,
+     * so min_bucket reports the reduction identity: +Infinity and no keys.
+     */
+    @Test
+    public void testNoBuckets() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> buckets = terms.getBuckets();
+        assertThat(buckets.size(), equalTo(0));
+
+        InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket");
+        assertThat(minBucketValue, notNullValue());
+        assertThat(minBucketValue.getName(), equalTo("min_bucket"));
+        // Min over an empty bucket set is the identity value, with no winning keys.
+        assertThat(minBucketValue.value(), equalTo(Double.POSITIVE_INFINITY));
+        assertThat(minBucketValue.keys(), equalTo(new String[0]));
+    }
+
+    /**
+     * Two chained min_bucket aggregations: an inner one over each per-term
+     * histogram's doc counts ("histo>_count") and an outer one over the inner
+     * results ("terms>min_histo_bucket"). The test mirrors that chain by
+     * tracking both the per-histogram minimum and the overall minimum.
+     */
+    @Test
+    public void testNested() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+                                .subAggregation(minBucket("min_histo_bucket").setBucketsPaths("histo>_count")))
+                .addAggregation(minBucket("min_terms_bucket").setBucketsPaths("terms>min_histo_bucket")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        // Expected outer minimum, accumulated across all terms buckets.
+        List<String> minTermsKeys = new ArrayList<>();
+        double minTermsValue = Double.POSITIVE_INFINITY;
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            // Expected inner minimum over this histogram's doc counts.
+            List<String> minHistoKeys = new ArrayList<>();
+            double minHistoValue = Double.POSITIVE_INFINITY;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+                if (bucket.getDocCount() < minHistoValue) {
+                    minHistoValue = bucket.getDocCount();
+                    minHistoKeys = new ArrayList<>();
+                    minHistoKeys.add(bucket.getKeyAsString());
+                } else if (bucket.getDocCount() == minHistoValue) {
+                    minHistoKeys.add(bucket.getKeyAsString());
+                }
+            }
+
+            InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_histo_bucket");
+            assertThat(minBucketValue, notNullValue());
+            assertThat(minBucketValue.getName(), equalTo("min_histo_bucket"));
+            assertThat(minBucketValue.value(), equalTo(minHistoValue));
+            assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[minHistoKeys.size()])));
+            // Fold this term's inner minimum into the outer minimum (keys are the
+            // terms-bucket keys, not the histogram keys).
+            if (minHistoValue < minTermsValue) {
+                minTermsValue = minHistoValue;
+                minTermsKeys = new ArrayList<>();
+                minTermsKeys.add(termsBucket.getKeyAsString());
+            } else if (minHistoValue == minTermsValue) {
+                minTermsKeys.add(termsBucket.getKeyAsString());
+            }
+        }
+
+        InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_terms_bucket");
+        assertThat(minBucketValue, notNullValue());
+        assertThat(minBucketValue.getName(), equalTo("min_terms_bucket"));
+        assertThat(minBucketValue.value(), equalTo(minTermsValue));
+        assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[minTermsKeys.size()])));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java
new file mode 100644
index 0000000000..0873ce4665
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.AvgBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.MaxBuilder;
+import org.elasticsearch.search.aggregations.metrics.min.MinBuilder;
+import org.elasticsearch.search.aggregations.metrics.sum.SumBuilder;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.ArrayList;
+
+/**
+ * Provides helper methods and classes for use in PipelineAggregation tests,
+ * such as creating mock histograms or computing simple metrics
+ */
+public class PipelineAggregationHelperTests extends ElasticsearchTestCase {
+
+    /**
+     * Generates a mock histogram to use for testing. Each MockBucket holds a doc count, key and document values
+     * which can later be used to compute metrics and compare against the real aggregation results. Gappiness can be
+     * controlled via parameters.
+     *
+     * @param interval       Interval between bucket keys
+     * @param size           Size of mock histogram to generate (in buckets)
+     * @param gapProbability Probability of generating an empty bucket. 0.0-1.0 inclusive
+     * @param runProbability Probability of extending a gap once one has been created. 0.0-1.0 inclusive
+     * @return               list of generated mock buckets, ordered by ascending key
+     */
+    public static ArrayList<MockBucket> generateHistogram(int interval, int size, double gapProbability, double runProbability) {
+        ArrayList<MockBucket> values = new ArrayList<>(size);
+
+        boolean lastWasGap = false;
+
+        for (int i = 0; i < size; i++) {
+            MockBucket bucket = new MockBucket();
+            // Either start a new gap, or extend an existing one. Short-circuit
+            // evaluation preserves the exact random-draw order of the original
+            // two-branch form: runProbability is only drawn when the previous
+            // bucket was a gap.
+            if (randomDouble() < gapProbability || (lastWasGap && randomDouble() < runProbability)) {
+                bucket.count = 0;
+                bucket.docValues = new double[0];
+                lastWasGap = true;
+            } else {
+                bucket.count = randomIntBetween(1, 50);
+                bucket.docValues = new double[bucket.count];
+                for (int j = 0; j < bucket.count; j++) {
+                    bucket.docValues[j] = randomDouble() * randomIntBetween(-20, 20);
+                }
+                lastWasGap = false;
+            }
+
+            bucket.key = i * interval;
+            values.add(bucket);
+        }
+
+        return values;
+    }
+
+    /**
+     * Simple mock bucket container
+     */
+    public static class MockBucket {
+        public int count;          // doc count; 0 marks an empty ("gap") bucket
+        public double[] docValues; // one value per doc; empty when count == 0
+        public long key;           // histogram bucket key (i * interval)
+    }
+
+    /**
+     * Computes a simple agg metric (min, sum, etc) from the provided values
+     *
+     * @param values Array of values to compute metric for
+     * @param metric A metric builder which defines what kind of metric should be returned for the values
+     * @return       the computed metric value
+     * @throws IllegalArgumentException if the metric builder is not one of min/max/sum/avg
+     */
+    public static double calculateMetric(double[] values, ValuesSourceMetricsAggregationBuilder metric) {
+
+        if (metric instanceof MinBuilder) {
+            double accumulator = Double.POSITIVE_INFINITY;
+            for (double value : values) {
+                accumulator = Math.min(accumulator, value);
+            }
+            return accumulator;
+        } else if (metric instanceof MaxBuilder) {
+            double accumulator = Double.NEGATIVE_INFINITY;
+            for (double value : values) {
+                accumulator = Math.max(accumulator, value);
+            }
+            return accumulator;
+        } else if (metric instanceof SumBuilder) {
+            double accumulator = 0;
+            for (double value : values) {
+                accumulator += value;
+            }
+            return accumulator;
+        } else if (metric instanceof AvgBuilder) {
+            double accumulator = 0;
+            for (double value : values) {
+                accumulator += value;
+            }
+            // NOTE(review): yields NaN for an empty array (0/0) — presumably
+            // acceptable for gap buckets; confirm callers never hit this.
+            return accumulator / values.length;
+        }
+
+        // Previously fell through to 0.0, silently producing a wrong expected
+        // value for unsupported metric types; fail loudly instead so a bad test
+        // setup is caught at the source.
+        throw new IllegalArgumentException("Unsupported metric [" + metric.getClass().getSimpleName() + "]");
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java
new file mode 100644
index 0000000000..550625be32
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketTests.java
@@ -0,0 +1,380 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
+import org.elasticsearch.search.aggregations.metrics.sum.Sum;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.sumBucket;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.sum;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.core.IsNull.notNullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class SumBucketTests extends ElasticsearchIntegrationTest {
+
+    private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
+
+    // Randomized suite-scope fixture parameters, shared by every test.
+    static int numDocs;
+    static int interval;        // terms cardinality AND histogram interval
+    static int minRandomValue;
+    static int maxRandomValue;
+    static int numValueBuckets; // number of histogram buckets over [min, max]
+    static long[] valueCounts;  // expected doc count per histogram bucket
+
+    /**
+     * Indexes numDocs docs into "idx" with a random l_value and a round-robin
+     * "tag" field, recording the expected per-bucket doc counts, plus a small
+     * "empty_bucket_idx" fixture.
+     */
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        createIndex("idx_unmapped");
+
+        numDocs = randomIntBetween(6, 20);
+        interval = randomIntBetween(2, 5);
+
+        minRandomValue = 0;
+        maxRandomValue = 20;
+
+        numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1;
+        valueCounts = new long[numValueBuckets];
+
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+
+        for (int i = 0; i < numDocs; i++) {
+            int fieldValue = randomIntBetween(minRandomValue, maxRandomValue);
+            builders.add(client().prepareIndex("idx", "type").setSource(
+                    jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval))
+                            .endObject()));
+            // Track which histogram bucket this doc lands in (minRandomValue is 0,
+            // so the simplified formula is sufficient).
+            final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1);
+            valueCounts[bucket]++;
+        }
+
+        assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer"));
+        for (int i = 0; i < 2; i++) {
+            builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource(
+                    jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()));
+        }
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    /**
+     * Top-level sum_bucket over "histo>_count" equals the sum of all bucket
+     * doc counts.
+     */
+    @Test
+    public void testDocCount_topLevel() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                        .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+                .addAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>_count")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Histogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        assertThat(buckets.size(), equalTo(numValueBuckets));
+
+        double sum = 0;
+        for (int i = 0; i < numValueBuckets; ++i) {
+            Histogram.Bucket bucket = buckets.get(i);
+            assertThat(bucket, notNullValue());
+            assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval));
+            assertThat(bucket.getDocCount(), equalTo(valueCounts[i]));
+            sum += bucket.getDocCount();
+        }
+
+        InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket");
+        assertThat(sumBucketValue, notNullValue());
+        assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+        assertThat(sumBucketValue.value(), equalTo(sum));
+    }
+
+    /**
+     * sum_bucket over "histo>_count" nested under a terms agg: checked
+     * independently for each terms bucket.
+     */
+    @Test
+    public void testDocCount_asSubAgg() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+                                .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>_count"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            double sum = 0;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+                sum += bucket.getDocCount();
+            }
+
+            InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket");
+            assertThat(sumBucketValue, notNullValue());
+            assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+            assertThat(sumBucketValue.value(), equalTo(sum));
+        }
+    }
+
+    /**
+     * Top-level sum_bucket over a metric path "terms>sum" equals the sum of the
+     * per-term sum values.
+     */
+    @Test
+    public void testMetric_topLevel() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                .addAggregation(sumBucket("sum_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> buckets = terms.getBuckets();
+        assertThat(buckets.size(), equalTo(interval));
+
+        double bucketSum = 0;
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket bucket = buckets.get(i);
+            assertThat(bucket, notNullValue());
+            assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval)));
+            // Round-robin tagging guarantees every term has at least one doc.
+            assertThat(bucket.getDocCount(), greaterThan(0l));
+            Sum sum = bucket.getAggregations().get("sum");
+            assertThat(sum, notNullValue());
+            bucketSum += sum.value();
+        }
+
+        InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket");
+        assertThat(sumBucketValue, notNullValue());
+        assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+        assertThat(sumBucketValue.value(), equalTo(bucketSum));
+    }
+
+    /**
+     * sum_bucket over "histo>sum" nested under a terms agg, default gap policy:
+     * empty histogram buckets are skipped, hence the doc-count guard below.
+     */
+    @Test
+    public void testMetric_asSubAgg() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+                                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                                .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>sum"))).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            double bucketSum = 0;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+                // Default (skip) gap policy: only non-empty buckets contribute.
+                if (bucket.getDocCount() != 0) {
+                    Sum sum = bucket.getAggregations().get("sum");
+                    assertThat(sum, notNullValue());
+                    bucketSum += sum.value();
+                }
+            }
+
+            InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket");
+            assertThat(sumBucketValue, notNullValue());
+            assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+            assertThat(sumBucketValue.value(), equalTo(bucketSum));
+        }
+    }
+
+    /**
+     * Same as testMetric_asSubAgg but with gap_policy=INSERT_ZEROS: every
+     * bucket contributes (empty ones as 0), so no doc-count guard is needed.
+     */
+    @Test
+    public void testMetric_asSubAggWithInsertZeros() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue)
+                                                .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                                .subAggregation(sumBucket("sum_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)))
+                .execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            double bucketSum = 0;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+                Sum sum = bucket.getAggregations().get("sum");
+                assertThat(sum, notNullValue());
+
+                bucketSum += sum.value();
+            }
+
+            InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket");
+            assertThat(sumBucketValue, notNullValue());
+            assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+            assertThat(sumBucketValue.value(), equalTo(bucketSum));
+        }
+    }
+
+    /**
+     * With every term excluded there are no buckets to reduce; the sum
+     * identity is 0.0 (contrast with min_bucket, whose identity is +Infinity).
+     */
+    @Test
+    public void testNoBuckets() throws Exception {
+        SearchResponse response = client().prepareSearch("idx")
+                .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)))
+                .addAggregation(sumBucket("sum_bucket").setBucketsPaths("terms>sum")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> buckets = terms.getBuckets();
+        assertThat(buckets.size(), equalTo(0));
+
+        InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket");
+        assertThat(sumBucketValue, notNullValue());
+        assertThat(sumBucketValue.getName(), equalTo("sum_bucket"));
+        assertThat(sumBucketValue.value(), equalTo(0.0));
+    }
+
+    /**
+     * Chained sum_bucket aggregations: an inner one over each per-term
+     * histogram's doc counts and an outer one over the inner results
+     * ("terms>sum_histo_bucket").
+     */
+    @Test
+    public void testNested() throws Exception {
+        SearchResponse response = client()
+                .prepareSearch("idx")
+                .addAggregation(
+                        terms("terms")
+                                .field("tag")
+                                .order(Order.term(true))
+                                .subAggregation(
+                                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval)
+                                                .extendedBounds((long) minRandomValue, (long) maxRandomValue))
+                                .subAggregation(sumBucket("sum_histo_bucket").setBucketsPaths("histo>_count")))
+                .addAggregation(sumBucket("sum_terms_bucket").setBucketsPaths("terms>sum_histo_bucket")).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        Terms terms = response.getAggregations().get("terms");
+        assertThat(terms, notNullValue());
+        assertThat(terms.getName(), equalTo("terms"));
+        List<Terms.Bucket> termsBuckets = terms.getBuckets();
+        assertThat(termsBuckets.size(), equalTo(interval));
+
+        double aggTermsSum = 0;
+        for (int i = 0; i < interval; ++i) {
+            Terms.Bucket termsBucket = termsBuckets.get(i);
+            assertThat(termsBucket, notNullValue());
+            assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval)));
+
+            Histogram histo = termsBucket.getAggregations().get("histo");
+            assertThat(histo, notNullValue());
+            assertThat(histo.getName(), equalTo("histo"));
+            List<? extends Bucket> buckets = histo.getBuckets();
+
+            double aggHistoSum = 0;
+            for (int j = 0; j < numValueBuckets; ++j) {
+                Histogram.Bucket bucket = buckets.get(j);
+                assertThat(bucket, notNullValue());
+                assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval));
+
+                aggHistoSum += bucket.getDocCount();
+            }
+
+            InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_histo_bucket");
+            assertThat(sumBucketValue, notNullValue());
+            assertThat(sumBucketValue.getName(), equalTo("sum_histo_bucket"));
+            assertThat(sumBucketValue.value(), equalTo(aggHistoSum));
+
+            // Outer expectation is the sum of the inner sums.
+            aggTermsSum += aggHistoSum;
+        }
+
+        InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_terms_bucket");
+        assertThat(sumBucketValue, notNullValue());
+        assertThat(sumBucketValue.getName(), equalTo("sum_terms_bucket"));
+        assertThat(sumBucketValue.value(), equalTo(aggTermsSum));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java
new file mode 100644
index 0000000000..9e92c69f7e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgTests.java
@@ -0,0 +1,1445 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline.moving.avg;
+
+
+import com.google.common.collect.EvictingQueue;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.RangeQueryBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.Avg;
+import org.elasticsearch.search.aggregations.pipeline.BucketHelpers;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregationHelperTests;
+import org.elasticsearch.search.aggregations.pipeline.SimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.derivative.Derivative;
+import org.elasticsearch.search.aggregations.pipeline.movavg.models.*;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative;
+import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MovAvgTests extends ElasticsearchIntegrationTest {
+
+ private static final String INTERVAL_FIELD = "l_value";
+ private static final String VALUE_FIELD = "v_value";
+ private static final String GAP_FIELD = "g_value";
+
+ static int interval;
+ static int numBuckets;
+ static int windowSize;
+ static double alpha;
+ static double beta;
+ static double gamma;
+ static int period;
+ static HoltWintersModel.SeasonalityType seasonalityType;
+ static BucketHelpers.GapPolicy gapPolicy;
+ static ValuesSourceMetricsAggregationBuilder metric;
+ static List<PipelineAggregationHelperTests.MockBucket> mockHisto;
+
+ static Map<String, ArrayList<Double>> testValues;
+
+
+ enum MovAvgType {
+ SIMPLE ("simple"), LINEAR("linear"), EWMA("ewma"), HOLT("holt"), HOLT_WINTERS("holt_winters");
+
+ private final String name;
+
+ MovAvgType(String s) {
+ name = s;
+ }
+
+ public String toString(){
+ return name;
+ }
+ }
+
+ enum MetricTarget {
+ VALUE ("value"), COUNT("count");
+
+ private final String name;
+
+ MetricTarget(String s) {
+ name = s;
+ }
+
+ public String toString(){
+ return name;
+ }
+ }
+
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+
+ interval = 5;
+ numBuckets = randomIntBetween(6, 80);
+ period = randomIntBetween(1, 5);
+ windowSize = randomIntBetween(period * 2, 10); // start must be 2*period to play nice with HW
+ alpha = randomDouble();
+ beta = randomDouble();
+ gamma = randomDouble();
+ seasonalityType = randomBoolean() ? HoltWintersModel.SeasonalityType.ADDITIVE : HoltWintersModel.SeasonalityType.MULTIPLICATIVE;
+
+
+ gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.SKIP : BucketHelpers.GapPolicy.INSERT_ZEROS;
+ metric = randomMetric("the_metric", VALUE_FIELD);
+ mockHisto = PipelineAggregationHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble());
+
+ testValues = new HashMap<>(8);
+
+ for (MovAvgType type : MovAvgType.values()) {
+ for (MetricTarget target : MetricTarget.values()) {
+ setupExpected(type, target);
+ }
+ }
+
+ for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) {
+ for (double value : mockBucket.docValues) {
+ builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject()
+ .field(INTERVAL_FIELD, mockBucket.key)
+ .field(VALUE_FIELD, value).endObject()));
+ }
+ }
+
+ // Used for specially crafted gap tests
+ builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject()
+ .field(INTERVAL_FIELD, 0)
+ .field(GAP_FIELD, 1).endObject()));
+
+ builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject()
+ .field(INTERVAL_FIELD, 49)
+ .field(GAP_FIELD, 1).endObject()));
+
+ for (int i = -10; i < 10; i++) {
+ builders.add(client().prepareIndex("neg_idx", "type").setSource(
+ jsonBuilder().startObject().field(INTERVAL_FIELD, i).field(VALUE_FIELD, 10).endObject()));
+ }
+
+ for (int i = 0; i < 12; i++) {
+ builders.add(client().prepareIndex("double_predict", "type").setSource(
+ jsonBuilder().startObject().field(INTERVAL_FIELD, i).field(VALUE_FIELD, 10).endObject()));
+ }
+
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ /**
+ * Calculates the moving averages for a specific (model, target) tuple based on the previously generated mock histogram.
+ * Computed values are stored in the testValues map.
+ *
+ * @param type The moving average model to use
+ * @param target The document field "target", e.g. _count or a field value
+ */
+ private void setupExpected(MovAvgType type, MetricTarget target) {
+ ArrayList<Double> values = new ArrayList<>(numBuckets);
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+
+ for (PipelineAggregationHelperTests.MockBucket mockBucket : mockHisto) {
+ double metricValue;
+ double[] docValues = mockBucket.docValues;
+
+ // Gaps only apply to metric values, not doc _counts
+ if (mockBucket.count == 0 && target.equals(MetricTarget.VALUE)) {
+ // If there was a gap in doc counts and we are ignoring, just skip this bucket
+ if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+ values.add(null);
+ continue;
+ } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) {
+ // otherwise insert a zero instead of the true value
+ metricValue = 0.0;
+ } else {
+ metricValue = PipelineAggregationHelperTests.calculateMetric(docValues, metric);
+ }
+
+ } else {
+ // If this isn't a gap, or is a _count, just insert the value
+ metricValue = target.equals(MetricTarget.VALUE) ? PipelineAggregationHelperTests.calculateMetric(docValues, metric) : mockBucket.count;
+ }
+
+ window.offer(metricValue);
+ switch (type) {
+ case SIMPLE:
+ values.add(simple(window));
+ break;
+ case LINEAR:
+ values.add(linear(window));
+ break;
+ case EWMA:
+ values.add(ewma(window));
+ break;
+ case HOLT:
+ values.add(holt(window));
+ break;
+ case HOLT_WINTERS:
+ // HW needs at least 2 periods of data to start
+ if (window.size() >= period * 2) {
+ values.add(holtWinters(window));
+ } else {
+ values.add(null);
+ }
+
+ break;
+ }
+
+ }
+ testValues.put(type.toString() + "_" + target.toString(), values);
+ }
+
+ /**
+ * Simple, unweighted moving average
+ *
+ * @param window Window of values to compute movavg for
+ * @return
+ */
+ private double simple(Collection<Double> window) {
+ double movAvg = 0;
+ for (double value : window) {
+ movAvg += value;
+ }
+ movAvg /= window.size();
+ return movAvg;
+ }
+
+ /**
+ * Linearly weighted moving avg
+ *
+ * @param window Window of values to compute movavg for
+ * @return
+ */
+ private double linear(Collection<Double> window) {
+ double avg = 0;
+ long totalWeight = 1;
+ long current = 1;
+
+ for (double value : window) {
+ avg += value * current;
+ totalWeight += current;
+ current += 1;
+ }
+ return avg / totalWeight;
+ }
+
+ /**
+ * Exponentionally weighted (EWMA, Single exponential) moving avg
+ *
+ * @param window Window of values to compute movavg for
+ * @return
+ */
+ private double ewma(Collection<Double> window) {
+ double avg = 0;
+ boolean first = true;
+
+ for (double value : window) {
+ if (first) {
+ avg = value;
+ first = false;
+ } else {
+ avg = (value * alpha) + (avg * (1 - alpha));
+ }
+ }
+ return avg;
+ }
+
+ /**
+ * Holt-Linear (Double exponential) moving avg
+ * @param window Window of values to compute movavg for
+ * @return
+ */
+ private double holt(Collection<Double> window) {
+ double s = 0;
+ double last_s = 0;
+
+ // Trend value
+ double b = 0;
+ double last_b = 0;
+
+ int counter = 0;
+
+ double last;
+ for (double value : window) {
+ last = value;
+ if (counter == 1) {
+ s = value;
+ b = value - last;
+ } else {
+ s = alpha * value + (1.0d - alpha) * (last_s + last_b);
+ b = beta * (s - last_s) + (1 - beta) * last_b;
+ }
+
+ counter += 1;
+ last_s = s;
+ last_b = b;
+ }
+
+ return s + (0 * b) ;
+ }
+
+ /**
+ * Holt winters (triple exponential) moving avg
+ * @param window Window of values to compute movavg for
+ * @return
+ */
+ private double holtWinters(Collection<Double> window) {
+ // Smoothed value
+ double s = 0;
+ double last_s = 0;
+
+ // Trend value
+ double b = 0;
+ double last_b = 0;
+
+ // Seasonal value
+ double[] seasonal = new double[window.size()];
+
+ double padding = seasonalityType.equals(HoltWintersModel.SeasonalityType.MULTIPLICATIVE) ? 0.0000000001 : 0;
+
+ int counter = 0;
+ double[] vs = new double[window.size()];
+ for (double v : window) {
+ vs[counter] = v + padding;
+ counter += 1;
+ }
+
+
+ // Initial level value is average of first season
+ // Calculate the slopes between first and second season for each period
+ for (int i = 0; i < period; i++) {
+ s += vs[i];
+ b += (vs[i] - vs[i + period]) / 2;
+ }
+ s /= (double) period;
+ b /= (double) period;
+ last_s = s;
+ last_b = b;
+
+ // Calculate first seasonal
+ if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
+ Arrays.fill(seasonal, 0.0);
+ } else {
+ for (int i = 0; i < period; i++) {
+ seasonal[i] = vs[i] / s;
+ }
+ }
+
+ for (int i = period; i < vs.length; i++) {
+ if (seasonalityType.equals(HoltWintersModel.SeasonalityType.MULTIPLICATIVE)) {
+ s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+ } else {
+ s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+ }
+
+ b = beta * (s - last_s) + (1 - beta) * last_b;
+
+ if (seasonalityType.equals(HoltWintersModel.SeasonalityType.MULTIPLICATIVE)) {
+ seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+ } else {
+ seasonal[i] = gamma * (vs[i] - (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+ }
+
+ last_s = s;
+ last_b = b;
+ }
+
+ int seasonCounter = (window.size() - 1) - period;
+ if (seasonalityType.equals(HoltWintersModel.SeasonalityType.MULTIPLICATIVE)) {
+ return s + (0 * b) * seasonal[seasonCounter % window.size()];
+ } else {
+ return s + (0 * b) + seasonal[seasonCounter % window.size()];
+ }
+ }
+
+
+ /**
+ * test simple moving average on single value field
+ */
+ @Test
+ public void simpleSingleValuedField() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+ List<Double> expectedCounts = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.COUNT.toString());
+ List<Double> expectedValues = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.VALUE.toString());
+
+ Iterator<? extends Histogram.Bucket> actualIter = buckets.iterator();
+ Iterator<PipelineAggregationHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
+ Iterator<Double> expectedCountsIter = expectedCounts.iterator();
+ Iterator<Double> expectedValuesIter = expectedValues.iterator();
+
+ while (actualIter.hasNext()) {
+ assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter);
+
+ Histogram.Bucket actual = actualIter.next();
+ PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next();
+ Double expectedCount = expectedCountsIter.next();
+ Double expectedValue = expectedValuesIter.next();
+
+ assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key));
+ assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count));
+
+ assertBucketContents(actual, expectedCount, expectedValue);
+ }
+ }
+
+ @Test
+ public void linearSingleValuedField() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new LinearModel.LinearModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new LinearModel.LinearModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+ List<Double> expectedCounts = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.COUNT.toString());
+ List<Double> expectedValues = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.VALUE.toString());
+
+ Iterator<? extends Histogram.Bucket> actualIter = buckets.iterator();
+ Iterator<PipelineAggregationHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
+ Iterator<Double> expectedCountsIter = expectedCounts.iterator();
+ Iterator<Double> expectedValuesIter = expectedValues.iterator();
+
+ while (actualIter.hasNext()) {
+ assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter);
+
+ Histogram.Bucket actual = actualIter.next();
+ PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next();
+ Double expectedCount = expectedCountsIter.next();
+ Double expectedValue = expectedValuesIter.next();
+
+ assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key));
+ assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count));
+
+ assertBucketContents(actual, expectedCount, expectedValue);
+ }
+ }
+
+ @Test
+ public void ewmaSingleValuedField() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new EwmaModel.EWMAModelBuilder().alpha(alpha))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new EwmaModel.EWMAModelBuilder().alpha(alpha))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+ List<Double> expectedCounts = testValues.get(MovAvgType.EWMA.toString() + "_" + MetricTarget.COUNT.toString());
+ List<Double> expectedValues = testValues.get(MovAvgType.EWMA.toString() + "_" + MetricTarget.VALUE.toString());
+
+ Iterator<? extends Histogram.Bucket> actualIter = buckets.iterator();
+ Iterator<PipelineAggregationHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
+ Iterator<Double> expectedCountsIter = expectedCounts.iterator();
+ Iterator<Double> expectedValuesIter = expectedValues.iterator();
+
+ while (actualIter.hasNext()) {
+ assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter);
+
+ Histogram.Bucket actual = actualIter.next();
+ PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next();
+ Double expectedCount = expectedCountsIter.next();
+ Double expectedValue = expectedValuesIter.next();
+
+ assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key));
+ assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count));
+
+ assertBucketContents(actual, expectedCount, expectedValue);
+ }
+ }
+
+ @Test
+ public void holtSingleValuedField() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha).beta(beta))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha).beta(beta))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+ List<Double> expectedCounts = testValues.get(MovAvgType.HOLT.toString() + "_" + MetricTarget.COUNT.toString());
+ List<Double> expectedValues = testValues.get(MovAvgType.HOLT.toString() + "_" + MetricTarget.VALUE.toString());
+
+ Iterator<? extends Histogram.Bucket> actualIter = buckets.iterator();
+ Iterator<PipelineAggregationHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
+ Iterator<Double> expectedCountsIter = expectedCounts.iterator();
+ Iterator<Double> expectedValuesIter = expectedValues.iterator();
+
+ while (actualIter.hasNext()) {
+ assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter);
+
+ Histogram.Bucket actual = actualIter.next();
+ PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next();
+ Double expectedCount = expectedCountsIter.next();
+ Double expectedValue = expectedValuesIter.next();
+
+ assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key));
+ assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count));
+
+ assertBucketContents(actual, expectedCount, expectedValue);
+ }
+ }
+
+ @Test
+ public void HoltWintersValuedField() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new HoltWintersModel.HoltWintersModelBuilder()
+ .alpha(alpha).beta(beta).gamma(gamma).period(period).seasonalityType(seasonalityType))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new HoltWintersModel.HoltWintersModelBuilder()
+ .alpha(alpha).beta(beta).gamma(gamma).period(period).seasonalityType(seasonalityType))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+ List<Double> expectedCounts = testValues.get(MovAvgType.HOLT_WINTERS.toString() + "_" + MetricTarget.COUNT.toString());
+ List<Double> expectedValues = testValues.get(MovAvgType.HOLT_WINTERS.toString() + "_" + MetricTarget.VALUE.toString());
+
+ Iterator<? extends Histogram.Bucket> actualIter = buckets.iterator();
+ Iterator<PipelineAggregationHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
+ Iterator<Double> expectedCountsIter = expectedCounts.iterator();
+ Iterator<Double> expectedValuesIter = expectedValues.iterator();
+
+ while (actualIter.hasNext()) {
+ assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter);
+
+ Histogram.Bucket actual = actualIter.next();
+ PipelineAggregationHelperTests.MockBucket expected = expectedBucketIter.next();
+ Double expectedCount = expectedCountsIter.next();
+ Double expectedValue = expectedValuesIter.next();
+
+ assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key));
+ assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count));
+
+ assertBucketContents(actual, expectedCount, expectedValue);
+ }
+ }
+
+ @Test
+ public void testPredictNegativeKeysAtStart() {
+
+ SearchResponse response = client()
+ .prepareSearch("neg_idx")
+ .setTypes("type")
+ .addAggregation(
+ histogram("histo")
+ .field(INTERVAL_FIELD)
+ .interval(1)
+ .subAggregation(avg("avg").field(VALUE_FIELD))
+ .subAggregation(
+ movingAvg("movavg_values").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy).predict(5).setBucketsPaths("avg"))).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(25));
+
+ for (int i = 0; i < 20; i++) {
+ Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((long) bucket.getKey(), equalTo((long) i - 10));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+ Avg avgAgg = bucket.getAggregations().get("avg");
+ assertThat(avgAgg, notNullValue());
+ assertThat(avgAgg.value(), equalTo(10d));
+ SimpleValue movAvgAgg = bucket.getAggregations().get("movavg_values");
+ assertThat(movAvgAgg, notNullValue());
+ assertThat(movAvgAgg.value(), equalTo(10d));
+ }
+
+ for (int i = 20; i < 25; i++) {
+ System.out.println(i);
+ Bucket bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((long) bucket.getKey(), equalTo((long) i - 10));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+ Avg avgAgg = bucket.getAggregations().get("avg");
+ assertThat(avgAgg, nullValue());
+ SimpleValue movAvgAgg = bucket.getAggregations().get("movavg_values");
+ assertThat(movAvgAgg, notNullValue());
+ assertThat(movAvgAgg.value(), equalTo(10d));
+ }
+ }
+
+
+ @Test
+ public void testSizeZeroWindow() {
+ try {
+ client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(0)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+ fail("MovingAvg should not accept a window that is zero");
+
+ } catch (SearchPhaseExecutionException exception) {
+ // All good
+ }
+ }
+
+ @Test
+ public void testBadParent() {
+ try {
+ client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ range("histo").field(INTERVAL_FIELD).addRange(0, 10)
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(0)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+ fail("MovingAvg should not accept non-histogram as parent");
+
+ } catch (SearchPhaseExecutionException exception) {
+ // All good
+ }
+ }
+
+ @Test
+ public void testNegativeWindow() {
+ try {
+ client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(-10)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ ).execute().actionGet();
+ fail("MovingAvg should not accept a window that is negative");
+
+ } catch (SearchPhaseExecutionException exception) {
+ //Throwable rootCause = exception.unwrapCause();
+ //assertThat(rootCause, instanceOf(SearchParseException.class));
+ //assertThat("[window] value must be a positive, non-zero integer. Value supplied was [0] in [movingAvg].", equalTo(exception.getMessage()));
+ }
+ }
+
+ @Test
+ public void testNoBucketsInHistogram() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field("test").interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(0));
+ }
+
+ @Test
+ public void testNoBucketsInHistogramWithPredict() {
+ int numPredictions = randomIntBetween(1,10);
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field("test").interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric")
+ .predict(numPredictions))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat(buckets.size(), equalTo(0));
+ }
+
+ @Test
+ public void testZeroPrediction() {
+ try {
+ client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .predict(0)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+ fail("MovingAvg should not accept a prediction size that is zero");
+
+ } catch (SearchPhaseExecutionException exception) {
+ // All Good
+ }
+ }
+
+ @Test
+ public void testNegativePrediction() {
+ try {
+ client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(randomMetric("the_metric", VALUE_FIELD))
+ .subAggregation(movingAvg("movavg_counts")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .predict(-10)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+ fail("MovingAvg should not accept a prediction size that is negative");
+
+ } catch (SearchPhaseExecutionException exception) {
+ // All Good
+ }
+ }
+
+ /**
+ * This test uses the "gap" dataset, which is simply a doc at the beginning and end of
+ * the INTERVAL_FIELD range. These docs have a value of 1 in GAP_FIELD.
+ * This test verifies that large gaps don't break things, and that the mov avg roughly works
+ * in the correct manner (checks direction of change, but not actual values)
+ */
+ @Test
+ public void testGiantGap() {
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(min("the_metric").field(GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50));
+
+ double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value();
+ assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0));
+
+ double currentValue;
+ for (int i = 1; i < 49; i++) {
+ SimpleValue current = buckets.get(i).getAggregations().get("movavg_values");
+ if (current != null) {
+ currentValue = current.value();
+
+ // Since there are only two values in this test, at the beginning and end, the moving average should
+ // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing
+ // without actually verifying the computed values. Should work for all types of moving avgs and
+ // gap policies
+ assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0));
+ lastValue = currentValue;
+ }
+ }
+
+
+ SimpleValue current = buckets.get(49).getAggregations().get("movavg_values");
+ assertThat(current, notNullValue());
+ currentValue = current.value();
+
+ if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+ // if we are ignoring, movavg could go up (holt) or stay the same (simple, linear, ewma)
+ assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0));
+ } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) {
+ // If we insert zeros, this should always increase the moving avg since the last bucket has a real value
+ assertThat(Double.compare(lastValue, currentValue), equalTo(-1));
+ }
+ }
+
+ /**
+ * Big gap, but with prediction at the end.
+ */
+ @Test
+ public void testGiantGapWithPredict() {
+ int numPredictions = randomIntBetween(1, 10);
+
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(min("the_metric").field(GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric")
+ .predict(numPredictions))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions));
+
+
+ double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value();
+ assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0));
+
+ double currentValue;
+ for (int i = 1; i < 49; i++) {
+ SimpleValue current = buckets.get(i).getAggregations().get("movavg_values");
+ if (current != null) {
+ currentValue = current.value();
+
+ // Since there are only two values in this test, at the beginning and end, the moving average should
+ // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing
+ // without actually verifying the computed values. Should work for all types of moving avgs and
+ // gap policies
+ assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0));
+ lastValue = currentValue;
+ }
+ }
+
+ SimpleValue current = buckets.get(49).getAggregations().get("movavg_values");
+ assertThat(current, notNullValue());
+ currentValue = current.value();
+
+ if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+ // if we are ignoring, movavg could go up (holt) or stay the same (simple, linear, ewma)
+ assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0));
+ } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) {
+ // If we insert zeros, this should always increase the moving avg since the last bucket has a real value
+ assertThat(Double.compare(lastValue, currentValue), equalTo(-1));
+ }
+
+ // Now check predictions
+ for (int i = 50; i < 50 + numPredictions; i++) {
+ // Unclear at this point which direction the predictions will go, just verify they are
+ // not null, and that we don't have the_metric anymore
+ assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue());
+ assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue());
+ }
+ }
+
+ /**
+ * This test filters the "gap" data so that the first doc is excluded. This leaves a long stretch of empty
+ * buckets until the final bucket. The moving avg should be zero up until the last bucket, and should work
+ * regardless of mov avg type or gap policy.
+ */
+ @Test
+ public void testLeftGap() {
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ filter("filtered").filter(new RangeQueryBuilder(INTERVAL_FIELD).from(1)).subAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(randomMetric("the_metric", GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalFilter filtered = response.getAggregations().get("filtered");
+ assertThat(filtered, notNullValue());
+ assertThat(filtered.getName(), equalTo("filtered"));
+
+ InternalHistogram<Bucket> histo = filtered.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50));
+
+ double lastValue = 0;
+
+ double currentValue;
+ for (int i = 0; i < 50; i++) {
+ SimpleValue current = buckets.get(i).getAggregations().get("movavg_values");
+ if (current != null) {
+ currentValue = current.value();
+
+ assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0));
+ lastValue = currentValue;
+ }
+ }
+ }
+
+ @Test
+ public void testLeftGapWithPredict() {
+ int numPredictions = randomIntBetween(1, 10);
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ filter("filtered").filter(new RangeQueryBuilder(INTERVAL_FIELD).from(1)).subAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(randomMetric("the_metric", GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric")
+ .predict(numPredictions))
+ ))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalFilter filtered = response.getAggregations().get("filtered");
+ assertThat(filtered, notNullValue());
+ assertThat(filtered.getName(), equalTo("filtered"));
+
+ InternalHistogram<Bucket> histo = filtered.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions));
+
+
+ double lastValue = 0;
+
+ double currentValue;
+ for (int i = 0; i < 50; i++) {
+ SimpleValue current = buckets.get(i).getAggregations().get("movavg_values");
+ if (current != null) {
+ currentValue = current.value();
+
+ assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0));
+ lastValue = currentValue;
+ }
+ }
+
+ // Now check predictions
+ for (int i = 50; i < 50 + numPredictions; i++) {
+ // Unclear at this point which direction the predictions will go, just verify they are
+ // not null, and that we don't have the_metric anymore
+ assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue());
+ assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue());
+ }
+ }
+
+ /**
+ * This test filters the "gap" data so that the last doc is excluded. This leaves a long stretch of empty
+ * buckets after the first bucket.
+ */
+ @Test
+ public void testRightGap() {
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ filter("filtered").filter(new RangeQueryBuilder(INTERVAL_FIELD).to(1)).subAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(randomMetric("the_metric", GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalFilter filtered = response.getAggregations().get("filtered");
+ assertThat(filtered, notNullValue());
+ assertThat(filtered.getName(), equalTo("filtered"));
+
+ InternalHistogram<Bucket> histo = filtered.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50));
+
+
+ SimpleValue current = buckets.get(0).getAggregations().get("movavg_values");
+ assertThat(current, notNullValue());
+
+ double lastValue = current.value();
+
+ double currentValue;
+ for (int i = 1; i < 50; i++) {
+ current = buckets.get(i).getAggregations().get("movavg_values");
+ if (current != null) {
+ currentValue = current.value();
+
+ assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0));
+ lastValue = currentValue;
+ }
+ }
+ }
+
+ @Test
+ public void testRightGapWithPredict() {
+ int numPredictions = randomIntBetween(1, 10);
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("gap_type")
+ .addAggregation(
+ filter("filtered").filter(new RangeQueryBuilder(INTERVAL_FIELD).to(1)).subAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L)
+ .subAggregation(randomMetric("the_metric", GAP_FIELD))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(randomModelBuilder())
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric")
+ .predict(numPredictions))
+ ))
+ .execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalFilter filtered = response.getAggregations().get("filtered");
+ assertThat(filtered, notNullValue());
+ assertThat(filtered.getName(), equalTo("filtered"));
+
+ InternalHistogram<Bucket> histo = filtered.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+
+        // If we are skipping, there will only be predictions at the very beginning and no new buckets will be appended
+ if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50));
+ } else {
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions));
+ }
+
+ // Unlike left-gap tests, we cannot check the slope of prediction for right-gap. E.g. linear will
+ // converge on zero, but holt-linear may trend upwards based on the first value
+ // Just check for non-nullness
+ SimpleValue current = buckets.get(0).getAggregations().get("movavg_values");
+ assertThat(current, notNullValue());
+
+        // If we are skipping, there will only be predictions at the very beginning and no new buckets will be appended
+ if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+ // Now check predictions
+ for (int i = 1; i < 1 + numPredictions; i++) {
+ // Unclear at this point which direction the predictions will go, just verify they are
+ // not null
+ assertThat(buckets.get(i).getDocCount(), equalTo(0L));
+ assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue());
+ }
+ } else {
+ // Otherwise we'll have some predictions at the end
+ for (int i = 50; i < 50 + numPredictions; i++) {
+ // Unclear at this point which direction the predictions will go, just verify they are
+ // not null
+ assertThat(buckets.get(i).getDocCount(), equalTo(0L));
+ assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue());
+ }
+ }
+
+ }
+
+ @Test
+ public void testHoltWintersNotEnoughData() {
+ try {
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(10)
+ .modelBuilder(new HoltWintersModel.HoltWintersModelBuilder()
+ .alpha(alpha).beta(beta).gamma(gamma).period(20).seasonalityType(seasonalityType))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ .subAggregation(movingAvg("movavg_values")
+ .window(windowSize)
+ .modelBuilder(new HoltWintersModel.HoltWintersModelBuilder()
+ .alpha(alpha).beta(beta).gamma(gamma).period(20).seasonalityType(seasonalityType))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("the_metric"))
+ ).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ // All good
+ }
+
+ }
+
+ @Test
+ public void testTwoMovAvgsWithPredictions() {
+
+ SearchResponse response = client()
+ .prepareSearch("double_predict")
+ .setTypes("type")
+ .addAggregation(
+ histogram("histo")
+ .field(INTERVAL_FIELD)
+ .interval(1)
+ .subAggregation(avg("avg").field(VALUE_FIELD))
+ .subAggregation(derivative("deriv")
+ .setBucketsPaths("avg").gapPolicy(gapPolicy))
+ .subAggregation(
+ movingAvg("avg_movavg").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy).predict(12).setBucketsPaths("avg"))
+ .subAggregation(
+ movingAvg("deriv_movavg").window(windowSize).modelBuilder(new SimpleModel.SimpleModelBuilder())
+ .gapPolicy(gapPolicy).predict(12).setBucketsPaths("deriv"))
+ ).execute().actionGet();
+
+ assertSearchResponse(response);
+
+ InternalHistogram<Bucket> histo = response.getAggregations().get("histo");
+ assertThat(histo, notNullValue());
+ assertThat(histo.getName(), equalTo("histo"));
+ List<? extends Bucket> buckets = histo.getBuckets();
+ assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(24));
+
+ Bucket bucket = buckets.get(0);
+ assertThat(bucket, notNullValue());
+ assertThat((long) bucket.getKey(), equalTo((long) 0));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ Avg avgAgg = bucket.getAggregations().get("avg");
+ assertThat(avgAgg, notNullValue());
+ assertThat(avgAgg.value(), equalTo(10d));
+
+ SimpleValue movAvgAgg = bucket.getAggregations().get("avg_movavg");
+ assertThat(movAvgAgg, notNullValue());
+ assertThat(movAvgAgg.value(), equalTo(10d));
+
+ Derivative deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, nullValue());
+
+ SimpleValue derivMovAvg = bucket.getAggregations().get("deriv_movavg");
+ assertThat(derivMovAvg, nullValue());
+
+ for (int i = 1; i < 12; i++) {
+ bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((long) bucket.getKey(), equalTo((long) i));
+ assertThat(bucket.getDocCount(), equalTo(1l));
+
+ avgAgg = bucket.getAggregations().get("avg");
+ assertThat(avgAgg, notNullValue());
+ assertThat(avgAgg.value(), equalTo(10d));
+
+ deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, notNullValue());
+ assertThat(deriv.value(), equalTo(0d));
+
+ movAvgAgg = bucket.getAggregations().get("avg_movavg");
+ assertThat(movAvgAgg, notNullValue());
+ assertThat(movAvgAgg.value(), equalTo(10d));
+
+ derivMovAvg = bucket.getAggregations().get("deriv_movavg");
+ assertThat(derivMovAvg, notNullValue());
+ assertThat(derivMovAvg.value(), equalTo(0d));
+ }
+
+ // Predictions
+ for (int i = 12; i < 24; i++) {
+ bucket = buckets.get(i);
+ assertThat(bucket, notNullValue());
+ assertThat((long) bucket.getKey(), equalTo((long) i));
+ assertThat(bucket.getDocCount(), equalTo(0l));
+
+ avgAgg = bucket.getAggregations().get("avg");
+ assertThat(avgAgg, nullValue());
+
+ deriv = bucket.getAggregations().get("deriv");
+ assertThat(deriv, nullValue());
+
+ movAvgAgg = bucket.getAggregations().get("avg_movavg");
+ assertThat(movAvgAgg, notNullValue());
+ assertThat(movAvgAgg.value(), equalTo(10d));
+
+ derivMovAvg = bucket.getAggregations().get("deriv_movavg");
+ assertThat(derivMovAvg, notNullValue());
+ assertThat(derivMovAvg.value(), equalTo(0d));
+ }
+ }
+
+ @Test
+ public void testBadModelParams() {
+ try {
+ SearchResponse response = client()
+ .prepareSearch("idx").setTypes("type")
+ .addAggregation(
+ histogram("histo").field(INTERVAL_FIELD).interval(interval)
+ .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+ .subAggregation(metric)
+ .subAggregation(movingAvg("movavg_counts")
+ .window(10)
+ .modelBuilder(randomModelBuilder(100))
+ .gapPolicy(gapPolicy)
+ .setBucketsPaths("_count"))
+ ).execute().actionGet();
+ } catch (SearchPhaseExecutionException e) {
+ // All good
+ }
+
+ }
+
+
+ private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) {
+ if (!expectedBucketIter.hasNext()) {
+ fail("`expectedBucketIter` iterator ended before `actual` iterator, size mismatch");
+ }
+ if (!expectedCountsIter.hasNext()) {
+ fail("`expectedCountsIter` iterator ended before `actual` iterator, size mismatch");
+ }
+ if (!expectedValuesIter.hasNext()) {
+ fail("`expectedValuesIter` iterator ended before `actual` iterator, size mismatch");
+ }
+ }
+
+ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, Double expectedValue) {
+ // This is a gap bucket
+ SimpleValue countMovAvg = actual.getAggregations().get("movavg_counts");
+ if (expectedCount == null) {
+ assertThat("[_count] movavg is not null", countMovAvg, nullValue());
+ } else if (Double.isNaN(expectedCount)) {
+ assertThat("[_count] movavg should be NaN, but is ["+countMovAvg.value()+"] instead", countMovAvg.value(), equalTo(Double.NaN));
+ } else {
+ assertThat("[_count] movavg is null", countMovAvg, notNullValue());
+ assertThat("[_count] movavg does not match expected ["+countMovAvg.value()+" vs "+expectedCount+"]",
+ countMovAvg.value(), closeTo(expectedCount, 0.1));
+ }
+
+ // This is a gap bucket
+ SimpleValue valuesMovAvg = actual.getAggregations().get("movavg_values");
+ if (expectedValue == null) {
+ assertThat("[value] movavg is not null", valuesMovAvg, Matchers.nullValue());
+ } else if (Double.isNaN(expectedValue)) {
+ assertThat("[value] movavg should be NaN, but is ["+valuesMovAvg.value()+"] instead", valuesMovAvg.value(), equalTo(Double.NaN));
+ } else {
+ assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
+ assertThat("[value] movavg does not match expected ["+valuesMovAvg.value()+" vs "+expectedValue+"]",
+ valuesMovAvg.value(), closeTo(expectedValue, 0.1));
+ }
+ }
+
+ private MovAvgModelBuilder randomModelBuilder() {
+ return randomModelBuilder(0);
+ }
+
+ private MovAvgModelBuilder randomModelBuilder(double padding) {
+ int rand = randomIntBetween(0,3);
+
+        // HoltWinters is excluded from random generation, because its "cold start" behavior makes
+ // randomized testing too tricky. Should probably add dedicated, randomized tests just for HoltWinters,
+ // which can compensate for the idiosyncrasies
+ switch (rand) {
+ case 0:
+ return new SimpleModel.SimpleModelBuilder();
+ case 1:
+ return new LinearModel.LinearModelBuilder();
+ case 2:
+ return new EwmaModel.EWMAModelBuilder().alpha(alpha + padding);
+ case 3:
+ return new HoltLinearModel.HoltLinearModelBuilder().alpha(alpha + padding).beta(beta + padding);
+ default:
+ return new SimpleModel.SimpleModelBuilder();
+ }
+ }
+
+ private ValuesSourceMetricsAggregationBuilder randomMetric(String name, String field) {
+ int rand = randomIntBetween(0,3);
+
+ switch (rand) {
+ case 0:
+ return min(name).field(field);
+ case 2:
+ return max(name).field(field);
+ case 3:
+ return avg(name).field(field);
+ default:
+ return avg(name).field(field);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java
new file mode 100644
index 0000000000..0bd9711c7e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgUnitTests.java
@@ -0,0 +1,586 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.pipeline.moving.avg;
+
+import com.google.common.collect.EvictingQueue;
+
+import org.elasticsearch.search.aggregations.pipeline.movavg.models.*;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import static org.hamcrest.Matchers.equalTo;
+
+import org.junit.Test;
+
+import java.util.Arrays;
+
+public class MovAvgUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSimpleMovAvgModel() {
+ MovAvgModel model = new SimpleModel();
+
+ int numValues = randomIntBetween(1, 100);
+ int windowSize = randomIntBetween(1, 50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < numValues; i++) {
+
+ double randValue = randomDouble();
+ double expected = 0;
+
+ window.offer(randValue);
+
+ for (double value : window) {
+ expected += value;
+ }
+ expected /= window.size();
+
+ double actual = model.next(window);
+ assertThat(Double.compare(expected, actual), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testSimplePredictionModel() {
+ MovAvgModel model = new SimpleModel();
+
+ int windowSize = randomIntBetween(1, 50);
+ int numPredictions = randomIntBetween(1,50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < windowSize; i++) {
+ window.offer(randomDouble());
+ }
+ double actual[] = model.predict(window, numPredictions);
+
+ double expected[] = new double[numPredictions];
+ for (int i = 0; i < numPredictions; i++) {
+ for (double value : window) {
+ expected[i] += value;
+ }
+ expected[i] /= window.size();
+ window.offer(expected[i]);
+ }
+
+ for (int i = 0; i < numPredictions; i++) {
+ assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testLinearMovAvgModel() {
+ MovAvgModel model = new LinearModel();
+
+ int numValues = randomIntBetween(1, 100);
+ int windowSize = randomIntBetween(1, 50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < numValues; i++) {
+ double randValue = randomDouble();
+ window.offer(randValue);
+
+ double avg = 0;
+ long totalWeight = 1;
+ long current = 1;
+
+ for (double value : window) {
+ avg += value * current;
+ totalWeight += current;
+ current += 1;
+ }
+ double expected = avg / totalWeight;
+ double actual = model.next(window);
+ assertThat(Double.compare(expected, actual), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testLinearPredictionModel() {
+ MovAvgModel model = new LinearModel();
+
+ int windowSize = randomIntBetween(1, 50);
+ int numPredictions = randomIntBetween(1,50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < windowSize; i++) {
+ window.offer(randomDouble());
+ }
+ double actual[] = model.predict(window, numPredictions);
+ double expected[] = new double[numPredictions];
+
+ for (int i = 0; i < numPredictions; i++) {
+ double avg = 0;
+ long totalWeight = 1;
+ long current = 1;
+
+ for (double value : window) {
+ avg += value * current;
+ totalWeight += current;
+ current += 1;
+ }
+ expected[i] = avg / totalWeight;
+ window.offer(expected[i]);
+ }
+
+ for (int i = 0; i < numPredictions; i++) {
+ assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testEWMAMovAvgModel() {
+ double alpha = randomDouble();
+ MovAvgModel model = new EwmaModel(alpha);
+
+ int numValues = randomIntBetween(1, 100);
+ int windowSize = randomIntBetween(1, 50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < numValues; i++) {
+ double randValue = randomDouble();
+ window.offer(randValue);
+
+ double avg = 0;
+ boolean first = true;
+
+ for (double value : window) {
+ if (first) {
+ avg = value;
+ first = false;
+ } else {
+ avg = (value * alpha) + (avg * (1 - alpha));
+ }
+ }
+ double expected = avg;
+ double actual = model.next(window);
+ assertThat(Double.compare(expected, actual), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testEWMAPredictionModel() {
+ double alpha = randomDouble();
+ MovAvgModel model = new EwmaModel(alpha);
+
+ int windowSize = randomIntBetween(1, 50);
+ int numPredictions = randomIntBetween(1,50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < windowSize; i++) {
+ window.offer(randomDouble());
+ }
+ double actual[] = model.predict(window, numPredictions);
+ double expected[] = new double[numPredictions];
+
+ for (int i = 0; i < numPredictions; i++) {
+ double avg = 0;
+ boolean first = true;
+
+ for (double value : window) {
+ if (first) {
+ avg = value;
+ first = false;
+ } else {
+ avg = (value * alpha) + (avg * (1 - alpha));
+ }
+ }
+ expected[i] = avg;
+ window.offer(expected[i]);
+ }
+
+ for (int i = 0; i < numPredictions; i++) {
+ assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testHoltLinearMovAvgModel() {
+ double alpha = randomDouble();
+ double beta = randomDouble();
+ MovAvgModel model = new HoltLinearModel(alpha, beta);
+
+ int numValues = randomIntBetween(1, 100);
+ int windowSize = randomIntBetween(1, 50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < numValues; i++) {
+ double randValue = randomDouble();
+ window.offer(randValue);
+
+ double s = 0;
+ double last_s = 0;
+
+ // Trend value
+ double b = 0;
+ double last_b = 0;
+ int counter = 0;
+
+ double last;
+ for (double value : window) {
+ last = value;
+ if (counter == 1) {
+ s = value;
+ b = value - last;
+ } else {
+ s = alpha * value + (1.0d - alpha) * (last_s + last_b);
+ b = beta * (s - last_s) + (1 - beta) * last_b;
+ }
+
+ counter += 1;
+ last_s = s;
+ last_b = b;
+ }
+
+ double expected = s + (0 * b) ;
+ double actual = model.next(window);
+ assertThat(Double.compare(expected, actual), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testHoltLinearPredictionModel() {
+ double alpha = randomDouble();
+ double beta = randomDouble();
+ MovAvgModel model = new HoltLinearModel(alpha, beta);
+
+ int windowSize = randomIntBetween(1, 50);
+ int numPredictions = randomIntBetween(1, 50);
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < windowSize; i++) {
+ window.offer(randomDouble());
+ }
+ double actual[] = model.predict(window, numPredictions);
+ double expected[] = new double[numPredictions];
+
+ double s = 0;
+ double last_s = 0;
+
+ // Trend value
+ double b = 0;
+ double last_b = 0;
+ int counter = 0;
+
+ double last;
+ for (double value : window) {
+ last = value;
+ if (counter == 1) {
+ s = value;
+ b = value - last;
+ } else {
+ s = alpha * value + (1.0d - alpha) * (last_s + last_b);
+ b = beta * (s - last_s) + (1 - beta) * last_b;
+ }
+
+ counter += 1;
+ last_s = s;
+ last_b = b;
+ }
+
+ for (int i = 0; i < numPredictions; i++) {
+ expected[i] = s + (i * b);
+ assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+ }
+ }
+
+ @Test
+ public void testHoltWintersMultiplicativePadModel() {
+ double alpha = randomDouble();
+ double beta = randomDouble();
+ double gamma = randomDouble();
+ int period = randomIntBetween(1,10);
+ MovAvgModel model = new HoltWintersModel(alpha, beta, gamma, period, HoltWintersModel.SeasonalityType.MULTIPLICATIVE, true);
+
+ int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
+
+ EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+ for (int i = 0; i < windowSize; i++) {
+ window.offer(randomDouble());
+ }
+
+ // Smoothed value
+ double s = 0;
+ double last_s = 0;
+
+ // Trend value
+ double b = 0;
+ double last_b = 0;
+
+ // Seasonal value
+ double[] seasonal = new double[windowSize];
+
+ int counter = 0;
+ double[] vs = new double[windowSize];
+ for (double v : window) {
+ vs[counter] = v + 0.0000000001;
+ counter += 1;
+ }
+
+
+ // Initial level value is average of first season
+ // Calculate the slopes between first and second season for each period
+ for (int i = 0; i < period; i++) {
+ s += vs[i];
+ b += (vs[i] - vs[i + period]) / 2;
+ }
+ s /= (double) period;
+ b /= (double) period;
+ last_s = s;
+ last_b = b;
+
+ // Calculate first seasonal
+ if (Double.compare(s, 0.0) == 0 || Double.compare(s, -0.0) == 0) {
+ Arrays.fill(seasonal, 0.0);
+ } else {
+ for (int i = 0; i < period; i++) {
+ seasonal[i] = vs[i] / s;
+ }
+ }
+
+ for (int i = period; i < vs.length; i++) {
+ s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+ b = beta * (s - last_s) + (1 - beta) * last_b;
+
+ //seasonal[i] = gamma * (vs[i] / s) + ((1 - gamma) * seasonal[i - period]);
+ seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+ last_s = s;
+ last_b = b;
+ }
+
+ int seasonCounter = (windowSize - 1) - period;
+ double expected = s + (0 * b) * seasonal[seasonCounter % windowSize];;
+ double actual = model.next(window);
+ assertThat(Double.compare(expected, actual), equalTo(0));
+ }
+
+ @Test
+    // Verifies HoltWintersModel.predict() with MULTIPLICATIVE seasonality and padding
+    // enabled, by re-deriving the expected triple-exponential-smoothing values from the
+    // same random window and comparing each prediction bit-for-bit (Double.compare == 0).
+    public void testHoltWintersMultiplicativePadPredictionModel() {
+        double alpha = randomDouble();
+        double beta = randomDouble();
+        double gamma = randomDouble();
+        int period = randomIntBetween(1,10);
+        MovAvgModel model = new HoltWintersModel(alpha, beta, gamma, period, HoltWintersModel.SeasonalityType.MULTIPLICATIVE, true);
+
+        int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
+        int numPredictions = randomIntBetween(1, 50);
+
+        EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+        for (int i = 0; i < windowSize; i++) {
+            window.offer(randomDouble());
+        }
+        double actual[] = model.predict(window, numPredictions);
+        double expected[] = new double[numPredictions];
+
+        // Smoothed value
+        double s = 0;
+        double last_s = 0;
+
+        // Trend value
+        double b = 0;
+        double last_b = 0;
+
+        // Seasonal value
+        double[] seasonal = new double[windowSize];
+
+        int counter = 0;
+        double[] vs = new double[windowSize];
+        for (double v : window) {
+            // pad == true: add a tiny epsilon so the multiplicative divisions below
+            // cannot divide by zero when a sample is 0 — presumably mirrors the
+            // model's own padding behaviour (TODO confirm against HoltWintersModel)
+            vs[counter] = v + 0.0000000001;
+            counter += 1;
+        }
+
+
+        // Initial level value is average of first season
+        // Calculate the slopes between first and second season for each period
+        for (int i = 0; i < period; i++) {
+            s += vs[i];
+            b += (vs[i] - vs[i + period]) / 2;
+        }
+        s /= (double) period;
+        b /= (double) period;
+        last_s = s;
+        last_b = b;
+
+        for (int i = 0; i < period; i++) {
+            // Calculate first seasonal
+            seasonal[i] = vs[i] / s;
+        }
+
+        // Smooth over the rest of the window: level, then trend, then seasonal index,
+        // using the previous iteration's level/trend (last_s/last_b) as the model does.
+        for (int i = period; i < vs.length; i++) {
+            s = alpha * (vs[i] / seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+            b = beta * (s - last_s) + (1 - beta) * last_b;
+
+            //seasonal[i] = gamma * (vs[i] / s) + ((1 - gamma) * seasonal[i - period]);
+            seasonal[i] = gamma * (vs[i] / (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+            last_s = s;
+            last_b = b;
+        }
+
+        int seasonCounter = (windowSize - 1) - period;
+
+        for (int i = 0; i < numPredictions; i++) {
+
+            // NOTE(review): due to precedence only the trend term (i * b) is multiplied
+            // by the seasonal index here; classic multiplicative Holt-Winters forecasts
+            // (s + i*b) * seasonal. This presumably mirrors the production model's
+            // formula — confirm against HoltWintersModel before changing either side.
+            expected[i] = s + (i * b) * seasonal[seasonCounter % windowSize];
+            assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+            seasonCounter += 1;
+        }
+
+    }
+
+    @Test
+    // Verifies a single step of HoltWintersModel.next() with ADDITIVE seasonality and
+    // no padding, by re-deriving the expected level/trend/seasonal state from the same
+    // random window and comparing the one-step forecast bit-for-bit.
+    public void testHoltWintersAdditiveModel() {
+        double alpha = randomDouble();
+        double beta = randomDouble();
+        double gamma = randomDouble();
+        int period = randomIntBetween(1,10);
+        MovAvgModel model = new HoltWintersModel(alpha, beta, gamma, period, HoltWintersModel.SeasonalityType.ADDITIVE, false);
+
+        int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
+
+        EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+        for (int i = 0; i < windowSize; i++) {
+            window.offer(randomDouble());
+        }
+
+        // Smoothed value
+        double s = 0;
+        double last_s = 0;
+
+        // Trend value
+        double b = 0;
+        double last_b = 0;
+
+        // Seasonal value
+        double[] seasonal = new double[windowSize];
+
+        int counter = 0;
+        double[] vs = new double[windowSize];
+        for (double v : window) {
+            vs[counter] = v;
+            counter += 1;
+        }
+
+
+        // Initial level value is average of first season
+        // Calculate the slopes between first and second season for each period
+        for (int i = 0; i < period; i++) {
+            s += vs[i];
+            b += (vs[i] - vs[i + period]) / 2;
+        }
+        s /= (double) period;
+        b /= (double) period;
+        last_s = s;
+        last_b = b;
+
+        for (int i = 0; i < period; i++) {
+            // Calculate first seasonal
+            // NOTE(review): additive Holt-Winters usually seeds the seasonal index with
+            // vs[i] - s; this division presumably mirrors the production model — confirm.
+            seasonal[i] = vs[i] / s;
+        }
+
+        // Smooth over the rest of the window: level, then trend, then seasonal index,
+        // using the previous iteration's level/trend (last_s/last_b) as the model does.
+        for (int i = period; i < vs.length; i++) {
+            s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+            b = beta * (s - last_s) + (1 - beta) * last_b;
+
+            //seasonal[i] = gamma * (vs[i] / s) + ((1 - gamma) * seasonal[i - period]);
+            seasonal[i] = gamma * (vs[i] - (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+            last_s = s;
+            last_b = b;
+        }
+
+        int seasonCounter = (windowSize - 1) - period;
+        // One-step-ahead forecast: level + zero trend steps + seasonal index.
+        // (Fixed a stray empty statement: the original line ended with ';;'.)
+        double expected = s + (0 * b) + seasonal[seasonCounter % windowSize];
+        double actual = model.next(window);
+        assertThat(Double.compare(expected, actual), equalTo(0));
+    }
+
+    @Test
+    // Verifies HoltWintersModel.predict() with ADDITIVE seasonality and no padding,
+    // by re-deriving the expected triple-exponential-smoothing values from the same
+    // random window and comparing each prediction bit-for-bit (Double.compare == 0).
+    public void testHoltWintersAdditivePredictionModel() {
+        double alpha = randomDouble();
+        double beta = randomDouble();
+        double gamma = randomDouble();
+        int period = randomIntBetween(1,10);
+        MovAvgModel model = new HoltWintersModel(alpha, beta, gamma, period, HoltWintersModel.SeasonalityType.ADDITIVE, false);
+
+        int windowSize = randomIntBetween(period * 2, 50); // HW requires at least two periods of data
+        int numPredictions = randomIntBetween(1, 50);
+
+        EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+        for (int i = 0; i < windowSize; i++) {
+            window.offer(randomDouble());
+        }
+        double actual[] = model.predict(window, numPredictions);
+        double expected[] = new double[numPredictions];
+
+        // Smoothed value
+        double s = 0;
+        double last_s = 0;
+
+        // Trend value
+        double b = 0;
+        double last_b = 0;
+
+        // Seasonal value
+        double[] seasonal = new double[windowSize];
+
+        int counter = 0;
+        double[] vs = new double[windowSize];
+        for (double v : window) {
+            vs[counter] = v;
+            counter += 1;
+        }
+
+
+        // Initial level value is average of first season
+        // Calculate the slopes between first and second season for each period
+        for (int i = 0; i < period; i++) {
+            s += vs[i];
+            b += (vs[i] - vs[i + period]) / 2;
+        }
+        s /= (double) period;
+        b /= (double) period;
+        last_s = s;
+        last_b = b;
+
+        for (int i = 0; i < period; i++) {
+            // Calculate first seasonal
+            // NOTE(review): additive Holt-Winters usually seeds the seasonal index with
+            // vs[i] - s; this division presumably mirrors the production model — confirm.
+            seasonal[i] = vs[i] / s;
+        }
+
+        // Smooth over the rest of the window: level, then trend, then seasonal index,
+        // using the previous iteration's level/trend (last_s/last_b) as the model does.
+        for (int i = period; i < vs.length; i++) {
+            s = alpha * (vs[i] - seasonal[i - period]) + (1.0d - alpha) * (last_s + last_b);
+            b = beta * (s - last_s) + (1 - beta) * last_b;
+
+            //seasonal[i] = gamma * (vs[i] / s) + ((1 - gamma) * seasonal[i - period]);
+            seasonal[i] = gamma * (vs[i] - (last_s + last_b )) + (1 - gamma) * seasonal[i - period];
+            last_s = s;
+            last_b = b;
+        }
+
+        int seasonCounter = (windowSize - 1) - period;
+
+        for (int i = 0; i < numPredictions; i++) {
+
+            // i-step-ahead forecast: level + i trend steps + seasonal index.
+            expected[i] = s + (i * b) + seasonal[seasonCounter % windowSize];
+            assertThat(Double.compare(expected[i], actual[i]), equalTo(0));
+            seasonCounter += 1;
+        }
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/support/MissingValuesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/support/MissingValuesTests.java
new file mode 100644
index 0000000000..cd72d7f069
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/support/MissingValuesTests.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
+import org.apache.lucene.index.RandomAccessOrds;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.index.fielddata.AbstractRandomAccessOrds;
+import org.elasticsearch.index.fielddata.MultiGeoPointValues;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Tests for {@code MissingValues.replaceMissing}: for each doc-values flavour
+ * (binary, ordinals, longs, doubles, geo-points) the wrapper must leave docs that
+ * already have values untouched and expose exactly one configured "missing" value
+ * for docs that have none.
+ */
+public class MissingValuesTests extends ElasticsearchTestCase {
+
+    // Binary values: docs with 0-3 random values; empty docs must yield the missing BytesRef.
+    public void testMissingBytes() {
+        final int numDocs = TestUtil.nextInt(random(), 1, 100);
+        final BytesRef[][] values = new BytesRef[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            values[i] = new BytesRef[random().nextInt(4)]; // 0-3 values; 0 exercises the missing path
+            for (int j = 0; j < values[i].length; ++j) {
+                values[i][j] = new BytesRef(RandomStrings.randomAsciiOfLength(random(), 2));
+            }
+            Arrays.sort(values[i]); // keep per-doc values sorted, matching the Sorted* contract
+        }
+        // Minimal in-memory stub backed by the arrays above.
+        SortedBinaryDocValues asBinaryValues = new SortedBinaryDocValues() {
+
+            int i = -1; // current doc id, set via setDocument()
+
+            @Override
+            public BytesRef valueAt(int index) {
+                return values[i][index];
+            }
+
+            @Override
+            public void setDocument(int docId) {
+                i = docId;
+            }
+
+            @Override
+            public int count() {
+                return values[i].length;
+            }
+        };
+        final BytesRef missing = new BytesRef(RandomStrings.randomAsciiOfLength(random(), 2));
+        SortedBinaryDocValues withMissingReplaced = MissingValues.replaceMissing(asBinaryValues, missing);
+        for (int i = 0; i < numDocs; ++i) {
+            withMissingReplaced.setDocument(i);
+            if (values[i].length > 0) {
+                // docs with values: passed through unchanged
+                assertEquals(values[i].length, withMissingReplaced.count());
+                for (int j = 0; j < values[i].length; ++j) {
+                    assertEquals(values[i][j], withMissingReplaced.valueAt(j));
+                }
+            } else {
+                // empty docs: exactly one value, the configured missing one
+                assertEquals(1, withMissingReplaced.count());
+                assertEquals(missing, withMissingReplaced.valueAt(0));
+            }
+        }
+    }
+
+    // Ordinal values: checks both a missing value that already exists in the dictionary
+    // (value count unchanged) and one that does not (value count grows by one).
+    public void testMissingOrds() {
+        final int numDocs = TestUtil.nextInt(random(), 1, 100);
+        final int numOrds = TestUtil.nextInt(random(), 1, 10);
+
+        final Set<BytesRef> valueSet = new HashSet<>();
+        while (valueSet.size() < numOrds) {
+            valueSet.add(new BytesRef(RandomStrings.randomAsciiOfLength(random(), 5)));
+        }
+        final BytesRef[] values = valueSet.toArray(new BytesRef[numOrds]);
+        Arrays.sort(values); // ord order must match value order
+
+        // Per doc, draw a strictly increasing random subset of ords: seed with 0..len-1,
+        // then walk backwards re-drawing each ord below its successor's value.
+        final int[][] ords = new int[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            ords[i] = new int[random().nextInt(numOrds)];
+            for (int j = 0; j < ords[i].length; ++j) {
+                ords[i][j] = j;
+            }
+            for (int j = ords[i].length - 1; j >= 0; --j) {
+                final int maxOrd = j == ords[i].length - 1 ? numOrds : ords[i][j+1];
+                ords[i][j] = TestUtil.nextInt(random(), ords[i][j], maxOrd - 1);
+            }
+        }
+        // Minimal in-memory stub backed by the ords/values above.
+        RandomAccessOrds asRandomAccessOrds = new AbstractRandomAccessOrds() {
+
+            int i = -1; // current doc id
+
+            @Override
+            public void doSetDocument(int docID) {
+                i = docID;
+            }
+
+            @Override
+            public BytesRef lookupOrd(long ord) {
+                return values[(int) ord];
+            }
+
+            @Override
+            public long getValueCount() {
+                return values.length;
+            }
+
+            @Override
+            public long ordAt(int index) {
+                return ords[i][index];
+            }
+
+            @Override
+            public int cardinality() {
+                return ords[i].length;
+            }
+        };
+
+        final BytesRef existingMissing = RandomPicks.randomFrom(random(), values);
+        final BytesRef missingMissing = new BytesRef(RandomStrings.randomAsciiOfLength(random(), 5));
+
+        for (BytesRef missing : Arrays.asList(existingMissing, missingMissing)) {
+            RandomAccessOrds withMissingReplaced = MissingValues.replaceMissing(asRandomAccessOrds, missing);
+            if (valueSet.contains(missing)) {
+                // missing value already in the dictionary: no new ord is introduced
+                assertEquals(values.length, withMissingReplaced.getValueCount());
+            } else {
+                // brand-new missing value: dictionary grows by exactly one entry
+                assertEquals(values.length + 1, withMissingReplaced.getValueCount());
+            }
+            for (int i = 0; i < numDocs; ++i) {
+                withMissingReplaced.setDocument(i);
+                if (ords[i].length > 0) {
+                    assertEquals(ords[i].length, withMissingReplaced.cardinality());
+                    for (int j = 0; j < ords[i].length; ++j) {
+                        assertEquals(values[ords[i][j]], withMissingReplaced.lookupOrd(withMissingReplaced.ordAt(j)));
+                    }
+                } else {
+                    assertEquals(1, withMissingReplaced.cardinality());
+                    assertEquals(missing, withMissingReplaced.lookupOrd(withMissingReplaced.ordAt(0)));
+                }
+            }
+        }
+    }
+
+    // Long values: docs with 0-3 random ints; empty docs must yield the missing long.
+    public void testMissingLongs() {
+        final int numDocs = TestUtil.nextInt(random(), 1, 100);
+        final int[][] values = new int[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            values[i] = new int[random().nextInt(4)];
+            for (int j = 0; j < values[i].length; ++j) {
+                values[i][j] = randomInt();
+            }
+            Arrays.sort(values[i]);
+        }
+        SortedNumericDocValues asNumericValues = new SortedNumericDocValues() {
+
+            int i = -1; // current doc id
+
+            @Override
+            public long valueAt(int index) {
+                return values[i][index];
+            }
+
+            @Override
+            public void setDocument(int docId) {
+                i = docId;
+            }
+
+            @Override
+            public int count() {
+                return values[i].length;
+            }
+        };
+        final long missing = randomInt();
+        SortedNumericDocValues withMissingReplaced = MissingValues.replaceMissing(asNumericValues, missing);
+        for (int i = 0; i < numDocs; ++i) {
+            withMissingReplaced.setDocument(i);
+            if (values[i].length > 0) {
+                assertEquals(values[i].length, withMissingReplaced.count());
+                for (int j = 0; j < values[i].length; ++j) {
+                    assertEquals(values[i][j], withMissingReplaced.valueAt(j));
+                }
+            } else {
+                assertEquals(1, withMissingReplaced.count());
+                assertEquals(missing, withMissingReplaced.valueAt(0));
+            }
+        }
+    }
+
+    // Double values: same shape as testMissingLongs but over SortedNumericDoubleValues.
+    public void testMissingDoubles() {
+        final int numDocs = TestUtil.nextInt(random(), 1, 100);
+        final double[][] values = new double[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            values[i] = new double[random().nextInt(4)];
+            for (int j = 0; j < values[i].length; ++j) {
+                values[i][j] = randomDouble();
+            }
+            Arrays.sort(values[i]);
+        }
+        SortedNumericDoubleValues asNumericValues = new SortedNumericDoubleValues() {
+
+            int i = -1; // current doc id
+
+            @Override
+            public double valueAt(int index) {
+                return values[i][index];
+            }
+
+            @Override
+            public void setDocument(int docId) {
+                i = docId;
+            }
+
+            @Override
+            public int count() {
+                return values[i].length;
+            }
+        };
+        // NOTE(review): the missing value is an integral long (widened to double below);
+        // a randomDouble() would cover non-integral missing values too — confirm intent.
+        final long missing = randomInt();
+        SortedNumericDoubleValues withMissingReplaced = MissingValues.replaceMissing(asNumericValues, missing);
+        for (int i = 0; i < numDocs; ++i) {
+            withMissingReplaced.setDocument(i);
+            if (values[i].length > 0) {
+                assertEquals(values[i].length, withMissingReplaced.count());
+                for (int j = 0; j < values[i].length; ++j) {
+                    assertEquals(values[i][j], withMissingReplaced.valueAt(j), 0);
+                }
+            } else {
+                assertEquals(1, withMissingReplaced.count());
+                assertEquals(missing, withMissingReplaced.valueAt(0), 0);
+            }
+        }
+    }
+
+    // Geo points: docs with 0-3 random points; empty docs must yield the missing point.
+    public void testMissingGeoPoints() {
+        final int numDocs = TestUtil.nextInt(random(), 1, 100);
+        final GeoPoint[][] values = new GeoPoint[numDocs][];
+        for (int i = 0; i < numDocs; ++i) {
+            values[i] = new GeoPoint[random().nextInt(4)];
+            for (int j = 0; j < values[i].length; ++j) {
+                values[i][j] = new GeoPoint(randomDouble() * 90, randomDouble() * 180);
+            }
+        }
+        MultiGeoPointValues asGeoValues = new MultiGeoPointValues() {
+
+            int i = -1; // current doc id
+
+            @Override
+            public GeoPoint valueAt(int index) {
+                return values[i][index];
+            }
+
+            @Override
+            public void setDocument(int docId) {
+                i = docId;
+            }
+
+            @Override
+            public int count() {
+                return values[i].length;
+            }
+        };
+        final GeoPoint missing = new GeoPoint(randomDouble() * 90, randomDouble() * 180);
+        MultiGeoPointValues withMissingReplaced = MissingValues.replaceMissing(asGeoValues, missing);
+        for (int i = 0; i < numDocs; ++i) {
+            withMissingReplaced.setDocument(i);
+            if (values[i].length > 0) {
+                assertEquals(values[i].length, withMissingReplaced.count());
+                for (int j = 0; j < values[i].length; ++j) {
+                    assertEquals(values[i][j], withMissingReplaced.valueAt(j));
+                }
+            } else {
+                assertEquals(1, withMissingReplaced.count());
+                assertEquals(missing, withMissingReplaced.valueAt(0));
+            }
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java
new file mode 100644
index 0000000000..98ebe55e7b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/support/PathTests.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+/**
+ * Tests for {@code AggregationPath.parse}: malformed token expressions must throw
+ * {@link AggregationExecutionException}, and the equivalent "name.key" / "name[key]"
+ * spellings must parse to the same {@code PathElement}s.
+ */
+public class PathTests extends ElasticsearchTestCase {
+
+    @Test
+    public void testInvalidPaths() throws Exception {
+        assertInvalidPath("[foo]", "brackets at the beginning of the token expression");
+        assertInvalidPath("foo[bar", "open brackets without closing at the token expression");
+        assertInvalidPath("foo[", "open bracket at the end of the token expression");
+        assertInvalidPath("foo[]", "empty brackets in the token expression");
+        assertInvalidPath("foo[bar]baz", "brackets not enclosing at the end of the token expression");
+        assertInvalidPath(".foo", "dot separator at the beginning of the token expression");
+        assertInvalidPath("foo.", "dot separator at the end of the token expression");
+    }
+
+    @Test
+    public void testValidPaths() throws Exception {
+        assertValidPath("foo>bar", tokens().add("foo").add("bar"));
+        assertValidPath("foo.bar", tokens().add("foo", "bar"));
+        assertValidPath("foo[bar]", tokens().add("foo", "bar"));
+        assertValidPath("foo[bar]>baz", tokens().add("foo", "bar").add("baz"));
+        assertValidPath("foo[bar]>baz[qux]", tokens().add("foo", "bar").add("baz", "qux"));
+        assertValidPath("foo[bar]>baz.qux", tokens().add("foo", "bar").add("baz", "qux"));
+        assertValidPath("foo.bar>baz.qux", tokens().add("foo.bar").add("baz", "qux"));
+        assertValidPath("foo.bar>baz[qux]", tokens().add("foo.bar").add("baz", "qux"));
+    }
+
+    // Parsing must fail with an AggregationExecutionException; 'reason' documents why.
+    private void assertInvalidPath(String path, String reason) {
+        try {
+            AggregationPath.parse(path);
+            fail("Expected parsing path [" + path + "] to fail - " + reason);
+        } catch (AggregationExecutionException aee) {
+            // expected
+        }
+    }
+
+    // Parsing must succeed and yield exactly the expected path elements, in order.
+    private void assertValidPath(String path, Tokens tokenz) {
+        AggregationPath.PathElement[] tokens = tokenz.toArray();
+        AggregationPath p = AggregationPath.parse(path);
+        assertThat(p.getPathElements().size(), equalTo(tokens.length));
+        for (int i = 0; i < p.getPathElements().size(); i++) {
+            AggregationPath.PathElement t1 = p.getPathElements().get(i);
+            AggregationPath.PathElement t2 = tokens[i];
+            assertThat(t1, equalTo(t2));
+        }
+    }
+
+    private static Tokens tokens() {
+        return new Tokens();
+    }
+
+    // Small builder for the expected PathElement list.
+    private static class Tokens {
+
+        private List<AggregationPath.PathElement> tokens = new ArrayList<>();
+
+        Tokens add(String name) {
+            tokens.add(new AggregationPath.PathElement(name, name, null));
+            return this;
+        }
+
+        // Randomly pick between the "name.key" and "name[key]" spellings — both must
+        // be equivalent. Uses the test framework's seeded randomBoolean() instead of
+        // Math.random() so a failure is reproducible from the reported seed.
+        Tokens add(String name, String key) {
+            if (randomBoolean()) {
+                tokens.add(new AggregationPath.PathElement(name + "." + key, name, key));
+            } else {
+                tokens.add(new AggregationPath.PathElement(name + "[" + key + "]", name, key));
+            }
+            return this;
+        }
+
+        AggregationPath.PathElement[] toArray() {
+            return tokens.toArray(new AggregationPath.PathElement[tokens.size()]);
+        }
+
+
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
new file mode 100644
index 0000000000..c9e9d4f078
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/support/ScriptValuesTests.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.support;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.script.LeafSearchScript;
+import org.elasticsearch.search.aggregations.support.values.ScriptBytesValues;
+import org.elasticsearch.search.aggregations.support.values.ScriptDoubleValues;
+import org.elasticsearch.search.aggregations.support.values.ScriptLongValues;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Map;
+
+/**
+ * Tests for the Script*Values adapters (ScriptLongValues, ScriptDoubleValues,
+ * ScriptBytesValues): values produced by a script — as null, a single object,
+ * an array or a collection — must all surface identically through the
+ * doc-values-style count()/valueAt() API.
+ */
+public class ScriptValuesTests extends ElasticsearchTestCase {
+
+    // LeafSearchScript stub that serves pre-baked per-doc values, randomly wrapped
+    // as null / bare object / array / List to exercise every unwrapping path.
+    private static class FakeSearchScript implements LeafSearchScript {
+
+        private final Object[][] values;
+        int index; // current doc id, set via setDocument()
+
+        FakeSearchScript(Object[][] values) {
+            this.values = values;
+            index = -1;
+        }
+
+        @Override
+        public void setNextVar(String name, Object value) {
+        }
+
+        @Override
+        public Object run() {
+            // Script values are supposed to support null, single values, arrays and collections
+            final Object[] values = this.values[index];
+            if (values.length <= 1 && randomBoolean()) {
+                return values.length == 0 ? null : values[0];
+            }
+            return randomBoolean() ? values : Arrays.asList(values);
+        }
+
+        @Override
+        public Object unwrap(Object value) {
+            throw new UnsupportedOperationException(); // not exercised by these tests
+        }
+
+        @Override
+        public void setScorer(Scorer scorer) {
+        }
+
+        @Override
+        public void setDocument(int doc) {
+            index = doc;
+        }
+
+        @Override
+        public void setSource(Map<String, Object> source) {
+        }
+
+        @Override
+        public float runAsFloat() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public long runAsLong() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public double runAsDouble() {
+            throw new UnsupportedOperationException();
+        }
+
+    }
+
+    @Test
+    public void longs() {
+        // Covariant array: a Long[][] held as Object[][] so FakeSearchScript can return it untyped.
+        final Object[][] values = new Long[randomInt(10)][];
+        for (int i = 0; i < values.length; ++i) {
+            Long[] longs = new Long[randomInt(8)];
+            for (int j = 0; j < longs.length; ++j) {
+                longs[j] = randomLong();
+            }
+            Arrays.sort(longs); // adapters expose values in sorted order
+            values[i] = longs;
+        }
+        FakeSearchScript script = new FakeSearchScript(values);
+        ScriptLongValues scriptValues = new ScriptLongValues(script);
+        for (int i = 0; i < values.length; ++i) {
+            scriptValues.setDocument(i);
+            assertEquals(values[i].length, scriptValues.count());
+            for (int j = 0; j < values[i].length; ++j) {
+                assertEquals(values[i][j], scriptValues.valueAt(j));
+            }
+        }
+    }
+
+    @Test
+    public void doubles() {
+        final Object[][] values = new Double[randomInt(10)][];
+        for (int i = 0; i < values.length; ++i) {
+            Double[] doubles = new Double[randomInt(8)];
+            for (int j = 0; j < doubles.length; ++j) {
+                doubles[j] = randomDouble();
+            }
+            Arrays.sort(doubles);
+            values[i] = doubles;
+        }
+        FakeSearchScript script = new FakeSearchScript(values);
+        ScriptDoubleValues scriptValues = new ScriptDoubleValues(script);
+        for (int i = 0; i < values.length; ++i) {
+            scriptValues.setDocument(i);
+            assertEquals(values[i].length, scriptValues.count());
+            for (int j = 0; j < values[i].length; ++j) {
+                assertEquals(values[i][j], scriptValues.valueAt(j));
+            }
+        }
+    }
+
+    @Test
+    public void bytes() {
+        final String[][] values = new String[randomInt(10)][];
+        for (int i = 0; i < values.length; ++i) {
+            String[] strings = new String[randomInt(8)];
+            for (int j = 0; j < strings.length; ++j) {
+                // NOTE(review): uses getRandom() while the sibling tests use random() — both
+                // come from the randomized-test framework, but consider unifying the style.
+                strings[j] = RandomStrings.randomAsciiOfLength(getRandom(), 5);
+            }
+            Arrays.sort(strings);
+            values[i] = strings;
+        }
+        FakeSearchScript script = new FakeSearchScript(values);
+        ScriptBytesValues scriptValues = new ScriptBytesValues(script);
+        for (int i = 0; i < values.length; ++i) {
+            scriptValues.setDocument(i);
+            assertEquals(values[i].length, scriptValues.count());
+            for (int j = 0; j < values[i].length; ++j) {
+                // strings surface as BytesRef through the bytes adapter
+                assertEquals(new BytesRef(values[i][j]), scriptValues.valueAt(j));
+            }
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
new file mode 100644
index 0000000000..46d1e0440c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+
+/**
+ * This test basically verifies that search with a single shard active (cause we indexed to it) and other
+ * shards possibly not active at all (cause they haven't allocated) will still work.
+ */
+public class SearchWhileCreatingIndexTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    @Slow
+    // Index without pre-creating the index: the index request itself triggers creation.
+    public void testIndexCausesIndexCreation() throws Exception {
+        searchWhileCreatingIndex(false, 1); // 1 replica in our default...
+    }
+
+    @Test
+    @Slow
+    public void testNoReplicas() throws Exception {
+        searchWhileCreatingIndex(true, 0);
+    }
+
+    @Test
+    @Slow
+    public void testOneReplica() throws Exception {
+        searchWhileCreatingIndex(true, 1);
+    }
+
+    @Test
+    @Slow
+    public void testTwoReplicas() throws Exception {
+        searchWhileCreatingIndex(true, 2);
+    }
+
+    // Core scenario: index a single doc while the index (and its replicas) may still be
+    // allocating, then repeatedly search on primary and on a randomized preference until
+    // the cluster reaches GREEN, asserting the doc is always visible.
+    private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) throws Exception {
+
+        // make sure we have enough nodes to guaranty default QUORUM consistency.
+        // TODO: add a smarter choice based on actual consistency (when that is randomized)
+        int shardsNo = numberOfReplicas + 1;
+        int neededNodes = shardsNo <= 2 ? 1 : shardsNo / 2 + 1;
+        internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(neededNodes, shardsNo));
+        for (int i = 0; i < 20; i++) {
+            logger.info("running iteration {}", i);
+            if (createIndex) {
+                createIndex("test");
+            }
+            client().prepareIndex("test", "type1", randomAsciiOfLength(5)).setSource("field", "test").execute().actionGet();
+            RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
+            assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); // at least one shard should be successful when refreshing
+
+            // we want to make sure that while recovery happens, and a replica gets recovered, its properly refreshed
+            ClusterHealthStatus status = ClusterHealthStatus.RED;
+            while (status != ClusterHealthStatus.GREEN) {
+                // first, verify that search on the primary search works
+                SearchResponse searchResponse = client().prepareSearch("test").setPreference("_primary").setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+                assertHitCount(searchResponse, 1);
+                // now, let it go to primary or replica, though in a randomized re-creatable manner
+                String preference = randomAsciiOfLength(5);
+                Client client = client();
+                searchResponse = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+                if (searchResponse.getHits().getTotalHits() != 1) {
+                    // diagnostic path: log hit counts after an explicit refresh and after
+                    // waiting for green, then fail via the original response's assertion
+                    refresh();
+                    SearchResponse searchResponseAfterRefresh = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+                    logger.info("hits count mismatch on any shard search failed, post explicit refresh hits are {}", searchResponseAfterRefresh.getHits().getTotalHits());
+                    ensureGreen();
+                    SearchResponse searchResponseAfterGreen = client.prepareSearch("test").setPreference(preference).setQuery(QueryBuilders.termQuery("field", "test")).execute().actionGet();
+                    logger.info("hits count mismatch on any shard search failed, post explicit wait for green hits are {}", searchResponseAfterGreen.getHits().getTotalHits());
+                    assertHitCount(searchResponse, 1);
+                }
+                assertHitCount(searchResponse, 1);
+                status = client().admin().cluster().prepareHealth("test").get().getStatus();
+                // grow the cluster so all replicas can allocate and the loop can reach GREEN
+                internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + 1);
+            }
+            cluster().wipeIndices("test");
+        }
+    }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
new file mode 100644
index 0000000000..589c9bb230
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest {
+
+// @LuceneTestCase.AwaitsFix(bugUrl = "problem with search searching on 1 shard (no replica), " +
+// "and between getting the cluster state to do the search, and executing it, " +
+// "the shard has fully relocated (moved from started on one node, to fully started on another node)")
+// ^^ the current impl of the test handles this case gracefully since it can happen with 1 replica as well
+// we just make sure if we get a partial result without a failure that the postsearch is ok!
+ @Test
+ @Nightly
+ public void testSearchAndRelocateConcurrently0Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(0);
+ }
+
+ @Test
+ @Nightly
+ public void testSearchAndRelocateConcurrently1Replicas() throws Exception {
+ testSearchAndRelocateConcurrently(1);
+ }
+
+ @Test
+ public void testSearchAndRelocateConcurrentlyRanodmReplicas() throws Exception {
+ testSearchAndRelocateConcurrently(randomIntBetween(0, 1));
+ }
+
+ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throws Exception {
+ final int numShards = between(1, 20);
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settingsBuilder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas))
+ .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet();
+ ensureGreen();
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ final int numDocs = between(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ indexBuilders.add(client().prepareIndex("test", "type", Integer.toString(i))
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21)
+ .endObject().endObject()));
+ }
+ indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]));
+ assertHitCount(client().prepareSearch().get(), (long) (numDocs));
+ final int numIters = scaledRandomIntBetween(5, 20);
+ for (int i = 0; i < numIters; i++) {
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final List<Throwable> thrownExceptions = new CopyOnWriteArrayList<>();
+ final List<Throwable> nonCriticalExceptions = new CopyOnWriteArrayList<>();
+
+ Thread[] threads = new Thread[scaledRandomIntBetween(1, 3)];
+ for (int j = 0; j < threads.length; j++) {
+ threads[j] = new Thread() {
+ @Override
+ public void run() {
+ boolean criticalException = true;
+ try {
+ while (!stop.get()) {
+ SearchResponse sr = client().prepareSearch().setSize(numDocs).get();
+ // if we did not search all shards but had no failures, that is potentially fine
+ // if only the hit-count is wrong. this can happen if the cluster-state is behind when the
+ // request comes in. It's a small window but a known limitation.
+ //
+ criticalException = sr.getTotalShards() == sr.getSuccessfulShards() || sr.getFailedShards() > 0;
+ assertHitCount(sr, (long) (numDocs));
+ criticalException = true;
+ final SearchHits sh = sr.getHits();
+ assertThat("Expected hits to be the same size the actual hits array", sh.getTotalHits(),
+ equalTo((long) (sh.getHits().length)));
+ // this is the more critical check: that the actual hit array does not have a different size than the
+ // actual number of hits.
+ }
+ } catch (SearchPhaseExecutionException ex) {
+ // it's possible that all shards fail if we have a small number of shards.
+ // with replicas this should not happen
+ if (numberOfReplicas == 1 || !ex.getMessage().contains("all shards failed")) {
+ thrownExceptions.add(ex);
+ }
+ } catch (Throwable t) {
+ if (!criticalException) {
+ nonCriticalExceptions.add(t);
+ } else {
+ thrownExceptions.add(t);
+ }
+ }
+ }
+ };
+ }
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].start();
+ }
+ allowNodes("test", between(1, 3));
+ client().admin().cluster().prepareReroute().get();
+ stop.set(true);
+ for (int j = 0; j < threads.length; j++) {
+ threads[j].join();
+ }
+ // this might time out on some machines if they are really busy and you hit lots of throttling
+ ClusterHealthResponse resp = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).setWaitForEvents(Priority.LANGUID).setTimeout("5m").get();
+ assertNoTimeout(resp);
+ if (!thrownExceptions.isEmpty() || !nonCriticalExceptions.isEmpty()) {
+ Client client = client();
+ boolean postSearchOK = true;
+ String verified = "POST SEARCH OK";
+ for (int j = 0; j < 10; j++) {
+ if (client.prepareSearch().get().getHits().getTotalHits() != numDocs) {
+ verified = "POST SEARCH FAIL";
+ postSearchOK = false;
+ break;
+ }
+ }
+ assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, thrownExceptions, Matchers.emptyIterable());
+ // if we hit only non-critical exceptions we only make sure that the post search works
+ logger.info("Non-CriticalExceptions: " + nonCriticalExceptions.toString());
+ assertThat("numberOfReplicas: " + numberOfReplicas + " failed in iteration " + i + ", verification: " + verified, postSearchOK, is(true));
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
new file mode 100644
index 0000000000..e720f449f8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.util.English;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.engine.MockEngineSupport;
+import org.elasticsearch.test.engine.ThrowingLeafReaderWrapper;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.store.MockFSDirectoryService;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow // maybe due to all the logging?
+ @TestLogging("action.search.type:TRACE,index.shard:TRACE")
+ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
+ String mapping = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type").
+ startObject("properties").
+ startObject("test")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject().
+ endObject().
+ endObject()
+ .endObject().string();
+ final double exceptionRate;
+ final double exceptionOnOpenRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ exceptionOnOpenRate = 1.0 / between(5, 100);
+ exceptionRate = 0.0d;
+ } else {
+ exceptionRate = 1.0 / between(5, 100);
+ exceptionOnOpenRate = 0.0d;
+ }
+ } else {
+ exceptionOnOpenRate = 1.0 / between(5, 100);
+ exceptionRate = 1.0 / between(5, 100);
+ }
+ } else {
+ // rarely no exception
+ exceptionRate = 0d;
+ exceptionOnOpenRate = 0d;
+ }
+ final boolean createIndexWithoutErrors = randomBoolean();
+ int numInitialDocs = 0;
+
+ if (createIndexWithoutErrors) {
+ Builder settings = settingsBuilder()
+ .put("index.number_of_replicas", numberOfReplicas());
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ numInitialDocs = between(10, 100);
+ ensureGreen();
+ for (int i = 0; i < numInitialDocs; i++) {
+ client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get();
+ }
+ client().admin().indices().prepareRefresh("test").execute().get();
+ client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get();
+ client().admin().indices().prepareClose("test").execute().get();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder()
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate)
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate));
+ client().admin().indices().prepareOpen("test").execute().get();
+ } else {
+ Builder settings = settingsBuilder()
+ .put("index.number_of_replicas", randomIntBetween(0, 1))
+ .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate)
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ client().admin().indices().prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping).execute().actionGet();
+ }
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get(); // it's OK to timeout here
+ final int numDocs;
+ final boolean expectAllShardsFailed;
+ if (clusterHealthResponse.isTimedOut()) {
+ /* some seeds just won't let you create the index at all and we enter a ping-pong mode
+ * trying one node after another etc. that is ok but we need to make sure we don't wait
+ * forever when indexing documents so we set numDocs = 1 and expect all shards to fail
+ * when we search below.*/
+ logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
+ numDocs = 1;
+ expectAllShardsFailed = true;
+ } else {
+ numDocs = between(10, 100);
+ expectAllShardsFailed = false;
+ }
+ int numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ added[i] = false;
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i)).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+ } catch (ElasticsearchException ex) {
+ }
+
+ }
+ NumShards numShards = getNumShards("test");
+ logger.info("Start Refresh");
+ final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+ final int numSearches = scaledRandomIntBetween(10, 20);
+ // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs - 1);
+ int expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
+ .setSize(expectedResults).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
+ if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
+ assertResultsAndLogOnFailure(expectedResults, searchResponse);
+ }
+ // check match all
+ searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchAllQuery())
+ .setSize(numCreated + numInitialDocs).addSort("_uid", SortOrder.ASC).get();
+ logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
+ if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
+ assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse);
+ }
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("SearchPhaseException: [{}]", ex.getMessage());
+ // if a scheduled refresh or flush fails all shards we see all shards failed here
+ if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) {
+ throw ex;
+ }
+ }
+ }
+
+ if (createIndexWithoutErrors) {
+ // check the index still contains the records that we indexed without errors
+ client().admin().indices().prepareClose("test").execute().get();
+ client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder()
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0)
+ .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0));
+ client().admin().indices().prepareOpen("test").execute().get();
+ ensureGreen();
+ SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, numInitialDocs);
+ }
+ }
+
+ private void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) {
+ if (searchResponse.getHits().getTotalHits() != expectedResults) {
+ StringBuilder sb = new StringBuilder("search result contains [");
+ sb.append(searchResponse.getHits().getTotalHits()).append("] results. expected [").append(expectedResults).append("]");
+ String failMsg = sb.toString();
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
+ .append("] id [").append(hit.id()).append("]");
+ }
+ logger.warn(sb.toString());
+ fail(failMsg);
+ }
+ }
+
+ @Test
+ public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
+ String mapping = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type").
+ startObject("properties").
+ startObject("test")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject().
+ endObject().
+ endObject()
+ .endObject().string();
+ final double lowLevelRate;
+ final double topLevelRate;
+ if (frequently()) {
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ lowLevelRate = 1.0 / between(2, 10);
+ topLevelRate = 0.0d;
+ } else {
+ topLevelRate = 1.0 / between(2, 10);
+ lowLevelRate = 0.0d;
+ }
+ } else {
+ lowLevelRate = 1.0 / between(2, 10);
+ topLevelRate = 1.0 / between(2, 10);
+ }
+ } else {
+ // rarely no exception
+ topLevelRate = 0d;
+ lowLevelRate = 0d;
+ }
+
+ Builder settings = settingsBuilder()
+ .put(indexSettings())
+ .put(MockEngineSupport.READER_WRAPPER_TYPE, RandomExceptionDirectoryReaderWrapper.class.getName())
+ .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
+ .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
+ .put(MockEngineSupport.WRAP_READER_RATIO, 1.0d);
+ logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
+ assertAcked(prepareCreate("test")
+ .setSettings(settings)
+ .addMapping("type", mapping));
+ ensureSearchable();
+ final int numDocs = between(10, 100);
+ int numCreated = 0;
+ boolean[] added = new boolean[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ try {
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "" + i).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
+ if (indexResponse.isCreated()) {
+ numCreated++;
+ added[i] = true;
+ }
+ } catch (ElasticsearchException ex) {
+ }
+ }
+ logger.info("Start Refresh");
+ RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get(); // don't assert on failures here
+ final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
+ logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
+
+ NumShards test = getNumShards("test");
+ final int numSearches = scaledRandomIntBetween(100, 200);
+ // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
+ for (int i = 0; i < numSearches; i++) {
+ try {
+ int docToQuery = between(0, numDocs - 1);
+ int expectedResults = added[docToQuery] ? 1 : 0;
+ logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
+ .setSize(expectedResults).get();
+ logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
+ if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
+ assertResultsAndLogOnFailure(expectedResults, searchResponse);
+ }
+ // check match all
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC).get();
+ logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
+ if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
+ assertResultsAndLogOnFailure(numCreated, searchResponse);
+ }
+
+ } catch (SearchPhaseExecutionException ex) {
+ logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
+ }
+ }
+ }
+
+
+ public static final String EXCEPTION_TOP_LEVEL_RATIO_KEY = "index.engine.exception.ratio.top";
+ public static final String EXCEPTION_LOW_LEVEL_RATIO_KEY = "index.engine.exception.ratio.low";
+
+
+ public static class RandomExceptionDirectoryReaderWrapper extends MockEngineSupport.DirectoryReaderWrapper {
+ private final Settings settings;
+
+ static class ThrowingSubReaderWrapper extends FilterDirectoryReader.SubReaderWrapper implements ThrowingLeafReaderWrapper.Thrower {
+ private final Random random;
+ private final double topLevelRatio;
+ private final double lowLevelRatio;
+
+ ThrowingSubReaderWrapper(Settings settings) {
+ final long seed = settings.getAsLong(SETTING_INDEX_SEED, 0l);
+ this.topLevelRatio = settings.getAsDouble(EXCEPTION_TOP_LEVEL_RATIO_KEY, 0.1d);
+ this.lowLevelRatio = settings.getAsDouble(EXCEPTION_LOW_LEVEL_RATIO_KEY, 0.1d);
+ this.random = new Random(seed);
+ }
+
+ @Override
+ public LeafReader wrap(LeafReader reader) {
+ return new ThrowingLeafReaderWrapper(reader, this);
+ }
+
+ @Override
+ public void maybeThrow(ThrowingLeafReaderWrapper.Flags flag) throws IOException {
+ switch (flag) {
+ case Fields:
+ case TermVectors:
+ case Terms:
+ case TermsEnum:
+ case Intersect:
+ case Norms:
+ case NumericDocValues:
+ case BinaryDocValues:
+ case SortedDocValues:
+ case SortedSetDocValues:
+ if (random.nextDouble() < topLevelRatio) {
+ throw new IOException("Forced top level Exception on [" + flag.name() + "]");
+ }
+ break;
+ case DocsEnum:
+ case DocsAndPositionsEnum:
+ if (random.nextDouble() < lowLevelRatio) {
+ throw new IOException("Forced low level Exception on [" + flag.name() + "]");
+ }
+ break;
+ }
+ }
+
+ @Override
+ public boolean wrapTerms(String field) {
+ return true;
+ }
+ }
+
+ public RandomExceptionDirectoryReaderWrapper(DirectoryReader in, Settings settings) throws IOException {
+ super(in, new ThrowingSubReaderWrapper(settings));
+ this.settings = settings;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+ return new RandomExceptionDirectoryReaderWrapper(in, settings);
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
new file mode 100644
index 0000000000..f75301f8ba
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/basic/TransportSearchFailuresTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.WriteConsistencyLevel;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+public class TransportSearchFailuresTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ logger.info("Start Testing failed search with wrong query");
+ assertAcked(prepareCreate("test", 1, settingsBuilder().put("routing.hash.type", "simple")));
+ ensureYellow();
+
+ NumShards test = getNumShards("test");
+
+ for (int i = 0; i < 100; i++) {
+ index(client(), Integer.toString(i), "test", i);
+ }
+ RefreshResponse refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(test.totalNumShards));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(test.numPrimaries));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ allowNodes("test", 2);
+ assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes(">=2").execute().actionGet().isTimedOut(), equalTo(false));
+
+ logger.info("Running Cluster Health");
+ ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest("test")
+ .waitForYellowStatus().waitForRelocatingShards(0).waitForActiveShards(test.totalNumShards)).actionGet();
+ logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
+ assertThat(clusterHealth.isTimedOut(), equalTo(false));
+ assertThat(clusterHealth.getStatus(), anyOf(equalTo(ClusterHealthStatus.YELLOW), equalTo(ClusterHealthStatus.GREEN)));
+ assertThat(clusterHealth.getActiveShards(), equalTo(test.totalNumShards));
+
+ refreshResponse = client().admin().indices().refresh(refreshRequest("test")).actionGet();
+ assertThat(refreshResponse.getTotalShards(), equalTo(test.totalNumShards));
+ assertThat(refreshResponse.getSuccessfulShards(), equalTo(test.totalNumShards));
+ assertThat(refreshResponse.getFailedShards(), equalTo(0));
+
+ for (int i = 0; i < 5; i++) {
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ }
+
+ logger.info("Done Testing failed search");
+ }
+
+ private void index(Client client, String id, String nameValue, int age) throws IOException {
+ client.index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age)).consistencyLevel(WriteConsistencyLevel.ONE)).actionGet();
+ }
+
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
new file mode 100644
index 0000000000..2f9533671e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/basic/TransportTwoNodesSearchTests.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.basic;
+
+
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.script.ScriptScoreFunctionBuilder;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Set;
+
+import static org.elasticsearch.action.search.SearchType.*;
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+public class TransportTwoNodesSearchTests extends ElasticsearchIntegrationTest {
+
+ // Run with zero replicas so the shard-count assertions below only need to
+ // reason about primaries.
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ // Convenience overload: create the index with the suite's default shard count.
+ private Set<String> prepareData() throws Exception {
+ return prepareData(-1);
+ }
+
+ // Creates the "test" index (with numShards primaries when numShards > 0,
+ // otherwise the suite default), indexes docs with ids "0".."99", refreshes,
+ // and returns the full expected id set (sorted, for easy set comparison).
+ private Set<String> prepareData(int numShards) throws Exception {
+ Set<String> fullExpectedIds = Sets.newTreeSet();
+
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put(indexSettings())
+ .put("routing.hash.type", "simple");
+
+ if (numShards > 0) {
+ settingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numShards);
+ }
+
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder))
+ .actionGet();
+
+ ensureGreen();
+ for (int i = 0; i < 100; i++) {
+ index(Integer.toString(i), "test", i);
+ fullExpectedIds.add(Integer.toString(i));
+ }
+ refresh();
+ return fullExpectedIds;
+ }
+
+ // Indexes one doc into "test"/"type1"; the doc's "age" drives the term
+ // frequency of "multi" (see source(...)), which the scoring tests rely on.
+ private void index(String id, String nameValue, int age) throws IOException {
+ client().index(Requests.indexRequest("test").type("type1").id(id).source(source(id, nameValue, age))).actionGet();
+ }
+
+ // Document source: "multi" repeats nameValue (age + 1) times, "nid" is the
+ // numeric form of the id (used for numeric sorting in the tests below).
+ private XContentBuilder source(String id, String nameValue, int age) throws IOException {
+ StringBuilder multi = new StringBuilder().append(nameValue);
+ for (int i = 0; i < age; i++) {
+ multi.append(" ").append(nameValue);
+ }
+ return jsonBuilder().startObject()
+ .field("id", id)
+ .field("nid", Integer.parseInt(id))
+ .field("name", nameValue + id)
+ .field("age", age)
+ .field("multi", multi.toString())
+ .endObject();
+ }
+
+ // Scrolls through all 100 docs with DFS_QUERY_THEN_FETCH and verifies the
+ // exact score order. Ages start at 1024 so every doc gets the same 8-bit
+ // length norm, making tf (and therefore doc age) the only score component —
+ // valid only if dfs distributes idf correctly, which is what this tests.
+ @Test
+ public void testDfsQueryThenFetch() throws Exception {
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put(indexSettings())
+ .put("routing.hash.type", "simple");
+ client().admin().indices().create(createIndexRequest("test")
+ .settings(settingsBuilder))
+ .actionGet();
+ ensureGreen();
+
+ // we need to have age (ie number of repeats of "test" term) high enough
+ // to produce the same 8-bit norm for all docs here, so that
+ // the tf is basically the entire score (assuming idf is fixed, which
+ // it should be if dfs is working correctly)
+ for (int i = 1024; i < 1124; i++) {
+ index(Integer.toString(i - 1024), "test", i);
+ }
+ refresh();
+
+ int total = 0;
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_THEN_FETCH).setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).setScroll(TimeValue.timeValueSeconds(30)).get();
+ while (true) {
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ SearchHit[] hits = searchResponse.getHits().hits();
+ if (hits.length == 0) {
+ break; // finished
+ }
+ // highest age scores highest, so ids arrive in descending order: 99, 98, ...
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHit hit = hits[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "] -> " + hit.explanation().toString(), hit.id(), equalTo(Integer.toString(100 - total - i - 1)));
+ }
+ total += hits.length;
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
+ }
+ clearScroll(searchResponse.getScrollId());
+ assertEquals(100, total);
+ }
+
+ // Same DFS scroll as above but sorted by "age" ascending, so ids arrive in
+ // increasing order 0..99 regardless of score.
+ @Test
+ public void testDfsQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ int total = 0;
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_THEN_FETCH).setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).addSort("age", SortOrder.ASC).setScroll(TimeValue.timeValueSeconds(30)).get();
+ while (true) {
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ SearchHit[] hits = searchResponse.getHits().hits();
+ if (hits.length == 0) {
+ break; // finished
+ }
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHit hit = hits[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(total + i)));
+ }
+ total += hits.length;
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
+ }
+ clearScroll(searchResponse.getScrollId());
+ assertEquals(100, total);
+ }
+
+ // QUERY_THEN_FETCH scroll sorted by numeric "nid" descending: ids arrive as
+ // 99, 98, ... down to 0, 60 hits per page.
+ @Test
+ public void testQueryThenFetch() throws Exception {
+ prepareData();
+
+ int total = 0;
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(QUERY_THEN_FETCH).setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).addSort("nid", SortOrder.DESC).setScroll(TimeValue.timeValueSeconds(30)).get();
+ while (true) {
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ SearchHit[] hits = searchResponse.getHits().hits();
+ if (hits.length == 0) {
+ break; // finished
+ }
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHit hit = hits[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(100 - total - i - 1)));
+ }
+ total += hits.length;
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
+ }
+ clearScroll(searchResponse.getScrollId());
+ assertEquals(100, total);
+ }
+
+ // Pages through all docs with from/size (0-60, then 60-100) and checks that
+ // the union of both pages is exactly the full expected id set — i.e. no doc
+ + // is skipped or duplicated across the from boundary.
+ @Test
+ public void testQueryThenFetchWithFrom() throws Exception {
+ Set<String> fullExpectedIds = prepareData();
+
+ SearchSourceBuilder source = searchSource()
+ .query(matchAllQuery())
+ .explain(true);
+
+ Set<String> collectedIds = Sets.newTreeSet();
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source.from(0).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60));
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ searchResponse = client().search(searchRequest("test").source(source.from(60).size(60)).searchType(QUERY_THEN_FETCH)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(40));
+ for (int i = 0; i < 40; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ collectedIds.add(hit.id());
+ }
+ assertThat(collectedIds, equalTo(fullExpectedIds));
+ }
+
+ // Default search type, sorted by "age" ascending: scroll must return ids in
+ // increasing order 0..99.
+ @Test
+ public void testQueryThenFetchWithSort() throws Exception {
+ prepareData();
+
+ int total = 0;
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(termQuery("multi", "test")).setSize(60).setExplain(true).addSort("age", SortOrder.ASC).setScroll(TimeValue.timeValueSeconds(30)).get();
+ while (true) {
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ SearchHit[] hits = searchResponse.getHits().hits();
+ if (hits.length == 0) {
+ break; // finished
+ }
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHit hit = hits[i];
+ assertThat(hit.explanation(), notNullValue());
+ assertThat("id[" + hit.id() + "]", hit.id(), equalTo(Integer.toString(total + i)));
+ }
+ total += hits.length;
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueSeconds(30)).get();
+ }
+ clearScroll(searchResponse.getScrollId());
+ assertEquals(100, total);
+ }
+
+ // QUERY_AND_FETCH over 3 shards returns size-per-shard hits (20 * 3 = 60) in
+ // no guaranteed order; we only verify hit uniqueness and full coverage via
+ // the expectedIds set, draining the remainder through scroll.
+ @Test
+ public void testQueryAndFetch() throws Exception {
+ prepareData(3);
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source).searchType(QUERY_AND_FETCH).scroll(new Scroll(timeValueMinutes(10)))).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ // we can't really check the order here, since its query and fetch, and not controlling distribution
+ // Set.remove returns a primitive boolean; matching the auto-boxed result with
+ // notNullValue() always passed. equalTo(true) actually asserts the id was new.
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+
+ do {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("10m").get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, lessThanOrEqualTo(40));
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ // we don't do perfect sorting when it comes to scroll with Query+Fetch
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+ } while (searchResponse.getHits().getHits().length > 0);
+ clearScroll(searchResponse.getScrollId());
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ // DFS_QUERY_AND_FETCH variant of testQueryAndFetch: 3 shards, 20 hits each,
+ // order not guaranteed; uniqueness and full coverage are checked via expectedIds.
+ @Test
+ public void testDfsQueryAndFetch() throws Exception {
+ prepareData(3);
+
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true);
+
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < 100; i++) {
+ expectedIds.add(Integer.toString(i));
+ }
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(DFS_QUERY_AND_FETCH).setScroll("10m").setSource(source.buildAsBytes()).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(60)); // 20 per shard
+ for (int i = 0; i < 60; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat(hit.explanation(), notNullValue());
+ // Set.remove returns a primitive boolean; matching the auto-boxed result with
+ // notNullValue() always passed. equalTo(true) actually asserts the id was new.
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+
+ do {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll("10m").get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, lessThanOrEqualTo(40));
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ // we don't do perfect sorting when it comes to scroll with Query+Fetch
+ assertThat("make sure we don't have duplicates", expectedIds.remove(hit.id()), equalTo(true));
+ }
+ } while (searchResponse.getHits().hits().length > 0);
+ clearScroll(searchResponse.getScrollId());
+ assertThat("make sure we got all [" + expectedIds + "]", expectedIds.size(), equalTo(0));
+ }
+
+ // Aggregations sanity check: a global agg wrapping a filter must count all
+ // 100 docs, while the top-level "test1" filter (name == "test1") matches
+ // exactly the single doc whose id is "1".
+ @Test
+ public void testSimpleFacets() throws Exception {
+ prepareData();
+
+ SearchSourceBuilder sourceBuilder = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(0).size(20).explain(true)
+ .aggregation(AggregationBuilders.global("global").subAggregation(
+ AggregationBuilders.filter("all").filter(termQuery("multi", "test"))))
+ .aggregation(AggregationBuilders.filter("test1").filter(termQuery("name", "test1")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(sourceBuilder)).actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(100l));
+
+ Global global = searchResponse.getAggregations().get("global");
+ Filter all = global.getAggregations().get("all");
+ Filter test1 = searchResponse.getAggregations().get("test1");
+ assertThat(test1.getDocCount(), equalTo(1l));
+ assertThat(all.getDocCount(), equalTo(100l));
+ }
+
+ // A malformed query body must make the search call throw with a
+ // SearchPhaseExecutionException as the root cause.
+ @Test
+ public void testFailedSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ NumShards test = getNumShards("test");
+
+ logger.info("Start Testing failed search with wrong query");
+ try {
+ SearchResponse searchResponse = client().search(searchRequest("test").source("{ xxx }".getBytes(Charsets.UTF_8))).actionGet();
+ // NOTE(review): the three assertions below appear unreachable — search()
+ // is expected to throw before they run (the fail() confirms that intent).
+ assertThat(searchResponse.getTotalShards(), equalTo(test.numPrimaries));
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(0));
+ assertThat(searchResponse.getFailedShards(), equalTo(test.numPrimaries));
+ fail("search should fail");
+ } catch (ElasticsearchException e) {
+ assertThat(e.unwrapCause(), instanceOf(SearchPhaseExecutionException.class));
+ // all is well
+ }
+ logger.info("Done Testing failed search");
+ }
+
+ // from=1000 is past the last of the 100 docs: every search type must return
+ // zero hits on all primaries without any shard failures.
+ @Test
+ public void testFailedSearchWithWrongFrom() throws Exception {
+ prepareData();
+
+ NumShards test = getNumShards("test");
+
+ logger.info("Start Testing failed search with wrong from");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("multi", "test"))
+ .from(1000).size(20).explain(true);
+ SearchResponse response = client().search(searchRequest("test").searchType(DFS_QUERY_AND_FETCH).source(source)).actionGet();
+ assertThat(response.getHits().hits().length, equalTo(0));
+ assertThat(response.getTotalShards(), equalTo(test.numPrimaries));
+ assertThat(response.getSuccessfulShards(), equalTo(test.numPrimaries));
+ assertThat(response.getFailedShards(), equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ // was DFS_QUERY_AND_FETCH, duplicating the first request above and leaving
+ // QUERY_AND_FETCH uncovered; use QUERY_AND_FETCH so all four types are exercised
+ response = client().search(searchRequest("test").searchType(QUERY_AND_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ response = client().search(searchRequest("test").searchType(DFS_QUERY_THEN_FETCH).source(source)).actionGet();
+ assertNoFailures(response);
+ assertThat(response.getHits().hits().length, equalTo(0));
+
+ logger.info("Done Testing failed search");
+ }
+
+ // In a multi-search, one bad request (bogus score mode) must fail in
+ // isolation: the other two requests still succeed with their expected hits.
+ @Test
+ public void testFailedMultiSearchWithWrongQuery() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+ // Add function score with a bogus score mode
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1)).scoreMode("foobar")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ // match_all returns the default page size of 10 hits
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+ logger.info("Done Testing failed search");
+ }
+
+
+ // Variant of the test above where the bad request is a function_score query
+ // with a script score function that is missing its script; the failure must
+ // again be isolated to the first response.
+ @Test
+ public void testFailedMultiSearchWithWrongQuery_withFunctionScore() throws Exception {
+ prepareData();
+
+ logger.info("Start Testing failed multi search with a wrong query");
+
+ MultiSearchResponse response = client().prepareMultiSearch()
+ // Add custom score query with missing script
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("nid", 1)).add(new ScriptScoreFunctionBuilder())))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("nid", 2)))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+ assertThat(response.getResponses().length, equalTo(3));
+ assertThat(response.getResponses()[0].getFailureMessage(), notNullValue());
+
+ assertThat(response.getResponses()[1].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[1].getResponse().getHits().hits().length, equalTo(1));
+
+ assertThat(response.getResponses()[2].getFailureMessage(), nullValue());
+ assertThat(response.getResponses()[2].getResponse().getHits().hits().length, equalTo(10));
+
+ logger.info("Done Testing failed search");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTest.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTest.java
new file mode 100644
index 0000000000..42ecd47ca0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.builder;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+// Unit test for SearchSourceBuilder's _source include/exclude handling
+// (regression test for issue #6632): each fetchSource(...) overload must
+// surface its arguments under _source.includes / _source.excludes.
+public class SearchSourceBuilderTest extends ElasticsearchTestCase {
+
+ SearchSourceBuilder builder = new SearchSourceBuilder();
+
+ @Test // issue #6632
+ public void testThatSearchSourceBuilderIncludesExcludesAreAppliedCorrectly() throws Exception {
+ builder.fetchSource("foo", null);
+ assertIncludes(builder, "foo");
+ assertExcludes(builder);
+
+ builder.fetchSource(null, "foo");
+ assertIncludes(builder);
+ assertExcludes(builder, "foo");
+
+ builder.fetchSource(null, new String[]{"foo"});
+ assertIncludes(builder);
+ assertExcludes(builder, "foo");
+
+ builder.fetchSource(new String[]{"foo"}, null);
+ assertIncludes(builder, "foo");
+ assertExcludes(builder);
+
+ builder.fetchSource("foo", "bar");
+ assertIncludes(builder, "foo");
+ assertExcludes(builder, "bar");
+
+ builder.fetchSource(new String[]{"foo"}, new String[]{"bar", "baz"});
+ assertIncludes(builder, "foo");
+ assertExcludes(builder, "bar", "baz");
+ }
+
+ // Asserts _source.includes contains exactly the given elements.
+ private void assertIncludes(SearchSourceBuilder builder, String... elems) throws IOException {
+ assertFieldValues(builder, "includes", elems);
+ }
+
+ // Asserts _source.excludes contains exactly the given elements.
+ private void assertExcludes(SearchSourceBuilder builder, String... elems) throws IOException {
+ assertFieldValues(builder, "excludes", elems);
+ }
+
+ // Parses the builder's JSON and checks _source.<fieldName> is a list with
+ // exactly the expected elements (size + membership).
+ @SuppressWarnings("unchecked") // JSON parser yields untyped maps/lists
+ private void assertFieldValues(SearchSourceBuilder builder, String fieldName, String... elems) throws IOException {
+ Map<String, Object> map = getSourceMap(builder);
+
+ assertThat(map, hasKey(fieldName));
+ assertThat(map.get(fieldName), is(instanceOf(List.class)));
+ List<String> castedList = (List<String>) map.get(fieldName);
+ assertThat(castedList, hasSize(elems.length));
+ assertThat(castedList, hasItems(elems));
+ }
+
+ // Renders the builder to JSON, parses it back, and returns the _source submap.
+ @SuppressWarnings("unchecked") // JSON parser yields untyped maps
+ private Map<String, Object> getSourceMap(SearchSourceBuilder builder) throws IOException {
+ Map<String, Object> data = JsonXContent.jsonXContent.createParser(builder.toString()).mapAndClose();
+ assertThat(data, hasKey("_source"));
+ return (Map<String, Object>) data.get("_source");
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java
new file mode 100644
index 0000000000..f7b0489977
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchBwcTests.java
@@ -0,0 +1,225 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.child;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.startsWith;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ChildQuerySearchBwcTests extends ChildQuerySearchTests {
+
+ // Force indices to be created as 1.6.0 indices so this subclass reruns the
+ // parent/child suite through the backwards-compatibility code paths.
+ @Override
+ public Settings indexSettings() {
+ return settings(Version.V_1_6_0).put(super.indexSettings()).build();
+ }
+
+ // Overrides the base-class test: on pre-2.0 indices a self-referential
+ // _parent mapping was still accepted (even though its results were broken),
+ // so here the mapping creation must succeed rather than be rejected.
+ // Added @Test for consistency with every other test method in this class;
+ // without it the JUnit4 runner would silently skip the override.
+ @Test
+ public void testSelfReferentialIsForbidden() {
+ // we allowed this, but it was actually broken. The has_child/has_parent results were sometimes wrong...
+ assertAcked(prepareCreate("test").addMapping("type", "_parent", "type=type"));
+ }
+
+ // Adds the _parent mapping only AFTER a parent doc has already been indexed,
+ // then verifies has_child / has_parent work both as queries and post-filters
+ // on the late-mapped child type.
+ @Test
+ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ refresh();
+ // _parent mapping is introduced only now, after the parent doc exists
+ assertAcked(client().admin()
+ .indices()
+ .preparePutMapping("test")
+ .setType("child")
+ .setSource("_parent", "type=parent"));
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ client().admin().indices().prepareRefresh().get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, parentId);
+
+ searchResponse = client().prepareSearch("test")
+ .setPostFilter(hasParentQuery("parent", termQuery("p_field", "1")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "c1");
+ }
+
+ // Explain support for has_child / has_parent: search-level explanations are
+ // still the "not implemented yet..." placeholder, and the explain API only
+ + // returns a bare "1.0 =" score breakdown.
+ @Test
+ public void testExplainUsage() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("not implemented yet..."));
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ // TODO: improve test once explanations are actually implemented
+ assertThat(explainResponse.getExplanation().toString(), startsWith("1.0 ="));
+ }
+
+ // Regression test (muted, see AwaitsFix): _parent field data must only be
+ // loaded once children exist, and clearing the field data cache must drop
+ // it back to zero bytes. Stats are checked at each stage of the lifecycle.
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270")
+ public void testParentFieldDataCacheBug() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder().put(indexSettings())
+ .put("index.refresh_interval", -1)) // Disable automatic refresh, so that the _parent doesn't get warmed
+ .addMapping("parent", jsonBuilder().startObject().startObject("parent")
+ .startObject("properties")
+ .startObject("p_field")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY)
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+
+ refresh();
+ // No _parent field yet, there shouldn't be anything in the field data for _parent field
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ // Now add mapping + children
+ client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent")
+ .field("type", "parent")
+ .endObject()
+ .startObject("properties")
+ .startObject("c_field")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY)
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ // index simple data
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ // Children present now: _parent field data should have been loaded
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
+
+ // Clearing the cache must evict the _parent field data entirely
+ ClearIndicesCacheResponse clearCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).get();
+ assertNoFailures(clearCacheResponse);
+ assertAllSuccessful(clearCacheResponse);
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), equalTo(0l));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java
new file mode 100644
index 0000000000..78b2d93535
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java
@@ -0,0 +1,2056 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.child;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.cache.filter.FilterCacheModule;
+import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
+import org.elasticsearch.index.mapper.MergeMappingException;
+import org.elasticsearch.index.query.HasChildQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.search.child.ScoreType;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.filter.Filter;
+import org.elasticsearch.search.aggregations.bucket.global.Global;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static com.google.common.collect.Maps.newHashMap;
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.factorFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for parent/child (has_child / has_parent) query behavior.
+ */
+@ClusterScope(scope = Scope.SUITE)
+public class ChildQuerySearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ // aggressive filter caching so that we can assert on the filter cache size
+ .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
+ .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
+ .build();
+ }
+
+ @Test
+ public void testSelfReferentialIsForbidden() {
+ try {
+ prepareCreate("test").addMapping("type", "_parent", "type=type").get();
+ fail("self referential should be forbidden");
+ } catch (Exception e) {
+ Throwable cause = e.getCause();
+ assertThat(cause, instanceOf(IllegalArgumentException.class));
+ assertThat(cause.getMessage(), equalTo("The [_parent.type] option can't point to the same type"));
+ }
+ }
+
+ @Test
+ public void multiLevelChild() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("grandchild", "_parent", "type=child"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "c_value1").setParent("p1").get();
+ client().prepareIndex("test", "grandchild", "gc1").setSource("gc_field", "gc_value1")
+ .setParent("c1").setRouting("p1").get();
+ refresh();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ hasChildQuery(
+ "child",
+ filteredQuery(termQuery("c_field", "c_value1"),
+ hasChildQuery("grandchild", termQuery("gc_field", "gc_value1")))))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentQuery("parent", termQuery("p_field", "p_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentQuery("child", termQuery("c_field", "c_value1")))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "p_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("child", termQuery("c_field", "c_value1"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("gc1"));
+ }
+
+ @Test
+ // see #6722
+ public void test6722() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("foo")
+ .addMapping("test", "_parent", "type=foo"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "foo", "1").setSource("foo", 1).get();
+ client().prepareIndex("test", "test", "2").setSource("foo", 1).setParent("1").get();
+ refresh();
+ String query = copyToStringFromClasspath("/org/elasticsearch/search/child/bool-query-with-empty-clauses.json");
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(query).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("2"));
+ }
+
+ @Test
+ // see #2744
+ public void test2744() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("foo")
+ .addMapping("test", "_parent", "type=foo"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "foo", "1").setSource("foo", 1).get();
+ client().prepareIndex("test", "test").setSource("foo", 1).setParent("1").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("test", matchQuery("foo", 1))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ }
+
+ @Test
+ public void simpleChildQuery() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ refresh();
+
+ // TEST FETCHING _parent from child
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(idsQuery("child").ids("c1")).addFields("_parent").execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+
+ // TEST matching on parent
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("_parent", "p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("_parent:p1")).addFields("_parent").get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(0).field("_parent").value().toString(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("c1"), equalTo("c2")));
+ assertThat(searchResponse.getHits().getAt(1).field("_parent").value().toString(), equalTo("p1"));
+
+ // HAS CHILD
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "yellow"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "blue")).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(randomHasChild("child", "c_field", "red")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS PARENT
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value2")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomHasParent("parent", "p_field", "p_value1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c2"));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3290
+ public void testCachingBug_withFqueryFilter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ // index simple data
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "parent", Integer.toString(i)).setSource("p_field", i));
+ }
+ indexRandom(randomBoolean(), builders);
+ builders.clear();
+ for (int j = 0; j < 2; j++) {
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i)).setSource("c_field", i).setParent("" + 0));
+ }
+ for (int i = 0; i < 10; i++) {
+ builders.add(client().prepareIndex("test", "child", Integer.toString(i + 10)).setSource("c_field", i + 10).setParent(Integer.toString(i)));
+ }
+
+ if (randomBoolean()) {
+ break; // randomly break out and don't have deletes / updates
+ }
+ }
+ indexRandom(true, builders);
+
+ for (int i = 1; i <= 10; i++) {
+ logger.info("Round {}", i);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", matchAllQuery()).scoreType("max")))
+ .get();
+ assertNoFailures(searchResponse);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentQuery("parent", matchAllQuery()).scoreType("score")))
+ .get();
+ assertNoFailures(searchResponse);
+ }
+ }
+
+ @Test
+ public void testHasParentFilter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+ Map<String, Set<String>> parentToChildren = newHashMap();
+ // Childless parent
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p0").get();
+ parentToChildren.put("p0", new HashSet<String>());
+
+ String previousParentId = null;
+ int numChildDocs = 32;
+ int numChildDocsPerParent = 0;
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 1; i <= numChildDocs; i++) {
+
+ if (previousParentId == null || i % numChildDocsPerParent == 0) {
+ previousParentId = "p" + i;
+ builders.add(client().prepareIndex("test", "parent", previousParentId).setSource("p_field", previousParentId));
+ numChildDocsPerParent++;
+ }
+
+ String childId = "c" + i;
+ builders.add(client().prepareIndex("test", "child", childId).setSource("c_field", childId).setParent(previousParentId));
+
+ if (!parentToChildren.containsKey(previousParentId)) {
+ parentToChildren.put(previousParentId, new HashSet<String>());
+ }
+ assertThat(parentToChildren.get(previousParentId).add(childId), is(true));
+ }
+ indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()]));
+
+ assertThat(parentToChildren.isEmpty(), equalTo(false));
+ for (Map.Entry<String, Set<String>> parentToChildrenEntry : parentToChildren.entrySet()) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentQuery("parent", termQuery("p_field", parentToChildrenEntry.getKey()))))
+ .setSize(numChildDocsPerParent).get();
+
+ assertNoFailures(searchResponse);
+ Set<String> childIds = parentToChildrenEntry.getValue();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long) childIds.size()));
+ for (int i = 0; i < searchResponse.getHits().totalHits(); i++) {
+ assertThat(childIds.remove(searchResponse.getHits().getAt(i).id()), is(true));
+ assertThat(searchResponse.getHits().getAt(i).score(), is(1.0f));
+ }
+ assertThat(childIds.size(), is(0));
+ }
+ }
+
+ @Test
+ public void simpleChildQueryWithFlush() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data with flushes, so we have many segments
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+ client().admin().indices().prepareFlush().get();
+ refresh();
+
+ // HAS CHILD QUERY
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "blue"))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "red"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ // HAS CHILD FILTER
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "red"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ }
+
+ @Test
+ public void testScopedFacet() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(hasChildQuery("child", boolQuery().should(termQuery("c_field", "red")).should(termQuery("c_field", "yellow"))))
+ .addAggregation(AggregationBuilders.global("global").subAggregation(
+ AggregationBuilders.filter("filter").filter(boolQuery().should(termQuery("c_field", "red")).should(termQuery("c_field", "yellow"))).subAggregation(
+ AggregationBuilders.terms("facet1").field("c_field")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), anyOf(equalTo("p2"), equalTo("p1")));
+ assertThat(searchResponse.getHits().getAt(1).id(), anyOf(equalTo("p2"), equalTo("p1")));
+
+ Global global = searchResponse.getAggregations().get("global");
+ Filter filter = global.getAggregations().get("filter");
+ Terms termsFacet = filter.getAggregations().get("facet1");
+ assertThat(termsFacet.getBuckets().size(), equalTo(2));
+ assertThat(termsFacet.getBuckets().get(0).getKeyAsString(), equalTo("red"));
+ assertThat(termsFacet.getBuckets().get(0).getDocCount(), equalTo(2L));
+ assertThat(termsFacet.getBuckets().get(1).getKeyAsString(), equalTo("yellow"));
+ assertThat(termsFacet.getBuckets().get(1).getDocCount(), equalTo(1L));
+ }
+
+ @Test
+ public void testDeletedParent() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ // update p1 and see what that we get updated values...
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1_updated").get();
+ client().admin().indices().prepareRefresh().get();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "yellow")))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1_updated\""));
+ }
+
+ @Test
+ public void testDfsSearchType() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasChildQuery("child", boolQuery().should(queryStringQuery("c_field:*"))))).get();
+ assertNoFailures(searchResponse);
+
+ searchResponse = client().prepareSearch("test").setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(boolQuery().mustNot(hasParentQuery("parent", boolQuery().should(queryStringQuery("p_field:*"))))).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testHasChildAndHasParentFailWhenSomeSegmentsDontContainAnyParentOrChildDocs() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ client().prepareIndex("test", "type1", "1").setSource("p_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildQuery("child", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentQuery("parent", matchAllQuery()))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test
+ public void testCountApiUsage() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ CountResponse countResponse = client().prepareCount("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+
+ countResponse = client().prepareCount("test").setQuery(constantScoreQuery(hasParentQuery("parent", termQuery("p_field", "1"))))
+ .get();
+ assertHitCount(countResponse, 1l);
+ }
+
+ @Test
+ public void testExplainUsage() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setExplain(true)
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), equalTo("Score based on join value p1"));
+
+ ExplainResponse explainResponse = client().prepareExplain("test", "parent", parentId)
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ assertThat(explainResponse.isExists(), equalTo(true));
+ assertThat(explainResponse.getExplanation().getDetails()[0].getDescription(), equalTo("Score based on join value p1"));
+ }
+
+ List<IndexRequestBuilder> createDocBuilders() {
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ // Parent 1 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("1").setIndex("test").setSource("p_field", "p_value1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("2").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("3").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("4").setIndex("test")
+ .setSource("c_field1", 2, "c_field2", 0).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("5").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("1"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("6").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("1"));
+
+ // Parent 2 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("2").setIndex("test").setSource("p_field", "p_value2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("7").setIndex("test")
+ .setSource("c_field1", 3, "c_field2", 0).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("8").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("9").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("p")); // why
+ // "p"????
+ indexBuilders.add(client().prepareIndex().setType("child").setId("10").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("11").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1).setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("12").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2).setParent("2"));
+
+ // Parent 3 and its children
+
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("3").setIndex("test")
+ .setSource("p_field1", "p_value3", "p_field2", 5));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("13").setIndex("test")
+ .setSource("c_field1", 4, "c_field2", 0, "c_field3", 0).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("14").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 1, "c_field3", 1).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("15").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 2).setParent("3")); // NOTE: stale copy-paste comment removed;
+ // parent here is "3", not "p" (see child id 9 above for the "p" case)
+ indexBuilders.add(client().prepareIndex().setType("child").setId("16").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 3).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("17").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 4).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("18").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 5).setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child1").setId("1").setIndex("test")
+ .setSource("c_field1", 1, "c_field2", 2, "c_field3", 6).setParent("3"));
+
+ return indexBuilders;
+ }
+
+ // Verifies parent/child scoring when the inner query is a function_score
+ // whose script replaces the natural score (boost mode REPLACE): the
+ // has_child query is checked with score types "sum", "max" and "avg", and
+ // the has_parent query with score type "score". The expected per-hit scores
+ // derive from the c_field1 / p_field2 values indexed by createDocBuilders().
+ @Test
+ public void testScoreForParentChildQueries_withFunctionScore() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child1", "_parent", "type=parent"));
+ ensureGreen();
+
+ indexRandom(true, createDocBuilders().toArray(new IndexRequestBuilder[0]));
+ // "sum": each parent scores as the sum of its matching children's scores.
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0),
+ scriptFunction(new Script("doc['c_field1'].value")))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("sum")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(3f));
+
+ // "max": each parent scores as its best-scoring matching child.
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0),
+ scriptFunction(new Script("doc['c_field1'].value")))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("max")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(2f));
+
+ // "avg": each parent scores as the mean of its matching children's scores.
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(matchQuery("c_field2", 0),
+ scriptFunction(new Script("doc['c_field1'].value")))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("avg")).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(4f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1.5f));
+
+ // has_parent "score": every child of parent 3 inherits the parent's
+ // function-score (p_field2 == 5); sorting by c_field3 fixes the hit order.
+ response = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.hasParentQuery(
+ "parent",
+ QueryBuilders.functionScoreQuery(matchQuery("p_field1", "p_value3"),
+ scriptFunction(new Script("doc['p_field2'].value")))
+ .boostMode(CombineFunction.REPLACE.getName())).scoreType("score"))
+ .addSort(SortBuilders.fieldSort("c_field3")).addSort(SortBuilders.scoreSort()).get();
+
+ assertThat(response.getHits().totalHits(), equalTo(7l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("13"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("14"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("15"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[3].id(), equalTo("16"));
+ assertThat(response.getHits().hits()[3].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[4].id(), equalTo("17"));
+ assertThat(response.getHits().hits()[4].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[5].id(), equalTo("18"));
+ assertThat(response.getHits().hits()[5].score(), equalTo(5f));
+ assertThat(response.getHits().hits()[6].id(), equalTo("1"));
+ assertThat(response.getHits().hits()[6].score(), equalTo(5f));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/2536
+ // Parent/child queries must return zero hits (not fail) when the index has
+ // no documents of the queried child/parent type. The document indexed below
+ // goes into type "child1" — not the queried "child" type — so every query
+ // still matches nothing even after data exists in the index.
+ public void testParentChildQueriesCanHandleNoRelevantTypesInIndex() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // Empty index: has_child must quietly match nothing.
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertNoFailures(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ client().prepareIndex("test", "child1").setSource(jsonBuilder().startObject().field("text", "value").endObject()).setRefresh(true)
+ .get();
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value"))).get();
+ assertNoFailures(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasChildQuery("child", matchQuery("text", "value")).scoreType("max"))
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ // has_parent against "child" (which has no children of its own) likewise
+ // yields zero hits with and without scoring.
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value"))).get();
+ assertNoFailures(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).scoreType("score"))
+ .get();
+ assertNoFailures(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+ }
+
+ // has_child / has_parent used in filter context (inside filtered_query)
+ // must resolve across segments: the parent, child and an unrelated "type1"
+ // doc are flushed into separate segments on purpose.
+ @Test
+ public void testHasChildAndHasParentFilter_withFilter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get();
+ client().admin().indices().prepareFlush("test").get();
+
+ // Unrelated doc in its own segment — must not leak into the results.
+ client().prepareIndex("test", "type1", "3").setSource("p_field", "p_value1").get();
+ client().admin().indices().prepareFlush("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildQuery("child", termQuery("c_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentQuery("parent", termQuery("p_field", 1)))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("2"));
+ }
+
+ // has_child / has_parent wrapped in a query filter (directly or via a bool
+ // "must") must still join parent and child docs that live in different
+ // segments — hence the forced flush between indexing parent and child.
+ @Test
+ public void testHasChildAndHasParentWrappedInAQueryFilter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // query filter in case for p/c shouldn't execute per segment, but rather
+ client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get();
+ client().admin().indices().prepareFlush("test").setForce(true).get();
+ client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasChildQuery("child", matchQuery("c_field", 1)))).get();
+ assertSearchHit(searchResponse, 1, hasId("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), hasParentQuery("parent", matchQuery("p_field", 1)))).get();
+ assertSearchHit(searchResponse, 1, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), boolQuery().must(hasChildQuery("child", matchQuery("c_field", 1))))).get();
+ assertSearchHit(searchResponse, 1, hasId("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), boolQuery().must(hasParentQuery("parent", matchQuery("p_field", 1))))).get();
+ assertSearchHit(searchResponse, 1, hasId("2"));
+ }
+
+ // Exercises query rewriting (prefix queries) inside has_child / has_parent:
+ // 10 parents (p000..p009) each with 50 children (c000..c499), queried under
+ // both QUERY_THEN_FETCH and DFS_QUERY_THEN_FETCH search types.
+ @Test
+ public void testSimpleQueryRewrite() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent", "p_field", "type=string")
+ .addMapping("child", "_parent", "type=parent", "c_field", "type=string"));
+ ensureGreen();
+
+ // index simple data
+ int childId = 0;
+ for (int i = 0; i < 10; i++) {
+ String parentId = String.format(Locale.ROOT, "p%03d", i);
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", parentId).get();
+ int j = childId;
+ for (; j < childId + 50; j++) {
+ String childUid = String.format(Locale.ROOT, "c%03d", j);
+ client().prepareIndex("test", "child", childUid).setSource("c_field", childUid).setParent(parentId).get();
+ }
+ childId = j;
+ }
+ refresh();
+
+ SearchType[] searchTypes = new SearchType[]{SearchType.QUERY_THEN_FETCH, SearchType.DFS_QUERY_THEN_FETCH};
+ for (SearchType searchType : searchTypes) {
+ // prefix "c" matches every child, so all 10 parents match.
+ SearchResponse searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasChildQuery("child", prefixQuery("c_field", "c")).scoreType("max")).addSort("p_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(10L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("p000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("p001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("p002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("p003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("p004"));
+
+ // prefix "p" matches every parent, so all 500 children match.
+ searchResponse = client().prepareSearch("test").setSearchType(searchType)
+ .setQuery(hasParentQuery("parent", prefixQuery("p_field", "p")).scoreType("score")).addSort("c_field", SortOrder.ASC)
+ .setSize(5).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(500L));
+ assertThat(searchResponse.getHits().hits()[0].id(), equalTo("c000"));
+ assertThat(searchResponse.getHits().hits()[1].id(), equalTo("c001"));
+ assertThat(searchResponse.getHits().hits()[2].id(), equalTo("c002"));
+ assertThat(searchResponse.getHits().hits()[3].id(), equalTo("c003"));
+ assertThat(searchResponse.getHits().hits()[4].id(), equalTo("c004"));
+ }
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3144
+ // Repeatedly re-indexing (overwriting) parent and child documents must not
+ // corrupt the parent/child join: the same queries return the same results
+ // before and after ten rounds of re-indexing with refreshes in between.
+ public void testReIndexingParentAndChildDocuments() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum")).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("c3"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("c4"));
+
+ // re-index
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "d" + i).setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().admin().indices().prepareRefresh("test").get();
+ }
+
+ searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "yellow")).scoreType("sum"))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p1"));
+ assertThat(searchResponse.getHits().getAt(0).sourceAsString(), containsString("\"p_value1\""));
+
+ // After re-indexing, equal-scoring hits c3/c4 may come back in either order.
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ boolQuery().must(matchQuery("c_field", "x")).must(
+ hasParentQuery("parent", termQuery("p_field", "p_value2")).scoreType("score"))).get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().getAt(0).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ assertThat(searchResponse.getHits().getAt(1).id(), Matchers.anyOf(equalTo("c3"), equalTo("c4")));
+ }
+
+ @Test
+ // See also issue:
+ // https://github.com/elasticsearch/elasticsearch/issues/3203
+ // min_score must be applied to the aggregated has_child score: with
+ // scoreType "sum", p1 (one child) scores 1 and p2 (three children) scores 3,
+ // so only p2 survives a min_score of 3.
+ public void testHasChildQueryWithMinimumScore() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "x").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "x").setParent("p2").get();
+ client().prepareIndex("test", "child", "c5").setSource("c_field", "x").setParent("p2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", matchAllQuery()).scoreType("sum"))
+ .setMinScore(3) // Score needs to be 3 or above!
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("p2"));
+ assertThat(searchResponse.getHits().getAt(0).score(), equalTo(3.0f));
+ }
+
+ // Filtering on the _parent metadata field, both with the bare parent id
+ // ("p1") and the fully-qualified "type#id" form ("parent#p1"). Automatic
+ // refresh is disabled (refresh_interval=-1) so the test controls exactly
+ // when newly indexed docs become visible — hit counts before and after each
+ // explicit refresh() are asserted separately.
+ @Test
+ public void testParentFieldFilter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder().put(indexSettings())
+ .put("index.refresh_interval", -1))
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent")
+ .addMapping("child2", "_parent", "type=parent"));
+ ensureGreen();
+
+ // test term filter
+ SearchResponse response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "p1")))
+ .get();
+ assertHitCount(response, 0l);
+
+ client().prepareIndex("test", "some_type", "1").setSource("field", "value").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "value").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "value").setParent("p1").get();
+
+ // Not refreshed yet — the new child is still invisible to search.
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 0l);
+ refresh();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ client().prepareIndex("test", "parent2", "p1").setSource("p_field", "value").setRefresh(true).get();
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ // test terms filter
+ client().prepareIndex("test", "child2", "c1").setSource("c_field", "value").setParent("p1").get();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("_parent", "parent#p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 1l);
+
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("_parent", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ // Duplicate terms must not double-count hits.
+ refresh();
+ response = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("_parent", "p1", "p1"))).execute()
+ .actionGet();
+ assertHitCount(response, 2l);
+
+ response = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("_parent", "parent#p1", "parent2#p1"))).get();
+ assertHitCount(response, 2l);
+ }
+
+ // A constant-score has_child query must not serve stale cached results:
+ // after a second matching child is indexed and the index refreshed, the
+ // same query must see the new hit (2 parents instead of 1).
+ @Test
+ public void testHasChildNotBeingCached() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "parent", "p5").setSource("p_field", "p_value5").get();
+ client().prepareIndex("test", "parent", "p6").setSource("p_field", "p_value6").get();
+ client().prepareIndex("test", "parent", "p7").setSource("p_field", "p_value7").get();
+ client().prepareIndex("test", "parent", "p8").setSource("p_field", "p_value8").get();
+ client().prepareIndex("test", "parent", "p9").setSource("p_field", "p_value9").get();
+ client().prepareIndex("test", "parent", "p10").setSource("p_field", "p_value10").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ client().prepareIndex("test", "child", "c2").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ // Re-running the identical query must reflect the newly indexed child.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ // Builds a has_child query for {type, field = value}, randomly wrapped in
+ // one of three equivalent forms — constant_score, filtered query, or the
+ // bare query — so callers exercise every query shape.
+ private QueryBuilder randomHasChild(String type, String field, String value) {
+     QueryBuilder hasChild = hasChildQuery(type, termQuery(field, value));
+     if (randomBoolean()) {
+         return randomBoolean()
+                 ? constantScoreQuery(hasChild)
+                 : filteredQuery(matchAllQuery(), hasChild);
+     }
+     return hasChild;
+ }
+
+ // Builds a has_parent query for {type, field = value}, randomly wrapped in
+ // one of three equivalent forms — constant_score, filtered query, or the
+ // bare query — mirroring randomHasChild.
+ private QueryBuilder randomHasParent(String type, String field, String value) {
+     QueryBuilder hasParent = hasParentQuery(type, termQuery(field, value));
+     if (randomBoolean()) {
+         return randomBoolean()
+                 ? constantScoreQuery(hasParent)
+                 : filteredQuery(matchAllQuery(), hasParent);
+     }
+     return hasParent;
+ }
+
+ @Test
+ // Relates to bug: https://github.com/elasticsearch/elasticsearch/issues/3818
+ // A has_child query must only match documents of the named child type:
+ // in a 3-level hierarchy (grandparent -> parent -> two child types), a
+ // query for "William" under child_type_one matches, while the same query
+ // under child_type_two (whose only doc is "Kate") must not.
+ public void testHasChildQueryOnlyReturnsSingleChildType() {
+ assertAcked(prepareCreate("grandissue")
+ .addMapping("grandparent", "name", "type=string")
+ .addMapping("parent", "_parent", "type=grandparent")
+ .addMapping("child_type_one", "_parent", "type=parent")
+ .addMapping("child_type_two", "_parent", "type=parent"));
+
+ client().prepareIndex("grandissue", "grandparent", "1").setSource("name", "Grandpa").get();
+ client().prepareIndex("grandissue", "parent", "2").setParent("1").setSource("name", "Dana").get();
+ // Grandchildren route via the grandparent ("1") so the whole family
+ // lives on one shard.
+ client().prepareIndex("grandissue", "child_type_one", "3").setParent("2").setRouting("1")
+ .setSource("name", "William")
+ .get();
+ client().prepareIndex("grandissue", "child_type_two", "4").setParent("2").setRouting("1")
+ .setSource("name", "Kate")
+ .get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_one",
+ boolQuery().must(
+ queryStringQuery("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("grandissue").setQuery(
+ boolQuery().must(
+ hasChildQuery(
+ "parent",
+ boolQuery().must(
+ hasChildQuery(
+ "child_type_two",
+ boolQuery().must(
+ queryStringQuery("name:William*").analyzeWildcard(true)
+ )
+ )
+ )
+ )
+ )
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ // Indexing a document with an explicit parent must be rejected with
+ // IllegalArgumentException when the target type has no _parent mapping —
+ // both for a mapped type without _parent ("child1") and for an unmapped
+ // type ("child2").
+ @Test
+ public void indexChildDocWithNoParentMapping() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child1"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1", "_parent", "bla").get();
+ try {
+ client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
+ }
+ try {
+ client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
+ }
+
+ refresh();
+ }
+
+ // Adding a _parent field to a type that already has a mapping must be
+ // rejected with a MergeMappingException — the _parent setting cannot be
+ // changed after the fact.
+ @Test
+ public void testAddingParentToExistingMapping() throws IOException {
+ createIndex("test");
+ ensureGreen();
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child").setSource("number", "type=integer")
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
+ Map<String, Object> mapping = getMappingsResponse.getMappings().get("test").get("child").getSourceAsMap();
+ assertThat(mapping.size(), greaterThanOrEqualTo(1)); // there are potentially some meta fields configured randomly
+ assertThat(mapping.get("properties"), notNullValue());
+
+ try {
+ // Adding _parent metadata field to existing mapping is prohibited:
+ client().admin().indices().preparePutMapping("test").setType("child").setSource(jsonBuilder().startObject().startObject("child")
+ .startObject("_parent").field("type", "parent").endObject()
+ .endObject().endObject()).get();
+ fail();
+ } catch (MergeMappingException e) {
+ assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]]}"));
+ }
+ }
+
+ // has_child must work when the parent type also contains nested objects
+ // (nested docs share the parent's Lucene block): each query is filtered with
+ // a not-query and run with a randomly chosen score type.
+ @Test
+ public void testHasChildQueryWithNestedInnerObjects() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent", "objects", "type=nested")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p1")
+ .setSource(jsonBuilder().startObject().field("p_field", "1").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .startObject().field("i_field", "3").endObject()
+ .startObject().field("i_field", "4").endObject()
+ .startObject().field("i_field", "5").endObject()
+ .startObject().field("i_field", "6").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "parent", "p2")
+ .setSource(jsonBuilder().startObject().field("p_field", "2").startArray("objects")
+ .startObject().field("i_field", "1").endObject()
+ .startObject().field("i_field", "2").endObject()
+ .endArray().endObject())
+ .get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ refresh();
+
+ // Pick a random score type; the hit counts must hold for all of them.
+ String scoreMode = ScoreType.values()[getRandom().nextInt(ScoreType.values().length)].name().toLowerCase(Locale.ROOT);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "blue")).scoreType(scoreMode), notQuery(termQuery("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(QueryBuilders.hasChildQuery("child", termQuery("c_field", "red")).scoreType(scoreMode), notQuery(termQuery("p_field", "3"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+ // A queryName set on has_child / has_parent (scored or wrapped in
+ // constant_score) must be reported back in each hit's matched_queries.
+ @Test
+ public void testNamedFilters() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "1").setParent(parentId).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score").queryName("test"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "1")).queryName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(hasParentQuery("parent", termQuery("p_field", "1")).queryName("test")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("test"));
+ }
+
+ // When the index has no _parent mapping at all, every parent/child query or
+ // post-filter must fail fast with a SearchPhaseExecutionException carrying
+ // HTTP 400 (BAD_REQUEST) — five variants are asserted below.
+ @Test
+ public void testParentChildQueriesNoParentType() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ refresh();
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setPostFilter(hasChildQuery("child", termQuery("c_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", termQuery("p_field", "1")).scoreType("score"))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+
+ try {
+ client().prepareSearch("test")
+ .setPostFilter(hasParentQuery("parent", termQuery("p_field", "1")))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ }
+ }
+
+ // A put-mapping that introduces a _parent field pointing at a type which
+ // already has indexed documents must be rejected with
+ // IllegalArgumentException.
+ @Test
+ public void testAdd_ParentFieldAfterIndexingParentDocButBeforeIndexingChildDoc() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1)));
+ ensureGreen();
+
+ String parentId = "p1";
+ client().prepareIndex("test", "parent", parentId).setSource("p_field", "1").get();
+ refresh();
+
+ try {
+ assertAcked(client().admin()
+ .indices()
+ .preparePutMapping("test")
+ .setType("child")
+ .setSource("_parent", "type=parent"));
+ fail("Shouldn't be able the add the _parent field pointing to an already existing parent type");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), equalTo("can't add a _parent field that points to an already existing type"));
+ }
+ }
+
+ // Caching must not go stale across index updates: the same filtered
+ // has_child query is run twice (the second run may be served from cache),
+ // then child c3 is rewritten from "red" to "blue" and the query must drop
+ // from 2 hits to 1. The mid-test optimize creates a multi-segment layout on
+ // purpose; refresh_interval=-1 keeps visibility under explicit control.
+ @Test
+ public void testParentChildCaching() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(
+ settingsBuilder()
+ .put(indexSettings())
+ .put("index.refresh_interval", -1)
+ )
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ // index simple data
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c1").setParent("p1").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c2").setParent("p1").setSource("c_field", "red").get();
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "red").get();
+ client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).setFlush(true).get();
+ client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
+ client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
+ client().prepareIndex("test", "child", "c4").setParent("p3").setSource("c_field", "green").get();
+ client().prepareIndex("test", "child", "c5").setParent("p3").setSource("c_field", "blue").get();
+ client().prepareIndex("test", "child", "c6").setParent("p4").setSource("c_field", "blue").get();
+ client().admin().indices().prepareFlush("test").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ for (int i = 0; i < 2; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolQuery()
+ .must(QueryBuilders.hasChildQuery("child", matchQuery("c_field", "red")))
+ .must(matchAllQuery())))
+ .get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+
+
+ // Overwrite c3: p2 no longer has a "red" child, so only p1 should match.
+ client().prepareIndex("test", "child", "c3").setParent("p2").setSource("c_field", "blue").get();
+ client().admin().indices().prepareRefresh("test").get();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), boolQuery()
+ .must(QueryBuilders.hasChildQuery("child", matchQuery("c_field", "red")))
+ .must(matchAllQuery())))
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ // Runs has_child / has_parent queries (plain and wrapped in a filtered query)
+ // through the scan search type + scroll API and verifies that all 10 matching
+ // docs are eventually returned, one per page (size = 1).
+ @Test
+ public void testParentChildQueriesViaScrollApi() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+ // 10 parents, each with exactly one child -> every query below matches 10 docs.
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "parent", "p" + i).setSource("{}").get();
+ client().prepareIndex("test", "child", "c" + i).setSource("{}").setParent("p" + i).get();
+ }
+
+ refresh();
+
+ QueryBuilder[] queries = new QueryBuilder[]{
+ hasChildQuery("child", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasChildQuery("child", matchAllQuery())),
+ hasParentQuery("parent", matchAllQuery()),
+ filteredQuery(matchAllQuery(), hasParentQuery("parent", matchAllQuery()))
+ };
+
+ for (QueryBuilder query : queries) {
+ SearchResponse scrollResponse = client().prepareSearch("test")
+ .setScroll(TimeValue.timeValueSeconds(30))
+ .setSize(1)
+ .addField("_id")
+ .setQuery(query)
+ .setSearchType("scan")
+ .execute()
+ .actionGet();
+
+ assertNoFailures(scrollResponse);
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+ int scannedDocs = 0;
+ // Drain the scroll until an empty page signals the end, counting docs seen.
+ do {
+ scrollResponse = client()
+ .prepareSearchScroll(scrollResponse.getScrollId())
+ .setScroll(TimeValue.timeValueSeconds(30)).get();
+ assertThat(scrollResponse.getHits().totalHits(), equalTo(10l));
+ scannedDocs += scrollResponse.getHits().getHits().length;
+ } while (scrollResponse.getHits().getHits().length > 0);
+ assertThat(scannedDocs, equalTo(10));
+ }
+ }
+
+ // https://github.com/elasticsearch/elasticsearch/issues/5783
+ // Regression test: has_child must parse correctly whether the "type" element
+ // appears before or after the "query" element in the request body JSON.
+ @Test
+ public void testQueryBeforeChildType() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("features")
+ .addMapping("posts", "_parent", "type=features")
+ .addMapping("specials"));
+ ensureGreen();
+
+ client().prepareIndex("test", "features", "1").setSource("field", "foo").get();
+ client().prepareIndex("test", "posts", "1").setParent("1").setSource("field", "bar").get();
+ refresh();
+
+ SearchResponse resp;
+ // "type" before "query"
+ resp = client().prepareSearch("test")
+ .setSource("{\"query\": {\"has_child\": {\"type\": \"posts\", \"query\": {\"match\": {\"field\": \"bar\"}}}}}").get();
+ assertHitCount(resp, 1L);
+
+ // Now reverse the order for the type after the query
+ resp = client().prepareSearch("test")
+ .setSource("{\"query\": {\"has_child\": {\"query\": {\"match\": {\"field\": \"bar\"}}, \"type\": \"posts\"}}}").get();
+ assertHitCount(resp, 1L);
+
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/6256
+ // Regression test: a multi_match query targeting the _parent field must be able
+ // to find a child document by its parent id.
+ public void testParentFieldInMultiMatchField() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1")
+ .addMapping("type2", "_parent", "type=type1")
+ );
+ ensureGreen();
+
+ client().prepareIndex("test", "type2", "1").setParent("1").setSource("field", "value").get();
+ refresh();
+
+ // Query the _parent field for parent id "1"; should match the single child doc.
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("1", "_parent"))
+ .get();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ // Verifies that the parent type filter is applied to the inner query of
+ // has_parent: notQuery(term) must not leak matches from other types. With one
+ // parent matching "field1=a" and two children, the negated forms match 0 docs
+ // and the positive forms match both children.
+ @Test
+ public void testTypeIsAppliedInHasParentInnerQuery() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ List<IndexRequestBuilder> indexRequests = new ArrayList<>();
+ indexRequests.add(client().prepareIndex("test", "parent", "1").setSource("field1", "a"));
+ indexRequests.add(client().prepareIndex("test", "child", "1").setParent("1").setSource("{}"));
+ indexRequests.add(client().prepareIndex("test", "child", "2").setParent("1").setSource("{}"));
+ indexRandom(true, indexRequests);
+
+ // Negated inner query (not wrapped / wrapped in constant_score): no parent matches.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentQuery("parent", notQuery(termQuery("field1", "a")))))
+ .get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", constantScoreQuery(notQuery(termQuery("field1", "a")))))
+ .get();
+ assertHitCount(searchResponse, 0l);
+
+ // Positive inner query: the one matching parent has two children -> 2 hits.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasParentQuery("parent", termQuery("field1", "a"))))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(hasParentQuery("parent", constantScoreQuery(termQuery("field1", "a"))))
+ .get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ // Builds the fixture for the min_children/max_children tests: parents 1..4,
+ // where parent N has N children (ids 10..19). Child "foo" values are cumulative
+ // ("one", "one two", ...), so the number of children matching term foo:"two"
+ // grows with the parent id — parent 1 has none, parent 4 has three.
+ private List<IndexRequestBuilder> createMinMaxDocBuilders() {
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ // Parent 1 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("1").setIndex("test").setSource("id",1));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("10").setIndex("test")
+ .setSource("foo", "one").setParent("1"));
+
+ // Parent 2 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("2").setIndex("test").setSource("id",2));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("11").setIndex("test")
+ .setSource("foo", "one").setParent("2"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("12").setIndex("test")
+ .setSource("foo", "one two").setParent("2"));
+
+ // Parent 3 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("3").setIndex("test").setSource("id",3));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("13").setIndex("test")
+ .setSource("foo", "one").setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("14").setIndex("test")
+ .setSource("foo", "one two").setParent("3"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("15").setIndex("test")
+ .setSource("foo", "one two three").setParent("3"));
+
+ // Parent 4 and its children
+ indexBuilders.add(client().prepareIndex().setType("parent").setId("4").setIndex("test").setSource("id",4));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("16").setIndex("test")
+ .setSource("foo", "one").setParent("4"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("17").setIndex("test")
+ .setSource("foo", "one two").setParent("4"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("18").setIndex("test")
+ .setSource("foo", "one two three").setParent("4"));
+ indexBuilders.add(client().prepareIndex().setType("child").setId("19").setIndex("test")
+ .setSource("foo", "one two three four").setParent("4"));
+
+ return indexBuilders;
+ }
+
+ // Runs a has_child query over children matching foo:"two", with the given score
+ // type, min/max children bounds and short-circuit cutoff. The inner
+ // function_score replaces each child's score with 1 + (matches foo:"three") +
+ // (matches foo:"four"), so per-parent aggregate scores are small predictable
+ // integers. Results sorted by _score desc, then id asc.
+ private SearchResponse minMaxQuery(String scoreType, int minChildren, int maxChildren, int cutoff) throws SearchPhaseExecutionException {
+ return client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders
+ .hasChildQuery(
+ "child",
+ QueryBuilders.functionScoreQuery(constantScoreQuery(QueryBuilders.termQuery("foo", "two"))).boostMode("replace").scoreMode("sum")
+ .add(QueryBuilders.matchAllQuery(), factorFunction(1))
+ .add(QueryBuilders.termQuery("foo", "three"), factorFunction(1))
+ .add(QueryBuilders.termQuery("foo", "four"), factorFunction(1))).scoreType(scoreType)
+ .minChildren(minChildren).maxChildren(maxChildren).setShortCircuitCutoff(cutoff))
+ .addSort("_score", SortOrder.DESC).addSort("id", SortOrder.ASC).get();
+ }
+
+ // Filter-context counterpart of minMaxQuery: constant_score(has_child(foo:"two"))
+ // with the given min/max children bounds and short-circuit cutoff, so every hit
+ // scores 1f. Results sorted by id asc with scores tracked.
+ private SearchResponse minMaxFilter(int minChildren, int maxChildren, int cutoff) throws SearchPhaseExecutionException {
+ return client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.constantScoreQuery(QueryBuilders.hasChildQuery("child", termQuery("foo", "two"))
+ .minChildren(minChildren).maxChildren(maxChildren).setShortCircuitCutoff(cutoff)))
+ .addSort("id", SortOrder.ASC).setTrackScores(true).get();
+ }
+
+ // Exhaustive check of min_children/max_children on has_child across all score
+ // modes (none, sum, max, avg) and the filter form. Fixture (see
+ // createMinMaxDocBuilders): parent N has N children, of which N-1 match
+ // foo:"two" — so parent 1 never matches, and min/max bounds of 0 mean
+ // "unbounded". max < min must be rejected with a SearchPhaseExecutionException.
+ @Test
+ public void testMinMaxChildren() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("parent", "id", "type=long")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ indexRandom(true, createMinMaxDocBuilders().toArray(new IndexRequestBuilder[0]));
+ SearchResponse response;
+ // Cutoff is randomized: short-circuiting must never change results.
+ int cutoff = getRandom().nextInt(4);
+
+ // Score mode = NONE
+ response = minMaxQuery("none", 0, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 1, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 2, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 3, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 4, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = minMaxQuery("none", 0, 4, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 0, 3, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 0, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxQuery("none", 2, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+
+ // Invalid bounds: max_children < min_children must fail.
+ try {
+ response = minMaxQuery("none", 3, 2, cutoff);
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+ }
+
+ // Score mode = SUM
+ response = minMaxQuery("sum", 0, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("sum", 1, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("sum", 2, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+
+ response = minMaxQuery("sum", 3, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+
+ response = minMaxQuery("sum", 4, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = minMaxQuery("sum", 0, 4, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("sum", 0, 3, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(6f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("sum", 0, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxQuery("sum", 2, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+
+ try {
+ response = minMaxQuery("sum", 3, 2, cutoff);
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+ }
+
+ // Score mode = MAX
+ response = minMaxQuery("max", 0, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("max", 1, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("max", 2, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(2f));
+
+ response = minMaxQuery("max", 3, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+
+ response = minMaxQuery("max", 4, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = minMaxQuery("max", 0, 4, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("max", 0, 3, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(3f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("max", 0, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxQuery("max", 2, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+
+ try {
+ response = minMaxQuery("max", 3, 2, cutoff);
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+ }
+
+ // Score mode = AVG
+ response = minMaxQuery("avg", 0, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1.5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("avg", 1, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1.5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("avg", 2, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1.5f));
+
+ response = minMaxQuery("avg", 3, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+
+ response = minMaxQuery("avg", 4, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = minMaxQuery("avg", 0, 4, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1.5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("avg", 0, 3, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(2f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1.5f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxQuery("avg", 0, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1.5f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxQuery("avg", 2, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1.5f));
+
+ try {
+ response = minMaxQuery("avg", 3, 2, cutoff);
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+ }
+
+ // HasChildFilter
+ response = minMaxFilter(0, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxFilter(1, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxFilter(2, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxFilter(3, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+
+ response = minMaxFilter(4, 0, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+
+ response = minMaxFilter(0, 4, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxFilter(0, 3, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[2].id(), equalTo("4"));
+ assertThat(response.getHits().hits()[2].score(), equalTo(1f));
+
+ response = minMaxFilter(0, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("2"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+ assertThat(response.getHits().hits()[1].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[1].score(), equalTo(1f));
+
+ response = minMaxFilter(2, 2, cutoff);
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().hits()[0].id(), equalTo("3"));
+ assertThat(response.getHits().hits()[0].score(), equalTo(1f));
+
+ try {
+ response = minMaxFilter(3, 2, cutoff);
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+ }
+
+ }
+
+ // Exercises a mapping whose _parent points at a type ("parent2") that does not
+ // exist: has_child on the orphaned child type is expected to fail, while
+ // has_parent on the real "parent" type returns 0 hits. Disabled pending the
+ // fix tracked by the linked issue.
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9461")
+ public void testParentFieldToNonExistingType() {
+ assertAcked(prepareCreate("test").addMapping("parent").addMapping("child", "_parent", "type=parent2"));
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasChildQuery("child", matchAllQuery()))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ // expected: the child's _parent points to a non-existing type
+ }
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.hasParentQuery("parent", matchAllQuery()))
+ .get();
+ assertHitCount(response, 0);
+
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.hasChildQuery("child", matchAllQuery())))
+ .get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ // expected: same failure in filter context
+ }
+
+ response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.hasParentQuery("parent", matchAllQuery())))
+ .get();
+ assertHitCount(response, 0);
+ }
+
+ // Test-local wrapper around QueryBuilders.hasChildQuery that applies a random
+ // short-circuit cutoff (0..10), so the tests above exercise both the
+ // short-circuited and non-short-circuited execution paths.
+ static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) {
+ HasChildQueryBuilder hasChildQueryBuilder = QueryBuilders.hasChildQuery(type, queryBuilder);
+ hasChildQueryBuilder.setShortCircuitCutoff(randomInt(10));
+ return hasChildQueryBuilder;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java
new file mode 100644
index 0000000000..9fac3f1868
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingBwcTest.java
@@ -0,0 +1,272 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.child;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.child.ChildQuerySearchTests.hasChildQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Backwards-compatibility integration tests for _parent fielddata loading
+ * (lazy / default / eager / eager_global_ordinals) on indices created with
+ * {@code Version.V_1_6_0}. A non-BWC twin of this suite, without the pinned
+ * created-version, lives in ParentFieldLoadingTest.
+ */
+public class ParentFieldLoadingBwcTest extends ElasticsearchIntegrationTest {
+
+ // One primary, no replicas, manual refresh only, merges disabled, and the
+ // index pinned to the 1.6.0 created-version so legacy _parent fielddata
+ // behavior is exercised.
+ private final Settings indexSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexShard.INDEX_REFRESH_INTERVAL, -1)
+ // We never want merges in this test to ensure we have two segments for the last validation
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_6_0)
+ .build();
+
+ // Regression test for https://github.com/elasticsearch/elasticsearch/issues/9270:
+ // when the child mapping (and thus the _parent field) is added via put-mapping
+ // AFTER parent docs already exist, _parent fielddata must still be accounted
+ // for in the stats and must be removable via a clear-cache request.
+ @Test
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270")
+ public void testParentFieldDataCacheBug() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.builder().put(indexSettings())
+ .put("index.refresh_interval", -1)) // Disable automatic refresh, so that the _parent doesn't get warmed
+ .addMapping("parent", XContentFactory.jsonBuilder().startObject().startObject("parent")
+ .startObject("properties")
+ .startObject("p_field")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY)
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "p0").setSource("p_field", "p_value0").get();
+ client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
+
+ refresh();
+ // No _parent field yet, there shouldn't be anything in the field data for _parent field
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ // Now add mapping + children
+ client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("child")
+ .startObject("_parent")
+ .field("type", "parent")
+ .endObject()
+ .startObject("properties")
+ .startObject("c_field")
+ .field("type", "string")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, MappedFieldType.Loading.LAZY)
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ // index simple data
+ client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
+ client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
+ client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
+ client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
+ client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
+
+ refresh();
+
+ // Once parent/child docs are searchable the _parent field must show up
+ // in the per-field fielddata stats with a non-zero footprint.
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
+
+ // Run a has_child query (randomized short-circuit cutoff, see
+ // ChildQuerySearchTests.hasChildQuery) that matches exactly one parent.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
+ .get();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ // Fielddata must still be present after the search...
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
+
+ // ...and a fielddata clear-cache must drop it back to zero (this is the
+ // step the referenced bug is about).
+ ClearIndicesCacheResponse clearCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).get();
+ assertNoFailures(clearCacheResponse);
+ assertAllSuccessful(clearCacheResponse);
+ indicesStatsResponse = client().admin().indices()
+ .prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), equalTo(0l));
+ }
+
+ // Walks the four loading modes on a 1.6.0-created index and checks the
+ // fielddata cache footprint after indexing a parent/child pair:
+ // lazy == 0, default > 0, eager == default, eager_global_ordinals > default.
+ @Test
+ public void testEagerParentFieldLoading() throws Exception {
+ logger.info("testing lazy loading...");
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.LAZY)));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Lazy loading: nothing in the fielddata cache until a query needs it.
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("testing default loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Default loading on a 1.6.0-created index leaves a non-zero fielddata
+ // footprint; remember it as the baseline for the eager checks below.
+ response = client().admin().cluster().prepareClusterStats().get();
+ long fielddataSizeDefault = response.getIndicesStats().getFieldData().getMemorySizeInBytes();
+ assertThat(fielddataSizeDefault, greaterThan(0l));
+
+ logger.info("testing eager loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.EAGER)));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Eager loading matches the default footprint exactly.
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(fielddataSizeDefault));
+
+ logger.info("testing eager global ordinals loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS)));
+ ensureGreen();
+
+ // Need to do 2 separate refreshes, otherwise we have 1 segment and then we can't measure if global ordinals
+ // is loaded by the size of the field data cache, because global ordinals on 1 segment shards takes no extra memory.
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ refresh();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Global ordinals over two segments cost extra memory on top of default.
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(fielddataSizeDefault));
+ }
+
+ // Switches the child mapping from default loading to eager_global_ordinals
+ // at runtime via put-mapping and verifies the new setting both reaches the
+ // shard-level DocumentMapper and takes effect on the next new searcher.
+ @Test
+ public void testChangingEagerParentFieldLoadingAtRuntime() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Baseline footprint with the default loading mode (non-zero on a
+ // 1.6.0-created index).
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ long fielddataSizeDefault = response.getIndicesStats().getFieldData().getMemorySizeInBytes();
+ assertThat(fielddataSizeDefault, greaterThan(0l));
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource(childMapping(MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS))
+ .get();
+ assertAcked(putMappingResponse);
+ // The mapping update is applied to the shard asynchronously, so poll
+ // until the child DocumentMapper on the shard's node reports the new
+ // loading mode.
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState clusterState = internalCluster().clusterService().state();
+ ShardRouting shardRouting = clusterState.routingTable().index("test").shard(0).getShards().get(0);
+ String nodeName = clusterState.getNodes().get(shardRouting.currentNodeId()).getName();
+
+ boolean verified = false;
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
+ IndexService indexService = indicesService.indexService("test");
+ if (indexService != null) {
+ MapperService mapperService = indexService.mapperService();
+ DocumentMapper documentMapper = mapperService.documentMapper("child");
+ if (documentMapper != null) {
+ verified = documentMapper.parentFieldMapper().fieldType().fieldDataType().getLoading() == MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS;
+ }
+ }
+ assertTrue(verified);
+ }
+ });
+
+ // Need to add a new doc otherwise the refresh doesn't trigger a new searcher
+ // Because it ends up in its own segment, but isn't of type parent or child, this doc doesn't contribute to the size of the fielddata cache
+ client().prepareIndex("test", "dummy", "dummy").setSource("{}").get();
+ refresh();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(fielddataSizeDefault));
+ }
+
+ // Builds a child mapping whose _parent fielddata uses the given loading mode.
+ private XContentBuilder childMapping(MappedFieldType.Loading loading) throws IOException {
+ return jsonBuilder().startObject().startObject("child").startObject("_parent")
+ .field("type", "parent")
+ .startObject("fielddata").field(MappedFieldType.Loading.KEY, loading).endObject()
+ .endObject().endObject().endObject();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingTest.java b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingTest.java
new file mode 100644
index 0000000000..08936eeab2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/child/ParentFieldLoadingTest.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.child;
+
+import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Integration tests for _parent fielddata loading (lazy / default / eager /
+ * eager_global_ordinals) on current-version indices. The BWC twin of this
+ * suite, which pins the index to Version.V_1_6_0, is ParentFieldLoadingBwcTest.
+ */
+public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
+
+ // One primary, no replicas, manual refresh only, merges disabled so the
+ // final eager_global_ordinals check sees two segments.
+ private final Settings indexSettings = Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexShard.INDEX_REFRESH_INTERVAL, -1)
+ // We never want merges in this test to ensure we have two segments for the last validation
+ .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)
+ .build();
+
+ // Walks the four loading modes and checks the fielddata cache footprint
+ // after indexing a parent/child pair: on current-version indices lazy,
+ // default and eager all leave the cache empty; only eager_global_ordinals
+ // (with two segments) consumes fielddata heap.
+ @Test
+ public void testEagerParentFieldLoading() throws Exception {
+ logger.info("testing lazy loading...");
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.LAZY)));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Lazy: nothing loaded until a query needs it.
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("testing default loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Default loading also takes no fielddata heap on a current-version
+ // index (contrast with the >0 baseline in ParentFieldLoadingBwcTest).
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("testing eager loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.EAGER)));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Eager (without global ordinals) likewise leaves the cache empty.
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ logger.info("testing eager global ordinals loading...");
+ assertAcked(client().admin().indices().prepareDelete("test").get());
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", childMapping(MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS)));
+ ensureGreen();
+
+ // Need to do 2 separate refreshes, otherwise we have 1 segment and then we can't measure if global ordinals
+ // is loaded by the size of the field data cache, because global ordinals on 1 segment shards takes no extra memory.
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ refresh();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Only eager_global_ordinals over multiple segments costs memory.
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
+ }
+
+ // Switches the child mapping from default loading to eager_global_ordinals
+ // at runtime via put-mapping and verifies the setting reaches the shard's
+ // DocumentMapper and takes effect once a new searcher is opened.
+ @Test
+ public void testChangingEagerParentFieldLoadingAtRuntime() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(indexSettings)
+ .addMapping("parent")
+ .addMapping("child", "_parent", "type=parent"));
+ ensureGreen();
+
+ client().prepareIndex("test", "parent", "1").setSource("{}").get();
+ client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
+ refresh();
+
+ // Baseline: default loading on a current-version index uses no heap.
+ ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child")
+ .setSource(childMapping(MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS))
+ .get();
+ assertAcked(putMappingResponse);
+ // Mapping updates reach the shard asynchronously: poll until the child
+ // DocumentMapper on the shard's node reports the new loading mode.
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState clusterState = internalCluster().clusterService().state();
+ ShardRouting shardRouting = clusterState.routingTable().index("test").shard(0).getShards().get(0);
+ String nodeName = clusterState.getNodes().get(shardRouting.currentNodeId()).getName();
+
+ boolean verified = false;
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
+ IndexService indexService = indicesService.indexService("test");
+ if (indexService != null) {
+ MapperService mapperService = indexService.mapperService();
+ DocumentMapper documentMapper = mapperService.documentMapper("child");
+ if (documentMapper != null) {
+ verified = documentMapper.parentFieldMapper().fieldType().fieldDataType().getLoading() == MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS;
+ }
+ }
+ assertTrue(verified);
+ }
+ });
+
+ // Need to add a new doc otherwise the refresh doesn't trigger a new searcher
+ // Because it ends up in its own segment, but isn't of type parent or child, this doc doesn't contribute to the size of the fielddata cache
+ client().prepareIndex("test", "dummy", "dummy").setSource("{}").get();
+ refresh();
+ response = client().admin().cluster().prepareClusterStats().get();
+ assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(fielddataSizeDefault));
+ }
+
+ // Builds a child mapping whose _parent fielddata uses the given loading mode.
+ private XContentBuilder childMapping(MappedFieldType.Loading loading) throws IOException {
+ return jsonBuilder().startObject().startObject("child").startObject("_parent")
+ .field("type", "parent")
+ .startObject("fielddata").field(MappedFieldType.Loading.KEY, loading).endObject()
+ .endObject().endObject().endObject();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/child/bool-query-with-empty-clauses.json b/core/src/test/java/org/elasticsearch/search/child/bool-query-with-empty-clauses.json
new file mode 100644
index 0000000000..844b5915a4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/child/bool-query-with-empty-clauses.json
@@ -0,0 +1,19 @@
+{
+"query": {
+ "filtered": {
+ "filter": {
+ "has_parent": {
+ "type": "foo",
+ "query": {
+ "bool": {
+ "must": [],
+ "must_not": [],
+ "should": []
+ }
+ }
+ },
+ "query": []
+ }
+ }
+}
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java b/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
new file mode 100644
index 0000000000..5cd5a9acc3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/compress/SearchSourceCompressTests.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.compress;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.compress.Compressor;
+import org.elasticsearch.common.compress.CompressorFactory;
+import org.elasticsearch.common.compress.lzf.LZFTestCompressor;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Verifies that _source stored with LZF compression (a pre-2.0 option on a
+ * V_1_4_2-created index) round-trips byte-for-byte through both the get and
+ * search APIs, for compress=true, compress=false and compress unset.
+ */
+public class SearchSourceCompressTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testSourceCompressionLZF() throws IOException {
+ // Swap in the LZF test compressor globally for the duration of the
+ // test, restoring the original default afterwards.
+ final Compressor defaultCompressor = CompressorFactory.defaultCompressor();
+ try {
+ CompressorFactory.setDefaultCompressor(new LZFTestCompressor());
+ verifySource(true);
+ verifySource(false);
+ verifySource(null);
+ } finally {
+ CompressorFactory.setDefaultCompressor(defaultCompressor);
+ }
+ }
+
+ // Rebuilds the "test" index with the given _source.compress setting
+ // (null = mapping default), indexes docs of increasing size plus one very
+ // large doc (id 10000), and asserts the stored _source returned by get and
+ // search is byte-identical to what was indexed.
+ private void verifySource(Boolean compress) throws IOException {
+ // Delete any index left over from the previous verifySource call.
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+ // _source compress is only honored on pre-2.0 indices, hence V_1_4_2.
+ Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ createIndex("test", settings);
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("compress", compress).endObject()
+ .endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ // Docs 1..99 grow linearly in size; doc 10000 is large enough that it
+ // will actually be compressed when compression is enabled.
+ for (int i = 1; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(buildSource(i)).execute().actionGet();
+ }
+ client().prepareIndex("test", "type1", Integer.toString(10000)).setSource(buildSource(10000)).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Get API must return the exact original _source bytes.
+ for (int i = 1; i < 100; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ GetResponse getResponse = client().prepareGet("test", "type1", Integer.toString(10000)).execute().actionGet();
+ assertThat(getResponse.getSourceAsBytes(), equalTo(buildSource(10000).bytes().toBytes()));
+
+ // Search API (ids query) must return the same exact bytes as well.
+ for (int i = 1; i < 100; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(i))).execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).source(), equalTo(buildSource(i).bytes().toBytes()));
+ }
+ }
+
+ // Builds a source doc with a single "field" containing `count` repeated
+ // "valueN " tokens, so doc size scales with `count`.
+ private XContentBuilder buildSource(int count) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ StringBuilder sb = new StringBuilder();
+ for (int j = 0; j < count; j++) {
+ sb.append("value").append(j).append(' ');
+ }
+ builder.field("field", sb.toString());
+ return builder.endObject();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java b/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java
new file mode 100644
index 0000000000..87e43db68e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fetch.innerhits;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ConstantScoreQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
+import org.apache.lucene.search.join.BitDocIdSetFilter;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.fetch.innerhits.InnerHitsContext.NestedInnerHits.NestedChildrenQuery;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Unit test for InnerHitsContext.NestedInnerHits.NestedChildrenQuery: for a
+ * randomly generated block-join index (children stored immediately before
+ * their parent in one addDocuments() call), the query scoped to a given
+ * parent doc must match exactly that parent's child docs.
+ */
+public class NestedChildrenFilterTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testNestedChildrenFilter() throws Exception {
+ int numParentDocs = scaledRandomIntBetween(0, 32);
+ int maxChildDocsPerParent = scaledRandomIntBetween(8, 16);
+
+ Directory dir = newDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
+ for (int i = 0; i < numParentDocs; i++) {
+ int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent);
+ // Children first, parent last — addDocuments() keeps the block
+ // contiguous, which is the layout NestedChildrenQuery relies on.
+ List<Document> docs = new ArrayList<>(numChildDocs + 1);
+ for (int j = 0; j < numChildDocs; j++) {
+ Document childDoc = new Document();
+ childDoc.add(new StringField("type", "child", Field.Store.NO));
+ docs.add(childDoc);
+ }
+
+ // The parent records its own child count so the assertion below can
+ // recover the expected hit count per parent from the index itself.
+ Document parenDoc = new Document();
+ parenDoc.add(new StringField("type", "parent", Field.Store.NO));
+ parenDoc.add(new IntField("num_child_docs", numChildDocs, Field.Store.YES));
+ docs.add(parenDoc);
+ writer.addDocuments(docs);
+ }
+
+ IndexReader reader = writer.getReader();
+ writer.close();
+
+ IndexSearcher searcher = new IndexSearcher(reader);
+ FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
+ BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("type", "parent"))));
+ Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "child")));
+ int checkedParents = 0;
+ // For every parent doc in every segment, a NestedChildrenQuery scoped to
+ // that parent must hit exactly the stored num_child_docs children.
+ for (LeafReaderContext leaf : reader.leaves()) {
+ DocIdSetIterator parents = parentFilter.getDocIdSet(leaf).iterator();
+ for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) {
+ int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue();
+ hitContext.reset(null, leaf, parentDoc, searcher);
+ NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext);
+ TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
+ searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector);
+ assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs));
+ checkedParents++;
+ }
+ }
+ // Sanity check: every parent was visited exactly once across segments.
+ assertThat(checkedParents, equalTo(numParentDocs));
+ reader.close();
+ dir.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
new file mode 100644
index 0000000000..f9c02f5229
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/fields/SearchFieldsTests.java
@@ -0,0 +1,828 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.fields;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.refreshRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class SearchFieldsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testStoredFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ // _timestamp and _size are randomly enabled via templates but we don't want it here to test stored fields behaviour
+ .startObject("_timestamp").field("enabled", false).endObject()
+ .startObject("_size").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").endObject()
+ .startObject("field2").field("type", "string").field("store", "no").endObject()
+ .startObject("field3").field("type", "string").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("field1", "value1")
+ .field("field2", "value2")
+ .field("field3", "value3")
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field1").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+
+ // field2 is not stored, check that it gets extracted from source
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field2").value().toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), nullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").addField("_source").execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).source(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
+ }
+
+ @Test
+ public void testScriptDocAndFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("num1").field("type", "double").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00").endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", new Script("doc['num1'].value"))
+ .addScriptField("sNum1_field", new Script("_fields['num1'].value"))
+ .addScriptField("date1", new Script("doc['date'].date.millis"))
+ .execute().actionGet();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).isSourceEmpty(), equalTo(true));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1_field").values().get(0), equalTo(1.0));
+ assertThat((Long) response.getHits().getAt(0).fields().get("date1").values().get(0), equalTo(0l));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1_field").values().get(0), equalTo(2.0));
+ assertThat((Long) response.getHits().getAt(1).fields().get("date1").values().get(0), equalTo(25000l));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat(response.getHits().getAt(2).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1_field").values().get(0), equalTo(3.0));
+ assertThat((Long) response.getHits().getAt(2).fields().get("date1").values().get(0), equalTo(120000l));
+
+ logger.info("running doc['num1'].value * factor");
+ Map<String, Object> params = MapBuilder.<String, Object>newMapBuilder().put("factor", 2.0).map();
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", new Script("doc['num1'].value * factor", ScriptType.INLINE, null, params))
+ .execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(4.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat(response.getHits().getAt(2).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(6.0));
+ }
+
+ @Test
+ public void testUidBasedScriptFields() throws Exception {
+ prepareCreate("test").addMapping("type1", "num1", "type=long").execute().actionGet();
+ ensureYellow();
+
+ int numDocs = randomIntBetween(1, 30);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(jsonBuilder().startObject().field("num1", i).endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("uid", new Script("_fields._uid.value")).get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long)numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String)response.getHits().getAt(i).fields().get("uid").value(), equalTo("type1#" + Integer.toString(i)));
+ }
+
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("id", new Script("_fields._id.value")).get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long)numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String)response.getHits().getAt(i).fields().get("id").value(), equalTo(Integer.toString(i)));
+ }
+
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("type", new Script("_fields._type.value")).get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long)numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String)response.getHits().getAt(i).fields().get("type").value(), equalTo("type1"));
+ }
+
+ response = client().prepareSearch()
+ .setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("id", new Script("_fields._id.value")).addScriptField("uid", new Script("_fields._uid.value"))
+ .addScriptField("type", new Script("_fields._type.value")).get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long)numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(3));
+ assertThat((String)response.getHits().getAt(i).fields().get("uid").value(), equalTo("type1#" + Integer.toString(i)));
+ assertThat((String)response.getHits().getAt(i).fields().get("type").value(), equalTo("type1"));
+ assertThat((String)response.getHits().getAt(i).fields().get("id").value(), equalTo(Integer.toString(i)));
+ }
+ }
+
+ @Test
+ public void testScriptFieldUsingSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .startObject("obj1").field("test", "something").endObject()
+ .startObject("obj2").startArray("arr2").value("arr_value1").value("arr_value2").endArray().endObject()
+ .startArray("arr3").startObject().field("arr3_field1", "arr3_value1").endObject().endArray()
+ .endObject())
+ .execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+.addScriptField("s_obj1", new Script("_source.obj1"))
+ .addScriptField("s_obj1_test", new Script("_source.obj1.test")).addScriptField("s_obj2", new Script("_source.obj2"))
+ .addScriptField("s_obj2_arr2", new Script("_source.obj2.arr2")).addScriptField("s_arr3", new Script("_source.arr3"))
+ .execute().actionGet();
+
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj1 = response.getHits().getAt(0).field("s_obj1").value();
+ assertThat(sObj1.get("test").toString(), equalTo("something"));
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj2 = response.getHits().getAt(0).field("s_obj2").value();
+ List sObj2Arr2 = (List) sObj2.get("arr2");
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").values();
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").values();
+ assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1"));
+ }
+
+ @Test
+ public void testPartialFields() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .field("field1", "value1")
+ .startObject("obj1")
+ .startArray("arr1")
+ .startObject().startObject("obj2").field("field2", "value21").endObject().endObject()
+ .startObject().startObject("obj2").field("field2", "value22").endObject().endObject()
+ .endArray()
+ .endObject()
+ .endObject())
+ .execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ }
+
+ @Test
+ public void testStoredFieldsWithoutSource() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("byte_field").field("type", "byte").field("store", "yes").endObject()
+ .startObject("short_field").field("type", "short").field("store", "yes").endObject()
+ .startObject("integer_field").field("type", "integer").field("store", "yes").endObject()
+ .startObject("long_field").field("type", "long").field("store", "yes").endObject()
+ .startObject("float_field").field("type", "float").field("store", "yes").endObject()
+ .startObject("double_field").field("type", "double").field("store", "yes").endObject()
+ .startObject("date_field").field("type", "date").field("store", "yes").endObject()
+ .startObject("boolean_field").field("type", "boolean").field("store", "yes").endObject()
+ .startObject("binary_field").field("type", "binary").field("store", "yes").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .field("binary_field", Base64.encodeBytes("testing text".getBytes("UTF8")))
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addField("byte_field")
+ .addField("short_field")
+ .addField("integer_field")
+ .addField("long_field")
+ .addField("float_field")
+ .addField("double_field")
+ .addField("date_field")
+ .addField("boolean_field")
+ .addField("binary_field")
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
+
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0f));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ String dateTime = Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) dateTime));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) Boolean.TRUE));
+ assertThat(((BytesReference) searchResponse.getHits().getAt(0).fields().get("binary_field").value()).toBytesArray(), equalTo((BytesReference) new BytesArray("testing text".getBytes("UTF8"))));
+
+ }
+
+ @Test
+ public void testSearchFields_metaData() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setRouting("1")
+ .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("my-index")
+ .setTypes("my-type1")
+ .addField("field1").addField("_routing")
+ .get();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value"));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").isMetadataField(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1"));
+ }
+
+ @Test
+ public void testSearchFields_nonLeafField() throws Exception {
+ client().prepareIndex("my-index", "my-type1", "1")
+ .setSource(jsonBuilder().startObject().startObject("field1").field("field2", "value1").endObject().endObject())
+ .setRefresh(true)
+ .get();
+
+ assertFailures(client().prepareSearch("my-index").setTypes("my-type1").addField("field1"),
+ RestStatus.BAD_REQUEST,
+ containsString("field [field1] isn't a leaf field"));
+ }
+
+ @Test
+ public void testGetFields_complexField() throws Exception {
+ client().admin().indices().prepareCreate("my-index")
+ .setSettings(Settings.settingsBuilder().put("index.refresh_interval", -1))
+ .addMapping("my-type2", jsonBuilder().startObject().startObject("my-type2").startObject("properties")
+ .startObject("field1").field("type", "object").startObject("properties")
+ .startObject("field2").field("type", "object").startObject("properties")
+ .startObject("field3").field("type", "object").startObject("properties")
+ .startObject("field4").field("type", "string").field("store", "yes")
+ .endObject().endObject()
+ .endObject().endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject())
+ .get();
+
+ BytesReference source = jsonBuilder().startObject()
+ .startArray("field1")
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value1")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("field2")
+ .startArray("field3")
+ .startObject()
+ .field("field4", "value2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject().bytes();
+
+ client().prepareIndex("my-index", "my-type1", "1").setSource(source).get();
+ client().prepareIndex("my-index", "my-type2", "1").setRefresh(true).setSource(source).get();
+
+
+ String field = "field1.field2.field3.field4";
+ SearchResponse searchResponse = client().prepareSearch("my-index").setTypes("my-type1").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+
+ searchResponse = client().prepareSearch("my-index").setTypes("my-type2").addField(field).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().getAt(0).field(field).isMetadataField(), equalTo(false));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().size(), equalTo(2));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(0).toString(), equalTo("value1"));
+ assertThat(searchResponse.getHits().getAt(0).field(field).getValues().get(1).toString(), equalTo("value2"));
+ }
+
+ @Test // see #8203
+ public void testSingleValueFieldDatatField() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("test_field", "foobar"));
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("type").setSource(new BytesArray(new BytesRef("{\"query\":{\"match_all\":{}},\"fielddata_fields\": \"test_field\"}"))).get();
+ assertHitCount(searchResponse, 1);
+ Map<String,SearchHitField> fields = searchResponse.getHits().getHits()[0].getFields();
+ assertThat((String)fields.get("test_field").value(), equalTo("foobar"));
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testInvalidFieldDataField() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ if (randomBoolean()) {
+ client().prepareSearch("test").setTypes("type").setSource(new BytesArray(new BytesRef("{\"query\":{\"match_all\":{}},\"fielddata_fields\": {}}"))).get();
+ } else {
+ client().prepareSearch("test").setTypes("type").setSource(new BytesArray(new BytesRef("{\"query\":{\"match_all\":{}},\"fielddata_fields\": 1.0}"))).get();
+ }
+ }
+
+ @Test
+ public void testFieldsPulledFromFieldData() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("string_field").field("type", "string").endObject()
+ .startObject("byte_field").field("type", "byte").endObject()
+ .startObject("short_field").field("type", "short").endObject()
+ .startObject("integer_field").field("type", "integer").endObject()
+ .startObject("long_field").field("type", "long").endObject()
+ .startObject("float_field").field("type", "float").endObject()
+ .startObject("double_field").field("type", "double").endObject()
+ .startObject("date_field").field("type", "date").endObject()
+ .startObject("boolean_field").field("type", "boolean").endObject()
+ .startObject("binary_field").field("type", "binary").endObject()
+ .endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("string_field", "foo")
+ .field("byte_field", (byte) 1)
+ .field("short_field", (short) 2)
+ .field("integer_field", 3)
+ .field("long_field", 4l)
+ .field("float_field", 5.0f)
+ .field("double_field", 6.0d)
+ .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(new DateTime(2012, 3, 22, 0, 0, DateTimeZone.UTC)))
+ .field("boolean_field", true)
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery())
+ .addFieldDataField("string_field")
+ .addFieldDataField("byte_field")
+ .addFieldDataField("short_field")
+ .addFieldDataField("integer_field")
+ .addFieldDataField("long_field")
+ .addFieldDataField("float_field")
+ .addFieldDataField("double_field")
+ .addFieldDataField("date_field")
+ .addFieldDataField("boolean_field");
+ SearchResponse searchResponse = builder.execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(9));
+
+ assertThat(searchResponse.getHits().getAt(0).fields().get("byte_field").value().toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("short_field").value().toString(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("integer_field").value(), equalTo((Object) 3l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("long_field").value(), equalTo((Object) 4l));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("float_field").value(), equalTo((Object) 5.0));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("double_field").value(), equalTo((Object) 6.0d));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("date_field").value(), equalTo((Object) 1332374400000L));
+ assertThat(searchResponse.getHits().getAt(0).fields().get("boolean_field").value(), equalTo((Object) 1L));
+
+ }
+
+ public void testScriptFields() throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type",
+ "s", "type=string,index=not_analyzed",
+ "l", "type=long",
+ "d", "type=double",
+ "ms", "type=string,index=not_analyzed",
+ "ml", "type=long",
+ "md", "type=double").get());
+ final int numDocs = randomIntBetween(3, 8);
+ List<IndexRequestBuilder> reqs = new ArrayList<>();
+ for (int i = 0; i < numDocs; ++i) {
+ reqs.add(client().prepareIndex("index", "type", Integer.toString(i)).setSource(
+ "s", Integer.toString(i),
+ "ms", new String[] {Integer.toString(i), Integer.toString(i+1)},
+ "l", i,
+ "ml", new long[] {i, i+1},
+ "d", i,
+ "md", new double[] {i, i+1}));
+ }
+ indexRandom(true, reqs);
+ ensureSearchable();
+ SearchRequestBuilder req = client().prepareSearch("index");
+ for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) {
+ req.addScriptField(field, new Script("doc['" + field + "'].values"));
+ }
+ SearchResponse resp = req.get();
+ assertSearchResponse(resp);
+ for (SearchHit hit : resp.getHits().getHits()) {
+ final int id = Integer.parseInt(hit.getId());
+ Map<String, SearchHitField> fields = hit.getFields();
+ assertThat(fields.get("s").getValues(), equalTo(Collections.<Object> singletonList(Integer.toString(id))));
+ assertThat(fields.get("l").getValues(), equalTo(Collections.<Object> singletonList((long) id)));
+ assertThat(fields.get("d").getValues(), equalTo(Collections.<Object> singletonList((double) id)));
+ assertThat(fields.get("ms").getValues(), equalTo(Arrays.<Object> asList(Integer.toString(id), Integer.toString(id + 1))));
+ assertThat(fields.get("ml").getValues(), equalTo(Arrays.<Object> asList((long) id, id + 1L)));
+ assertThat(fields.get("md").getValues(), equalTo(Arrays.<Object> asList((double) id, id + 1d)));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0 -- duplicates the Script-object-API test above using the
+ * deprecated plain-string script API (addScriptField(name, String)).
+ */
+ @Test
+ public void testScriptDocAndFieldsOldScriptAPI() throws Exception {
+ createIndex("test");
+ // Wait until the index is at least yellow before mapping and indexing.
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ // num1 is stored so it can be fetched back via _fields['num1'] as well as doc['num1'].
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("num1")
+ .field("type", "double").field("store", "yes").endObject().endObject().endObject().endObject().string();
+
+ client().admin().indices().preparePutMapping().setType("type1").setSource(mapping).execute().actionGet();
+
+ // Flush between docs to spread them over separate segments.
+ client().prepareIndex("test", "type1", "1")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).field("date", "1970-01-01T00:00:00")
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).field("date", "1970-01-01T00:00:25")
+ .endObject()).execute().actionGet();
+ client().admin().indices().prepareFlush().execute().actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).field("date", "1970-01-01T00:02:00")
+ .endObject()).execute().actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ // Script fields via doc values, stored fields and date millis; no stored _source requested.
+ logger.info("running doc['num1'].value");
+ SearchResponse response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value").addScriptField("sNum1_field", "_fields['num1'].value")
+ .addScriptField("date1", "doc['date'].date.millis").execute().actionGet();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ // Requesting only script fields means _source is not returned.
+ assertThat(response.getHits().getAt(0).isSourceEmpty(), equalTo(true));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1_field").values().get(0), equalTo(1.0));
+ assertThat((Long) response.getHits().getAt(0).fields().get("date1").values().get(0), equalTo(0l));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1_field").values().get(0), equalTo(2.0));
+ assertThat((Long) response.getHits().getAt(1).fields().get("date1").values().get(0), equalTo(25000l));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat(response.getHits().getAt(2).fields().size(), equalTo(3));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1_field").values().get(0), equalTo(3.0));
+ assertThat((Long) response.getHits().getAt(2).fields().get("date1").values().get(0), equalTo(120000l));
+
+ // Same script but parameterized: factor=2.0 doubles each value.
+ logger.info("running doc['num1'].value * factor");
+ Map<String, Object> params = MapBuilder.<String, Object> newMapBuilder().put("factor", 2.0).map();
+ response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", "doc['num1'].value * factor", params).execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(response.getHits().getAt(1).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(4.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat(response.getHits().getAt(2).fields().size(), equalTo(1));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(6.0));
+ }
+
+ /*
+ * TODO Remove in 2.0 -- deprecated string-script variant. Verifies that the
+ * _fields._uid / _id / _type pseudo-fields are accessible from script fields.
+ */
+ @Test
+ public void testUidBasedScriptFieldsOldScriptAPI() throws Exception {
+ prepareCreate("test").addMapping("type1", "num1", "type=long").execute().actionGet();
+ ensureYellow();
+
+ int numDocs = randomIntBetween(1, 30);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("num1", i).endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ // _uid is "type#id".
+ SearchResponse response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("uid", "_fields._uid.value").get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long) numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(i).fields().get("uid").value(), equalTo("type1#" + Integer.toString(i)));
+ }
+
+ // _id alone.
+ response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("id", "_fields._id.value").get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long) numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(i).fields().get("id").value(), equalTo(Integer.toString(i)));
+ }
+
+ // _type alone.
+ response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("type", "_fields._type.value").get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long) numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(i).fields().get("type").value(), equalTo("type1"));
+ }
+
+ // All three combined in one request.
+ response = client().prepareSearch().setQuery(matchAllQuery()).addSort("num1", SortOrder.ASC).setSize(numDocs)
+ .addScriptField("id", "_fields._id.value").addScriptField("uid", "_fields._uid.value")
+ .addScriptField("type", "_fields._type.value").get();
+
+ assertNoFailures(response);
+
+ assertThat(response.getHits().totalHits(), equalTo((long) numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ assertThat(response.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(response.getHits().getAt(i).fields().size(), equalTo(3));
+ assertThat((String) response.getHits().getAt(i).fields().get("uid").value(), equalTo("type1#" + Integer.toString(i)));
+ assertThat((String) response.getHits().getAt(i).fields().get("type").value(), equalTo("type1"));
+ assertThat((String) response.getHits().getAt(i).fields().get("id").value(), equalTo(Integer.toString(i)));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0 -- deprecated string-script variant. Verifies that scripts
+ * can navigate _source (nested objects, arrays, arrays of objects).
+ */
+ @Test
+ public void testScriptFieldUsingSourceOldScriptAPI() throws Exception {
+ createIndex("test");
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ // One doc: obj1.test (scalar), obj2.arr2 (string array), arr3 (array of objects).
+ client().prepareIndex("test", "type1", "1")
+ .setSource(
+ jsonBuilder().startObject().startObject("obj1").field("test", "something").endObject().startObject("obj2")
+ .startArray("arr2").value("arr_value1").value("arr_value2").endArray().endObject().startArray("arr3")
+ .startObject().field("arr3_field1", "arr3_value1").endObject().endArray().endObject()).execute()
+ .actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ SearchResponse response = client().prepareSearch().setQuery(matchAllQuery()).addScriptField("s_obj1", "_source.obj1")
+ .addScriptField("s_obj1_test", "_source.obj1.test").addScriptField("s_obj2", "_source.obj2")
+ .addScriptField("s_obj2_arr2", "_source.obj2.arr2").addScriptField("s_arr3", "_source.arr3").execute().actionGet();
+
+ assertThat("Failures " + Arrays.toString(response.getShardFailures()), response.getShardFailures().length, equalTo(0));
+
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ // A whole object comes back as a Map.
+ Map<String, Object> sObj1 = response.getHits().getAt(0).field("s_obj1").value();
+ assertThat(sObj1.get("test").toString(), equalTo("something"));
+ assertThat(response.getHits().getAt(0).field("s_obj1_test").value().toString(), equalTo("something"));
+
+ Map<String, Object> sObj2 = response.getHits().getAt(0).field("s_obj2").value();
+ List sObj2Arr2 = (List) sObj2.get("arr2");
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ // Addressing the array directly yields the same elements via values().
+ sObj2Arr2 = response.getHits().getAt(0).field("s_obj2_arr2").values();
+ assertThat(sObj2Arr2.size(), equalTo(2));
+ assertThat(sObj2Arr2.get(0).toString(), equalTo("arr_value1"));
+ assertThat(sObj2Arr2.get(1).toString(), equalTo("arr_value2"));
+
+ List sObj2Arr3 = response.getHits().getAt(0).field("s_arr3").values();
+ assertThat(((Map) sObj2Arr3.get(0)).get("arr3_field1").toString(), equalTo("arr3_value1"));
+ }
+
+ /*
+ * TODO Remove in 2.0 -- deprecated string-script variant of the typed test above.
+ * NOTE(review): no @Test annotation here, unlike the siblings; presumably the
+ * "test" name prefix is enough for the runner used -- confirm.
+ */
+ public void testScriptFieldsOldScriptAPI() throws Exception {
+ // Single-valued (s/l/d) and multi-valued (ms/ml/md) fields of each primitive kind.
+ assertAcked(prepareCreate("index").addMapping("type", "s", "type=string,index=not_analyzed", "l", "type=long", "d", "type=double",
+ "ms", "type=string,index=not_analyzed", "ml", "type=long", "md", "type=double").get());
+ final int numDocs = randomIntBetween(3, 8);
+ List<IndexRequestBuilder> reqs = new ArrayList<>();
+ for (int i = 0; i < numDocs; ++i) {
+ reqs.add(client().prepareIndex("index", "type", Integer.toString(i)).setSource("s", Integer.toString(i), "ms",
+ new String[] { Integer.toString(i), Integer.toString(i + 1) }, "l", i, "ml", new long[] { i, i + 1 }, "d", i, "md",
+ new double[] { i, i + 1 }));
+ }
+ indexRandom(true, reqs);
+ ensureSearchable();
+ SearchRequestBuilder req = client().prepareSearch("index");
+ // One script field per mapped field, each returning the full doc-values list.
+ for (String field : Arrays.asList("s", "ms", "l", "ml", "d", "md")) {
+ req.addScriptField(field, "doc['" + field + "'].values");
+ }
+ SearchResponse resp = req.get();
+ assertSearchResponse(resp);
+ for (SearchHit hit : resp.getHits().getHits()) {
+ final int id = Integer.parseInt(hit.getId());
+ Map<String, SearchHitField> fields = hit.getFields();
+ assertThat(fields.get("s").getValues(), equalTo(Collections.<Object> singletonList(Integer.toString(id))));
+ assertThat(fields.get("l").getValues(), equalTo(Collections.<Object> singletonList((long) id)));
+ assertThat(fields.get("d").getValues(), equalTo(Collections.<Object> singletonList((double) id)));
+ assertThat(fields.get("ms").getValues(), equalTo(Arrays.<Object> asList(Integer.toString(id), Integer.toString(id + 1))));
+ assertThat(fields.get("ml").getValues(), equalTo(Arrays.<Object> asList((long) id, id + 1L)));
+ assertThat(fields.get("md").getValues(), equalTo(Arrays.<Object> asList((double) id, id + 1d)));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
new file mode 100644
index 0000000000..8ea20a47dc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
@@ -0,0 +1,969 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionBuilder;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.isOneOf;
+import static org.hamcrest.Matchers.lessThan;
+
+public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ // Two close docs plus 20 dummies at increasing geo distance; each decay function
+ // (gauss, linear, exp) must rank the two closest docs first.
+ public void testDistanceScoreGeoLinGaussExp() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 10).field("lon", 20).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 22).endObject()
+ .endObject()));
+
+ // Dummy docs drift further from the query origin as i grows.
+ int numDummyDocs = 20;
+ for (int i = 1; i <= numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId(Integer.toString(i + 3))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11 + i).field("lon", 22 + i)
+ .endObject().endObject()));
+ }
+
+ indexRandom(true, indexBuilders);
+
+ // Test Gauss
+ // Origin as [lon, lat] list (GeoJSON order).
+ List<Float> lonlat = new ArrayList<>();
+ lonlat.add(20f);
+ lonlat.add(11f);
+
+ // Baseline: plain query matches all docs.
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(constantScoreQuery(termQuery("test", "value")))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ // Test Lin (note: this section uses linearDecayFunction)
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(constantScoreQuery(termQuery("test", "value")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ // Test Exp (note: this section uses exponentialDecayFunction)
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(constantScoreQuery(termQuery("test", "value")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), exponentialDecayFunction("loc", lonlat, "1000km")))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ }
+
+ @Test
+ // Docs within the decay offset must all receive the maximum (equal) score;
+ // docs beyond it must decay in distance order. Checked for gauss, exp and linear.
+ public void testDistanceScoreGeoLinGaussExpWithOffset() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ // add two docs within offset
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 0.5).endObject()));
+ indexBuilders.add(client().prepareIndex().setType("type1").setId("2").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.7).endObject()));
+
+ // add docs outside offset
+ int numDummyDocs = 20;
+ for (int i = 0; i < numDummyDocs; i++) {
+ indexBuilders.add(client().prepareIndex().setType("type1").setId(Integer.toString(i + 3)).setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 3.0 + i).endObject()));
+ }
+
+ indexRandom(true, indexBuilders);
+
+ // Test Gauss
+
+ // REPLACE boost mode: score is the decay value alone, so ties within the offset are exact.
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+
+ // Test Exp
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"),
+ exponentialDecayFunction("num", 1.0, 5.0).setOffset(1.0)).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ for (int i = 0; i < numDummyDocs; i++) {
+ assertThat(sh.getAt(i + 2).getId(), equalTo(Integer.toString(i + 3)));
+ }
+ // Test Lin
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .size(numDummyDocs + 2)
+ .query(functionScoreQuery(termQuery("test", "value"), linearDecayFunction("num", 1.0, 20.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (numDummyDocs + 2)));
+ assertThat(sh.getAt(0).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).getId(), anyOf(equalTo("1"), equalTo("2")));
+ assertThat(sh.getAt(1).score(), equalTo(sh.getAt(0).score()));
+ }
+
+ @Test
+ // Same gauss decay query under two boost modes: MULT lets the text relevance
+ // (doc 2 matches "value" twice) interact with the decay, REPLACE ignores it,
+ // so the two modes must produce opposite orderings.
+ public void testBoostModeSettingWorks() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ // Doc 1: closer textually weaker; doc 2: at the decay origin and matches the term twice.
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 21).endObject()
+ .endObject()));
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value value").startObject("loc").field("lat", 11).field("lon", 20)
+ .endObject().endObject()));
+ indexRandom(true, false, indexBuilders); // force no dummy docs
+
+ // Test Gauss
+ List<Float> lonlat = new ArrayList<>();
+ lonlat.add(20f);
+ lonlat.add(11f);
+
+ // MULT: query score * decay -- doc 1 wins despite being further away.
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), isOneOf("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+ // Same gauss function, REPLACE boost mode: only distance matters, so doc 2 (at origin) wins.
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", lonlat, "1000km")).boostMode(
+ CombineFunction.REPLACE.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+ assertThat(sh.getAt(0).getId(), equalTo("2"));
+ assertThat(sh.getAt(1).getId(), equalTo("1"));
+
+ }
+
+ @Test
+ // The decay origin may be given as a GeoPoint object or as a float[] {lon, lat};
+ // both forms must parse and produce the same score.
+ public void testParseGeoPoint() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 20).field("lon", 11).endObject()
+ .endObject()).setRefresh(true).get();
+
+ // GeoPoint takes (lat, lon); the doc sits exactly at the origin.
+ GeoPoint point = new GeoPoint(20, 11);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", point, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ // 0.30685282 ~= ln(2)/e-ish constant of the scoring model at this setup; same value expected below.
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ // Array form uses GeoJSON order: {lon, lat}.
+ float[] coords = { 11, 20 };
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("loc", coords, "1000km")).boostMode(
+ CombineFunction.MULT.getName()))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+ }
+
+ @Test
+ // One doc, one gauss function yielding 0.5, query boosted by 2.0: exercises every
+ // CombineFunction boost mode and checks the exact combined score for each.
+ public void testCombineModes() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()).setRefresh(true).get();
+
+ // function score should return 0.5 for this function
+
+ // MULT: queryScore * functionScore.
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MULT))));
+ SearchResponse sr = response.actionGet();
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(0.30685282, 1.e-5));
+
+ // REPLACE: function score only; the query boost does not apply.
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.REPLACE))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+
+ // SUM: boost * (queryScore + functionScore).
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.SUM))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282 + 0.5), 1.e-5));
+ logger.info("--> Hit[0] {} Explanation:\n {}", sr.getHits().getAt(0).id(), sr.getHits().getAt(0).explanation());
+
+ // AVG: mean of query and function scores (boost cancels out per the asserted value).
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.AVG))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo((0.30685282 + 0.5), 1.e-5));
+
+ // MIN: smaller of the two scores (here the query side).
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MIN))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(2.0 * (0.30685282), 1.e-5));
+
+ // MAX: larger of the two scores.
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num", 0.0, 1.0).setDecay(0.5)).boost(
+ 2.0f).boostMode(CombineFunction.MAX))));
+ sr = response.actionGet();
+ sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (1)));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat((double) sh.getAt(0).score(), closeTo(1.0, 1.e-5));
+
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ // A non-positive scale ("-1d") must be rejected at search time; the trailing
+ + // assertions are never reached and only document the would-be ordering.
+ public void testExceptionThrownIfScaleLE0() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-28").endObject())).actionGet();
+ refresh();
+
+ // Negative scale: expected to fail during the search phase.
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d")))));
+
+ SearchResponse sr = response.actionGet();
+ assertOrderedSearchHits(sr, "2", "1");
+ }
+
+ @Test
+ // Date-math origins ("now", "now-1d") must be parsed: moving the origin back a
+ // day flips which of the two docs (now vs. now-24h) scores higher.
+ public void testParseDateMath() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis()).endObject())).actionGet();
+ // Doc 2 is timestamped exactly one day in the past.
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", System.currentTimeMillis() - (1000 * 60 * 60 * 24)).endObject())).actionGet();
+ refresh();
+
+ SearchResponse sr = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "1", "2");
+
+ sr = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "now-1d", "2d"))))).get();
+
+ assertNoFailures(sr);
+ assertOrderedSearchHits(sr, "2", "1");
+
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {
+ DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder("num1", "2013-05-28", "1d").setDecay(100);
+ }
+
+ @Test
+ public void testValueMissingLin() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().startObject("num2").field("type", "double")
+ .endObject().endObject().endObject().endObject())
+ );
+
+ ensureYellow();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num2", "1.0").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").field("num2", "1.0")
+ .endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("4")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-30").endObject())).actionGet();
+
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value"))).add(linearDecayFunction("num1", "2013-05-28", "+3d"))
+ .add(linearDecayFunction("num2", "0.0", "1")).scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(4));
+ double[] scores = new double[4];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
+ assertThat(scores[0], lessThan(scores[1]));
+ assertThat(scores[2], lessThan(scores[3]));
+
+ }
+
+ @Test
+ public void testDateWithoutOrigin() throws Exception {
+ DateTime dt = new DateTime();
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num1").field("type", "date").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ DateTime docDate = dt.minusDays(1);
+ String docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(2);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+ docDate = dt.minusDays(3);
+ docDateString = docDate.getYear() + "-" + docDate.getMonthOfYear() + "-" + docDate.getDayOfMonth();
+ client().index(
+ indexRequest("test").type("type1").id("3")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", docDateString).endObject())).actionGet();
+
+ refresh();
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(QueryBuilders.matchAllQuery()).add(linearDecayFunction("num1", "1000w"))
+ .add(gaussDecayFunction("num1", "1d")).add(exponentialDecayFunction("num1", "1000w"))
+ .scoreMode("multiply"))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(3));
+ double[] scores = new double[4];
+ for (int i = 0; i < sh.hits().length; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId()) - 1] = sh.getAt(i).getScore();
+ }
+ assertThat(scores[1], lessThan(scores[0]));
+ assertThat(scores[2], lessThan(scores[1]));
+
+ }
+
+ @Test
+ public void testManyDocsLin() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("date").field("type", "date").endObject().startObject("num").field("type", "double")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 200;
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+
+ for (int i = 0; i < numDocs; i++) {
+ double lat = 100 + (int) (10.0 * (float) (i) / (float) (numDocs));
+ double lon = 100;
+ int day = (int) (29.0 * (float) (i) / (float) (numDocs)) + 1;
+ String dayString = day < 10 ? "0" + Integer.toString(day) : Integer.toString(day);
+ String date = "2013-05-" + dayString;
+
+ indexBuilders.add(client().prepareIndex()
+ .setType("type")
+ .setId(Integer.toString(i))
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").field("date", date).field("num", i).startObject("geo")
+ .field("lat", lat).field("lon", lon).endObject().endObject()));
+ }
+ indexRandom(true, indexBuilders);
+ List<Float> lonlat = new ArrayList<>();
+ lonlat.add(100f);
+ lonlat.add(110f);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().size(numDocs).query(
+ functionScoreQuery(termQuery("test", "value"))
+ .add(new MatchAllQueryBuilder(), linearDecayFunction("date", "2013-05-30", "+15d"))
+ .add(new MatchAllQueryBuilder(), linearDecayFunction("geo", lonlat, "1000km"))
+ .add(new MatchAllQueryBuilder(), linearDecayFunction("num", numDocs, numDocs / 2.0))
+ .scoreMode("multiply").boostMode(CombineFunction.REPLACE.getName()))));
+
+ SearchResponse sr = response.actionGet();
+ assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+ assertThat(sh.hits().length, equalTo(numDocs));
+ double[] scores = new double[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ scores[Integer.parseInt(sh.getAt(i).getId())] = sh.getAt(i).getScore();
+ }
+ for (int i = 0; i < numDocs - 1; i++) {
+ assertThat(scores[i], lessThan(scores[i + 1]));
+ }
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("geo").field("type", "geo_point").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ int numDocs = 2;
+ client().index(
+ indexRequest("test").type("type1").source(
+ jsonBuilder().startObject().field("test", "value").startObject("geo").field("lat", 1).field("lon", 2).endObject()
+ .endObject())).actionGet();
+ refresh();
+ List<Float> lonlat = new ArrayList<>();
+ lonlat.add(100f);
+ lonlat.add(110f);
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource()
+ .size(numDocs)
+ .query(functionScoreQuery(termQuery("test", "value")).add(new MatchAllQueryBuilder(),
+ linearDecayFunction("type1.geo", lonlat, "1000km")).scoreMode("multiply"))));
+ SearchResponse sr = response.actionGet();
+
+ }
+
+ @Test(expected = SearchPhaseExecutionException.class)
+ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "string").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(
+ jsonBuilder().startObject().field("test", "value").field("num", Integer.toString(1)).endObject())).actionGet();
+ refresh();
+ // so, we indexed a string field, but now we try to score a num field
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery(termQuery("test", "value")).add(new MatchAllQueryBuilder(),
+ linearDecayFunction("num", 1.0, 0.5)).scoreMode("multiply"))));
+ response.actionGet();
+ }
+
+ @Test
+ public void testNoQueryGiven() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()))
+ .actionGet();
+ refresh();
+ // so, we indexed a string field, but now we try to score a num field
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().query(
+ functionScoreQuery().add(new MatchAllQueryBuilder(), linearDecayFunction("num", 1, 0.5)).scoreMode(
+ "multiply"))));
+ response.actionGet();
+ }
+
+ @Test
+ public void testMultiFieldOptions() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("loc").field("type", "geo_point").endObject().startObject("num").field("type", "float").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ // Index for testing MIN and MAX
+ IndexRequestBuilder doc1 = client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startArray("loc").startObject().field("lat", 10).field("lon", 20).endObject().startObject().field("lat", 12).field("lon", 23).endObject().endArray()
+ .endObject());
+ IndexRequestBuilder doc2 = client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startObject("loc").field("lat", 11).field("lon", 22).endObject()
+ .endObject());
+
+ indexRandom(true, doc1, doc2);
+
+ ActionFuture<SearchResponse> response = client().search(
+ searchRequest().source(
+ searchSource().query(constantScoreQuery(termQuery("test", "value")))));
+ SearchResponse sr = response.actionGet();
+ assertSearchHits(sr, "1", "2");
+ SearchHits sh = sr.getHits();
+ assertThat(sh.getTotalHits(), equalTo((long) (2)));
+
+ List<Float> lonlat = new ArrayList<>();
+ lonlat.add(20f);
+ lonlat.add(10f);
+ response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode("min")))));
+ sr = response.actionGet();
+ assertSearchHits(sr, "1", "2");
+ sh = sr.getHits();
+
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+ response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), gaussDecayFunction("loc", lonlat, "1000km").setMultiValueMode("max")))));
+ sr = response.actionGet();
+ assertSearchHits(sr, "1", "2");
+ sh = sr.getHits();
+
+ assertThat(sh.getAt(0).getId(), equalTo("2"));
+ assertThat(sh.getAt(1).getId(), equalTo("1"));
+
+ // Now test AVG and SUM
+
+ doc1 = client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").startArray("num").value(0.0).value(1.0).value(2.0).endArray()
+ .endObject());
+ doc2 = client().prepareIndex()
+ .setType("type1")
+ .setId("2")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").field("num", 1.0)
+ .endObject());
+
+ indexRandom(true, doc1, doc2);
+ response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("num", "0", "10").setMultiValueMode("sum")))));
+ sr = response.actionGet();
+ assertSearchHits(sr, "1", "2");
+ sh = sr.getHits();
+
+ assertThat(sh.getAt(0).getId(), equalTo("2"));
+ assertThat(sh.getAt(1).getId(), equalTo("1"));
+ assertThat((double)(1.0 - sh.getAt(0).getScore()), closeTo((double)((1.0 - sh.getAt(1).getScore())/3.0), 1.e-6d));
+ response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery("test", "value")), linearDecayFunction("num", "0", "10").setMultiValueMode("avg")))));
+ sr = response.actionGet();
+ assertSearchHits(sr, "1", "2");
+ sh = sr.getHits();
+ assertThat((double) (sh.getAt(0).getScore()), closeTo((double) (sh.getAt(1).getScore()), 1.e-6d));
+ }
+
+ @Test
+ public void errorMessageForFaultyFunctionScoreBody() throws Exception {
+ assertAcked(prepareCreate("test").addMapping(
+ "type",
+ jsonBuilder().startObject().startObject("type").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+ client().index(
+ indexRequest("test").type("type").source(jsonBuilder().startObject().field("test", "value").field("num", 1.0).endObject()))
+ .actionGet();
+ refresh();
+
+ XContentBuilder query = XContentFactory.jsonBuilder();
+ // query that contains a functions[] array but also a single function
+ query.startObject().startObject("function_score").startArray("functions").startObject().field("boost_factor", "1.3").endObject().endArray().field("boost_factor", "1").endObject().endObject();
+ try {
+ client().search(
+ searchRequest().source(
+ searchSource().query(query))).actionGet();
+ fail("Search should result in SearchPhaseExecutionException");
+ } catch (SearchPhaseExecutionException e) {
+ logger.info(e.shardFailures()[0].reason());
+ assertTrue(e.shardFailures()[0].reason().contains("Found \"functions\": [...] already, now encountering \"boost_factor\". Did you mean \"boost\" instead?"));
+ }
+
+ query = XContentFactory.jsonBuilder();
+ // query that contains a single function and a functions[] array
+ query.startObject().startObject("function_score").field("boost_factor", "1").startArray("functions").startObject().field("boost_factor", "1.3").endObject().endArray().endObject().endObject();
+ try {
+ client().search(
+ searchRequest().source(
+ searchSource().query(query))).actionGet();
+ fail("Search should result in SearchPhaseExecutionException");
+ } catch (SearchPhaseExecutionException e) {
+ logger.info(e.shardFailures()[0].reason());
+ assertTrue(e.shardFailures()[0].reason().contains("Found \"boost_factor\" already, now encountering \"functions\": [...]. Did you mean \"boost\" instead?"));
+ }
+
+ query = XContentFactory.jsonBuilder();
+ // query that contains a single function (but not boost factor) and a functions[] array
+ query.startObject().startObject("function_score").startObject("random_score").field("seed", 3).endObject().startArray("functions").startObject().startObject("random_score").field("seed", 3).endObject().endObject().endArray().endObject().endObject();
+ try {
+ client().search(
+ searchRequest().source(
+ searchSource().query(query))).actionGet();
+ fail("Search should result in SearchPhaseExecutionException");
+ } catch (SearchPhaseExecutionException e) {
+ logger.info(e.shardFailures()[0].reason());
+ assertTrue(e.shardFailures()[0].reason().contains("Found \"random_score\" already, now encountering \"functions\": [...]."));
+ assertFalse(e.shardFailures()[0].reason().contains("Did you mean \"boost\" instead?"));
+
+ }
+ }
+
+ // issue https://github.com/elasticsearch/elasticsearch/issues/6292
+ @Test
+ public void testMissingFunctionThrowsElasticsearchParseException() throws IOException {
+
+ // example from issue https://github.com/elasticsearch/elasticsearch/issues/6292
+ String doc = "{\n" +
+ " \"text\": \"baseball bats\"\n" +
+ "}\n";
+
+ String query = "{\n" +
+ " \"function_score\": {\n" +
+ " \"score_mode\": \"sum\",\n" +
+ " \"boost_mode\": \"replace\",\n" +
+ " \"functions\": [\n" +
+ " {\n" +
+ " \"filter\": {\n" +
+ " \"term\": {\n" +
+ " \"text\": \"baseball\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " ]\n" +
+ " }\n" +
+ "}\n";
+
+ client().prepareIndex("t", "test").setSource(doc).get();
+ refresh();
+ ensureYellow("t");
+ try {
+ client().search(
+ searchRequest().source(
+ searchSource().query(query))).actionGet();
+ fail("Should fail with SearchPhaseExecutionException");
+ } catch (SearchPhaseExecutionException failure) {
+ assertTrue(failure.toString().contains("SearchParseException"));
+ assertFalse(failure.toString().contains("NullPointerException"));
+ }
+
+ query = "{\n" +
+ " \"function_score\": {\n" +
+ " \"score_mode\": \"sum\",\n" +
+ " \"boost_mode\": \"replace\",\n" +
+ " \"functions\": [\n" +
+ " {\n" +
+ " \"filter\": {\n" +
+ " \"term\": {\n" +
+ " \"text\": \"baseball\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"boost_factor\": 2\n" +
+ " },\n" +
+ " {\n" +
+ " \"filter\": {\n" +
+ " \"term\": {\n" +
+ " \"text\": \"baseball\"\n" +
+ " }\n" +
+ " }\n" +
+ " }\n" +
+ " ]\n" +
+ " }\n" +
+ "}";
+
+ try {
+ client().search(
+ searchRequest().source(
+ searchSource().query(query))).actionGet();
+ fail("Should fail with SearchPhaseExecutionException");
+ } catch (SearchPhaseExecutionException failure) {
+ assertTrue(failure.toString().contains("SearchParseException"));
+ assertFalse(failure.toString().contains("NullPointerException"));
+ assertTrue(failure.toString().contains("One entry in functions list is missing a function"));
+ }
+
+ // next test java client
+ try {
+ client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.matchAllQuery(), null)).get();
+ } catch (IllegalArgumentException failure) {
+ assertTrue(failure.toString().contains("function must not be null"));
+ }
+ try {
+ client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(QueryBuilders.matchAllQuery(), null)).get();
+ } catch (IllegalArgumentException failure) {
+ assertTrue(failure.toString().contains("function must not be null"));
+ }
+ try {
+ client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(null)).get();
+ } catch (IllegalArgumentException failure) {
+ assertTrue(failure.toString().contains("function must not be null"));
+ }
+ }
+
+ @Test
+ public void testExplainString() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "double").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+
+ client().prepareIndex().setType("type1").setId("1").setIndex("test")
+ .setSource(jsonBuilder().startObject().field("test", "value").array("num", 0.5, 0.7).endObject()).get();
+
+ refresh();
+
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true)
+ .query(functionScoreQuery(termQuery("test", "value"))
+ .add(gaussDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .add(linearDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .add(exponentialDecayFunction("num", 1.0, 5.0).setOffset(1.0))
+ .boostMode(CombineFunction.REPLACE.getName())))).get();
+ String explanation = response.getHits().getAt(0).getExplanation().toString();
+ assertThat(explanation, containsString(" 1.0 = exp(-0.5*pow(MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)],2.0)/18.033688011112044)"));
+ assertThat(explanation, containsString("1.0 = max(0.0, ((10.0 - MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)])/10.0)"));
+ assertThat(explanation, containsString("1.0 = exp(- MIN[Math.max(Math.abs(0.5(=doc value) - 1.0(=origin))) - 1.0(=offset), 0), Math.max(Math.abs(0.7(=doc value) - 1.0(=origin))) - 1.0(=offset), 0)] * 0.13862943611198905)"));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptPlugin.java b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptPlugin.java
new file mode 100644
index 0000000000..806f1642b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptPlugin.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.script.ScriptModule;
+
+public class ExplainableScriptPlugin extends AbstractPlugin {
+
+ public ExplainableScriptPlugin() {}
+ @Override
+ public String name() {
+ return "native-explainable-script";
+ }
+
+ @Override
+ public String description() {
+ return "Native explainable script";
+ }
+
+ public void onModule(ScriptModule module) {
+ module.registerScript("native_explainable_script", ExplainableScriptTests.MyNativeScriptFactory.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java
new file mode 100644
index 0000000000..3f10ce389a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.fielddata.ScriptDocValues;
+import org.elasticsearch.script.AbstractDoubleSearchScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.ExplainableSearchScript;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class ExplainableScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", ExplainableScriptPlugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testNativeExplainScript() throws InterruptedException, IOException, ExecutionException {
+
+ List<IndexRequestBuilder> indexRequests = new ArrayList<>();
+ for (int i = 0; i < 20; i++) {
+ indexRequests.add(client().prepareIndex("test", "type").setId(Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("number_field", i).field("text", "text").endObject()));
+ }
+ indexRandom(true, true, indexRequests);
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ ensureYellow();
+ SearchResponse response = client().search(searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("text", "text")).add(
+ scriptFunction(new Script("native_explainable_script", ScriptType.INLINE, "native", null)))
+ .boostMode("replace")))).actionGet();
+
+ ElasticsearchAssertions.assertNoFailures(response);
+ SearchHits hits = response.getHits();
+ assertThat(hits.getTotalHits(), equalTo(20l));
+ int idCounter = 19;
+ for (SearchHit hit : hits.getHits()) {
+ assertThat(hit.getId(), equalTo(Integer.toString(idCounter)));
+ assertThat(hit.explanation().toString(), containsString(Double.toString(idCounter) + " = This script returned " + Double.toString(idCounter)));
+ assertThat(hit.explanation().toString(), containsString("1.0 = tf(freq=1.0), with freq of"));
+ assertThat(hit.explanation().getDetails().length, equalTo(2));
+ idCounter--;
+ }
+ }
+
+ static class MyNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new MyScript();
+ }
+ }
+
+ static class MyScript extends AbstractDoubleSearchScript implements ExplainableSearchScript, ExecutableScript {
+
+ @Override
+ public Explanation explain(Explanation subQueryScore) throws IOException {
+ Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore);
+ return Explanation.match((float) (runAsDouble()), "This script returned " + runAsDouble(), scoreExp);
+ }
+
+ @Override
+ public double runAsDouble() {
+ return ((Number) ((ScriptDocValues) doc().get("number_field")).getValues().get(0)).doubleValue();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java
new file mode 100644
index 0000000000..68f4a33d82
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreBackwardCompatibilityTests.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ */
+public class FunctionScoreBackwardCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ /**
+ * Simple upgrade test for function score
+ */
+ @Test
+ public void testSimpleFunctionScoreParsingWorks() throws IOException, ExecutionException, InterruptedException {
+
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("text")
+ .field("type", "string")
+ .endObject()
+ .startObject("loc")
+ .field("type", "geo_point")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureYellow();
+
+ int numDocs = 10;
+ String[] ids = new String[numDocs];
+ List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ indexBuilders.add(client().prepareIndex()
+ .setType("type1").setId(id).setIndex("test")
+ .setSource(
+ jsonBuilder().startObject()
+ .field("text", "value " + (i < 5 ? "boosted" : ""))
+ .startObject("loc")
+ .field("lat", 10 + i)
+ .field("lon", 20)
+ .endObject()
+ .endObject()));
+ ids[i] = id;
+ }
+ indexRandom(true, indexBuilders);
+ checkFunctionScoreStillWorks(ids);
+ logClusterState();
+ // prevent any kind of allocation during the upgrade we recover from gateway
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ boolean upgraded;
+ int upgradedNodesCounter = 1;
+ do {
+ logger.debug("function_score bwc: upgrading {}st node", upgradedNodesCounter++);
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ logClusterState();
+ checkFunctionScoreStillWorks(ids);
+ } while (upgraded);
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ logger.debug("done function_score while upgrading");
+ }
+
+ @Override
+ protected Settings commonNodeSettings(int nodeOrdinal) {
+ return Settings.builder().put(super.commonNodeSettings(nodeOrdinal))
+ .put("script.inline", "on").build();
+ }
+
+ private void checkFunctionScoreStillWorks(String... ids) throws ExecutionException, InterruptedException, IOException {
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(termQuery("text", "value"))
+ .add(gaussDecayFunction("loc", new GeoPoint(10, 20), "1000km"))
+ .add(scriptFunction("_index['text']['value'].tf()"))
+ .add(termQuery("text", "boosted"), factorFunction(5))
+ ))).actionGet();
+ assertSearchResponse(response);
+ assertOrderedSearchHits(response, ids);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java
new file mode 100644
index 0000000000..eef4ed2795
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ * Tests for the {@code field_value_factor} function in a function_score query.
+ */
+public class FunctionScoreFieldValueTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testFieldValueFactor() throws IOException {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("test")
+ .field("type", randomFrom(new String[]{"short", "float", "long", "integer", "double"}))
+ .endObject()
+ .startObject("body")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()).get());
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1").setSource("test", 5, "body", "foo").get();
+ client().prepareIndex("test", "type1", "2").setSource("test", 17, "body", "foo").get();
+ client().prepareIndex("test", "type1", "3").setSource("body", "bar").get();
+
+ refresh();
+
+ // document 2 scores higher because 17 > 5
+ SearchResponse response = client().prepareSearch("test")
+ .setExplain(randomBoolean())
+ .setQuery(functionScoreQuery(simpleQueryStringQuery("foo"), fieldValueFactorFunction("test")))
+ .get();
+ assertOrderedSearchHits(response, "2", "1");
+
+ // document 1 scores higher because 1/5 > 1/17
+ response = client().prepareSearch("test")
+ .setExplain(randomBoolean())
+ .setQuery(functionScoreQuery(simpleQueryStringQuery("foo"),
+ fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL)))
+ .get();
+ assertOrderedSearchHits(response, "1", "2");
+
+ // doc 3 doesn't have a "test" field, so an exception will be thrown
+ try {
+ response = client().prepareSearch("test")
+ .setExplain(randomBoolean())
+ .setQuery(functionScoreQuery(matchAllQuery(), fieldValueFactorFunction("test")))
+ .get();
+ assertFailures(response);
+ } catch (SearchPhaseExecutionException e) {
+ // We are expecting an exception, because 3 has no field
+ }
+
+ // doc 3 doesn't have a "test" field but we're defaulting it to 100 so it should be last
+ response = client().prepareSearch("test")
+ .setExplain(randomBoolean())
+ .setQuery(functionScoreQuery(matchAllQuery(),
+ fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100)))
+ .get();
+ assertOrderedSearchHits(response, "1", "2", "3");
+
+ // n divided by 0 is infinity, which should provoke an exception.
+ try {
+ response = client().prepareSearch("test")
+ .setExplain(randomBoolean())
+ .setQuery(functionScoreQuery(simpleQueryStringQuery("foo"),
+ fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).factor(0)))
+ .get();
+ assertFailures(response);
+ } catch (SearchPhaseExecutionException e) {
+ // This is fine, the query will throw an exception if executed
+ // locally, instead of just having failures
+ }
+
+ // don't permit an array of factors
+ try {
+ String querySource = "{" +
+ "\"query\": {" +
+ " \"function_score\": {" +
+ " \"query\": {" +
+ " \"match\": {\"name\": \"foo\"}" +
+ " }," +
+ " \"functions\": [" +
+ " {" +
+ " \"field_value_factor\": {" +
+ " \"field\": \"test\"," +
+ " \"factor\": [1.2,2]" +
+ " }" +
+ " }" +
+ " ]" +
+ " }" +
+ " }" +
+ "}";
+ response = client().prepareSearch("test")
+ .setSource(querySource)
+ .get();
+ assertFailures(response);
+ } catch (SearchPhaseExecutionException e) {
+ // This is fine, the query will throw an exception if executed
+ // locally, instead of just having failures
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
new file mode 100644
index 0000000000..05551cf493
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.search.Explanation;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.functionscore.DecayFunction;
+import org.elasticsearch.index.query.functionscore.DecayFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.DecayFunctionParser;
+import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class FunctionScorePluginTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", CustomDistanceScorePlugin.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testPlugin() throws Exception {
+ client().admin()
+ .indices()
+ .prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test")
+ .field("type", "string").endObject().startObject("num1").field("type", "date").endObject().endObject()
+ .endObject().endObject()).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
+
+ client().index(
+ indexRequest("test").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-26").endObject())).actionGet();
+ client().index(
+ indexRequest("test").type("type1").id("2")
+ .source(jsonBuilder().startObject().field("test", "value").field("num1", "2013-05-27").endObject())).actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ DecayFunctionBuilder gfb = new CustomDistanceScoreBuilder("num1", "2013-05-28", "+1d");
+
+ ActionFuture<SearchResponse> response = client().search(searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(false).query(functionScoreQuery(termQuery("test", "value")).add(gfb))));
+
+ SearchResponse sr = response.actionGet();
+ ElasticsearchAssertions.assertNoFailures(sr);
+ SearchHits sh = sr.getHits();
+
+ assertThat(sh.hits().length, equalTo(2));
+ assertThat(sh.getAt(0).getId(), equalTo("1"));
+ assertThat(sh.getAt(1).getId(), equalTo("2"));
+
+ }
+
+ public static class CustomDistanceScorePlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-distance-score";
+ }
+
+ @Override
+ public String description() {
+ return "Distance score plugin to test pluggable implementation";
+ }
+
+ public void onModule(FunctionScoreModule scoreModule) {
+ scoreModule.registerParser(FunctionScorePluginTests.CustomDistanceScoreParser.class);
+ }
+
+ }
+
+ public static class CustomDistanceScoreParser extends DecayFunctionParser {
+
+ public static final String[] NAMES = { "linear_mult", "linearMult" };
+
+ @Override
+ public String[] getNames() {
+ return NAMES;
+ }
+
+ static final DecayFunction decayFunction = new LinearMultScoreFunction();
+
+ @Override
+ public DecayFunction getDecayFunction() {
+ return decayFunction;
+ }
+
+ static class LinearMultScoreFunction implements DecayFunction {
+ LinearMultScoreFunction() {
+ }
+
+ @Override
+ public double evaluate(double value, double scale) {
+
+ return value;
+ }
+
+ @Override
+ public Explanation explainFunction(String distanceString, double distanceVal, double scale) {
+ return Explanation.match((float) distanceVal, "" + distanceVal);
+ }
+
+ @Override
+ public double processScale(double userGivenScale, double userGivenValue) {
+ return userGivenScale;
+ }
+ }
+ }
+
+ public class CustomDistanceScoreBuilder extends DecayFunctionBuilder {
+
+ public CustomDistanceScoreBuilder(String fieldName, Object origin, Object scale) {
+ super(fieldName, origin, scale);
+ }
+
+ @Override
+ public String getName() {
+ return CustomDistanceScoreParser.NAMES[0];
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java
new file mode 100644
index 0000000000..0ff64ad876
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java
@@ -0,0 +1,801 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+import org.elasticsearch.index.query.functionscore.weight.WeightBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+
+@Slow
+public class FunctionScoreTests extends ElasticsearchIntegrationTest {
+
+ static final String TYPE = "type";
+ static final String INDEX = "index";
+ static final String TEXT_FIELD = "text_field";
+ static final String DOUBLE_FIELD = "double_field";
+ static final String GEO_POINT_FIELD = "geo_point_field";
+ static final XContentBuilder SIMPLE_DOC;
+ static final XContentBuilder MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD;
+
+ @Test
+ public void testExplainQueryOnlyOnce() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("test").field("type", "string")
+ .endObject().startObject("num").field("type", "float").endObject().endObject().endObject().endObject()));
+ ensureYellow();
+
+ client().prepareIndex()
+ .setType("type1")
+ .setId("1")
+ .setIndex("test")
+ .setSource(
+ jsonBuilder().startObject().field("test", "value").field("num", 10).endObject()).get();
+ refresh();
+
+ SearchResponse response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value")).add(gaussDecayFunction("num", 5, 5)).add(exponentialDecayFunction("num", 5, 5)).add(linearDecayFunction("num", 5, 5))))).get();
+ String explanation = response.getHits().getAt(0).explanation().toString();
+
+ checkQueryExplanationAppearsOnlyOnce(explanation);
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value")).add(fieldValueFactorFunction("num"))))).get();
+ explanation = response.getHits().getAt(0).explanation().toString();
+ checkQueryExplanationAppearsOnlyOnce(explanation);
+
+ response = client().search(
+ searchRequest().searchType(SearchType.QUERY_THEN_FETCH).source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("test", "value")).add(randomFunction(10))))).get();
+ explanation = response.getHits().getAt(0).explanation().toString();
+
+ checkQueryExplanationAppearsOnlyOnce(explanation);
+ }
+
+ private void checkQueryExplanationAppearsOnlyOnce(String explanation) {
+ // use some substring of the query explanation and see if it appears twice
+ String queryExplanation = "idf(docFreq=1, maxDocs=1)";
+ int queryExplanationIndex = explanation.indexOf(queryExplanation, 0);
+ assertThat(queryExplanationIndex, greaterThan(-1));
+ queryExplanationIndex = explanation.indexOf(queryExplanation, queryExplanationIndex + 1);
+ assertThat(queryExplanationIndex, equalTo(-1));
+ }
+
+ static {
+ XContentBuilder simpleDoc;
+ XContentBuilder mappingWithDoubleAndGeoPointAndTestField;
+ try {
+ simpleDoc = jsonBuilder().startObject()
+ .field(TEXT_FIELD, "value")
+ .startObject(GEO_POINT_FIELD)
+ .field("lat", 10)
+ .field("lon", 20)
+ .endObject()
+ .field(DOUBLE_FIELD, Math.E)
+ .endObject();
+ } catch (IOException e) {
+ throw new ElasticsearchException("Exception while initializing FunctionScoreTests", e);
+ }
+ SIMPLE_DOC = simpleDoc;
+ try {
+
+ mappingWithDoubleAndGeoPointAndTestField = jsonBuilder().startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(TEXT_FIELD)
+ .field("type", "string")
+ .endObject()
+ .startObject(GEO_POINT_FIELD)
+ .field("type", "geo_point")
+ .endObject()
+ .startObject(DOUBLE_FIELD)
+ .field("type", "double")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ } catch (IOException e) {
+ throw new ElasticsearchException("Exception while initializing FunctionScoreTests", e);
+ }
+ MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD = mappingWithDoubleAndGeoPointAndTestField;
+ }
+
+ @Test
+ public void testExplain() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(
+ TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD
+ ));
+ ensureYellow();
+
+ index(INDEX, TYPE, "1", SIMPLE_DOC);
+ refresh();
+
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2))
+ .add(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")).setWeight(3)))
+ .explain(true))).actionGet();
+
+ assertThat(
+ responseWithWeights.getHits().getAt(0).getExplanation().toString(),
+ equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = min of:\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: *:*\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: *:*\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: *:*\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"[script: _index['text_field']['value'].tf(), type: inline, lang: null, params: {}]\" and parameters: \n{}\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n"));
+ responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value"))).add(weightFactorFunction(4.0f)))
+ .explain(true))).actionGet();
+ assertThat(
+ responseWithWeights.getHits().getAt(0).getExplanation().toString(),
+ equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = min of:\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n"));
+
+ }
+
+ @Test
+ public void simpleWeightedFunctionsTest() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(
+ TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD
+ ));
+ ensureYellow();
+
+ index(INDEX, TYPE, "1", SIMPLE_DOC);
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN))
+ .add(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")))))).actionGet();
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km").setWeight(2))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN)
+ .setWeight(2))
+ .add(scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()")).setWeight(2)))))
+ .actionGet();
+
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).getScore(), is(1.0f));
+ assertThat(responseWithWeights.getHits().getAt(0).getScore(), is(8.0f));
+ }
+
+ @Test
+ public void simpleWeightedFunctionsTestWithRandomWeightsAndRandomCombineMode() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(
+ TYPE,
+ MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ XContentBuilder doc = SIMPLE_DOC;
+ index(INDEX, TYPE, "1", doc);
+ refresh();
+ ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuilders();
+ float[] weights = createRandomWeights(scoreFunctionBuilders.length);
+ float[] scores = getScores(scoreFunctionBuilders);
+
+ String scoreMode = getRandomScoreMode();
+ FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value"))).scoreMode(scoreMode);
+ int weightscounter = 0;
+ for (ScoreFunctionBuilder builder : scoreFunctionBuilders) {
+ withWeights.add(builder.setWeight(weights[weightscounter]));
+ weightscounter++;
+ }
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(searchSource().query(withWeights))
+ ).actionGet();
+
+ double expectedScore = computeExpectedScore(weights, scores, scoreMode);
+ assertThat((float) expectedScore / responseWithWeights.getHits().getAt(0).getScore(), is(1.0f));
+ }
+
+ protected double computeExpectedScore(float[] weights, float[] scores, String scoreMode) {
+ double expectedScore = 0.0;
+ if ("multiply".equals(scoreMode)) {
+ expectedScore = 1.0;
+ }
+ if ("max".equals(scoreMode)) {
+ expectedScore = Float.MAX_VALUE * -1.0;
+ }
+ if ("min".equals(scoreMode)) {
+ expectedScore = Float.MAX_VALUE;
+ }
+
+ float weightSum = 0;
+
+ for (int i = 0; i < weights.length; i++) {
+ double functionScore = (double) weights[i] * scores[i];
+ weightSum += weights[i];
+
+ if ("avg".equals(scoreMode)) {
+ expectedScore += functionScore;
+ } else if ("max".equals(scoreMode)) {
+ expectedScore = Math.max(functionScore, expectedScore);
+ } else if ("min".equals(scoreMode)) {
+ expectedScore = Math.min(functionScore, expectedScore);
+ } else if ("sum".equals(scoreMode)) {
+ expectedScore += functionScore;
+ } else if ("multiply".equals(scoreMode)) {
+ expectedScore *= functionScore;
+ }
+
+ }
+ if ("avg".equals(scoreMode)) {
+ expectedScore /= weightSum;
+ }
+ return expectedScore;
+ }
+
+ @Test
+ public void simpleWeightedFunctionsTestSingleFunction() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(
+ TYPE,
+ MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ XContentBuilder doc = jsonBuilder().startObject()
+ .field(TEXT_FIELD, "value")
+ .startObject(GEO_POINT_FIELD)
+ .field("lat", 12)
+ .field("lon", 21)
+ .endObject()
+ .field(DOUBLE_FIELD, 10)
+ .endObject();
+ index(INDEX, TYPE, "1", doc);
+ refresh();
+ ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuilders();
+ ScoreFunctionBuilder scoreFunctionBuilder = scoreFunctionBuilders[randomInt(3)];
+ float[] weights = createRandomWeights(1);
+ float[] scores = getScores(scoreFunctionBuilder);
+ FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")));
+ withWeights.add(scoreFunctionBuilder.setWeight(weights[0]));
+
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(searchSource().query(withWeights))
+ ).actionGet();
+
+ assertThat( (double) scores[0] * weights[0]/ responseWithWeights.getHits().getAt(0).getScore(), closeTo(1.0, 1.e-6));
+
+ }
+
+ private String getRandomScoreMode() {
+ String[] scoreModes = {"avg", "sum", "min", "max", "multiply"};
+ return scoreModes[randomInt(scoreModes.length - 1)];
+ }
+
+ private float[] getScores(ScoreFunctionBuilder... scoreFunctionBuilders) {
+ float[] scores = new float[scoreFunctionBuilders.length];
+ int scorecounter = 0;
+ for (ScoreFunctionBuilder builder : scoreFunctionBuilders) {
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(builder)
+ ))).actionGet();
+ scores[scorecounter] = response.getHits().getAt(0).getScore();
+ scorecounter++;
+ }
+ return scores;
+ }
+
+ private float[] createRandomWeights(int size) {
+ float[] weights = new float[size];
+ for (int i = 0; i < weights.length; i++) {
+ weights[i] = randomFloat() * (randomBoolean() ? 1.0f : -1.0f) * randomInt(100) + 1.e-6f;
+ }
+ return weights;
+ }
+
+ public ScoreFunctionBuilder[] getScoreFunctionBuilders() {
+ ScoreFunctionBuilder[] builders = new ScoreFunctionBuilder[4];
+ builders[0] = gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km");
+ builders[1] = randomFunction(10);
+ builders[2] = fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN);
+ builders[3] = scriptFunction(new Script("_index['" + TEXT_FIELD + "']['value'].tf()"));
+ return builders;
+ }
+
+ @Test
+ public void checkWeightOnlyCreatesBoostFunction() throws IOException {
+ assertAcked(prepareCreate(INDEX).addMapping(
+ TYPE,
+ MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ index(INDEX, TYPE, "1", SIMPLE_DOC);
+ refresh();
+ String query =jsonBuilder().startObject()
+ .startObject("query")
+ .startObject("function_score")
+ .startArray("functions")
+ .startObject()
+ .field("weight",2)
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject().string();
+ SearchResponse response = client().search(
+ searchRequest().source(query)
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(2.0f));
+
+ query =jsonBuilder().startObject()
+ .startObject("query")
+ .startObject("function_score")
+ .field("weight",2)
+ .endObject()
+ .endObject()
+ .endObject().string();
+ response = client().search(
+ searchRequest().source(query)
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(2.0f));
+ response = client().search(
+ searchRequest().source(searchSource().query(functionScoreQuery().add(new WeightBuilder().setWeight(2.0f))))
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(2.0f));
+ response = client().search(
+ searchRequest().source(searchSource().query(functionScoreQuery().add(weightFactorFunction(2.0f))))
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(2.0f));
+ }
+
+ @Test
+ public void testScriptScoresNested() throws IOException {
+ createIndex(INDEX);
+ ensureYellow();
+ index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject());
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(
+ functionScoreQuery(
+functionScoreQuery().add(scriptFunction(new Script("1")))).add(
+ scriptFunction(new Script("_score.doubleValue()")))).add(
+ scriptFunction(new Script("_score.doubleValue()"))
+ )
+ )
+ )
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(1.0f));
+ }
+
+ @Test
+ public void testScriptScoresWithAgg() throws IOException {
+ createIndex(INDEX);
+ ensureYellow();
+ index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject());
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(functionScoreQuery().add(scriptFunction(new Script("_score.doubleValue()")))).aggregation(
+ terms("score_agg").script(new Script("_score.doubleValue()")))
+ )
+ ).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(1.0f));
+ assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0"));
+ assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1l));
+ }
+
+ public void testMinScoreFunctionScoreBasic() throws IOException {
+ index(INDEX, TYPE, jsonBuilder().startObject().field("num", 2).endObject());
+ refresh();
+ ensureYellow();
+ float score = randomFloat();
+ float minScore = randomFloat();
+ SearchResponse searchResponse = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery().add(scriptFunction(new Script(Float.toString(score)))).setMinScore(minScore)))
+ ).actionGet();
+ if (score < minScore) {
+ assertThat(searchResponse.getHits().getTotalHits(), is(0l));
+ } else {
+ assertThat(searchResponse.getHits().getTotalHits(), is(1l));
+ }
+
+ searchResponse = client().search(
+ searchRequest().source(searchSource().query(functionScoreQuery()
+.add(scriptFunction(new Script(Float.toString(score))))
+ .add(scriptFunction(new Script(Float.toString(score))))
+ .scoreMode("avg").setMinScore(minScore)))
+ ).actionGet();
+ if (score < minScore) {
+ assertThat(searchResponse.getHits().getTotalHits(), is(0l));
+ } else {
+ assertThat(searchResponse.getHits().getTotalHits(), is(1l));
+ }
+ }
+
+ @Test
+ public void testMinScoreFunctionScoreManyDocsAndRandomMinScore() throws IOException, ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ int numDocs = randomIntBetween(1, 100);
+ int scoreOffset = randomIntBetween(-2 * numDocs, 2 * numDocs);
+ int minScore = randomIntBetween(-2 * numDocs, 2 * numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ docs.add(client().prepareIndex(INDEX, TYPE, Integer.toString(i)).setSource("num", i + scoreOffset));
+ }
+ indexRandom(true, docs);
+ ensureYellow();
+ Script script = new Script("return (doc['num'].value)");
+ int numMatchingDocs = numDocs + scoreOffset - minScore;
+ if (numMatchingDocs < 0) {
+ numMatchingDocs = 0;
+ }
+ if (numMatchingDocs > numDocs) {
+ numMatchingDocs = numDocs;
+ }
+
+ SearchResponse searchResponse = client().search(
+ searchRequest().source(searchSource().query(functionScoreQuery()
+ .add(scriptFunction(script))
+ .setMinScore(minScore)).size(numDocs))).actionGet();
+ assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs);
+
+ searchResponse = client().search(
+ searchRequest().source(searchSource().query(functionScoreQuery()
+ .add(scriptFunction(script))
+ .add(scriptFunction(script))
+ .scoreMode("avg").setMinScore(minScore)).size(numDocs))).actionGet();
+ assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs);
+ }
+
+ protected void assertMinScoreSearchResponses(int numDocs, SearchResponse searchResponse, int numMatchingDocs) {
+ assertSearchResponse(searchResponse);
+ assertThat((int) searchResponse.getHits().totalHits(), is(numMatchingDocs));
+ int pos = 0;
+ for (int hitId = numDocs - 1; (numDocs - hitId) < searchResponse.getHits().totalHits(); hitId--) {
+ assertThat(searchResponse.getHits().getAt(pos).getId(), equalTo(Integer.toString(hitId)));
+ pos++;
+ }
+ }
+
+ @Test
+ public void testWithEmptyFunctions() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test"));
+ ensureYellow();
+ index("test", "testtype", "1", jsonBuilder().startObject().field("text", "test text").endObject());
+ refresh();
+
+ // make sure that min_score works if functions is empty, see https://github.com/elastic/elasticsearch/issues/10253
+ float termQueryScore = 0.19178301f;
+ testMinScoreApplied("sum", termQueryScore);
+ testMinScoreApplied("avg", termQueryScore);
+ testMinScoreApplied("max", termQueryScore);
+ testMinScoreApplied("min", termQueryScore);
+ testMinScoreApplied("multiply", termQueryScore);
+ testMinScoreApplied("replace", termQueryScore);
+ }
+
+ protected void testMinScoreApplied(String boostMode, float expectedScore) throws InterruptedException, ExecutionException {
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(0.1f)))).get();
+ assertSearchResponse(response);
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getScore(), equalTo(expectedScore));
+
+ response = client().search(
+ searchRequest().source(
+ searchSource().explain(true).query(
+ functionScoreQuery(termQuery("text", "text")).boostMode(boostMode).setMinScore(2f)))).get();
+
+ assertSearchResponse(response);
+ assertThat(response.getHits().totalHits(), equalTo(0l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testExplainOldScriptAPI() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ index(INDEX, TYPE, "1", SIMPLE_DOC);
+ refresh();
+
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN)
+ .setWeight(2))
+ .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()").setWeight(3))).explain(true)))
+ .actionGet();
+
+ assertThat(
+ responseWithWeights.getHits().getAt(0).getExplanation().toString(),
+ equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = min of:\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: *:*\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: *:*\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: *:*\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"[script: _index['text_field']['value'].tf(), type: inline, lang: null, params: {}]\" and parameters: \n{}\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n"));
+ responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value"))).add(weightFactorFunction(4.0f)))
+ .explain(true))).actionGet();
+ assertThat(
+ responseWithWeights.getHits().getAt(0).getExplanation().toString(),
+ equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = min of:\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n"));
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void simpleWeightedFunctionsTestOldScriptAPI() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ index(INDEX, TYPE, "1", SIMPLE_DOC);
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN))
+ .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()"))))).actionGet();
+ SearchResponse responseWithWeights = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")))
+ .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km").setWeight(2))
+ .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN)
+ .setWeight(2))
+ .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()").setWeight(2))))).actionGet();
+
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).getScore(), is(1.0f));
+ assertThat(responseWithWeights.getHits().getAt(0).getScore(), is(8.0f));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void simpleWeightedFunctionsTestWithRandomWeightsAndRandomCombineModeOldScriptAPI() throws IOException, ExecutionException,
+ InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ XContentBuilder doc = SIMPLE_DOC;
+ index(INDEX, TYPE, "1", doc);
+ refresh();
+ ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuildersOldScriptAPI();
+ float[] weights = createRandomWeights(scoreFunctionBuilders.length);
+ float[] scores = getScores(scoreFunctionBuilders);
+
+ String scoreMode = getRandomScoreMode();
+ FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value"))).scoreMode(scoreMode);
+ int weightscounter = 0;
+ for (ScoreFunctionBuilder builder : scoreFunctionBuilders) {
+ withWeights.add(builder.setWeight(weights[weightscounter]));
+ weightscounter++;
+ }
+ SearchResponse responseWithWeights = client().search(searchRequest().source(searchSource().query(withWeights))).actionGet();
+
+ double expectedScore = computeExpectedScore(weights, scores, scoreMode);
+ assertThat((float) expectedScore / responseWithWeights.getHits().getAt(0).getScore(), is(1.0f));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void simpleWeightedFunctionsTestSingleFunctionOldScriptAPI() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, MAPPING_WITH_DOUBLE_AND_GEO_POINT_AND_TEXT_FIELD));
+ ensureYellow();
+
+ XContentBuilder doc = jsonBuilder().startObject().field(TEXT_FIELD, "value").startObject(GEO_POINT_FIELD).field("lat", 12)
+ .field("lon", 21).endObject().field(DOUBLE_FIELD, 10).endObject();
+ index(INDEX, TYPE, "1", doc);
+ refresh();
+ ScoreFunctionBuilder[] scoreFunctionBuilders = getScoreFunctionBuildersOldScriptAPI();
+ ScoreFunctionBuilder scoreFunctionBuilder = scoreFunctionBuilders[randomInt(3)];
+ float[] weights = createRandomWeights(1);
+ float[] scores = getScores(scoreFunctionBuilder);
+ FunctionScoreQueryBuilder withWeights = functionScoreQuery(constantScoreQuery(termQuery(TEXT_FIELD, "value")));
+ withWeights.add(scoreFunctionBuilder.setWeight(weights[0]));
+
+ SearchResponse responseWithWeights = client().search(searchRequest().source(searchSource().query(withWeights))).actionGet();
+
+ assertThat((double) scores[0] * weights[0] / responseWithWeights.getHits().getAt(0).getScore(), closeTo(1.0, 1.e-6));
+
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ public ScoreFunctionBuilder[] getScoreFunctionBuildersOldScriptAPI() {
+ ScoreFunctionBuilder[] builders = new ScoreFunctionBuilder[4];
+ builders[0] = gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km");
+ builders[1] = randomFunction(10);
+ builders[2] = fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN);
+ builders[3] = scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()");
+ return builders;
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testScriptScoresNestedOldScriptAPI() throws IOException {
+ createIndex(INDEX);
+ ensureYellow();
+ index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject());
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery(
+ functionScoreQuery(functionScoreQuery().add(scriptFunction("1"))).add(
+ scriptFunction("_score.doubleValue()"))).add(scriptFunction("_score.doubleValue()")))))
+ .actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(1.0f));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testScriptScoresWithAggOldScriptAPI() throws IOException {
+ createIndex(INDEX);
+ ensureYellow();
+ index(INDEX, TYPE, "1", jsonBuilder().startObject().field("dummy_field", 1).endObject());
+ refresh();
+ SearchResponse response = client().search(
+ searchRequest().source(
+ searchSource().query(functionScoreQuery().add(scriptFunction("_score.doubleValue()"))).aggregation(
+ terms("score_agg").script("_score.doubleValue()")))).actionGet();
+ assertSearchResponse(response);
+ assertThat(response.getHits().getAt(0).score(), equalTo(1.0f));
+ assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getKeyAsString(), equalTo("1.0"));
+ assertThat(((Terms) response.getAggregations().asMap().get("score_agg")).getBuckets().get(0).getDocCount(), is(1l));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ public void testMinScoreFunctionScoreBasicOldScriptAPI() throws IOException {
+ index(INDEX, TYPE, jsonBuilder().startObject().field("num", 2).endObject());
+ refresh();
+ ensureYellow();
+ float score = randomFloat();
+ float minScore = randomFloat();
+ SearchResponse searchResponse = client().search(
+ searchRequest().source(
+ searchSource().query(functionScoreQuery().add(scriptFunction(Float.toString(score))).setMinScore(minScore))))
+ .actionGet();
+ if (score < minScore) {
+ assertThat(searchResponse.getHits().getTotalHits(), is(0l));
+ } else {
+ assertThat(searchResponse.getHits().getTotalHits(), is(1l));
+ }
+
+ searchResponse = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery().add(scriptFunction(Float.toString(score))).add(scriptFunction(Float.toString(score)))
+ .scoreMode("avg").setMinScore(minScore)))).actionGet();
+ if (score < minScore) {
+ assertThat(searchResponse.getHits().getTotalHits(), is(0l));
+ } else {
+ assertThat(searchResponse.getHits().getTotalHits(), is(1l));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testMinScoreFunctionScoreManyDocsAndRandomMinScoreOldScriptAPI() throws IOException, ExecutionException,
+ InterruptedException {
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ int numDocs = randomIntBetween(1, 100);
+ int scoreOffset = randomIntBetween(-2 * numDocs, 2 * numDocs);
+ int minScore = randomIntBetween(-2 * numDocs, 2 * numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ docs.add(client().prepareIndex(INDEX, TYPE, Integer.toString(i)).setSource("num", i + scoreOffset));
+ }
+ indexRandom(true, docs);
+ ensureYellow();
+ String script = "return (doc['num'].value)";
+ int numMatchingDocs = numDocs + scoreOffset - minScore;
+ if (numMatchingDocs < 0) {
+ numMatchingDocs = 0;
+ }
+ if (numMatchingDocs > numDocs) {
+ numMatchingDocs = numDocs;
+ }
+
+ SearchResponse searchResponse = client().search(
+ searchRequest().source(
+ searchSource().query(functionScoreQuery().add(scriptFunction(script)).setMinScore(minScore)).size(numDocs)))
+ .actionGet();
+ assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs);
+
+ searchResponse = client().search(
+ searchRequest().source(
+ searchSource().query(
+ functionScoreQuery().add(scriptFunction(script)).add(scriptFunction(script)).scoreMode("avg")
+ .setMinScore(minScore)).size(numDocs))).actionGet();
+ assertMinScoreSearchResponses(numDocs, searchResponse, numMatchingDocs);
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java b/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
new file mode 100644
index 0000000000..566c188788
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/functionscore/RandomScoreFunctionTests.java
@@ -0,0 +1,406 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.functionscore;
+
+import org.apache.lucene.util.ArrayUtil;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.functionscore.random.RandomScoreFunctionBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.CoreMatchers;
+import org.junit.Ignore;
+
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.nullValue;
+
+public class RandomScoreFunctionTests extends ElasticsearchIntegrationTest {
+
+ @Slow
+ public void testConsistentHitsWithSameSeed() throws Exception {
+ createIndex("test");
+ ensureGreen(); // make sure we are done otherwise preference could change?
+ int docCount = randomIntBetween(100, 200);
+ for (int i = 0; i < docCount; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+ flush();
+ refresh();
+ int outerIters = scaledRandomIntBetween(10, 20);
+ for (int o = 0; o < outerIters; o++) {
+ final int seed = randomInt();
+ String preference = randomRealisticUnicodeOfLengthBetween(1, 10); // at least one char!!
+ // randomPreference should not start with '_' (reserved for known preference types (e.g. _shards, _primary)
+ while (preference.startsWith("_")) {
+ preference = randomRealisticUnicodeOfLengthBetween(1, 10);
+ }
+ int innerIters = scaledRandomIntBetween(2, 5);
+ SearchHit[] hits = null;
+ for (int i = 0; i < innerIters; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+ .setPreference(preference)
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))
+ .execute().actionGet();
+ assertThat("Failures " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, CoreMatchers.equalTo(0));
+ final int hitCount = searchResponse.getHits().getHits().length;
+ final SearchHit[] currentHits = searchResponse.getHits().getHits();
+ ArrayUtil.timSort(currentHits, new Comparator<SearchHit>() {
+ @Override
+ public int compare(SearchHit o1, SearchHit o2) {
+ // for tie-breaking we have to resort here since if the score is
+ // identical we rely on collection order which might change.
+ int cmp = Float.compare(o1.getScore(), o2.getScore());
+ return cmp == 0 ? o1.getId().compareTo(o2.getId()) : cmp;
+ }
+ });
+ if (i == 0) {
+ assertThat(hits, nullValue());
+ hits = currentHits;
+ } else {
+ assertThat(hits.length, equalTo(searchResponse.getHits().getHits().length));
+ for (int j = 0; j < hitCount; j++) {
+ assertThat("" + j, currentHits[j].score(), equalTo(hits[j].score()));
+ assertThat("" + j, currentHits[j].id(), equalTo(hits[j].id()));
+ }
+ }
+
+ // randomly change some docs to get them in different segments
+ int numDocsToChange = randomIntBetween(20, 50);
+ while (numDocsToChange > 0) {
+ int doc = randomInt(docCount-1);// watch out this is inclusive the max values!
+ index("test", "type", "" + doc, jsonBuilder().startObject().endObject());
+ --numDocsToChange;
+ }
+ flush();
+ refresh();
+ }
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ public void testScoreAccessWithinScriptOldScriptAPI() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type", "body", "type=string", "index", "type=" + randomFrom(new String[]{"short", "float", "long", "integer", "double"})));
+ ensureYellow();
+
+ int docCount = randomIntBetween(100, 200);
+ for (int i = 0; i < docCount; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource("body", randomFrom(newArrayList("foo", "bar", "baz")), "index", i).get();
+ }
+ refresh();
+
+ // Test for accessing _score
+ SearchResponse resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchQuery("body", "foo"))
+ .add(fieldValueFactorFunction("index").factor(2))
+ .add(scriptFunction("log(doc['index'].value + (factor * _score))").param("factor", randomIntBetween(2, 4))))
+ .get();
+ assertNoFailures(resp);
+ SearchHit firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.intValue()
+ resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchQuery("body", "foo"))
+ .add(fieldValueFactorFunction("index").factor(2))
+ .add(scriptFunction("log(doc['index'].value + (factor * _score.intValue()))")
+ .param("factor", randomIntBetween(2, 4))))
+ .get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.longValue()
+ resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchQuery("body", "foo"))
+ .add(fieldValueFactorFunction("index").factor(2))
+ .add(scriptFunction("log(doc['index'].value + (factor * _score.longValue()))")
+ .param("factor", randomIntBetween(2, 4))))
+ .get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.floatValue()
+ resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchQuery("body", "foo"))
+ .add(fieldValueFactorFunction("index").factor(2))
+ .add(scriptFunction("log(doc['index'].value + (factor * _score.floatValue()))")
+ .param("factor", randomIntBetween(2, 4))))
+ .get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.doubleValue()
+ resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchQuery("body", "foo"))
+ .add(fieldValueFactorFunction("index").factor(2))
+ .add(scriptFunction("log(doc['index'].value + (factor * _score.doubleValue()))")
+ .param("factor", randomIntBetween(2, 4))))
+ .get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+ }
+
+ public void testScoreAccessWithinScript() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "body", "type=string", "index",
+ "type=" + randomFrom(new String[] { "short", "float", "long", "integer", "double" })));
+ ensureYellow();
+
+ int docCount = randomIntBetween(100, 200);
+ for (int i = 0; i < docCount; i++) {
+ client().prepareIndex("test", "type", "" + i).setSource("body", randomFrom(newArrayList("foo", "bar", "baz")), "index", i)
+ .get();
+ }
+ refresh();
+
+ Map<String, Object> params = new HashMap<>();
+ params.put("factor", randomIntBetween(2, 4));
+ // Test for accessing _score
+ SearchResponse resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("body", "foo")).add(fieldValueFactorFunction("index").factor(2)).add(
+ scriptFunction(new Script("log(doc['index'].value + (factor * _score))", ScriptType.INLINE, null, params))))
+ .get();
+ assertNoFailures(resp);
+ SearchHit firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.intValue()
+ resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("body", "foo")).add(fieldValueFactorFunction("index").factor(2)).add(
+ scriptFunction(new Script("log(doc['index'].value + (factor * _score.intValue()))", ScriptType.INLINE,
+ null, params)))).get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.longValue()
+ resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("body", "foo")).add(fieldValueFactorFunction("index").factor(2)).add(
+ scriptFunction(new Script("log(doc['index'].value + (factor * _score.longValue()))", ScriptType.INLINE,
+ null, params)))).get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.floatValue()
+ resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("body", "foo")).add(fieldValueFactorFunction("index").factor(2)).add(
+ scriptFunction(new Script("log(doc['index'].value + (factor * _score.floatValue()))", ScriptType.INLINE,
+ null, params)))).get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+
+ // Test for accessing _score.doubleValue()
+ resp = client()
+ .prepareSearch("test")
+ .setQuery(
+ functionScoreQuery(matchQuery("body", "foo")).add(fieldValueFactorFunction("index").factor(2)).add(
+ scriptFunction(new Script("log(doc['index'].value + (factor * _score.doubleValue()))", ScriptType.INLINE,
+ null, params)))).get();
+ assertNoFailures(resp);
+ firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.getScore(), greaterThan(1f));
+ }
+
+ public void testSeedReportedInExplain() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ index("test", "type", "1", jsonBuilder().startObject().endObject());
+ flush();
+ refresh();
+
+ int seed = 12345678;
+
+ SearchResponse resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))
+ .setExplain(true)
+ .get();
+ assertNoFailures(resp);
+ assertEquals(1, resp.getHits().totalHits());
+ SearchHit firstHit = resp.getHits().getAt(0);
+ assertThat(firstHit.explanation().toString(), containsString("" + seed));
+ }
+
+ public void testNoDocs() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ SearchResponse resp = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(1234)))
+ .get();
+ assertNoFailures(resp);
+ assertEquals(0, resp.getHits().totalHits());
+ }
+
+ public void testScoreRange() throws Exception {
+ // all random scores should be in range [0.0, 1.0]
+ createIndex("test");
+ ensureGreen();
+ int docCount = randomIntBetween(100, 200);
+ for (int i = 0; i < docCount; i++) {
+ String id = randomRealisticUnicodeOfCodepointLengthBetween(1, 50);
+ index("test", "type", id, jsonBuilder().startObject().endObject());
+ }
+ flush();
+ refresh();
+ int iters = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < iters; ++i) {
+ int seed = randomInt();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(seed)))
+ .setSize(docCount)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ assertThat(hit.score(), allOf(greaterThanOrEqualTo(0.0f), lessThanOrEqualTo(1.0f)));
+ }
+ }
+ }
+
+ public void testSeeds() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ final int docCount = randomIntBetween(100, 200);
+ for (int i = 0; i < docCount; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+ flushAndRefresh();
+
+ assertNoFailures(client().prepareSearch()
+ .setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(randomInt())))
+ .execute().actionGet());
+
+ assertNoFailures(client().prepareSearch()
+ .setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(randomLong())))
+ .execute().actionGet());
+
+ assertNoFailures(client().prepareSearch()
+ .setSize(docCount) // get all docs otherwise we are prone to tie-breaking
+ .setQuery(functionScoreQuery(matchAllQuery(), randomFunction(randomRealisticUnicodeOfLengthBetween(10, 20))))
+ .execute().actionGet());
+ }
+
+ @Ignore
+ public void checkDistribution() throws Exception {
+ int count = 10000;
+
+ assertAcked(prepareCreate("test"));
+ ensureGreen();
+
+ for (int i = 0; i < count; i++) {
+ index("test", "type", "" + i, jsonBuilder().startObject().endObject());
+ }
+
+ flush();
+ refresh();
+
+ int[] matrix = new int[count];
+
+ for (int i = 0; i < count; i++) {
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(functionScoreQuery(matchAllQuery(), new RandomScoreFunctionBuilder()))
+ .execute().actionGet();
+
+ matrix[Integer.valueOf(searchResponse.getHits().getAt(0).id())]++;
+ }
+
+ int filled = 0;
+ int maxRepeat = 0;
+ int sumRepeat = 0;
+ for (int i = 0; i < matrix.length; i++) {
+ int value = matrix[i];
+ sumRepeat += value;
+ maxRepeat = Math.max(maxRepeat, value);
+ if (value > 0) {
+ filled++;
+ }
+ }
+
+ System.out.println();
+ System.out.println("max repeat: " + maxRepeat);
+ System.out.println("avg repeat: " + sumRepeat / (double) filled);
+ System.out.println("distribution: " + filled / (double) count);
+
+ int percentile50 = filled / 2;
+ int percentile25 = (filled / 4);
+ int percentile75 = percentile50 + percentile25;
+
+ int sum = 0;
+
+ for (int i = 0; i < matrix.length; i++) {
+ if (matrix[i] == 0) {
+ continue;
+ }
+ sum += i * matrix[i];
+ if (percentile50 == 0) {
+ System.out.println("median: " + i);
+ } else if (percentile25 == 0) {
+ System.out.println("percentile_25: " + i);
+ } else if (percentile75 == 0) {
+ System.out.println("percentile_75: " + i);
+ }
+ percentile50--;
+ percentile25--;
+ percentile75--;
+ }
+
+ System.out.println("mean: " + sum / (double) count);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
new file mode 100644
index 0000000000..b83cc33693
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxTests.java
@@ -0,0 +1,340 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.geoBoundingBoxQuery;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for the geo_bounding_box query against indexed geo_point locations.
+ */
+public class GeoBoundingBoxTests extends ElasticsearchIntegrationTest {
+
+ // Indexes seven named points around lower Manhattan and verifies that a
+ // bounding box over [40.717..40.73] x [-74.1..-73.99] matches exactly two
+ // docs, with both the default execution and the "indexed" (lat_lon) type.
+ @Test
+ public void simpleBoundingBoxTest() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()).execute().actionGet();
+
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Expects exactly 2 hits; anyOf lists the candidate ids the box may contain
+ // (the latitude band [40.717..40.73] excludes doc 1 itself).
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+
+ // Same box, forcing the "indexed" execution path on the lat_lon subfields.
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(40.73, -74.1).bottomRight(40.717, -73.99).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(2l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("5")));
+ }
+ }
+
+ // Boxes whose edges touch indexed points exactly, plus boxes that cross the
+ // antimeridian (e.g. lon 171 .. -169), each checked with the default and the
+ // "indexed" execution type.
+ @Test
+ public void limitsBoundingBoxTest() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", -10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 10).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 40).field("lon", 20).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", -170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "8").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "9").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", 0).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "10").setSource(jsonBuilder().startObject()
+ .startObject("location").field("lat", -10).field("lon", 170).endObject()
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ // Box [40..41] x [-11..9]: only doc 2 (40,-10) falls strictly inside.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(41, -11).bottomRight(40, 9)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(41, -11).bottomRight(40, 9).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l))
+ ;
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+
+ // Mirror-image box [40..41] x [-9..11]: only doc 3 (40,10) matches.
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(41, -9).bottomRight(40, 11)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(41, -9).bottomRight(40, 11).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("3"));
+
+ // Box crossing the antimeridian: lon 171 eastward to -169 contains -170 only.
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(11, 171).bottomRight(1, -169)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(11, 171).bottomRight(1, -169).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("5"));
+
+ // Antimeridian-crossing box shifted south: lon 169 .. -171 contains 170 only.
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(9, 169).bottomRight(-1, -171)))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), geoBoundingBoxQuery("location").topLeft(9, 169).bottomRight(-1, -171).type("indexed")))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("9"));
+ }
+
+ // A box wrapping the antimeridian (left lon 143.5 > right lon 113.96875)
+ // covers both indexed points; the term query narrows the result to one doc
+ // per search, checked with both execution types.
+ @Test
+ public void limit2BoundingBoxTest() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("userid", 880)
+ .field("title", "Place in Stockholm")
+ .startObject("location").field("lat", 59.328355000000002).field("lon", 18.036842).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("userid", 534)
+ .field("title", "Place in Montreal")
+ .startObject("location").field("lat", 45.509526999999999).field("lon", -73.570986000000005).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxQuery("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 880),
+ geoBoundingBoxQuery("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxQuery("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(termQuery("userid", 534),
+ geoBoundingBoxQuery("location").topLeft(74.579421999999994, 143.5).bottomRight(-66.668903999999998, 113.96875).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ // Boxes spanning the complete longitude range, expressed both as
+ // [-180..180] and as [0..360]; the +/-50 latitude band excludes Stockholm
+ // (lat 59.3) while the +/-90 band matches both docs.
+ @Test
+ public void completeLonRangeTest() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("userid", 880)
+ .field("title", "Place in Stockholm")
+ .startObject("location").field("lat", 59.328355000000002).field("lon", 18.036842).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("userid", 534)
+ .field("title", "Place in Montreal")
+ .startObject("location").field("lat", 45.509526999999999).field("lon", -73.570986000000005).endObject()
+ .endObject())
+ .setRefresh(true)
+ .execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(50, -180).bottomRight(-50, 180))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(50, -180).bottomRight(-50, 180).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(90, -180).bottomRight(-90, 180))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(90, -180).bottomRight(-90, 180).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(50, 0).bottomRight(-50, 360))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(50, 0).bottomRight(-50, 360).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(90, 0).bottomRight(-90, 360))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ searchResponse = client().prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ geoBoundingBoxQuery("location").topLeft(90, 0).bottomRight(-90, 360).type("indexed"))
+ ).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
new file mode 100644
index 0000000000..8ecde92912
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceTests.java
@@ -0,0 +1,796 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
+import static org.elasticsearch.index.query.QueryBuilders.geoDistanceRangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests for geo_distance queries, distance ranges and geo-distance sorting.
+ */
+public class GeoDistanceTests extends ElasticsearchIntegrationTest {
+
+ // Indexes seven NYC-area points (the trailing comments give each point's
+ // distance from the "New York" doc), then exercises geo_distance and
+ // geo_distance_range filters — with and without optimizeBbox("indexed") —
+ // and finally geo-distance sorting in both directions.
+ @Test
+ public void simpleDistanceTests() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()),
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ .endObject()),
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endObject()),
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ .endObject()),
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject()
+ .endObject()),
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ .endObject()),
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()));
+
+ // Within 3 km of NY: everything except Times Square (5.286) and Brooklyn (8.572).
+ SearchResponse searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // now with a PLANE type
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("3km").geoDistance(GeoDistance.PLANE).point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 5);
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
+ }
+
+ // factor type is really too small for this resolution
+
+ // Within 2 km: Greenwich Village (2.029) drops out as well.
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ // 1.242 miles is roughly 2 km, so the same four docs match — also checks
+ // that non-metric distance units are parsed.
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+
+ // Ring between 1 km and 2 km: only Wall Street (1.055) and Soho (1.258).
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeQuery("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeQuery("location").from("1.0km").to("2.0km").point(40.7143528, -74.0059731).optimizeBbox("indexed")))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 2);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
+ }
+
+ // Open-ended ranges: to("2.0km") matches 4 docs, from("2.0km") the other 3.
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeQuery("location").to("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+
+ searchResponse = client().prepareSearch() // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoDistanceRangeQuery("location").from("2.0km").point(40.7143528, -74.0059731)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 3);
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ // SORTING
+
+ // Ascending distance from NY yields nearest-first order, descending reverses it.
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 7);
+ assertOrderedSearchHits(searchResponse, "7", "2", "6", "5", "4", "3", "1");
+ }
+
+ @Test
+ public void testDistanceSortingMVFields() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "New York")
+ .startObject("locations").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .startArray("locations")
+ // to NY: 1.055 km
+ .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
+ // to NY: 1.258 km
+ .startObject().field("lat", 40.7247222).field("lon", -74).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("names", "Greenwich Village", "Brooklyn")
+ .startArray("locations")
+ // to NY: 2.029 km
+ .startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
+ // to NY: 8.572 km
+ .startObject().field("lat", 40.65).field("lon", -73.95).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ assertFailures(client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("sum")),
+ RestStatus.BAD_REQUEST,
+ containsString("sort_mode [sum] isn't supported for sorting by geo distance"));
+ }
+
+ @Test
+ // Regression bug: https://github.com/elasticsearch/elasticsearch/issues/2851
+ public void testDistanceSortingWithMissingGeoPoint() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("locations").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("names", "Times Square", "Tribeca")
+ .startArray("locations")
+ // to NY: 5.286 km
+ .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
+ // to NY: 0.4621 km
+ .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject()
+ .endArray()
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("names", "Wall Street", "Soho")
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "1", "2");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ // Doc with missing geo point is first, is consistent with 0.20.x
+ assertHitCount(searchResponse, 2);
+ assertOrderedSearchHits(searchResponse, "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286d, 10d));
+ }
+
+ @Test
+ public void distanceScriptTests() throws Exception {
+ double source_lat = 32.798;
+ double source_long = -117.151;
+ double target_lat = 32.81;
+ double target_long = -117.21;
+
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "TestPosition")
+ .startObject("location").field("lat", source_lat).field("lon", source_long).endObject()
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse1 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].arcDistance(" + target_lat + "," + target_long + ")")).execute()
+ .actionGet();
+ Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance1,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse2 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].distance(" + target_lat + "," + target_long + ")")).execute()
+ .actionGet();
+ Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance2,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse3 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + target_lat + "," + target_long + ")"))
+ .execute().actionGet();
+ Double resultArcDistance3 = searchResponse3.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance3,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse4 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].distanceInKm(" + target_lat + "," + target_long + ")")).execute()
+ .actionGet();
+ Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance4,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse5 = client()
+ .prepareSearch()
+ .addField("_source")
+ .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + (target_lat) + "," + (target_long + 360) + ")"))
+ .execute().actionGet();
+ Double resultArcDistance5 = searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance5,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse6 = client()
+ .prepareSearch()
+ .addField("_source")
+ .addScriptField("distance", new Script("doc['location'].arcDistanceInKm(" + (target_lat + 360) + "," + (target_long) + ")"))
+ .execute().actionGet();
+ Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance6,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse7 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].arcDistanceInMiles(" + target_lat + "," + target_long + ")"))
+ .execute().actionGet();
+ Double resultDistance7 = searchResponse7.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance7,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ SearchResponse searchResponse8 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", new Script("doc['location'].distanceInMiles(" + target_lat + "," + target_long + ")"))
+ .execute().actionGet();
+ Double resultDistance8 = searchResponse8.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance8,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+ }
+
+ /*
+ * Remove in 2.0
+ */
+ @Test
+ public void distanceScriptTestsOldScriptAPI() throws Exception {
+ double source_lat = 32.798;
+ double source_long = -117.151;
+ double target_lat = 32.81;
+ double target_long = -117.21;
+
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("location").field("type", "geo_point").field("lat_lon", true).endObject().endObject().endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource(
+ jsonBuilder().startObject().field("name", "TestPosition").startObject("location").field("lat", source_lat)
+ .field("lon", source_long).endObject().endObject()).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse1 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].arcDistance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance1 = searchResponse1.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance1,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse2 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].distance(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance2 = searchResponse2.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance2,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.DEFAULT), 0.0001d));
+
+ SearchResponse searchResponse3 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].arcDistanceInKm(" + target_lat + "," + target_long + ")").execute()
+ .actionGet();
+ Double resultArcDistance3 = searchResponse3.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance3,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse4 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].distanceInKm(" + target_lat + "," + target_long + ")").execute().actionGet();
+ Double resultDistance4 = searchResponse4.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance4,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse5 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat) + "," + (target_long + 360) + ")").execute()
+ .actionGet();
+ Double resultArcDistance5 = searchResponse5.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance5,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse6 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].arcDistanceInKm(" + (target_lat + 360) + "," + (target_long) + ")").execute()
+ .actionGet();
+ Double resultArcDistance6 = searchResponse6.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultArcDistance6,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.KILOMETERS), 0.0001d));
+
+ SearchResponse searchResponse7 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].arcDistanceInMiles(" + target_lat + "," + target_long + ")").execute()
+ .actionGet();
+ Double resultDistance7 = searchResponse7.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance7,
+ closeTo(GeoDistance.ARC.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+
+ SearchResponse searchResponse8 = client().prepareSearch().addField("_source")
+ .addScriptField("distance", "doc['location'].distanceInMiles(" + target_lat + "," + target_long + ")").execute()
+ .actionGet();
+ Double resultDistance8 = searchResponse8.getHits().getHits()[0].getFields().get("distance").getValue();
+ assertThat(resultDistance8,
+ closeTo(GeoDistance.PLANE.calculate(source_lat, source_long, target_lat, target_long, DistanceUnit.MILES), 0.0001d));
+ }
+
+ @Test
+ public void testDistanceSortingNestedFields() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("branches")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("name").field("type", "string").endObject()
+ .startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+
+ assertAcked(prepareCreate("companies").addMapping("company", xContentBuilder));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("companies", "company", "1").setSource(jsonBuilder().startObject()
+ .field("name", "company 1")
+ .startArray("branches")
+ .startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.7143528).field("lon", -74.0059731).endObject()
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "2").setSource(jsonBuilder().startObject()
+ .field("name", "company 2")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759011).field("lon", -73.9844722).endObject() // to NY: 5.286 km
+ .endObject()
+ .startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718266).field("lon", -74.007819).endObject() // to NY: 0.4621 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "3").setSource(jsonBuilder().startObject()
+ .field("name", "company 3")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.7051157).field("lon", -74.0088305).endObject() // to NY: 1.055 km
+ .endObject()
+ .startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.7247222).field("lon", -74).endObject() // to NY: 1.258 km
+ .endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("companies", "company", "4").setSource(jsonBuilder().startObject()
+ .field("name", "company 4")
+ .startArray("branches")
+ .startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731033).field("lon", -73.9962255).endObject() // to NY: 2.029 km
+ .endObject()
+ .startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject() // to NY: 8.572 km
+ .endObject()
+ .endArray()
+ .endObject()));
+
+ // Order: Asc
+ SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+
+ // Order: Asc, Mode: max
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+
+ // Order: Desc
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ // Order: Desc, Mode: min
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedPath("branches")
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC)
+ )
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 4);
+ assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
+
+ searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(
+ SortBuilders.geoDistanceSort("branches.location").setNestedFilter(termQuery("branches.name", "brooklyn"))
+ .point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC)
+ )
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("4"));
+ assertSearchHits(searchResponse, "1", "2", "3", "4");
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+ assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
+
+ assertFailures(client().prepareSearch("companies").setQuery(matchAllQuery())
+ .addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("sum")),
+ RestStatus.BAD_REQUEST,
+ containsString("sort_mode [sum] isn't supported for sorting by geo distance"));
+ }
+
+ /**
+ * Issue 3073
+ */
+ @Test
+ public void testGeoDistanceFilter() throws IOException {
+ double lat = 40.720611;
+ double lon = -73.998776;
+
+ XContentBuilder mapping = JsonXContent.contentBuilder()
+ .startObject()
+ .startObject("location")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("geohash", true)
+ .field("geohash_precision", 24)
+ .field("lat_lon", true)
+ .startObject("fielddata")
+ .field("format", randomNumericFieldDataFormat())
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ XContentBuilder source = JsonXContent.contentBuilder()
+ .startObject()
+ .field("pin", GeoHashUtils.encode(lat, lon))
+ .endObject();
+
+ assertAcked(prepareCreate("locations").addMapping("location", mapping));
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).execute().actionGet();
+ refresh();
+ client().prepareGet("locations", "location", "1").execute().actionGet();
+
+ SearchResponse result = client().prepareSearch("locations")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(QueryBuilders.geoDistanceQuery("pin")
+ .geoDistance(GeoDistance.ARC)
+ .lat(lat).lon(lon)
+ .distance("1m"))
+ .execute().actionGet();
+
+ assertHitCount(result, 1);
+ }
+
+ private double randomLon() {
+ return randomDouble() * 360 - 180;
+ }
+
+ private double randomLat() {
+ return randomDouble() * 180 - 90;
+ }
+
+ public void testDuelOptimizations() throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type", "location", "type=geo_point,lat_lon=true"));
+ final int numDocs = scaledRandomIntBetween(3000, 10000);
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ for (int i = 0; i < numDocs; ++i) {
+ docs.add(client().prepareIndex("index", "type").setSource(jsonBuilder().startObject().startObject("location").field("lat", randomLat()).field("lon", randomLon()).endObject().endObject()));
+ }
+ indexRandom(true, docs);
+ ensureSearchable();
+
+ for (int i = 0; i < 10; ++i) {
+ final double originLat = randomLat();
+ final double originLon = randomLon();
+ final String distance = DistanceUnit.KILOMETERS.toString(randomInt(10000));
+ for (GeoDistance geoDistance : Arrays.asList(GeoDistance.ARC, GeoDistance.SLOPPY_ARC)) {
+ logger.info("Now testing GeoDistance={}, distance={}, origin=({}, {})", geoDistance, distance, originLat, originLon);
+ long matches = -1;
+ for (String optimizeBbox : Arrays.asList("none", "memory", "indexed")) {
+ SearchResponse resp = client().prepareSearch("index").setSize(0).setQuery(QueryBuilders.constantScoreQuery(
+ QueryBuilders.geoDistanceQuery("location").point(originLat, originLon).distance(distance).geoDistance(geoDistance).optimizeBbox(optimizeBbox))).execute().actionGet();
+ assertSearchResponse(resp);
+ logger.info("{} -> {} hits", optimizeBbox, resp.getHits().totalHits());
+ if (matches < 0) {
+ matches = resp.getHits().totalHits();
+ } else {
+ assertEquals(matches, resp.getHits().totalHits());
+ }
+ }
+ }
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
new file mode 100644
index 0000000000..5895b94fc0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
@@ -0,0 +1,636 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import com.spatial4j.core.context.SpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Shape;
+import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
+import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
+import org.apache.lucene.spatial.query.SpatialArgs;
+import org.apache.lucene.spatial.query.SpatialOperation;
+import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.geo.builders.MultiPolygonBuilder;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.GeohashCellQuery;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.*;
+import java.util.zip.GZIPInputStream;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class GeoFilterTests extends ElasticsearchIntegrationTest {
+
+ private static boolean intersectSupport;
+ private static boolean disjointSupport;
+ private static boolean withinSupport;
+
+ @BeforeClass
+ public static void createNodes() throws Exception {
+ intersectSupport = testRelationSupport(SpatialOperation.Intersects);
+ disjointSupport = testRelationSupport(SpatialOperation.IsDisjointTo);
+ withinSupport = testRelationSupport(SpatialOperation.IsWithin);
+ }
+
+ private static byte[] unZipData(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+
+ ByteArrayOutputStream out = new ByteArrayOutputStream();
+ GZIPInputStream in = new GZIPInputStream(is);
+ Streams.copy(in, out);
+
+ is.close();
+ out.close();
+
+ return out.toByteArray();
+ }
+
+ @Test
+ public void testShapeBuilders() {
+
+ try {
+ // self intersection polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(10, 10)
+ .point(-10, 10)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
+ }
+
+ // polygon with hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close().close().build();
+
+ try {
+ // polygon with overlapping hole
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 11).point(5, 11).point(5, -5)
+ .close().close().build();
+
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
+ }
+
+ try {
+ // polygon with intersection holes
+ ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .hole()
+ .point(-5, -6).point(5, -6).point(5, -4).point(-5, -4)
+ .close()
+ .close().build();
+ fail("Intersection of holes not detected");
+ } catch (InvalidShapeException e) {
+ }
+
+ try {
+ // Common line in polygon
+ ShapeBuilder.newPolygon()
+ .point(-10, -10)
+ .point(-10, 10)
+ .point(-5, 10)
+ .point(-5, -5)
+ .point(-5, 20)
+ .point(10, 20)
+ .point(10, -10)
+ .close().build();
+ fail("Self intersection not detected");
+ } catch (InvalidShapeException e) {
+ }
+
+// Not specified
+// try {
+// // two overlapping polygons within a multipolygon
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10)
+// .point(-10, 10)
+// .point(10, 10)
+// .point(10, -10)
+// .close()
+// .polygon()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close().build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ // Multipolygon: polygon with hole and polygon within the whole
+ ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .build();
+
+// Not supported
+// try {
+// // Multipolygon: polygon with hole and polygon within the hole but overlapping
+// ShapeBuilder.newMultiPolygon()
+// .polygon()
+// .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+// .hole()
+// .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+// .close()
+// .close()
+// .polygon()
+// .point(-4, -4).point(-4, 6).point(4, 6).point(4, -4)
+// .close()
+// .build();
+// fail("Polygon intersection not detected";
+// } catch (InvalidShapeException e) {}
+
+ }
+
+ @Test
+ public void testShapeRelations() throws Exception {
+
+ assertTrue( "Intersect relation is not supported", intersectSupport);
+ assertTrue("Disjoint relation is not supported", disjointSupport);
+ assertTrue("within relation is not supported", withinSupport);
+
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("polygon")
+ .startObject("properties")
+ .startObject("area")
+ .field("type", "geo_shape")
+ .field("tree", "geohash")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+
+ CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("shapes").addMapping("polygon", mapping);
+ mappingRequest.execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ // Create a multipolygon with two polygons. The first is an rectangle of size 10x10
+ // with a hole of size 5x5 equidistant from all sides. This hole in turn contains
+ // the second polygon of size 4x4 equidistant from all sites
+ MultiPolygonBuilder polygon = ShapeBuilder.newMultiPolygon()
+ .polygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .close()
+ .close()
+ .polygon()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close();
+
+ BytesReference data = jsonBuilder().startObject().field("area", polygon).endObject().bytes();
+
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Point in polygon
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // by definition the border of a polygon belongs to the inner
+ // so the border of a polygons hole also belongs to the inner
+ // of the polygon NOT the hole
+
+ // Point on polygon border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(10.0, 5.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ // Point on hole border
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(5.0, 2.0)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+
+ if (disjointSupport) {
+ // Point not in polygon
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoDisjointQuery("area", ShapeBuilder.newPoint(3, 3)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ // Point in polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoDisjointQuery("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("1"));
+ }
+
+ // Create a polygon that fills the empty area of the polygon defined above
+ PolygonBuilder inverse = ShapeBuilder.newPolygon()
+ .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5)
+ .hole()
+ .point(-4, -4).point(-4, 4).point(4, 4).point(4, -4)
+ .close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", inverse).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "2").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // re-check point on polygon hole
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(4.5, 4.5)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ assertFirstHit(result, hasId("2"));
+
+ // Create Polygon with hole and common edge
+ PolygonBuilder builder = ShapeBuilder.newPolygon()
+ .point(-10, -10).point(-10, 10).point(10, 10).point(10, -10)
+ .hole()
+ .point(-5, -5).point(-5, 5).point(10, 5).point(10, -5)
+ .close()
+ .close();
+
+ if (withinSupport) {
+ // Polygon WithIn Polygon
+ builder = ShapeBuilder.newPolygon()
+ .point(-30, -30).point(-30, 30).point(30, 30).point(30, -30).close();
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoWithinQuery("area", builder))
+ .execute().actionGet();
+ assertHitCount(result, 2);
+ }
+
+ // Create a polygon crossing longitude 180.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // Create a polygon crossing longitude 180 with hole.
+ builder = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+
+ data = jsonBuilder().startObject().field("area", builder).endObject().bytes();
+ client().prepareIndex("shapes", "polygon", "1").setSource(data).execute().actionGet();
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(-174, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(180, -4)))
+ .execute().actionGet();
+ assertHitCount(result, 0);
+
+ result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.geoIntersectionQuery("area", ShapeBuilder.newPoint(180, -6)))
+ .execute().actionGet();
+ assertHitCount(result, 1);
+ }
+
+ @Test
+ @Slow
+ public void bulktest() throws Exception {
+ byte[] bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.gz");
+
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("country")
+ .startObject("properties")
+ .startObject("pin")
+ .field("type", "geo_point")
+ .field("lat_lon", true)
+ .field("store", true)
+ .endObject()
+ .startObject("location")
+ .field("type", "geo_shape")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ client().admin().indices().prepareCreate("countries").addMapping("country", mapping).execute().actionGet();
+ BulkResponse bulk = client().prepareBulk().add(bulkAction, 0, bulkAction.length, null, null).execute().actionGet();
+
+ for (BulkItemResponse item : bulk.getItems()) {
+ assertFalse("unable to index data", item.isFailed());
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ String key = "DE";
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("_id", key))
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 1);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getId(), equalTo(key));
+ }
+
+ SearchResponse world = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoBoundingBoxQuery("pin")
+ .topLeft(90, -179.99999)
+ .bottomRight(-90, 179.99999))
+ ).execute().actionGet();
+
+ assertHitCount(world, 53);
+
+ SearchResponse distance = client().prepareSearch().addField("pin").setQuery(
+ filteredQuery(
+ matchAllQuery(),
+ geoDistanceQuery("pin").distance("425km").point(51.11, 9.851)
+ )).execute().actionGet();
+
+ assertHitCount(distance, 5);
+ GeoPoint point = new GeoPoint();
+ for (SearchHit hit : distance.getHits()) {
+ String name = hit.getId();
+ point.resetFromString(hit.fields().get("pin").getValue().toString());
+ double dist = distance(point.getLat(), point.getLon(), 51.11, 9.851);
+
+ assertThat("distance to '" + name + "'", dist, lessThanOrEqualTo(425000d));
+ assertThat(name, anyOf(equalTo("CZ"), equalTo("DE"), equalTo("BE"), equalTo("NL"), equalTo("LU")));
+ if (key.equals(name)) {
+ assertThat(dist, equalTo(0d));
+ }
+ }
+ }
+
+ @Test
+ public void testGeohashCellFilter() throws IOException {
+ String geohash = randomhash(10);
+ logger.info("Testing geohash_cell filter for [{}]", geohash);
+
+ Collection<? extends CharSequence> neighbors = GeoHashUtils.neighbors(geohash);
+ Collection<? extends CharSequence> parentNeighbors = GeoHashUtils.neighbors(geohash.substring(0, geohash.length() - 1));
+
+ logger.info("Neighbors {}", neighbors);
+ logger.info("Parent Neighbors {}", parentNeighbors);
+
+ ensureYellow();
+
+ client().admin().indices().prepareCreate("locations").addMapping("location", "pin", "type=geo_point,geohash_prefix=true,lat_lon=false").execute().actionGet();
+
+ // Index a pin
+ client().prepareIndex("locations", "location", "1").setCreate(true).setSource("pin", geohash).execute().actionGet();
+
+ // index neighbors
+ Iterator<? extends CharSequence> iterator = neighbors.iterator();
+ for (int i = 0; iterator.hasNext(); i++) {
+ client().prepareIndex("locations", "location", "N" + i).setCreate(true).setSource("pin", iterator.next()).execute().actionGet();
+ }
+
+ // Index parent cell
+ client().prepareIndex("locations", "location", "p").setCreate(true).setSource("pin", geohash.substring(0, geohash.length() - 1)).execute().actionGet();
+
+ // index neighbors
+ iterator = parentNeighbors.iterator();
+ for (int i = 0; iterator.hasNext(); i++) {
+ client().prepareIndex("locations", "location", "p" + i).setCreate(true).setSource("pin", iterator.next()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh("locations").execute().actionGet();
+
+ Map<GeohashCellQuery.Builder, Long> expectedCounts = new HashMap<>();
+ Map<GeohashCellQuery.Builder, String[]> expectedResults = new HashMap<>();
+
+ expectedCounts.put(geoHashCellQuery("pin", geohash, false), 1L);
+
+ expectedCounts.put(geoHashCellQuery("pin", geohash.substring(0, geohash.length() - 1), true), 2L + neighbors.size() + parentNeighbors.size());
+
+ // Testing point formats and precision
+ GeoPoint point = GeoHashUtils.decode(geohash);
+ int precision = geohash.length();
+
+ expectedCounts.put(geoHashCellQuery("pin", point).neighbors(true).precision(precision), 1L + neighbors.size());
+
+ logger.info("random testing of setting");
+
+ List<GeohashCellQuery.Builder> filterBuilders = new ArrayList<>(expectedCounts.keySet());
+ for (int j = filterBuilders.size() * 2 * randomIntBetween(1, 5); j > 0; j--) {
+ Collections.shuffle(filterBuilders, getRandom());
+ for (GeohashCellQuery.Builder builder : filterBuilders) {
+ try {
+ long expectedCount = expectedCounts.get(builder);
+ SearchResponse response = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(builder).setSize((int) expectedCount).get();
+ assertHitCount(response, expectedCount);
+ String[] expectedIds = expectedResults.get(builder);
+ if (expectedIds == null) {
+ ArrayList<String> ids = new ArrayList<>();
+ for (SearchHit hit : response.getHits()) {
+ ids.add(hit.id());
+ }
+ expectedResults.put(builder, ids.toArray(Strings.EMPTY_ARRAY));
+ continue;
+ }
+
+ assertSearchHits(response, expectedIds);
+
+ } catch (AssertionError error) {
+ throw new AssertionError(error.getMessage() + "\n geohash_cell filter:" + builder, error);
+ }
+
+
+ }
+ }
+
+ logger.info("Testing lat/lon format");
+ String pointTest1 = "{\"geohash_cell\": {\"pin\": {\"lat\": " + point.lat() + ",\"lon\": " + point.lon() + "},\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results3 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest1).execute().actionGet();
+ assertHitCount(results3, neighbors.size() + 1);
+
+
+ logger.info("Testing String format");
+ String pointTest2 = "{\"geohash_cell\": {\"pin\": \"" + point.lat() + "," + point.lon() + "\",\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results4 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest2).execute().actionGet();
+ assertHitCount(results4, neighbors.size() + 1);
+
+ logger.info("Testing Array format");
+ String pointTest3 = "{\"geohash_cell\": {\"pin\": [" + point.lon() + "," + point.lat() + "],\"precision\": " + precision + ",\"neighbors\": true}}";
+ SearchResponse results5 = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(pointTest3).execute().actionGet();
+ assertHitCount(results5, neighbors.size() + 1);
+ }
+
+ @Test
+ public void testNeighbors() {
+ // Simple root case
+ assertThat(GeoHashUtils.addNeighbors("7", new ArrayList<String>()), containsInAnyOrder("4", "5", "6", "d", "e", "h", "k", "s"));
+
+ // Root cases (Outer cells)
+ assertThat(GeoHashUtils.addNeighbors("0", new ArrayList<String>()), containsInAnyOrder("1", "2", "3", "p", "r"));
+ assertThat(GeoHashUtils.addNeighbors("b", new ArrayList<String>()), containsInAnyOrder("8", "9", "c", "x", "z"));
+ assertThat(GeoHashUtils.addNeighbors("p", new ArrayList<String>()), containsInAnyOrder("n", "q", "r", "0", "2"));
+ assertThat(GeoHashUtils.addNeighbors("z", new ArrayList<String>()), containsInAnyOrder("8", "b", "w", "x", "y"));
+
+ // Root crossing dateline
+ assertThat(GeoHashUtils.addNeighbors("2", new ArrayList<String>()), containsInAnyOrder("0", "1", "3", "8", "9", "p", "r", "x"));
+ assertThat(GeoHashUtils.addNeighbors("r", new ArrayList<String>()), containsInAnyOrder("0", "2", "8", "n", "p", "q", "w", "x"));
+
+ // level1: simple case
+ assertThat(GeoHashUtils.addNeighbors("dk", new ArrayList<String>()), containsInAnyOrder("d5", "d7", "de", "dh", "dj", "dm", "ds", "dt"));
+
+ // Level1: crossing cells
+ assertThat(GeoHashUtils.addNeighbors("d5", new ArrayList<String>()), containsInAnyOrder("d4", "d6", "d7", "dh", "dk", "9f", "9g", "9u"));
+ assertThat(GeoHashUtils.addNeighbors("d0", new ArrayList<String>()), containsInAnyOrder("d1", "d2", "d3", "9b", "9c", "6p", "6r", "3z"));
+ }
+
+ public static double distance(double lat1, double lon1, double lat2, double lon2) {
+ return GeoUtils.EARTH_SEMI_MAJOR_AXIS * DistanceUtils.distHaversineRAD(
+ DistanceUtils.toRadians(lat1),
+ DistanceUtils.toRadians(lon1),
+ DistanceUtils.toRadians(lat2),
+ DistanceUtils.toRadians(lon2)
+ );
+ }
+
+ protected static boolean testRelationSupport(SpatialOperation relation) {
+ if (relation == SpatialOperation.IsDisjointTo) {
+ // disjoint works in terms of intersection
+ relation = SpatialOperation.Intersects;
+ }
+ try {
+ GeohashPrefixTree tree = new GeohashPrefixTree(SpatialContext.GEO, 3);
+ RecursivePrefixTreeStrategy strategy = new RecursivePrefixTreeStrategy(tree, "area");
+ Shape shape = SpatialContext.GEO.makePoint(0, 0);
+ SpatialArgs args = new SpatialArgs(relation, shape);
+ strategy.makeFilter(args);
+ return true;
+ } catch (UnsupportedSpatialOperation e) {
+ e.printStackTrace();
+ return false;
+ }
+ }
+
+ protected static String randomhash(int length) {
+ return randomhash(getRandom(), length);
+ }
+
+ protected static String randomhash(Random random) {
+ return randomhash(random, 2 + random.nextInt(10));
+ }
+
+ protected static String randomhash() {
+ return randomhash(getRandom());
+ }
+
+ protected static String randomhash(Random random, int length) {
+ final char[] BASE_32 = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'b', 'c', 'd', 'e', 'f', 'g',
+ 'h', 'j', 'k', 'm', 'n', 'p', 'q', 'r',
+ 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'};
+
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < length; i++) {
+ sb.append(BASE_32[random.nextInt(BASE_32.length)]);
+ }
+
+ return sb.toString();
+ }
+}
+
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java
new file mode 100644
index 0000000000..8b7b505059
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.geoPolygonQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class GeoPolygonTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected void setupSuiteScopeCluster() throws Exception {
+ XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location").field("type", "geo_point").field("lat_lon", true)
+ .startObject("fielddata").field("format", randomNumericFieldDataFormat()).endObject().endObject().endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", xContentBuilder));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "New York")
+ .startObject("location").field("lat", 40.714).field("lon", -74.006).endObject()
+ .endObject()),
+ // to NY: 5.286 km
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Times Square")
+ .startObject("location").field("lat", 40.759).field("lon", -73.984).endObject()
+ .endObject()),
+ // to NY: 0.4621 km
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("name", "Tribeca")
+ .startObject("location").field("lat", 40.718).field("lon", -74.008).endObject()
+ .endObject()),
+ // to NY: 1.055 km
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject()
+ .field("name", "Wall Street")
+ .startObject("location").field("lat", 40.705).field("lon", -74.009).endObject()
+ .endObject()),
+ // to NY: 1.258 km
+ client().prepareIndex("test", "type1", "5").setSource(jsonBuilder().startObject()
+ .field("name", "Soho")
+ .startObject("location").field("lat", 40.725).field("lon", -74).endObject()
+ .endObject()),
+ // to NY: 2.029 km
+ client().prepareIndex("test", "type1", "6").setSource(jsonBuilder().startObject()
+ .field("name", "Greenwich Village")
+ .startObject("location").field("lat", 40.731).field("lon", -73.996).endObject()
+ .endObject()),
+ // to NY: 8.572 km
+ client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject()
+ .field("name", "Brooklyn")
+ .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject()
+ .endObject()));
+ ensureSearchable("test");
+ }
+
+ @Test
+ public void simplePolygonTest() throws Exception {
+
+ SearchResponse searchResponse = client().prepareSearch("test") // from NY
+ .setQuery(filteredQuery(matchAllQuery(), geoPolygonQuery("location")
+ .addPoint(40.7, -74.0)
+ .addPoint(40.7, -74.1)
+ .addPoint(40.8, -74.1)
+ .addPoint(40.8, -74.0)
+ .addPoint(40.7, -74.0)))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 4);
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
new file mode 100644
index 0000000000..eb6a6629aa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
@@ -0,0 +1,488 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.geo;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.common.geo.ShapeRelation;
+import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
+import org.elasticsearch.index.query.GeoShapeQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.geoIntersectionQuery;
+import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.nullValue;
+
+public class GeoShapeIntegrationTests extends ElasticsearchIntegrationTest {
+
+ // Indexing an explicit JSON null for a geo_shape field must succeed, and a subsequent
+ // get must return the document with no value for "location".
+ @Test
+ public void testNullShape() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ // indexRandom(false, ...): no forced refresh — retrieval below is a get, not a search
+ indexRandom(false, client().prepareIndex("test", "type1", "aNullshape").setSource("{\"location\": null}"));
+ GetResponse result = client().prepareGet("test", "type1", "aNullshape").execute().actionGet();
+ assertThat(result.getField("location"), nullValue());
+ }
+
+ // Indexes two points and queries with an envelope (lon/lat -45..45 x -45..45):
+ // doc 1 at (-30,-30) lies inside, doc 2 at (-45,-50) lies outside (lat -50).
+ // Both the filtered geo_intersection form and the plain geo_shape query must
+ // return only doc 1.
+ @Test
+ public void testIndexPointsFilterRectangle() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ indexRandom(true,
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()),
+
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("name", "Document 2")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-45).value(-50).endArray()
+ .endObject()
+ .endObject()));
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionQuery("location", shape)))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ // same expectation via the query (non-filter) variant
+ searchResponse = client().prepareSearch()
+ .setQuery(geoShapeQuery("location", shape))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ // Regression-style test: a small polygon (Blakely Island) intersected by an envelope
+ // that only clips its edge must still be found. See the inline note about the
+ // SpatialPrefixTree bottom-level optimization below.
+ @Test
+ public void testEdgeCases() throws Exception {
+
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1", "blakely").setSource(jsonBuilder().startObject()
+ .field("name", "Blakely Island")
+ .startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-122.83).value(48.57).endArray()
+ .startArray().value(-122.77).value(48.56).endArray()
+ .startArray().value(-122.79).value(48.53).endArray()
+ .startArray().value(-122.83).value(48.57).endArray() // close the polygon
+ .endArray().endArray()
+ .endObject()
+ .endObject()));
+
+
+ ShapeBuilder query = ShapeBuilder.newEnvelope().topLeft(-122.88, 48.62).bottomRight(-122.82, 48.54);
+
+ // This search would fail if both geoshape indexing and geoshape filtering
+ // used the bottom-level optimization in SpatialPrefixTree#recursiveGetNodes.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionQuery("location", query)))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("blakely"));
+ }
+
+ // Stores the query shape itself as a document ("Big_Rectangle" in the "shapes" index)
+ // and then queries by reference (id + type) instead of passing the shape inline.
+ // Both the filtered and the plain geo_shape form must find doc 1.
+ @Test
+ public void testIndexedShapeReference() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("tree", "quadtree")
+ .endObject().endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ createIndex("shapes");
+ ensureGreen();
+
+ ShapeBuilder shape = ShapeBuilder.newEnvelope().topLeft(-45, 45).bottomRight(45, -45);
+
+ indexRandom(true,
+ client().prepareIndex("shapes", "shape_type", "Big_Rectangle").setSource(jsonBuilder().startObject()
+ .field("shape", shape).endObject()),
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Document 1")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(-30).value(-30).endArray()
+ .endObject()
+ .endObject()));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(),
+ geoIntersectionQuery("location", "Big_Rectangle", "shape_type")))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(geoShapeQuery("location", "Big_Rectangle", "shape_type"))
+ .execute().actionGet();
+
+ assertSearchResponse(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ }
+
+ // Calling build() on a ShapeBuilder must not change its serialized form —
+ // verified for a polygon with a hole and for a linestring (note lons > 180 exercise
+ // dateline handling in the builders).
+ @Test
+ public void testReusableBuilder() throws IOException {
+ ShapeBuilder polygon = ShapeBuilder.newPolygon()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10)
+ .hole().point(175, -5).point(185, -5).point(185, 5).point(175, 5).close()
+ .close();
+ assertUnmodified(polygon);
+
+ ShapeBuilder linestring = ShapeBuilder.newLineString()
+ .point(170, -10).point(190, -10).point(190, 10).point(170, 10);
+ assertUnmodified(linestring);
+ }
+
+ // Serializes the builder to JSON, invokes build(), serializes again, and asserts the
+ // two renderings are identical — i.e. build() left the builder unmodified.
+ private void assertUnmodified(ShapeBuilder builder) throws IOException {
+ String before = jsonBuilder().startObject().field("area", builder).endObject().string();
+ builder.build();
+ String after = jsonBuilder().startObject().field("area", builder).endObject().string();
+ assertThat(before, equalTo(after));
+ }
+
+ // A single document carrying two geo_shape fields (location1/location2) must be
+ // queryable via an indexed_shape reference whose "path" points at one of those
+ // fields within the same document.
+ @Test
+ public void testParsingMultipleShapes() throws Exception {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("location1")
+ .field("type", "geo_shape")
+ .endObject()
+ .startObject("location2")
+ .field("type", "geo_shape")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureYellow();
+
+ String p1 = "\"location1\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
+ String p2 = "\"location2\" : {\"type\":\"polygon\", \"coordinates\":[[[-20,-20],[20,-20],[20,20],[-20,20],[-20,-20]]]}";
+ String o1 = "{" + p1 + ", " + p2 + "}";
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(o1));
+
+ // the shape is fetched from doc 1 itself via path "location2"
+ String filter = "{\"geo_shape\": {\"location2\": {\"indexed_shape\": {"
+ + "\"id\": \"1\","
+ + "\"type\": \"type1\","
+ + "\"index\": \"test\","
+ + "\"path\": \"location2\""
+ + "}}}}";
+
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).setPostFilter(filter).execute().actionGet();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ }
+
+ // Exercises indexedShapePath(): the "shapes" doc nests the same polygon under
+ // "location", "1.location", "1.2.location" and "1.2.3.location"; each path must
+ // resolve and match the test doc, first as a post-filter, then as a query.
+ @Test
+ public void testShapeFetchingPath() throws Exception {
+ createIndex("shapes");
+ assertAcked(prepareCreate("test").addMapping("type", "location", "type=geo_shape"));
+
+ String location = "\"location\" : {\"type\":\"polygon\", \"coordinates\":[[[-10,-10],[10,-10],[10,10],[-10,10],[-10,-10]]]}";
+ indexRandom(true,
+ client().prepareIndex("shapes", "type", "1")
+ .setSource(
+ String.format(
+ Locale.ROOT, "{ %s, \"1\" : { %s, \"2\" : { %s, \"3\" : { %s } }} }", location, location, location, location
+ )
+ ),
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(-20).value(-20).endArray()
+ .startArray().value(20).value(-20).endArray()
+ .startArray().value(20).value(20).endArray()
+ .startArray().value(-20).value(20).endArray()
+ .startArray().value(-20).value(-20).endArray()
+ .endArray().endArray()
+ .endObject().endObject()));
+ ensureSearchable("test", "shapes");
+
+ // filter variant, each nesting level in turn
+ GeoShapeQueryBuilder filter = QueryBuilders.geoShapeQuery("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ filter = QueryBuilders.geoShapeQuery("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ filter = QueryBuilders.geoShapeQuery("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ filter = QueryBuilders.geoShapeQuery("location", "1", "type", ShapeRelation.INTERSECTS)
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+
+ // now test the query variant
+ GeoShapeQueryBuilder query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ query = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex("shapes")
+ .indexedShapePath("1.2.3.location");
+ result = client().prepareSearch("test").setQuery(query).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ }
+
+ // Indexes a randomly generated geometrycollection and filters with one of its own
+ // member shapes; the doc must match. Muted via @AwaitsFix pending issue #9904.
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9904")
+ @Test
+ public void testShapeFilterWithRandomGeoCollection() throws Exception {
+ // Create a random geometry collection.
+ GeometryCollectionBuilder gcb = RandomShapeGenerator.createGeometryCollection(getRandom());
+
+ logger.info("Created Random GeometryCollection containing " + gcb.numShapes() + " shapes");
+
+ createIndex("randshapes");
+ assertAcked(prepareCreate("test").addMapping("type", "location", "type=geo_shape"));
+
+ XContentBuilder docSource = gcb.toXContent(jsonBuilder().startObject().field("location"), null).endObject();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource(docSource));
+
+ ensureSearchable("test");
+
+ // pick one random member of the collection as the filter shape
+ ShapeBuilder filterShape = (gcb.getShapeAt(randomIntBetween(0, gcb.numShapes() - 1)));
+
+ GeoShapeQueryBuilder filter = QueryBuilders.geoShapeQuery("location", filterShape, ShapeRelation.INTERSECTS);
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ }
+
+ // Indexes a fixed geometrycollection (a point and a linestring around lon 100-102)
+ // and checks INTERSECTS semantics with collection-valued filters:
+ // a covering polygon matches, a disjoint polygon does not, and a collection holding
+ // both polygons matches (any intersecting member suffices).
+ @Test
+ public void testShapeFilterWithDefinedGeoCollection() throws Exception {
+ createIndex("shapes");
+ assertAcked(prepareCreate("test").addMapping("type", "location", "type=geo_shape"));
+
+ XContentBuilder docSource = jsonBuilder().startObject().startObject("location")
+ .field("type", "geometrycollection")
+ .startArray("geometries")
+ .startObject()
+ .field("type", "point")
+ .startArray("coordinates")
+ .value(100.0).value(0.0)
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("type", "linestring")
+ .startArray("coordinates")
+ .startArray()
+ .value(101.0).value(0.0)
+ .endArray()
+ .startArray()
+ .value(102.0).value(1.0)
+ .endArray()
+ .endArray()
+ .endObject()
+ .endArray()
+ .endObject().endObject();
+ indexRandom(true,
+ client().prepareIndex("test", "type", "1")
+ .setSource(docSource));
+ ensureSearchable("test");
+
+ GeoShapeQueryBuilder filter = QueryBuilders.geoShapeQuery("location", ShapeBuilder.newGeometryCollection().polygon(ShapeBuilder.newPolygon().point(99.0, -1.0).point(99.0, 3.0).point(103.0, 3.0).point(103.0, -1.0).point(99.0, -1.0)), ShapeRelation.INTERSECTS);
+ SearchResponse result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ // disjoint polygon (lon 193-199): no hit expected
+ filter = QueryBuilders.geoShapeQuery("location", ShapeBuilder.newGeometryCollection().polygon(ShapeBuilder.newPolygon().point(199.0, -11.0).point(199.0, 13.0).point(193.0, 13.0).point(193.0, -11.0).point(199.0, -11.0)), ShapeRelation.INTERSECTS);
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 0);
+ // collection of both polygons: the intersecting member makes the doc match
+ filter = QueryBuilders.geoShapeQuery("location", ShapeBuilder.newGeometryCollection()
+ .polygon(ShapeBuilder.newPolygon().point(99.0, -1.0).point(99.0, 3.0).point(103.0, 3.0).point(103.0, -1.0).point(99.0, -1.0))
+ .polygon(ShapeBuilder.newPolygon().point(199.0, -11.0).point(199.0, 13.0).point(193.0, 13.0).point(193.0, -11.0).point(199.0, -11.0)), ShapeRelation.INTERSECTS);
+ result = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery())
+ .setPostFilter(filter).get();
+ assertSearchResponse(result);
+ assertHitCount(result, 1);
+ }
+
+ /**
+ * Test that the geo_shape {@code orientation} mapping parameter correctly persists
+ * across a full cluster restart: "left" must come back as CLOCKWISE (alias LEFT/CW)
+ * and "right" as COUNTER_CLOCKWISE (alias RIGHT/CCW).
+ * @throws Exception if index creation, the restart, or state lookup fails
+ */
+ public void testOrientationPersistence() throws Exception {
+ String idxName = "orientation";
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("shape")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("orientation", "left")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ // create index
+ assertAcked(prepareCreate(idxName).addMapping("shape", mapping));
+
+ mapping = XContentFactory.jsonBuilder().startObject().startObject("shape")
+ .startObject("properties").startObject("location")
+ .field("type", "geo_shape")
+ .field("orientation", "right")
+ .endObject().endObject()
+ .endObject().endObject().string();
+
+ assertAcked(prepareCreate(idxName+"2").addMapping("shape", mapping));
+ ensureGreen(idxName, idxName+"2");
+
+ // mappings must be re-read from persisted state, not in-memory copies
+ internalCluster().fullRestart();
+ ensureGreen(idxName, idxName+"2");
+
+ // left orientation test
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName));
+ IndexService indexService = indicesService.indexService(idxName);
+ FieldMapper fieldMapper = indexService.mapperService().smartNameFieldMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)fieldMapper;
+ ShapeBuilder.Orientation orientation = gsfm.fieldType().orientation();
+ // the three constants are expected to be aliases of the same orientation
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CLOCKWISE));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.LEFT));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CW));
+
+ // right orientation test
+ indicesService = internalCluster().getInstance(IndicesService.class, findNodeName(idxName+"2"));
+ indexService = indicesService.indexService(idxName+"2");
+ fieldMapper = indexService.mapperService().smartNameFieldMapper("location");
+ assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class));
+
+ gsfm = (GeoShapeFieldMapper)fieldMapper;
+ orientation = gsfm.fieldType().orientation();
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.COUNTER_CLOCKWISE));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.RIGHT));
+ assertThat(orientation, equalTo(ShapeBuilder.Orientation.CCW));
+ }
+
+ // Returns the name of the node holding the first assigned copy of shard 0 of the
+ // given index, resolved from the current cluster state.
+ private String findNodeName(String index) {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ IndexShardRoutingTable shard = state.getRoutingTable().index(index).shard(0);
+ String nodeId = shard.assignedShards().get(0).currentNodeId();
+ return state.getNodes().get(nodeId).name();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/geo/gzippedmap.gz b/core/src/test/java/org/elasticsearch/search/geo/gzippedmap.gz
new file mode 100644
index 0000000000..d903def573
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/geo/gzippedmap.gz
Binary files differ
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
new file mode 100644
index 0000000000..3a9135cb73
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.index.mapper.FieldMapper;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * Deliberately dumb highlighter used to test the pluggable highlighting
+ * functionality: instead of inspecting text it emits a canned
+ * "standard response for &lt;field&gt; at position &lt;n&gt;" fragment, where n counts
+ * how many fields have been highlighted so far for the current document,
+ * plus one fragment per configured per-field option.
+ */
+public class CustomHighlighter implements Highlighter {
+
+ @Override
+ public String[] names() {
+ return new String[] { "test-custom" };
+ }
+
+ @Override
+ public HighlightField highlight(HighlighterContext highlighterContext) {
+ SearchContextHighlight.Field field = highlighterContext.field;
+ // per-hit cache tracks the running field position for the doc being highlighted
+ CacheEntry cacheEntry = (CacheEntry) highlighterContext.hitContext.cache().get("test-custom");
+ final int docId = highlighterContext.hitContext.readerContext().docBase + highlighterContext.hitContext.docId();
+ if (cacheEntry == null) {
+ cacheEntry = new CacheEntry();
+ highlighterContext.hitContext.cache().put("test-custom", cacheEntry);
+ cacheEntry.docId = docId;
+ cacheEntry.position = 1;
+ } else {
+ // same doc: next field position; new doc: reset counter
+ if (cacheEntry.docId == docId) {
+ cacheEntry.position++;
+ } else {
+ cacheEntry.docId = docId;
+ cacheEntry.position = 1;
+ }
+ }
+
+ List<Text> responses = Lists.newArrayList();
+ responses.add(new StringText(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(),
+ cacheEntry.position)));
+
+ // echo any per-field highlighter options as extra fragments
+ if (field.fieldOptions().options() != null) {
+ for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) {
+ responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
+ }
+ }
+
+ return new HighlightField(highlighterContext.fieldName, responses.toArray(new Text[]{}));
+ }
+
+ @Override
+ public boolean canHighlight(FieldMapper fieldMapper) {
+ return true;
+ }
+
+ // mutable per-document state kept in the hit-context cache
+ private static class CacheEntry {
+ private int position;
+ private int docId;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
new file mode 100644
index 0000000000..a3e327b097
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+// Test plugin that registers CustomHighlighter with the highlight module so the
+// "test-custom" highlighter type becomes available to search requests.
+public class CustomHighlighterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-highlighter";
+ }
+
+ @Override
+ public String description() {
+ return "Custom highlighter to test pluggable implementation";
+ }
+
+ // invoked by the plugin infrastructure when the HighlightModule is constructed
+ public void onModule(HighlightModule highlightModule) {
+ highlightModule.registerHighlighter(CustomHighlighter.class);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
new file mode 100644
index 0000000000..8aaff21dc8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighterSearchTests.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHighlight;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration tests that load {@link CustomHighlighterPlugin} into a suite-scoped
+ * single-data-node cluster and verify the pluggable "test-custom" highlighter is
+ * invoked, receives per-field and global options, and sees fields in request order.
+ */
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1)
+public class CustomHighlighterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ // register the test plugin on every node of the suite cluster
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", CustomHighlighterPlugin.class.getName())
+ .build();
+ }
+
+ // NOTE(review): @Before on a protected method relies on the randomized test runner
+ // accepting non-public fixture hooks (plain JUnit4 requires public) — confirm.
+ @Before
+ protected void setup() throws Exception{
+ indexRandom(true,
+ client().prepareIndex("test", "test", "1").setSource(
+ "name", "arbitrary content", "other_name", "foo", "other_other_name", "bar"),
+ client().prepareIndex("test", "test", "2").setSource(
+ "other_name", "foo", "other_other_name", "bar"));
+ ensureYellow();
+ }
+
+ @Test
+ public void testThatCustomHighlightersAreSupported() throws IOException {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField("name").setHighlighterType("test-custom")
+ .execute().actionGet();
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1"));
+ }
+
+ // per-field options must be handed to the highlighter and echoed back as fragments
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredPerField() throws Exception {
+ HighlightBuilder.Field highlightConfig = new HighlightBuilder.Field("name");
+ highlightConfig.highlighterType("test-custom");
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myFieldOption", "someValue");
+ highlightConfig.options(options);
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .addHighlightedField(highlightConfig)
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myFieldOption:someValue"));
+ }
+
+ // global highlighter options must reach the custom highlighter as well
+ @Test
+ public void testThatCustomHighlighterCanBeConfiguredGlobally() throws Exception {
+ Map<String, Object> options = Maps.newHashMap();
+ options.put("myGlobalOption", "someValue");
+
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setHighlighterOptions(options)
+ .setHighlighterType("test-custom")
+ .addHighlightedField("name")
+ .execute().actionGet();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1"));
+ assertHighlight(searchResponse, 0, "name", 1, equalTo("field:myGlobalOption:someValue"));
+ }
+
+ // with explicit field order, positions 1..3 must follow the order fields were added
+ @Test
+ public void testThatCustomHighlighterReceivesFieldsInOrder() throws Exception {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("test")
+ .setQuery(QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).should(QueryBuilders
+ .termQuery("name", "arbitrary")))
+ .setHighlighterType("test-custom")
+ .addHighlightedField("name")
+ .addHighlightedField("other_name")
+ .addHighlightedField("other_other_name")
+ .setHighlighterExplicitFieldOrder(true)
+ .get();
+
+ assertHighlight(searchResponse, 0, "name", 0, equalTo("standard response for name at position 1"));
+ assertHighlight(searchResponse, 0, "other_name", 0, equalTo("standard response for other_name at position 2"));
+ assertHighlight(searchResponse, 0, "other_other_name", 0, equalTo("standard response for other_other_name at position 3"));
+ assertHighlight(searchResponse, 1, "name", 0, equalTo("standard response for name at position 1"));
+ assertHighlight(searchResponse, 1, "other_name", 0, equalTo("standard response for other_name at position 2"));
+ assertHighlight(searchResponse, 1, "other_other_name", 0, equalTo("standard response for other_other_name at position 3"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
new file mode 100644
index 0000000000..7a0ebf5773
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
@@ -0,0 +1,2609 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.highlight;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.MatchQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilder.Field;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+
+@Slow
+public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testHighlightingWithWildcardName() throws IOException {
+ // test the kibana case with * as fieldname that will try to highlight all fields including meta fields
+ XContentBuilder mappings = jsonBuilder();
+ mappings.startObject();
+ mappings.startObject("type")
+ .startObject("properties")
+ .startObject("text")
+ .field("type", "string")
+ .field("analyzer", "keyword")
+ .field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .endObject()
+ .endObject();
+ mappings.endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("type", mappings));
+ ensureYellow();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("text", "text").endObject())
+ .get();
+ refresh();
+ String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
+ assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
+ }
+
+ @Test
+ public void testPlainHighlighterWithLongUnanalyzedStringTerm() throws IOException {
+ XContentBuilder mappings = jsonBuilder();
+ mappings.startObject();
+ mappings.startObject("type")
+ .startObject("properties")
+ .startObject("long_text")
+ .field("type", "string")
+ .field("analyzer", "keyword")
+ .field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets")
+ .field("ignore_above", 1)
+ .endObject()
+ .startObject("text")
+ .field("type", "string")
+ .field("analyzer", "keyword")
+ .field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .endObject()
+ .endObject();
+ mappings.endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("type", mappings));
+ ensureYellow();
+ // create a term that is larger than the allowed 32766, index it and then try to highlight it
+ // the search request should still succeed
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < 32767; i++) {
+ builder.append('a');
+ }
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("long_text", builder.toString()).field("text", "text").endObject())
+ .get();
+ refresh();
+ String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
+ assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("long_text").highlighterType(highlighter)).get();
+ assertNoFailures(search);
+ assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0));
+ }
+
+ @Test
+ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException {
+ XContentBuilder mappings = jsonBuilder();
+ mappings.startObject();
+ mappings.startObject("type")
+ .startObject("_source")
+ .field("enabled", false)
+ .endObject()
+ .startObject("properties")
+ .startObject("unstored_field")
+ .field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets")
+ .field("type", "string")
+ .field("store", "no")
+ .endObject()
+ .startObject("text")
+ .field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets")
+ .field("type", "string")
+ .field("store", "yes")
+ .endObject()
+ .endObject()
+ .endObject();
+ mappings.endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("type", mappings));
+ ensureYellow();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject())
+ .get();
+ refresh();
+ String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
+ assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("unstored_text")).get();
+ assertNoFailures(search);
+ assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0));
+ }
+
+
+ @Test
+ // see #3486
+ public void testHighTermFrequencyDoc() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no")));
+ ensureYellow();
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < 6000; i++) {
+ builder.append("abc").append(" ");
+ }
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", builder.toString())
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, startsWith("<em>abc</em> <em>abc</em> <em>abc</em> <em>abc</em>"));
+ }
+
+ @Test
+ public void testNgramHighlightingWithBrokenPositions() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .startObject("properties")
+ .startObject("name")
+ .startObject("fields")
+ .startObject("autocomplete")
+ .field("type", "string")
+ .field("analyzer", "autocomplete")
+ .field("search_analyzer", "search_autocomplete")
+ .field("term_vector", "with_positions_offsets")
+ .endObject()
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .field("type", "multi_field")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("analysis.tokenizer.autocomplete.max_gram", 20)
+ .put("analysis.tokenizer.autocomplete.min_gram", 1)
+ .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit")
+ .put("analysis.tokenizer.autocomplete.type", "nGram")
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .putArray("analysis.filter.wordDelimiter.type_table",
+ "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM",
+ "? => ALPHANUM", ". => ALPHANUM", "- => ALPHANUM", "# => ALPHANUM", "% => ALPHANUM",
+ "+ => ALPHANUM", ", => ALPHANUM", "~ => ALPHANUM", ": => ALPHANUM", "/ => ALPHANUM",
+ "^ => ALPHANUM", "$ => ALPHANUM", "@ => ALPHANUM", ") => ALPHANUM", "( => ALPHANUM",
+ "] => ALPHANUM", "[ => ALPHANUM", "} => ALPHANUM", "{ => ALPHANUM")
+
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", false)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+
+ .put("analysis.analyzer.autocomplete.tokenizer", "autocomplete")
+ .putArray("analysis.analyzer.autocomplete.filter", "lowercase", "wordDelimiter")
+ .put("analysis.analyzer.search_autocomplete.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "ARCOTEL Hotels Deutschland").get();
+ refresh();
+ SearchResponse search = client().prepareSearch("test").setTypes("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)).addHighlightedField("name.autocomplete").execute().actionGet();
+ assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCO<em>TEL</em> Ho<em>tel</em>s <em>Deut</em>schland"));
+ }
+
+ @Test
+ public void testMultiPhraseCutoff() throws IOException {
+ /*
+ * MultiPhraseQuery can literally kill an entire node if there are too many terms in the
+ * query. We cut off and extract terms if there are more than 16 terms in the query
+ */
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "body", "type=string,analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
+ .setSettings(
+ settingsBuilder().put(indexSettings())
+ .put("analysis.filter.wordDelimiter.type", "word_delimiter")
+ .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
+ .put("analysis.filter.wordDelimiter.generate_word_parts", true)
+ .put("analysis.filter.wordDelimiter.generate_number_parts", true)
+ .put("analysis.filter.wordDelimiter.catenate_words", true)
+ .put("analysis.filter.wordDelimiter.catenate_numbers", true)
+ .put("analysis.filter.wordDelimiter.catenate_all", false)
+ .put("analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter"))
+ );
+
+ ensureGreen();
+ client().prepareIndex("test", "test", "1")
+ .setSource("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature")
+ .get();
+ refresh();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com ").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, startsWith("<em>Test: http://www.facebook.com</em>"));
+ search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
+ assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http://www.facebook.com</em> <em>http://elasticsearch.org</em> <em>http://xing.com</em> <em>http://cnn.com</em> http://quora.com"));
+ }
+
+ @Test
+ public void testNgramHighlightingPreLucene42() throws IOException {
+
+ assertAcked(prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
+ "name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.version", "4.1")
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.version", "4.1")
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .putArray("analysis.analyzer.name2_index_analyzer.filter", "lowercase", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name_search_analyzer.filter", "lowercase")));
+ ensureYellow();
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ client().prepareIndex("test", "test", "2")
+ .setSource("name", "avinci, unilog avinci, logicacmg, logica",
+ "name2", "avinci, unilog avinci, logicacmg, logica").get();
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica m"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
+ equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
+ assertHighlight(search, 1, "name", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
+ equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica ma"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+ assertHighlight(search, 1, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica"))).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+ assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica m"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
+ equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
+ assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
+ equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica ma"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+ assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+
+ search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica"))).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+ assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
+ equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
+ }
+
+ @Test
+ public void testNgramHighlighting() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("test",
+ "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
+ "name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("analysis.filter.my_ngram.max_gram", 20)
+ .put("analysis.filter.my_ngram.min_gram", 1)
+ .put("analysis.filter.my_ngram.type", "ngram")
+ .put("analysis.tokenizer.my_ngramt.max_gram", 20)
+ .put("analysis.tokenizer.my_ngramt.min_gram", 1)
+ .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
+ .put("analysis.tokenizer.my_ngramt.type", "ngram")
+ .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
+ .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
+ .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram")
+ .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")));
+ client().prepareIndex("test", "test", "1")
+ .setSource("name", "logicacmg ehemals avinci - the know how company",
+ "name2", "logicacmg ehemals avinci - the know how company").get();
+ refresh();
+ ensureGreen();
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name", "logica")).addHighlightedField("name").get();
+ assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"));
+
+ search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).addHighlightedField("name2").get();
+ assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> ehemals avinci - the know how company"));
+ }
+
+ @Test
+ public void testEnsureNoNegativeOffsets() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "no_long_term", "type=string,term_vector=with_positions_offsets",
+ "long_term", "type=string,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource("no_long_term", "This is a test where foo is highlighed and should be highlighted",
+ "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted")
+ .get();
+ refresh();
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed"))
+ .addHighlightedField("long_term", 18, 1)
+ .get();
+ assertHighlight(search, 0, "long_term", 0, 1, equalTo("<em>thisisaverylongwordandmakessurethisfails</em>"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 18, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+ assertNotHighlighted(search, 0, "no_long_term");
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
+ .addHighlightedField("no_long_term", 30, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
+ .get();
+
+ assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a <b>test</b> where <b>foo</b> is <b>highlighed</b> and"));
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ // we don't store title and don't use term vector, now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "no").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ // we don't store title, now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .field("title", "This is a test on the highlighting bug present in elasticsearch")
+ .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .execute().get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
+ assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ // we don't store title, now let's see if it works...
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").endObject()
+ .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("index_options", "offsets").endObject().endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource(XContentFactory.jsonBuilder().startObject()
+ .array("title", "This is a test on the highlighting bug present in elasticsearch. Hopefully it works.",
+ "This is the second bug to perform highlighting on.")
+ .startArray("attachments").startObject().field("body", "attachment for this test").endObject().startObject().field("body", "attachment 2").endObject().endArray()
+ .endObject());
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ //asking for the whole field to be highlighted
+ .addHighlightedField("title", -1, 0).get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch. Hopefully it works."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ //sentences will be generated out of each value
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch."));
+ assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
+ }
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("attachments.body", "attachment"))
+ .addHighlightedField("attachments.body", -1, 2)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> for this test"));
+ assertHighlight(search, i, "attachments.body", 1, 2, equalTo("<em>attachment</em> 2"));
+ }
+ }
+
+ @Test
+ public void testHighlightIssue1994() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=no", "titleTV", "type=string,store=no,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ indexRandom(false, client().prepareIndex("test", "type1", "1")
+ .setSource("title", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"},
+ "titleTV", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"}));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "2")
+ .setSource("titleTV", new String[]{"some text to highlight", "highlight other text"}));
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 2)
+ .addHighlightedField("titleTV", -1, 2).setHighlighterRequireFieldMatch(false)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "title", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+ assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The <em>bug</em> is bugging us"));
+
+ search = client().prepareSearch()
+ .setQuery(matchQuery("titleTV", "highlight"))
+ .addHighlightedField("titleTV", -1, 2)
+ .get();
+
+ assertHighlight(search, 0, "titleTV", 0, equalTo("some text to <em>highlight</em>"));
+ assertHighlight(search, 0, "titleTV", 1, 2, equalTo("<em>highlight</em> other text"));
+ }
+
+ @Test
+ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", new String[]{"this is a test", "this is the second test"},
+ "field2", new String[]{"this is another test", "yet another test"}).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1 and field2 produces different tags");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().order("score").preTags("<global>").postTags("</global>").fragmentSize(1).numOfFragments(1)
+ .field(new HighlightBuilder.Field("field1").numOfFragments(2))
+ .field(new HighlightBuilder.Field("field2").preTags("<field2>").postTags("</field2>").fragmentSize(50).requireFieldMatch(false)));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 2, equalTo(" <global>test</global>"));
+ assertHighlight(searchResponse, 0, "field1", 1, 2, equalTo(" <global>test</global>"));
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("this is another <field2>test</field2>"));
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/5175
+ public void testHighlightingOnWildcardFields() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "field-postings", "type=string,index_options=offsets",
+ "field-fvh", "type=string,term_vector=with_positions_offsets",
+ "field-plain", "type=string"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field-postings", "This is the first test sentence. Here is the second one.",
+ "field-fvh", "This is the test with term_vectors",
+ "field-plain", "This is the test for the plain highlighter").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field*");
+ SearchSourceBuilder source = searchSource()
+ //postings hl doesn't support require_field_match, its field needs to be queried directly
+ .query(termQuery("field-postings", "test"))
+ .highlight(highlight().field("field*").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field-postings", 0, 1, equalTo("This is the first <xxx>test</xxx> sentence."));
+ assertHighlight(searchResponse, 0, "field-fvh", 0, 1, equalTo("This is the <xxx>test</xxx> with term_vectors"));
+ assertHighlight(searchResponse, 0, "field-plain", 0, 1, equalTo("This is the <xxx>test</xxx> for the plain highlighter"));
+ }
+
+ @Test
+ public void testForceSourceWithSourceDisabled() throws Exception {
+
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("store", "yes").field("index_options", "offsets")
+ .field("term_vector", "with_positions_offsets").endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content").get();
+ refresh();
+
+ //works using stored field
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>"))
+ .get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ assertFailures(client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("plain").forceSource(true)),
+ RestStatus.BAD_REQUEST,
+ containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ assertFailures(client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("fvh").forceSource(true)),
+ RestStatus.BAD_REQUEST,
+ containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ assertFailures(client().prepareSearch("test")
+ .setQuery(termQuery("field1", "quick"))
+ .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("postings").forceSource(true)),
+ RestStatus.BAD_REQUEST,
+ containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field1"));
+ assertFailures(client().prepareSearch("test").setSource(searchSource.buildAsBytes()),
+ RestStatus.BAD_REQUEST,
+ containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
+
+ searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
+ .highlight(highlight().forceSource(true).field("field*"));
+ assertFailures(client().prepareSearch("test").setSource(searchSource.buildAsBytes()),
+ RestStatus.BAD_REQUEST,
+ matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source"));
+ }
+
+ @Test
+ public void testPlainHighlighter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(constantScoreQuery(prefixQuery("_all", "qui")))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all with constant score, highlighting on field2");
+ source = searchSource()
+ .query(boolQuery().should(constantScoreQuery(prefixQuery("_all", "qui"))))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testFastVectorHighlighter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("_all", "test"))
+ .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("_all", "quick"))
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+
+ logger.info("--> searching on _all, highlighting on field2");
+ source = searchSource()
+ .query(prefixQuery("_all", "qui"))
+ .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
+
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
+ }
+
+ /**
+ * The FHV can spend a long time highlighting degenerate documents if phraseLimit is not set.
+ */
+ @Test(timeout=120000)
+ public void testFVHManyMatches() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ // Index one megabyte of "t " over and over and over again
+ client().prepareIndex("test", "type1")
+ .setSource("field1", Joiner.on("").join(Iterables.limit(Iterables.cycle("t "), 1024*256))).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "t"))
+ .highlight(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, containsString("<xxx>t</xxx>"));
+ logger.info("--> done");
+ }
+
+
+ @Test
+ public void testMatchedFieldsFvhRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(true);
+ }
+
+ @Test
+ public void testMatchedFieldsFvhNoRequireFieldMatch() throws Exception {
+ checkMatchedFieldsCase(false);
+ }
+
+ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("foo")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("bar")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("bar")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("store", "yes")
+ .field("analyzer", "english")
+ .endObject()
+ .startObject("plain")
+ .field("type", "string")
+ .field("termVector", "with_positions_offsets")
+ .field("analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+
+ index("test", "type1", "1",
+ "foo", "running with scissors");
+ index("test", "type1", "2",
+ "foo", "cat cat junk junk junk junk junk junk junk cats junk junk",
+ "bar", "cat cat junk junk junk junk junk junk junk cats junk junk");
+ index("test", "type1", "3",
+ "foo", "weird",
+ "bar", "result");
+ refresh();
+
+ Field fooField = new Field("foo").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ Field barField = new Field("bar").numOfFragments(1).order("score").fragmentSize(25)
+ .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
+ SearchRequestBuilder req = client().prepareSearch("test").addHighlightedField(fooField);
+
+ // First check highlighting without any matched fields set
+ SearchResponse resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And that matching a subfield doesn't automatically highlight it
+ resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("running with <em>scissors</em>"));
+
+ // Add the subfield to the list of matched fields but don't match it. Everything should still work
+ // like before we added it.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now make half the matches come from the stored field and half from just a matched field.
+ resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now remove the stored field from the matched field list. That should work too.
+ fooField.matchedFields("foo.plain");
+ resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with scissors"));
+
+ // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field.
+ fooField.matchedFields("foo", "foo.plain");
+ resp = req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Now just all matches are against the matched field. This still returns highlighting.
+ resp = req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // And all matched field via the queryString's field parameter, just in case
+ resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // Finding the same string two ways is ok too
+ resp = req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+
+ // But we use the best found score when sorting fragments
+ resp = req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // which can also be written by searching on the subfield
+ resp = req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain^5")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled
+ QueryBuilder twoFieldsQuery = queryStringQuery("cats").field("foo").field("foo.plain^5")
+ .field("bar").field("bar.plain^5");
+ resp = req.setQuery(twoFieldsQuery).addHighlightedField(barField).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // And you can enable matchedField highlighting on both
+ barField.matchedFields("bar", "bar.plain");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("junk junk <em>cats</em> junk junk"));
+
+ // Setting a matchedField that isn't searched/doesn't exist is simply ignored.
+ barField.matchedFields("bar", "candy");
+ resp = req.get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
+
+ // If the stored field doesn't have a value it doesn't matter what you match, you get nothing.
+ barField.matchedFields("bar", "foo.plain");
+ resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // If the stored field is found but the matched field isn't then you don't get a result either.
+ fooField.matchedFields("bar.plain");
+ resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo")));
+
+ // But if you add the stored field to the list of matched fields then you'll get a result again
+ fooField.matchedFields("foo", "bar.plain");
+ resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
+ assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
+
+ // You _can_ highlight fields that aren't subfields of one another.
+ resp = req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
+ assertHighlight(resp, 0, "foo", 0, equalTo("<em>weird</em>"));
+ assertHighlight(resp, 0, "bar", 0, equalTo("<em>resul</em>t"));
+
+ assertFailures(req.setQuery(queryStringQuery("result").field("foo").field("foo.plain").field("bar").field("bar.plain")),
+ RestStatus.INTERNAL_SERVER_ERROR, containsString("String index out of range"));
+ }
+
+ @Test
+ @Slow
+ public void testFastVectorHighlighterManyDocs() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "test " + i);
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
+ assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
+ }
+
+ logger.info("--> searching explicitly _all and highlighting on _all");
+ searchResponse = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("_all", "test"))
+ .addHighlightedField("_all", 100, 0)
+ .get();
+ for (int i = 0; i < COUNT; i++) {
+ SearchHit hit = searchResponse.getHits().getHits()[i];
+ assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id() + " "));
+ }
+ }
+
+ public XContentBuilder type1TermVectorMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .startObject("field2").field("type", "string").field("termVector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testSameContent() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", -1, 0)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testFastVectorHighlighterOffsetParameter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets").get());
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "bug"))
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>bug</em> present in elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testEscapeHtml_vector() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 30, 1, 10)
+ .get();
+
+ for (int i = 0; i < 5; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>test</em> for *&amp;? elasticsearch"));
+ }
+ }
+
+ @Test
+ public void testMultiMapperVectorWithStore() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperVectorFromSource() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorWithStore() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testMultiMapperNoVectorFromSource() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ search = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title.key", 50, 1)
+ .get();
+
+ assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=no"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the enabling fast vector highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+ assertNoFailures(search);
+
+ assertFailures(client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter"),
+ RestStatus.BAD_REQUEST,
+ containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
+
+ //should not fail if there is a wildcard
+ assertNoFailures(client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "this is a test"))
+ .addHighlightedField("tit*", 50, 1, 10)
+ .setHighlighterType("fast-vector-highlighter").get());
+ }
+
+ @Test
+ // Regression test around SOLR-3724: with the classic analyzer the FVH
+ // produces no fragments for this phrase, so the test verifies that the
+ // plain highlighter can be forced instead — both globally via
+ // setHighlighterType and per-field via HighlightBuilder.Field.
+ public void testDisableFastVectorHighlighter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets,analyzer=classic"));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ // Because of SOLR-3724 nothing is highlighted when FVH is used
+ assertNotHighlighted(search, i, "title");
+ }
+
+ // Using plain highlighter instead of FVH
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField("title", 50, 1, 10)
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+
+ // Using plain highlighter instead of FVH on the field level
+ search = client().prepareSearch()
+ .setQuery(matchPhraseQuery("title", "test for the workaround"))
+ .addHighlightedField(new HighlightBuilder.Field("title").highlighterType("highlighter"))
+ .setHighlighterType("highlighter")
+ .get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
+ }
+ }
+
+ @Test
+ // FVH on a multi-valued field with fragmentSize=-1 and numOfFragments=0:
+ // expects one highlighted fragment per matching value, each value returned
+ // whole.
+ public void testFSHHighlightAllMvFragments() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "tags", "type=string,term_vector=with_positions_offsets"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("tags", new String[]{
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long and has the tag token near the end"}).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "tag"))
+ .addHighlightedField("tags", -1, 0).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really long <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long and has the <em>tag</em> token near the end"));
+ }
+
+ @Test
+ // Highlighting driven by a boosting query: only the positive clause's term
+ // ("brown") should be highlighted; the negative clause must not affect the
+ // fragment.
+ public void testBoostingQuery() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ // Same as testBoostingQuery but with a term-vector mapping, so the fast
+ // vector highlighter path is exercised; the expected fragment is identical.
+ public void testBoostingQueryTermVector() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ // Highlighting driven by a common-terms query: both query terms should be
+ // wrapped in the custom <x> tags.
+ public void testCommonTermsQuery() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ // Common-terms query highlighting with a term-vector mapping (FVH path);
+ // expects the same fragment as the non-term-vector variant above.
+ public void testCommonTermsTermVector() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
+ refresh();
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
+ .highlight(highlight().field("field2").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+ }
+
+ @Test
+ public void testPhrasePrefix() throws IOException {
+ Builder builder = settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "quick => fast");
+
+ assertAcked(prepareCreate("test").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping())
+ .addMapping("type2", "_all", "store=yes,termVector=with_positions_offsets",
+ "field4", "type=string,term_vector=with_positions_offsets,analyzer=synonym",
+ "field3", "type=string,analyzer=synonym"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "0")
+ .setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
+ refresh();
+ logger.info("--> highlighting and searching on field0");
+ SearchSourceBuilder source = searchSource()
+ .query(matchPhrasePrefixQuery("field0", "quick bro"))
+ .highlight(highlight().field("field0").order("score").preTags("<x>").postTags("</x>"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field1");
+ source = searchSource()
+ .query(matchPhrasePrefixQuery("field1", "quick bro"))
+ .highlight(highlight().field("field1").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, anyOf(equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"), equalTo("The <x>quick brown</x> fox jumps over the lazy dog")));
+ assertHighlight(searchResponse, 1, "field1", 0, 1, anyOf(equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"), equalTo("The <x>quick brown</x> fox jumps over the lazy dog")));
+
+ // with synonyms
+ client().prepareIndex("test", "type2", "0")
+ .setSource("field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog").get();
+ client().prepareIndex("test", "type2", "1")
+ .setSource("field4", "The quick browse button is a fancy thing, right bro?").get();
+ client().prepareIndex("test", "type2", "2")
+ .setSource("field4", "a quick fast blue car").get();
+ refresh();
+
+ source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field3", "fast bro"))
+ .highlight(highlight().field("field3").order("score").preTags("<x>").postTags("</x>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro"))
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf(equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"), equalTo("<x>The quick brown</x> fox jumps over the lazy dog")));
+ assertHighlight(searchResponse, 1, "field4", 0, 1, anyOf(equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"), equalTo("<x>The quick brown</x> fox jumps over the lazy dog")));
+
+ logger.info("--> highlighting and searching on field4");
+ source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca"))
+ .highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>a quick fast blue car</x>"));
+ }
+
+ @Test
+ // Plain highlighter fragmenter options: "simple" and "span" both produce
+ // the same fragments for this input; an unknown fragmenter name must fail
+ // with BAD_REQUEST.
+ public void testPlainHighlightDifferentFragmenter() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "tags", "type=string"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("tags",
+ "this is a really long tag i would like to highlight",
+ "here is another one that is very long tag and has the tag token near the end").endObject()).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
+ response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("span")).get();
+
+ assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
+ assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
+
+ assertFailures(client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
+ .addHighlightedField(new HighlightBuilder.Field("tags")
+ .fragmentSize(-1).numOfFragments(2).fragmenter("invalid")),
+ RestStatus.BAD_REQUEST,
+ containsString("unknown fragmenter option [invalid] for the field [tags]"));
+ }
+
+ @Test
+ // Plain highlighter with per-field pre/post tags: requireFieldMatch(false)
+ // lets field2 highlight even though the query targets field1. Existing
+ // <b> markup in the source text must pass through untouched.
+ public void testPlainHighlighterMultipleFields() {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ // Same scenario as testPlainHighlighterMultipleFields but with term-vector
+ // mappings so the FVH is used; expected fragments are identical.
+ public void testFastVectorHighlighterMultipleFields() {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets", "field2", "type=string,term_vector=with_positions_offsets"));
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .addHighlightedField(new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
+ assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
+ }
+
+ @Test
+ // Highlighting a stored field that is absent from the document must not
+ // fail; the hit simply carries no highlight fields.
+ public void testMissingStoredField() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "highlight_field", "type=string,store=yes"));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject()
+ .field("field", "highlight")
+ .endObject()).get();
+ refresh();
+
+ // This query used to fail when the field to highlight was absent
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField(new HighlightBuilder.Field("highlight_field")
+ .fragmentSize(-1).numOfFragments(1).fragmenter("simple")).get();
+ assertThat(response.getHits().hits()[0].highlightFields().isEmpty(), equalTo(true));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3211
+ // Requesting highlights on numeric fields must not raise errors even
+ // though numeric highlighting is unsupported; only the hit count is
+ // asserted.
+ public void testNumericHighlighting() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "text", "type=string,index=analyzed",
+ "byte", "type=byte", "short", "type=short", "int", "type=integer", "long", "type=long",
+ "float", "type=float", "double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "elasticsearch test",
+ "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text")
+ .addHighlightedField("byte")
+ .addHighlightedField("short")
+ .addHighlightedField("int")
+ .addHighlightedField("long")
+ .addHighlightedField("float")
+ .addHighlightedField("double")
+ .get();
+ // Highlighting of numeric fields is not supported, but it should not raise errors
+ // (this behavior is consistent with version 0.20)
+ assertHitCount(response, 1l);
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/3200
+ // Regression test: highlighting with a pattern analyzer must not trigger
+ // the "reset twice" failure in PatternAnalyzer's token stream.
+ public void testResetTwice() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("analysis.analyzer.my_analyzer.type", "pattern")
+ .put("analysis.analyzer.my_analyzer.pattern", "\\s+")
+ .build())
+ .addMapping("type", "text", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource("text", "elasticsearch test").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
+ .addHighlightedField("text").execute().actionGet();
+ // PatternAnalyzer will throw an exception if it is reset twice
+ assertHitCount(response, 1l);
+ }
+
+ @Test
+ // Verifies highlight_query precedence for all three highlighters
+ // (plain, fvh, postings): with no highlight query the search query terms
+ // are highlighted; once a highlight query is set — either on the field or
+ // on the whole highlight context — its terms are highlighted instead.
+ public void testHighlightUsesHighlightQuery() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "Testing the highlight query feature");
+ refresh();
+
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text");
+
+ SearchRequestBuilder search = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing"))
+ .addHighlightedField(field);
+ Matcher<String> searchQueryMatcher = equalTo("<em>Testing</em> the highlight query feature");
+
+ field.highlighterType("plain");
+ SearchResponse response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, searchQueryMatcher);
+
+
+ Matcher<String> hlQueryMatcher = equalTo("Testing the highlight <em>query</em> feature");
+ field.highlightQuery(matchQuery("text", "query"));
+
+ field.highlighterType("fvh");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ // Make sure the highlightQuery is taken into account when it is set on the highlight context instead of the field
+ search.setHighlighterQuery(matchQuery("text", "query"));
+ field.highlighterType("fvh").highlightQuery(null);
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("plain");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+
+ field.highlighterType("postings");
+ response = search.get();
+ assertHighlight(response, 0, "text", 0, hlQueryMatcher);
+ }
+
+ // Randomly includes an explicit "store=yes," fragment in a mapping string
+ // so tests cover both stored and non-stored field configurations. The
+ // trailing comma lets callers concatenate further mapping options directly.
+ private static String randomStoreField() {
+ return randomBoolean() ? "store=yes," : "";
+ }
+
+ @Test
+ // Exhaustively exercises the noMatchSize option across the plain, fvh and
+ // postings highlighters on a query-less search: unset or 0 yields no
+ // fragment; positive values yield a leading snippet (postings always
+ // returns the whole first sentence); values >= the text length return the
+ // full text; negative values are tolerated and yield nothing; the option
+ // can also be set globally on the request.
+ public void testHighlightNoMatchSize() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text = "I am pretty long so some of me should get cut off. Second sentence";
+ index("test", "type1", "1", "text", text);
+ refresh();
+
+ // When you don't set noMatchSize you don't get any results if there isn't anything to highlight.
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain");
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is set to 0 you also shouldn't get any
+ field.highlighterType("plain").noMatchSize(0);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // When noMatchSize is between 0 and the size of the string
+ field.highlighterType("plain").noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ // The FVH also works but the fragment is longer than the plain highlighter because of boundary_max_scan
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment longer than the input string and get the whole string
+ field.highlighterType("plain").noMatchSize(text.length() * 2);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ //no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We can also ask for a fragment exactly the size of the input field and get the whole field
+ field.highlighterType("plain").noMatchSize(text.length());
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo(text));
+
+ //no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // You can set noMatchSize globally in the highlighter as well
+ field.highlighterType("plain").noMatchSize(null);
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // We don't break if noMatchSize is less than zero though
+ field.highlighterType("plain").noMatchSize(randomIntBetween(Integer.MIN_VALUE, -1));
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ // noMatchSize on multi-valued fields, for all three highlighters (plain,
+ // fvh, postings): the fragment must come from the FIRST value; an empty
+ // first value, an empty array, a missing field, and an unmapped field must
+ // all yield no highlight.
+ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "I am pretty long so some of me should get cut off. We'll see how that goes.";
+ String text2 = "I am short";
+ index("test", "type1", "1", "text", new String[] {text1, text2});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(21)
+ .numOfFragments(1)
+ .highlighterType("plain")
+ .noMatchSize(21);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
+
+ // And noMatchSize returns nothing when the first entry is empty string!
+ index("test", "type1", "2", "text", new String[] {"", text2});
+ refresh();
+
+ IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // But if the field was actually empty then you should get no highlighting field
+ index("test", "type1", "3", "text", new String[] {});
+ refresh();
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // Same for if the field doesn't even exist on the document
+ index("test", "type1", "4");
+ refresh();
+
+ idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // Fixed copy-paste bug: this branch previously set "fvh" a second time
+ // (leaving the postings highlighter untested for the missing-field case)
+ // and asserted against the nonexistent field name "postings" instead of
+ // "text".
+ field.highlighterType("postings");
+ response = client().prepareSearch("test")
+ .setQuery(idsQueryBuilder)
+ .addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ // Again same if the field isn't mapped
+ field = new HighlightBuilder.Field("unmapped")
+ .highlighterType("plain")
+ .noMatchSize(21);
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertNotHighlighted(response, 0, "text");
+ }
+
+ @Test
+ // noMatchSize with numOfFragments=0 on a multi-valued field: with no query
+ // each highlighter returns a snippet of the first value only; with a query,
+ // only the matching values are returned, each as a whole fragment.
+ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
+ ensureGreen();
+
+ String text1 = "This is the first sentence. This is the second sentence." + HighlightUtils.PARAGRAPH_SEPARATOR;
+ String text2 = "This is the third sentence. This is the fourth sentence.";
+ String text3 = "This is the fifth sentence";
+ index("test", "type1", "1", "text", new String[] {text1, text2, text3});
+ refresh();
+
+ // The no match fragment should come from the first value of a multi-valued field
+ HighlightBuilder.Field field = new HighlightBuilder.Field("text")
+ .fragmentSize(1)
+ .numOfFragments(0)
+ .highlighterType("plain")
+ .noMatchSize(20);
+ SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence"));
+
+ // Postings hl also works but the fragment is the whole first sentence (size ignored)
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence."));
+
+ //if there's a match we only return the values with matches (whole value as number_of_fragments == 0)
+ MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth");
+ field.highlighterType("plain");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("fvh");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+
+ field.highlighterType("postings");
+ response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
+ assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
+ assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
+ }
+
+ @Test
+ // Basic postings-highlighter coverage (index_options=offsets mapping):
+ // term query, score-ordered multi-match, phrase query (which highlights
+ // each term individually), and falling back to the plain highlighter for
+ // an _all-field phrase query with requireFieldMatch(false).
+ public void testPostingsHighlighter() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on field1, highlighting on field1");
+ source = searchSource()
+ .query(termQuery("field1", "test"))
+ .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
+
+ logger.info("--> searching on field2, highlighting on field2");
+ source = searchSource()
+ .query(termQuery("field2", "quick"))
+ .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ logger.info("--> searching on field2, highlighting on field2");
+ source = searchSource()
+ .query(matchPhraseQuery("field2", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>"));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ //phrase query results in highlighting all different terms regardless of their positions
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog"));
+
+ //lets fall back to the standard highlighter then, what people would do to highlight query matches
+ logger.info("--> searching on field2, highlighting on field2, falling back to the plain highlighter");
+ source = searchSource()
+ .query(matchPhraseQuery("_all", "quick brown"))
+ .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>").highlighterType("highlighter").requireFieldMatch(false));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog"));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultipleFields() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()).get());
+ ensureGreen();
+
+ index("test", "type1", "1", "field1", "The <b>quick<b> brown fox. Second sentence.", "field2", "The <b>slow<b> brown fox. Second sentence.");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(QueryBuilders.matchQuery("field1", "fox"))
+ .addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
+ .get();
+ assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>."));
+ }
+
+ @Test
+ public void testPostingsHighlighterNumberOfFragments() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1")
+ .setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
+ "field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(5).preTags("<field1>").postTags("</field1>")));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
+ assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+
+ client().prepareIndex("test", "type1", "2")
+ .setSource("field1", new String[]{"The quick brown fox jumps over the lazy dog. Second sentence not finished", "The lazy red fox jumps over the quick dog.", "The quick brown dog jumps over the lazy fox."}).get();
+ refresh();
+
+ source = searchSource()
+ .query(termQuery("field1", "fox"))
+ .highlight(highlight()
+ .field(new HighlightBuilder.Field("field1").numOfFragments(0).preTags("<field1>").postTags("</field1>")));
+
+ searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit searchHit : searchResponse.getHits()) {
+ if ("1".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. The lazy red <field1>fox</field1> jumps over the quick dog. The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else if ("2".equals(searchHit.id())) {
+ assertHighlight(searchHit, "field1", 0, 3, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. Second sentence not finished"));
+ assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
+ assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
+ } else {
+ fail("Only hits with id 1 and 2 are returned");
+ }
+ }
+ }
+
+ @Test
+ public void testMultiMatchQueryHighlight() throws IOException {
+ String[] highlighterTypes = new String[] {"fvh", "plain", "postings"};
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("index_options", "offsets").endObject()
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("index_options", "offsets").field("term_vector", "with_positions_offsets").endObject()
+ .startObject("field2").field("type", "string").field("index_options", "offsets").field("term_vector", "with_positions_offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+ client().prepareIndex("test", "type1")
+ .setSource("field1", "The quick brown fox jumps over",
+ "field2", "The quick brown fox jumps over").get();
+ refresh();
+ final int iters = scaledRandomIntBetween(20, 30);
+ for (int i = 0; i < iters; i++) {
+ String highlighterType = rarely() ? null : RandomPicks.randomFrom(getRandom(), highlighterTypes);
+ MultiMatchQueryBuilder.Type[] supportedQueryTypes;
+ if ("postings".equals(highlighterType)) {
+ //phrase_prefix is not supported by postings highlighter, as it rewrites against an empty reader, the prefix will never match any term
+ supportedQueryTypes = new MultiMatchQueryBuilder.Type[]{MultiMatchQueryBuilder.Type.BEST_FIELDS, MultiMatchQueryBuilder.Type.CROSS_FIELDS, MultiMatchQueryBuilder.Type.MOST_FIELDS, MultiMatchQueryBuilder.Type.PHRASE};
+ } else {
+ supportedQueryTypes = MultiMatchQueryBuilder.Type.values();
+ }
+ MultiMatchQueryBuilder.Type matchQueryType = rarely() ? null : RandomPicks.randomFrom(getRandom(), supportedQueryTypes);
+ final MultiMatchQueryBuilder multiMatchQueryBuilder = multiMatchQuery("the quick brown fox", "field1", "field2").type(matchQueryType);
+
+ SearchSourceBuilder source = searchSource()
+ .query(multiMatchQueryBuilder)
+ .highlight(highlight().highlightQuery(randomBoolean() ? multiMatchQueryBuilder : null).highlighterType(highlighterType)
+ .field(new Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>")));
+ logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]");
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1l);
+ assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("<field1>The quick brown fox</field1> jumps over"),
+ equalTo("<field1>The</field1> <field1>quick</field1> <field1>brown</field1> <field1>fox</field1> jumps over")));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterOrderByScore() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1")
+ .setSource("field1", new String[]{"This sentence contains one match, not that short. This sentence contains two sentence matches. This one contains no matches.",
+ "This is the second value's first sentence. This one contains no matches. This sentence contains three sentence occurrences (sentence).",
+ "One sentence match here and scored lower since the text is quite long, not that appealing. This one contains no matches."}).get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(termQuery("field1", "sentence"))
+ .highlight(highlight().field("field1").order("score"));
+
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ Map<String,HighlightField> highlightFieldMap = searchResponse.getHits().getAt(0).highlightFields();
+ assertThat(highlightFieldMap.size(), equalTo(1));
+ HighlightField field1 = highlightFieldMap.get("field1");
+ assertThat(field1.fragments().length, equalTo(5));
+ assertThat(field1.fragments()[0].string(), equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
+ assertThat(field1.fragments()[1].string(), equalTo("This <em>sentence</em> contains two <em>sentence</em> matches."));
+ assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first <em>sentence</em>."));
+ assertThat(field1.fragments()[3].string(), equalTo("This <em>sentence</em> contains one match, not that short."));
+ assertThat(field1.fragments()[4].string(), equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing."));
+ }
+
+ @Test
+ public void testPostingsHighlighterEscapeHtml() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "title", "type=string," + randomStoreField() + "index_options=offsets"));
+ ensureYellow();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < 5; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "test"))
+ .setHighlighterEncoder("html")
+ .addHighlightedField("title").get();
+
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ assertHighlight(searchResponse, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&amp;?"));
+ }
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperWithStore() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test . Second sentence.").get();
+ refresh();
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ //lets make sure we analyze the query and we highlight the resulting terms
+ .setQuery(matchQuery("title", "This is a Test"))
+ .addHighlightedField("title").get();
+
+ assertHitCount(searchResponse, 1l);
+ SearchHit hit = searchResponse.getHits().getAt(0);
+ //stopwords are not highlighted since not indexed
+ assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> ."));
+
+ // search on title.key and highlight on title
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+ assertHitCount(searchResponse, 1l);
+
+ //stopwords are now highlighted since we used only whitespace analyzer here
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em> ."));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiMapperFromSource() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "multi_field").startObject("fields")
+ .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "classic").endObject()
+ .startObject("key").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
+ refresh();
+
+ // simple search on body with standard analyzer with a simple field query
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+
+ assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
+
+ // search on title.key and highlight on title.key
+ searchResponse = client().prepareSearch()
+ .setQuery(matchQuery("title.key", "this is a test"))
+ .addHighlightedField("title.key").get();
+
+ assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("title").field("type", "string").field("store", "yes").field("index_options", "docs").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
+ for (int i = 0; i < indexRequestBuilders.length; i++) {
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
+ .setSource("title", "This is a test for the postings highlighter");
+ }
+ indexRandom(true, indexRequestBuilders);
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .get();
+ assertNoFailures(search);
+
+ assertFailures(client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings-highlighter"),
+ RestStatus.BAD_REQUEST,
+ containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+
+
+ assertFailures(client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("title")
+ .setHighlighterType("postings"),
+ RestStatus.BAD_REQUEST,
+ containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
+
+ //should not fail if there is a wildcard
+ assertNoFailures(client().prepareSearch()
+ .setQuery(matchQuery("title", "this is a test"))
+ .addHighlightedField("tit*")
+ .setHighlighterType("postings").get());
+ }
+
+ @Test
+ public void testPostingsHighlighterBoostingQuery() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.")
+ .get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource()
+ .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterCommonTermsQuery() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
+ .highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
+ SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog!"));
+ }
+
+ private static XContentBuilder type1PostingsffsetsMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("field1").field("type", "string").field("index_options", "offsets").endObject()
+ .startObject("field2").field("type", "string").field("index_options", "offsets").endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testPostingsHighlighterPrefixQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+
+ SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+
+ }
+
+ @Test
+ public void testPostingsHighlighterFuzzyQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterWildcardQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+
+ source = searchSource().query(wildcardQuery("field2", "qu*k"))
+ .highlight(highlight().field("field2"));
+ searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHitCount(searchResponse, 1l);
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterTermRangeQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "aaab").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("<em>aaab</em>"));
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryString() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
+ refresh();
+ logger.info("--> highlighting and searching on field2");
+ SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2"))
+ .highlight(highlight().field("field2"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
+ }
+
+ @Test
+ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+
+ @Test
+ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(boolQuery()
+ .should(constantScoreQuery(QueryBuilders.missingQuery("field1")))
+ .should(matchQuery("field1", "test"))
+ .should(filteredQuery(queryStringQuery("field1:photo*"), null)))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+
+ @Test
+ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(boolQuery().must(prefixQuery("field1", "photo")).should(matchQuery("field1", "test").minimumShouldMatch("0")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+
+ @Test
+ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception {
+
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
+ refresh();
+
+ logger.info("--> highlighting and searching on field1");
+ SearchSourceBuilder source = searchSource().query(filteredQuery(queryStringQuery("field1:photo*"), missingQuery("field_null")))
+ .highlight(highlight().field("field1"));
+ SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
+ assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
+ }
+
+ @Test
+ @Slow
+ public void testPostingsHighlighterManyDocs() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ ensureGreen();
+
+ int COUNT = between(20, 100);
+ Map<String, String> prefixes = new HashMap<>(COUNT);
+
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
+ for (int i = 0; i < COUNT; i++) {
+ //generating text with word to highlight in a different position
+ //(https://github.com/elasticsearch/elasticsearch/issues/4103)
+ String prefix = randomAsciiOfLengthBetween(5, 30);
+ prefixes.put(String.valueOf(i), prefix);
+ indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix
+ + " test. Sentence two.");
+ }
+ logger.info("--> indexing docs");
+ indexRandom(true, indexRequestBuilders);
+
+ logger.info("--> searching explicitly on field1 and highlighting on it");
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch()
+ .setSize(COUNT)
+ .setQuery(termQuery("field1", "test"))
+ .addHighlightedField("field1");
+ SearchResponse searchResponse =
+ searchRequestBuilder.get();
+ assertHitCount(searchResponse, (long)COUNT);
+ assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
+ for (SearchHit hit : searchResponse.getHits()) {
+ String prefix = prefixes.get(hit.id());
+ assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
+ }
+ }
+
+ @Test
+ public void testFastVectorHighlighterPhraseBoost() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
+ phraseBoostTestCase("fvh");
+ }
+
+ @Test
+ public void testPostingsHighlighterPhraseBoost() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
+ phraseBoostTestCase("postings");
+ }
+
+ /**
+ * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter
+ * because it doesn't support the concept of terms having a different weight based on position.
+ * @param highlighterType highlighter to test
+ */
+ private void phraseBoostTestCase(String highlighterType) {
+ ensureGreen();
+ StringBuilder text = new StringBuilder();
+ text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n");
+ for (int i = 0; i<10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ text.append("highlight words together\n");
+ for (int i = 0; i<10; i++) {
+ text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
+ }
+ index("test", "type1", "1", "field1", text.toString());
+ refresh();
+
+ // Match queries
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ matchQuery("field1", "highlight words together"),
+ matchPhraseQuery("field1", "highlight words together"));
+
+ // Query string with a single field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryStringQuery("highlight words together").field("field1"),
+ queryStringQuery("\"highlight words together\"").field("field1").autoGeneratePhraseQueries(true));
+
+ // Query string with a single field without dismax
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryStringQuery("highlight words together").field("field1").useDisMax(false),
+ queryStringQuery("\"highlight words together\"").field("field1").useDisMax(false).autoGeneratePhraseQueries(true));
+
+ // Query string with more than one field
+ phraseBoostTestCaseForClauses(highlighterType, 100f,
+ queryStringQuery("highlight words together").field("field1").field("field2"),
+ queryStringQuery("\"highlight words together\"").field("field1").field("field2").autoGeneratePhraseQueries(true));
+
+ // Query string boosting the field
+ phraseBoostTestCaseForClauses(highlighterType, 1f,
+ queryStringQuery("highlight words together").field("field1"),
+ queryStringQuery("\"highlight words together\"").field("field1^100").autoGeneratePhraseQueries(true));
+ }
+
+ private <P extends QueryBuilder & BoostableQueryBuilder<?>> void
+ phraseBoostTestCaseForClauses(String highlighterType, float boost, QueryBuilder terms, P phrase) {
+ Matcher<String> highlightedMatcher = Matchers.either(containsString("<em>highlight words together</em>")).or(
+ containsString("<em>highlight</em> <em>words</em> <em>together</em>"));
+ SearchRequestBuilder search = client().prepareSearch("test").setHighlighterRequireFieldMatch(true)
+ .setHighlighterOrder("score").setHighlighterType(highlighterType)
+ .addHighlightedField("field1", 100, 1);
+
+ // Try with a bool query
+ phrase.boost(boost);
+ SearchResponse response = search.setQuery(boolQuery().must(terms).should(phrase)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ phrase.boost(1);
+ // Try with a boosting query
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(boost).negativeBoost(1)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ // Try with a boosting query using a negative boost
+ response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(1).negativeBoost(1/boost)).get();
+ assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java b/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
new file mode 100644
index 0000000000..9b121c7947
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/indicesboost/SimpleIndicesBoostSearchTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.indicesboost;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.client.Requests.indexRequest;
+import static org.elasticsearch.client.Requests.searchRequest;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+public class SimpleIndicesBoostSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testIndicesBoost() throws Exception {
+ assertHitCount(client().prepareSearch().setQuery(termQuery("test", "value")).get(), 0);
+
+ try {
+ client().prepareSearch("test").setQuery(termQuery("test", "value")).execute().actionGet();
+ fail("should fail");
+ } catch (Exception e) {
+ // ignore, no indices
+ }
+
+ createIndex("test1", "test2");
+ ensureGreen();
+ client().index(indexRequest("test1").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value check").endObject())).actionGet();
+ client().index(indexRequest("test2").type("type1").id("1")
+ .source(jsonBuilder().startObject().field("test", "value beck").endObject())).actionGet();
+ refresh();
+
+ float indexBoost = 1.1f;
+
+ logger.info("--- QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ SearchResponse response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+
+ logger.info("--- DFS_QUERY_THEN_FETCH");
+
+ logger.info("Query with test1 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test1", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test1"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test2"));
+
+ logger.info("Query with test2 boosted");
+ response = client().search(searchRequest()
+ .searchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .source(searchSource().explain(true).indexBoost("test2", indexBoost).query(termQuery("test", "value")))
+ ).actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ logger.info("Hit[0] {} Explanation {}", response.getHits().getAt(0).index(), response.getHits().getAt(0).explanation());
+ logger.info("Hit[1] {} Explanation {}", response.getHits().getAt(1).index(), response.getHits().getAt(1).explanation());
+ assertThat(response.getHits().getAt(0).index(), equalTo("test2"));
+ assertThat(response.getHits().getAt(1).index(), equalTo("test1"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java
new file mode 100644
index 0000000000..8d64ef2660
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java
@@ -0,0 +1,1217 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.innerhits;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.support.QueryInnerHitBuilder;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class InnerHitsTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleNested() throws Exception {
+ assertAcked(prepareCreate("articles").addMapping("article", jsonBuilder().startObject().startObject("article").startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("title")
+ .field("type", "string")
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startArray("comments")
+ .startObject().field("message", "fox eat quick").endObject()
+ .startObject().field("message", "fox ate rabbit x y z").endObject()
+ .startObject().field("message", "rabbit got away").endObject()
+ .endArray()
+ .endObject()));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource(jsonBuilder().startObject()
+ .field("title", "big gray elephant")
+ .startArray("comments")
+ .startObject().field("message", "elephant captured").endObject()
+ .startObject().field("message", "mice squashed by elephant x").endObject()
+ .startObject().field("message", "elephant scared by mice x y").endObject()
+ .endArray()
+ .endObject()));
+ indexRandom(true, requests);
+
+        // Inner hits can be defined in two ways: 1) with the query 2) as separate inner_hit definition
+ SearchRequest[] searchRequests = new SearchRequest[]{
+ client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder().setName("comment"))).request(),
+ client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setPath("comments").setQuery(matchQuery("comments.message", "fox"))).request()
+ };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(2l));
+ assertThat(innerHits.getHits().length, equalTo(2));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(1).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
+ }
+
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setPath("comments").setQuery(matchQuery("comments.message", "elephant"))).request(),
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant")).innerHit(new QueryInnerHitBuilder().setName("comment"))).request(),
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant")).innerHit(new QueryInnerHitBuilder().setName("comment").addSort("_doc", SortOrder.DESC))).request()
+ };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(3l));
+ assertThat(innerHits.getHits().length, equalTo(3));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(1).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(innerHits.getAt(2).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2));
+ }
+
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")))
+ .addInnerHit("comments", new InnerHitsBuilder.InnerHit().setPath("comments")
+ .setQuery(matchQuery("comments.message", "fox"))
+ .addHighlightedField("comments.message")
+ .setExplain(true)
+ .addFieldDataField("comments.message")
+ .addScriptField("script", new Script("doc['comments.message'].value"))
+ .setSize(1)).request(),
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder()
+ .addHighlightedField("comments.message")
+ .setExplain(true)
+ .addFieldDataField("comments.message")
+ .addScriptField("script", new Script("doc['comments.message'].value"))
+ .setSize(1))).request()
+ };
+
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
+ assertThat(innerHits.getTotalHits(), equalTo(2l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
+ assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in"));
+ assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat"));
+ assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
+ }
+ }
+
+ @Test
+ public void testRandomNested() throws Exception {
+ assertAcked(prepareCreate("idx").addMapping("type", "field1", "type=nested", "field2", "type=nested"));
+ int numDocs = scaledRandomIntBetween(25, 100);
+ List<IndexRequestBuilder> requestBuilders = new ArrayList<>();
+
+ int[] field1InnerObjects = new int[numDocs];
+ int[] field2InnerObjects = new int[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ int numInnerObjects = field1InnerObjects[i] = scaledRandomIntBetween(1, numDocs);
+ XContentBuilder source = jsonBuilder().startObject().startArray("field1");
+ for (int j = 0; j < numInnerObjects; j++) {
+ source.startObject().field("x", "y").endObject();
+ }
+ numInnerObjects = field2InnerObjects[i] = scaledRandomIntBetween(1, numDocs);
+ source.endArray().startArray("field2");
+ for (int j = 0; j < numInnerObjects; j++) {
+ source.startObject().field("x", "y").endObject();
+ }
+ source.endArray().endObject();
+ requestBuilders.add(client().prepareIndex("idx", "type", String.format(Locale.ENGLISH, "%03d", i)).setSource(source));
+ }
+ indexRandom(true, requestBuilders);
+
+ int size = randomIntBetween(0, numDocs);
+ SearchResponse searchResponse;
+ if (randomBoolean()) {
+ searchResponse = client().prepareSearch("idx")
+ .setSize(numDocs)
+ .addSort("_uid", SortOrder.ASC)
+ .addInnerHit("a", new InnerHitsBuilder.InnerHit().setPath("field1").addSort("_doc", SortOrder.DESC).setSize(size)) // Sort order is DESC, because we reverse the inner objects during indexing!
+ .addInnerHit("b", new InnerHitsBuilder.InnerHit().setPath("field2").addSort("_doc", SortOrder.DESC).setSize(size))
+ .get();
+ } else {
+ BoolQueryBuilder boolQuery = new BoolQueryBuilder();
+ if (randomBoolean()) {
+ boolQuery.should(nestedQuery("field1", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("a").addSort("_doc", SortOrder.DESC).setSize(size)));
+ boolQuery.should(nestedQuery("field2", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("b").addSort("_doc", SortOrder.DESC).setSize(size)));
+ } else {
+ boolQuery.should(constantScoreQuery(nestedQuery("field1", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("a").addSort("_doc", SortOrder.DESC).setSize(size))));
+ boolQuery.should(constantScoreQuery(nestedQuery("field2", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("b").addSort("_doc", SortOrder.DESC).setSize(size))));
+ }
+ searchResponse = client().prepareSearch("idx")
+ .setQuery(boolQuery)
+ .setSize(numDocs)
+ .addSort("_uid", SortOrder.ASC)
+ .get();
+ }
+
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, numDocs);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(numDocs));
+ for (int i = 0; i < numDocs; i++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(i);
+ SearchHits inner = searchHit.getInnerHits().get("a");
+ assertThat(inner.totalHits(), equalTo((long) field1InnerObjects[i]));
+ for (int j = 0; j < field1InnerObjects[i] && j < size; j++) {
+ SearchHit innerHit = inner.getAt(j);
+ assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field1"));
+ assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
+ assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
+ }
+
+ inner = searchHit.getInnerHits().get("b");
+ assertThat(inner.totalHits(), equalTo((long) field2InnerObjects[i]));
+ for (int j = 0; j < field2InnerObjects[i] && j < size; j++) {
+ SearchHit innerHit = inner.getAt(j);
+ assertThat(innerHit.getNestedIdentity().getField().string(), equalTo("field2"));
+ assertThat(innerHit.getNestedIdentity().getOffset(), equalTo(j));
+ assertThat(innerHit.getNestedIdentity().getChild(), nullValue());
+ }
+ }
+ }
+
+ @Test
+ public void testSimpleParentChild() throws Exception {
+ assertAcked(prepareCreate("articles")
+ .addMapping("article", "title", "type=string")
+ .addMapping("comment", "_parent", "type=article", "message", "type=string")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource("title", "quick brown fox"));
+ requests.add(client().prepareIndex("articles", "comment", "1").setParent("1").setSource("message", "fox eat quick"));
+ requests.add(client().prepareIndex("articles", "comment", "2").setParent("1").setSource("message", "fox ate rabbit x y z"));
+ requests.add(client().prepareIndex("articles", "comment", "3").setParent("1").setSource("message", "rabbit got away"));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource("title", "big gray elephant"));
+ requests.add(client().prepareIndex("articles", "comment", "4").setParent("2").setSource("message", "elephant captured"));
+ requests.add(client().prepareIndex("articles", "comment", "5").setParent("2").setSource("message", "mice squashed by elephant x"));
+ requests.add(client().prepareIndex("articles", "comment", "6").setParent("2").setSource("message", "elephant scared by mice x y"));
+ indexRandom(true, requests);
+
+ SearchRequest[] searchRequests = new SearchRequest[]{
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "fox")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setType("comment").setQuery(matchQuery("message", "fox")))
+ .request(),
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "fox")).innerHit(new QueryInnerHitBuilder().setName("comment")))
+ .request()
+ };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(2l));
+
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(1).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(1).type(), equalTo("comment"));
+ }
+
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "elephant")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setType("comment").setQuery(matchQuery("message", "elephant")))
+ .request(),
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "elephant")).innerHit(new QueryInnerHitBuilder()))
+ .request()
+ };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(3l));
+
+ assertThat(innerHits.getAt(0).getId(), equalTo("4"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(1).getId(), equalTo("5"));
+ assertThat(innerHits.getAt(1).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(2).getId(), equalTo("6"));
+ assertThat(innerHits.getAt(2).type(), equalTo("comment"));
+ }
+
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "fox")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setType("comment")
+ .setQuery(matchQuery("message", "fox"))
+ .addHighlightedField("message")
+ .setExplain(true)
+ .addFieldDataField("message")
+ .addScriptField("script", new Script("doc['message'].value"))
+ .setSize(1)
+ ).request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ hasChildQuery("comment", matchQuery("message", "fox")).innerHit(
+ new QueryInnerHitBuilder().addHighlightedField("message").setExplain(true)
+ .addFieldDataField("message").addScriptField("script", new Script("doc['message'].value"))
+ .setSize(1))).request() };
+
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
+ assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox"));
+ assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat"));
+ assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
+ }
+ }
+
+ @Test
+ public void testRandomParentChild() throws Exception {
+ assertAcked(prepareCreate("idx")
+ .addMapping("parent")
+ .addMapping("child1", "_parent", "type=parent")
+ .addMapping("child2", "_parent", "type=parent")
+ );
+ int numDocs = scaledRandomIntBetween(5, 50);
+ List<IndexRequestBuilder> requestBuilders = new ArrayList<>();
+
+ int child1 = 0;
+ int child2 = 0;
+ int[] child1InnerObjects = new int[numDocs];
+ int[] child2InnerObjects = new int[numDocs];
+ for (int parent = 0; parent < numDocs; parent++) {
+ String parentId = String.format(Locale.ENGLISH, "%03d", parent);
+ requestBuilders.add(client().prepareIndex("idx", "parent", parentId).setSource("{}"));
+
+ int numChildDocs = child1InnerObjects[parent] = scaledRandomIntBetween(1, numDocs);
+ int limit = child1 + numChildDocs;
+ for (; child1 < limit; child1++) {
+ requestBuilders.add(client().prepareIndex("idx", "child1", String.format(Locale.ENGLISH, "%04d", child1)).setParent(parentId).setSource("{}"));
+ }
+ numChildDocs = child2InnerObjects[parent] = scaledRandomIntBetween(1, numDocs);
+ limit = child2 + numChildDocs;
+ for (; child2 < limit; child2++) {
+ requestBuilders.add(client().prepareIndex("idx", "child2", String.format(Locale.ENGLISH, "%04d", child2)).setParent(parentId).setSource("{}"));
+ }
+ }
+ indexRandom(true, requestBuilders);
+
+ int size = randomIntBetween(0, numDocs);
+ SearchResponse searchResponse;
+ if (randomBoolean()) {
+ searchResponse = client().prepareSearch("idx")
+ .setSize(numDocs)
+ .setTypes("parent")
+ .addSort("_uid", SortOrder.ASC)
+ .addInnerHit("a", new InnerHitsBuilder.InnerHit().setType("child1").addSort("_uid", SortOrder.ASC).setSize(size))
+ .addInnerHit("b", new InnerHitsBuilder.InnerHit().setType("child2").addSort("_uid", SortOrder.ASC).setSize(size))
+ .get();
+ } else {
+ BoolQueryBuilder boolQuery = new BoolQueryBuilder();
+ if (randomBoolean()) {
+ boolQuery.should(hasChildQuery("child1", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("a").addSort("_uid", SortOrder.ASC).setSize(size)));
+ boolQuery.should(hasChildQuery("child2", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("b").addSort("_uid", SortOrder.ASC).setSize(size)));
+ } else {
+ boolQuery.should(constantScoreQuery(hasChildQuery("child1", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("a").addSort("_uid", SortOrder.ASC).setSize(size))));
+ boolQuery.should(constantScoreQuery(hasChildQuery("child2", matchAllQuery()).innerHit(new QueryInnerHitBuilder().setName("b").addSort("_uid", SortOrder.ASC).setSize(size))));
+ }
+ searchResponse = client().prepareSearch("idx")
+ .setSize(numDocs)
+ .setTypes("parent")
+ .addSort("_uid", SortOrder.ASC)
+ .setQuery(boolQuery)
+ .get();
+ }
+
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, numDocs);
+ assertThat(searchResponse.getHits().getHits().length, equalTo(numDocs));
+
+ int offset1 = 0;
+ int offset2 = 0;
+ for (int parent = 0; parent < numDocs; parent++) {
+ SearchHit searchHit = searchResponse.getHits().getAt(parent);
+ assertThat(searchHit.getType(), equalTo("parent"));
+ assertThat(searchHit.getId(), equalTo(String.format(Locale.ENGLISH, "%03d", parent)));
+
+ SearchHits inner = searchHit.getInnerHits().get("a");
+ assertThat(inner.totalHits(), equalTo((long) child1InnerObjects[parent]));
+ for (int child = 0; child < child1InnerObjects[parent] && child < size; child++) {
+ SearchHit innerHit = inner.getAt(child);
+ assertThat(innerHit.getType(), equalTo("child1"));
+ String childId = String.format(Locale.ENGLISH, "%04d", offset1 + child);
+ assertThat(innerHit.getId(), equalTo(childId));
+ assertThat(innerHit.getNestedIdentity(), nullValue());
+ }
+ offset1 += child1InnerObjects[parent];
+
+ inner = searchHit.getInnerHits().get("b");
+ assertThat(inner.totalHits(), equalTo((long) child2InnerObjects[parent]));
+ for (int child = 0; child < child2InnerObjects[parent] && child < size; child++) {
+ SearchHit innerHit = inner.getAt(child);
+ assertThat(innerHit.getType(), equalTo("child2"));
+ String childId = String.format(Locale.ENGLISH, "%04d", offset2 + child);
+ assertThat(innerHit.getId(), equalTo(childId));
+ assertThat(innerHit.getNestedIdentity(), nullValue());
+ }
+ offset2 += child2InnerObjects[parent];
+ }
+ }
+
+ @Test
+ public void testPathOrTypeMustBeDefined() {
+ createIndex("articles");
+ ensureGreen("articles");
+ try {
+ client().prepareSearch("articles")
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit())
+ .get();
+ } catch (Exception e) {
+ assertThat(e.getMessage(), containsString("Failed to build"));
+ }
+
+ }
+
+ @Test
+ public void testInnerHitsOnHasParent() throws Exception {
+ assertAcked(prepareCreate("stack")
+ .addMapping("question", "body", "type=string")
+ .addMapping("answer", "_parent", "type=question", "body", "type=string")
+ );
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("stack", "question", "1").setSource("body", "I'm using HTTPS + Basic authentication to protect a resource. How can I throttle authentication attempts to protect against brute force attacks?"));
+ requests.add(client().prepareIndex("stack", "answer", "1").setParent("1").setSource("body", "install fail2ban and enable rules for apache"));
+ requests.add(client().prepareIndex("stack", "question", "2").setSource("body", "I have firewall rules set up and also denyhosts installed.\\ndo I also need to install fail2ban?"));
+ requests.add(client().prepareIndex("stack", "answer", "2").setParent("2").setSource("body", "Denyhosts protects only ssh; Fail2Ban protects all daemons."));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("stack")
+ .setTypes("answer")
+ .addSort("_uid", SortOrder.ASC)
+ .setQuery(
+ boolQuery()
+ .must(matchQuery("body", "fail2ban"))
+ .must(hasParentQuery("question", matchAllQuery()).innerHit(new QueryInnerHitBuilder()))
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 2);
+
+ SearchHit searchHit = response.getHits().getAt(0);
+ assertThat(searchHit.getId(), equalTo("1"));
+ assertThat(searchHit.getType(), equalTo("answer"));
+ assertThat(searchHit.getInnerHits().get("question").getTotalHits(), equalTo(1l));
+ assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo("question"));
+ assertThat(searchHit.getInnerHits().get("question").getAt(0).id(), equalTo("1"));
+
+ searchHit = response.getHits().getAt(1);
+ assertThat(searchHit.getId(), equalTo("2"));
+ assertThat(searchHit.getType(), equalTo("answer"));
+ assertThat(searchHit.getInnerHits().get("question").getTotalHits(), equalTo(1l));
+ assertThat(searchHit.getInnerHits().get("question").getAt(0).getType(), equalTo("question"));
+ assertThat(searchHit.getInnerHits().get("question").getAt(0).id(), equalTo("2"));
+ }
+
+ @Test
+ public void testParentChildMultipleLayers() throws Exception {
+ assertAcked(prepareCreate("articles")
+ .addMapping("article", "title", "type=string")
+ .addMapping("comment", "_parent", "type=article", "message", "type=string")
+ .addMapping("remark", "_parent", "type=comment", "message", "type=string")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource("title", "quick brown fox"));
+ requests.add(client().prepareIndex("articles", "comment", "1").setParent("1").setSource("message", "fox eat quick"));
+ requests.add(client().prepareIndex("articles", "remark", "1").setParent("1").setRouting("1").setSource("message", "good"));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource("title", "big gray elephant"));
+ requests.add(client().prepareIndex("articles", "comment", "2").setParent("2").setSource("message", "elephant captured"));
+ requests.add(client().prepareIndex("articles", "remark", "2").setParent("2").setRouting("2").setSource("message", "bad"));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", hasChildQuery("remark", matchQuery("message", "good"))))
+ .addInnerHit("comment",
+ new InnerHitsBuilder.InnerHit().setType("comment")
+ .setQuery(hasChildQuery("remark", matchQuery("message", "good")))
+ .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setType("remark").setQuery(matchQuery("message", "good")))
+ )
+ .get();
+
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).type(), equalTo("remark"));
+
+ response = client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", hasChildQuery("remark", matchQuery("message", "bad"))))
+ .addInnerHit("comment",
+ new InnerHitsBuilder.InnerHit().setType("comment")
+ .setQuery(hasChildQuery("remark", matchQuery("message", "bad")))
+ .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setType("remark").setQuery(matchQuery("message", "bad")))
+ )
+ .get();
+
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).type(), equalTo("remark"));
+ }
+
+ @Test
+ public void testNestedMultipleLayers() throws Exception {
+ // Two-level nested mapping: comments (nested) containing remarks (nested), plus a top-level title.
+ assertAcked(prepareCreate("articles").addMapping("article", jsonBuilder().startObject().startObject("article").startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message")
+ .field("type", "string")
+ .endObject()
+ .startObject("remarks")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message").field("type", "string").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("title")
+ .field("type", "string")
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ // One comment with one remark per article ("good" on article 1, "bad" on article 2).
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startArray("comments")
+ .startObject()
+ .field("message", "fox eat quick")
+ .startArray("remarks").startObject().field("message", "good").endObject().endArray()
+ .endObject()
+ .endArray()
+ .endObject()));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource(jsonBuilder().startObject()
+ .field("title", "big gray elephant")
+ .startArray("comments")
+ .startObject()
+ .field("message", "elephant captured")
+ .startArray("remarks").startObject().field("message", "bad").endObject().endArray()
+ .endObject()
+ .endArray()
+ .endObject()));
+ indexRandom(true, requests);
+
+ // Nested-in-nested query with inner hits declared at both nesting levels.
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"))))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit()
+ .setPath("comments")
+ .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good")))
+ .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setPath("comments.remarks").setQuery(matchQuery("comments.remarks.message", "good")))
+ ).get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ // The inner-level identity chains: field "comments" with a child identity for "remarks".
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));
+
+ // Directly refer to the second level:
+ response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad")).innerHit(new QueryInnerHitBuilder()))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ // Default inner-hit name is the full nested path when none is set explicitly.
+ innerHits = response.getHits().getAt(0).getInnerHits().get("comments.remarks");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));
+
+ // Same two-level inner-hit shape for the "bad" remark; should match article 2.
+ response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"))))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit()
+ .setPath("comments")
+ .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad")))
+ .addInnerHit("remark", new InnerHitsBuilder.InnerHit().setPath("comments.remarks").setQuery(matchQuery("comments.remarks.message", "bad"))))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ innerHits = innerHits.getAt(0).getInnerHits().get("remark");
+ assertThat(innerHits.totalHits(), equalTo(1l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/9723
+ public void testNestedDefinedAsObject() throws Exception {
+ assertAcked(prepareCreate("articles").addMapping("article", "comments", "type=nested", "title", "type=string"));
+
+ // The nested field is written as a single JSON object (not an array) in the document source.
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").field("message", "fox eat quick").endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder()))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ // Single nested doc at offset 0 with no child identity.
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ }
+
+ @Test
+ public void testNestedInnerHitsWithStoredFieldsAndNoSourceBackcompat() throws Exception {
+ // Backcompat: index created as 1.4.2, _source disabled, message stored — inner hits
+ // must fall back to the stored field rather than the (absent) source.
+ assertAcked(prepareCreate("articles")
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
+ .addMapping("article", jsonBuilder().startObject()
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ )
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").field("message", "fox eat quick").endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ // Request the stored field explicitly on the inner hit.
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder().field("comments.message")))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).fields().get("comments.message").getValue()), equalTo("fox eat quick"));
+ }
+
+ @Test
+ public void testNestedInnerHitsWithHighlightOnStoredFieldBackcompat() throws Exception {
+ // Backcompat: index created as 1.4.2, _source disabled — highlighting on an inner hit
+ // must work off the stored copy of the field.
+ assertAcked(prepareCreate("articles")
+ .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
+ .addMapping("article", jsonBuilder().startObject()
+ .startObject("_source").field("enabled", false).endObject()
+ .startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ )
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").field("message", "fox eat quick").endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder().addHighlightedField("comments.message")))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ // The matched term is wrapped in the default <em> highlight tags.
+ assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).highlightFields().get("comments.message").getFragments()[0]), equalTo("<em>fox</em> eat quick"));
+ }
+
+ @Test
+ public void testNestedInnerHitsWithExcludeSourceBackcompat() throws Exception {
+ // Backcompat: index created as 1.4.2 and "comments" excluded from _source — the inner hit
+ // still requests fetchSource plus the stored field; the stored value must come back.
+ assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
+ .addMapping("article", jsonBuilder().startObject()
+ .startObject("_source").field("excludes", new String[]{"comments"}).endObject()
+ .startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ )
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").field("message", "fox eat quick").endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder().field("comments.message").setFetchSource(true)))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).fields().get("comments.message").getValue()), equalTo("fox eat quick"));
+ }
+
+ @Test
+ public void testNestedInnerHitsHiglightWithExcludeSourceBackcompat() throws Exception {
+ // Backcompat: "comments" excluded from _source on a 1.4.2-created index — inner-hit
+ // highlighting must still work from the stored field.
+ // NOTE(review): method name has a typo ("Higlight"); left unchanged to preserve the recorded patch.
+ assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
+ .addMapping("article", jsonBuilder().startObject()
+ .startObject("_source").field("excludes", new String[]{"comments"}).endObject()
+ .startObject("properties")
+ .startObject("comments")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("message").field("type", "string").field("store", "yes").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ )
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").field("message", "fox eat quick").endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(new QueryInnerHitBuilder().addHighlightedField("comments.message")))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getNestedIdentity().getChild(), nullValue());
+ assertThat(String.valueOf(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).highlightFields().get("comments.message").getFragments()[0]), equalTo("<em>fox</em> eat quick"));
+ }
+
+ @Test
+ public void testInnerHitsWithObjectFieldThatHasANestedField() throws Exception {
+ // A plain object field ("comments") containing a nested field ("messages") — the nested
+ // path used in queries is the dotted "comments.messages".
+ assertAcked(prepareCreate("articles")
+ .addMapping("article", jsonBuilder().startObject()
+ .startObject("properties")
+ .startObject("comments")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("messages").field("type", "nested").endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ )
+ );
+
+ // One article with two nested messages in array form.
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments")
+ .startArray("messages")
+ .startObject().field("message", "fox eat quick").endObject()
+ .startObject().field("message", "bear eat quick").endObject()
+ .endArray()
+ .endObject()
+ .endObject()));
+ indexRandom(true, requests);
+
+ // First message ("fox") resolves to nested offset 0.
+ SearchResponse response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox")).innerHit(new QueryInnerHitBuilder()))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue());
+
+ // Second message ("bear") resolves to nested offset 1.
+ response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear")).innerHit(new QueryInnerHitBuilder()))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue());
+
+ // index the message in an object form instead of an array
+ requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+ .field("title", "quick brown fox")
+ .startObject("comments").startObject("messages").field("message", "fox eat quick").endObject().endObject()
+ .endObject()));
+ indexRandom(true, requests);
+ response = client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox")).innerHit(new QueryInnerHitBuilder()))
+ .get();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue());
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testSimpleNestedOldScriptAPI() throws Exception {
+ // Nested "comments" under article; exercises inner hits through the pre-2.0 script API.
+ assertAcked(prepareCreate("articles").addMapping(
+ "article",
+ jsonBuilder().startObject().startObject("article").startObject("properties").startObject("comments")
+ .field("type", "nested").startObject("properties").startObject("message").field("type", "string").endObject()
+ .endObject().endObject().startObject("title").field("type", "string").endObject().endObject().endObject()
+ .endObject()));
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource(
+ jsonBuilder().startObject().field("title", "quick brown fox").startArray("comments").startObject()
+ .field("message", "fox eat quick").endObject().startObject().field("message", "fox ate rabbit x y z").endObject()
+ .startObject().field("message", "rabbit got away").endObject().endArray().endObject()));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource(
+ jsonBuilder().startObject().field("title", "big gray elephant").startArray("comments").startObject()
+ .field("message", "elephant captured").endObject().startObject().field("message", "mice squashed by elephant x")
+ .endObject().startObject().field("message", "elephant scared by mice x y").endObject().endArray().endObject()));
+ indexRandom(true, requests);
+
+ // Inner hits can be defined in two ways: 1) with the query 2) as separate inner_hit definition
+ SearchRequest[] searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(
+ nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(
+ new QueryInnerHitBuilder().setName("comment"))).request(),
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")))
+ .addInnerHit("comment",
+ new InnerHitsBuilder.InnerHit().setPath("comments").setQuery(matchQuery("comments.message", "fox")))
+ .request() };
+ // Both definition styles must yield the same inner hits: two "fox" comments on article 1.
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(2l));
+ assertThat(innerHits.getHits().length, equalTo(2));
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(1).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
+ }
+
+ // Same check for "elephant" (article 2, three matching comments); third variant adds a _doc sort.
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant")))
+ .addInnerHit("comment",
+ new InnerHitsBuilder.InnerHit().setPath("comments").setQuery(matchQuery("comments.message", "elephant")))
+ .request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ nestedQuery("comments", matchQuery("comments.message", "elephant")).innerHit(
+ new QueryInnerHitBuilder().setName("comment"))).request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ nestedQuery("comments", matchQuery("comments.message", "elephant")).innerHit(
+ new QueryInnerHitBuilder().setName("comment").addSort("_doc", SortOrder.DESC))).request() };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(3l));
+ assertThat(innerHits.getHits().length, equalTo(3));
+ assertThat(innerHits.getAt(0).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+ assertThat(innerHits.getAt(1).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
+ assertThat(innerHits.getAt(2).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
+ assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2));
+ }
+
+ // Inner-hit features combined: highlighting, explain, fielddata field, old-style
+ // (unnamed-language) script field, and size=1 — again via both definition styles.
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox")))
+ .addInnerHit(
+ "comments",
+ new InnerHitsBuilder.InnerHit().setPath("comments").setQuery(matchQuery("comments.message", "fox"))
+ .addHighlightedField("comments.message").setExplain(true).addFieldDataField("comments.message")
+ .addScriptField("script", "doc['comments.message'].value").setSize(1)).request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ nestedQuery("comments", matchQuery("comments.message", "fox")).innerHit(
+ new QueryInnerHitBuilder().addHighlightedField("comments.message").setExplain(true)
+ .addFieldDataField("comments.message")
+ .addScriptField("script", "doc['comments.message'].value").setSize(1))).request() };
+
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
+ assertThat(innerHits.getTotalHits(), equalTo(2l));
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(),
+ equalTo("<em>fox</em> eat quick"));
+ assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in"));
+ assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat"));
+ assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testSimpleParentChildOldScriptAPI() throws Exception {
+ // Parent/child article -> comment; exercises inner hits through the pre-2.0 script API.
+ assertAcked(prepareCreate("articles").addMapping("article", "title", "type=string").addMapping("comment", "_parent",
+ "type=article", "message", "type=string"));
+
+ // Article 1 has comments 1-3, article 2 has comments 4-6.
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("articles", "article", "1").setSource("title", "quick brown fox"));
+ requests.add(client().prepareIndex("articles", "comment", "1").setParent("1").setSource("message", "fox eat quick"));
+ requests.add(client().prepareIndex("articles", "comment", "2").setParent("1").setSource("message", "fox ate rabbit x y z"));
+ requests.add(client().prepareIndex("articles", "comment", "3").setParent("1").setSource("message", "rabbit got away"));
+ requests.add(client().prepareIndex("articles", "article", "2").setSource("title", "big gray elephant"));
+ requests.add(client().prepareIndex("articles", "comment", "4").setParent("2").setSource("message", "elephant captured"));
+ requests.add(client().prepareIndex("articles", "comment", "5").setParent("2").setSource("message", "mice squashed by elephant x"));
+ requests.add(client().prepareIndex("articles", "comment", "6").setParent("2").setSource("message", "elephant scared by mice x y"));
+ indexRandom(true, requests);
+
+ // Inner hits defined either as a separate inner_hit or embedded in the query; results must match.
+ SearchRequest[] searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles").setQuery(hasChildQuery("comment", matchQuery("message", "fox")))
+ .addInnerHit("comment", new InnerHitsBuilder.InnerHit().setType("comment").setQuery(matchQuery("message", "fox")))
+ .request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ hasChildQuery("comment", matchQuery("message", "fox")).innerHit(
+ new QueryInnerHitBuilder().setName("comment"))).request() };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("1"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(2l));
+
+ assertThat(innerHits.getAt(0).getId(), equalTo("1"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(1).getId(), equalTo("2"));
+ assertThat(innerHits.getAt(1).type(), equalTo("comment"));
+ }
+
+ // Same comparison for "elephant": article 2 with its three matching comments.
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "elephant")))
+ .addInnerHit("comment",
+ new InnerHitsBuilder.InnerHit().setType("comment").setQuery(matchQuery("message", "elephant"))).request(),
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "elephant")).innerHit(new QueryInnerHitBuilder()))
+ .request() };
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ assertHitCount(response, 1);
+ assertSearchHit(response, 1, hasId("2"));
+
+ assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.totalHits(), equalTo(3l));
+
+ assertThat(innerHits.getAt(0).getId(), equalTo("4"));
+ assertThat(innerHits.getAt(0).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(1).getId(), equalTo("5"));
+ assertThat(innerHits.getAt(1).type(), equalTo("comment"));
+ assertThat(innerHits.getAt(2).getId(), equalTo("6"));
+ assertThat(innerHits.getAt(2).type(), equalTo("comment"));
+ }
+
+ // Inner-hit features combined: highlighting, explain, fielddata field, old-style script field, size=1.
+ searchRequests = new SearchRequest[] {
+ client().prepareSearch("articles")
+ .setQuery(hasChildQuery("comment", matchQuery("message", "fox")))
+ .addInnerHit(
+ "comment",
+ new InnerHitsBuilder.InnerHit().setType("comment").setQuery(matchQuery("message", "fox"))
+ .addHighlightedField("message").setExplain(true).addFieldDataField("message")
+ .addScriptField("script", "doc['message'].value").setSize(1)).request(),
+ client().prepareSearch("articles")
+ .setQuery(
+ hasChildQuery("comment", matchQuery("message", "fox")).innerHit(
+ new QueryInnerHitBuilder().addHighlightedField("message").setExplain(true)
+ .addFieldDataField("message").addScriptField("script", "doc['message'].value").setSize(1)))
+ .request() };
+
+ for (SearchRequest searchRequest : searchRequests) {
+ SearchResponse response = client().search(searchRequest).actionGet();
+ assertNoFailures(response);
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+ assertThat(innerHits.getHits().length, equalTo(1));
+ assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
+ assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox"));
+ assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat"));
+ assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
+ }
+ }
+
+ @Test
+ public void testRoyals() throws Exception {
+ assertAcked(
+ prepareCreate("royals")
+ .addMapping("king")
+ .addMapping("prince", "_parent", "type=king")
+ .addMapping("duke", "_parent", "type=prince")
+ .addMapping("earl", "_parent", "type=duke")
+ .addMapping("baron", "_parent", "type=earl")
+ );
+
+ List<IndexRequestBuilder> requests = new ArrayList<>();
+ requests.add(client().prepareIndex("royals", "king", "king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "prince", "prince").setParent("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "duke", "duke").setParent("prince").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "earl", "earl1").setParent("duke").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "earl", "earl2").setParent("duke").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "earl", "earl3").setParent("duke").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "earl", "earl4").setParent("duke").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "baron", "baron1").setParent("earl1").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "baron", "baron2").setParent("earl2").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "baron", "baron3").setParent("earl3").setRouting("king").setSource("{}"));
+ requests.add(client().prepareIndex("royals", "baron", "baron4").setParent("earl4").setRouting("king").setSource("{}"));
+ indexRandom(true, requests);
+
+ SearchResponse response = client().prepareSearch("royals")
+ .setTypes("duke")
+ .addInnerHit("earls", new InnerHitsBuilder.InnerHit()
+ .setType("earl")
+ .addSort(SortBuilders.fieldSort("_uid").order(SortOrder.ASC))
+ .setSize(4)
+ .addInnerHit("barons", new InnerHitsBuilder.InnerHit().setType("baron"))
+ )
+ .addInnerHit("princes",
+ new InnerHitsBuilder.InnerHit().setType("prince")
+ .addInnerHit("kings", new InnerHitsBuilder.InnerHit().setType("king"))
+ )
+ .get();
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getAt(0).getId(), equalTo("duke"));
+
+ SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("earls");
+ assertThat(innerHits.getTotalHits(), equalTo(4l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("earl1"));
+ assertThat(innerHits.getAt(1).getId(), equalTo("earl2"));
+ assertThat(innerHits.getAt(2).getId(), equalTo("earl3"));
+ assertThat(innerHits.getAt(3).getId(), equalTo("earl4"));
+
+ SearchHits innerInnerHits = innerHits.getAt(0).getInnerHits().get("barons");
+ assertThat(innerInnerHits.totalHits(), equalTo(1l));
+ assertThat(innerInnerHits.getAt(0).getId(), equalTo("baron1"));
+
+ innerInnerHits = innerHits.getAt(1).getInnerHits().get("barons");
+ assertThat(innerInnerHits.totalHits(), equalTo(1l));
+ assertThat(innerInnerHits.getAt(0).getId(), equalTo("baron2"));
+
+ innerInnerHits = innerHits.getAt(2).getInnerHits().get("barons");
+ assertThat(innerInnerHits.totalHits(), equalTo(1l));
+ assertThat(innerInnerHits.getAt(0).getId(), equalTo("baron3"));
+
+ innerInnerHits = innerHits.getAt(3).getInnerHits().get("barons");
+ assertThat(innerInnerHits.totalHits(), equalTo(1l));
+ assertThat(innerInnerHits.getAt(0).getId(), equalTo("baron4"));
+
+ innerHits = response.getHits().getAt(0).getInnerHits().get("princes");
+ assertThat(innerHits.getTotalHits(), equalTo(1l));
+ assertThat(innerHits.getAt(0).getId(), equalTo("prince"));
+
+ innerInnerHits = innerHits.getAt(0).getInnerHits().get("kings");
+ assertThat(innerInnerHits.totalHits(), equalTo(1l));
+ assertThat(innerInnerHits.getAt(0).getId(), equalTo("king"));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java b/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
new file mode 100644
index 0000000000..b079b2f6e6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/matchedqueries/MatchedQueriesTests.java
@@ -0,0 +1,267 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.matchedqueries;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItemInArray;
+
+/**
+ *
+ */
+public class MatchedQueriesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMatchedQueryFromFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test1", "number", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test2", "number", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test3", "number", 3).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), orQuery(rangeQuery("number").lte(2).queryName("test1"), rangeQuery("number").gt(2).queryName("test2")))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(boolQuery().should(rangeQuery("number").lte(2).queryName("test1")).should(rangeQuery("number").gt(2).queryName("test2"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test1"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("test2"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilter() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(orQuery(
+ termQuery("name", "test").queryName("name"),
+ termQuery("title", "title1").queryName("title"))).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(boolQuery()
+ .should(termQuery("name", "test").queryName("name"))
+ .should(termQuery("title", "title1").queryName("title"))).get();
+
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else if (hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void simpleMatchedQueryFromTopLevelFilterAndFilteredQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("name", "test", "title", "title1").get();
+ client().prepareIndex("test", "type1", "2").setSource("name", "test", "title", "title2").get();
+ client().prepareIndex("test", "type1", "3").setSource("name", "test", "title", "title3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("title", "title1", "title2", "title3").queryName("title")))
+ .setPostFilter(termQuery("name", "test").queryName("name")).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(termsQuery("title", "title1", "title2", "title3").queryName("title"))
+ .setPostFilter(matchQuery("name", "test").queryName("name")).get();
+ assertHitCount(searchResponse, 3l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1") || hit.id().equals("2") || hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("name"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterSupportsName() {
+ createIndex("test1", "test2");
+ ensureGreen();
+
+ client().prepareIndex("test1", "type1", "1").setSource("title", "title1").get();
+ client().prepareIndex("test2", "type1", "2").setSource("title", "title2").get();
+ client().prepareIndex("test2", "type1", "3").setSource("title", "title3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(),
+ orQuery(
+ indicesQuery(termQuery("title", "title1").queryName("title1"), "test1")
+ .noMatchQuery(termQuery("title", "title2").queryName("title2")).queryName("indices_filter"),
+ termQuery("title", "title3").queryName("title3")).queryName("or"))).get();
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title1"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(3));
+ assertThat(hit.matchedQueries(), hasItemInArray("indices_filter"));
+ assertThat(hit.matchedQueries(), hasItemInArray("title2"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else if (hit.id().equals("3")) {
+ assertThat(hit.matchedQueries().length, equalTo(2));
+ assertThat(hit.matchedQueries(), hasItemInArray("title3"));
+ assertThat(hit.matchedQueries(), hasItemInArray("or"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+
+ /**
+ * Test case for issue #4361: https://github.com/elasticsearch/elasticsearch/issues/4361
+ */
+ @Test
+ public void testMatchedWithShould() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("content", "Lorem ipsum dolor sit amet").get();
+ client().prepareIndex("test", "type1", "2").setSource("content", "consectetur adipisicing elit").get();
+ refresh();
+
+ // Execute search at least two times to load it in cache
+ int iter = scaledRandomIntBetween(2, 10);
+ for (int i = 0; i < iter; i++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(
+ boolQuery()
+ .minimumNumberShouldMatch(1)
+ .should(queryStringQuery("dolor").queryName("dolor"))
+ .should(queryStringQuery("elit").queryName("elit"))
+ )
+ .setPreference("_primary")
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (hit.id().equals("1")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("dolor"));
+ } else if (hit.id().equals("2")) {
+ assertThat(hit.matchedQueries().length, equalTo(1));
+ assertThat(hit.matchedQueries(), hasItemInArray("elit"));
+ } else {
+ fail("Unexpected document returned with id " + hit.id());
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testMatchedWithWrapperQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("content", "Lorem ipsum dolor sit amet").get();
+ refresh();
+
+ QueryBuilder[] queries = new QueryBuilder[]{
+ wrapperQuery(matchQuery("content", "amet").queryName("abc").buildAsBytes().toUtf8()),
+ constantScoreQuery(wrapperQuery(termQuery("content", "amet").queryName("abc").buildAsBytes().toUtf8()))
+ };
+ for (QueryBuilder query : queries) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(query)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("abc"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java b/core/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java
new file mode 100644
index 0000000000..d1fbb9cf54
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/morelikethis/ItemSerializationTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.morelikethis;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.action.get.MultiGetRequest;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
+import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath;
+import static org.hamcrest.Matchers.is;
+
+public class ItemSerializationTests extends ElasticsearchTestCase {
+
+ private Item generateRandomItem(int arraySize, int stringSize) {
+ String index = randomAsciiOfLength(stringSize);
+ String type = randomAsciiOfLength(stringSize);
+ String id = String.valueOf(Math.abs(randomInt()));
+ String routing = randomBoolean() ? randomAsciiOfLength(stringSize) : null;
+ String[] fields = generateRandomStringArray(arraySize, stringSize, true);
+
+ long version = Math.abs(randomLong());
+ VersionType versionType = RandomPicks.randomFrom(new Random(), VersionType.values());
+
+ FetchSourceContext fetchSourceContext;
+ switch (randomIntBetween(0, 3)) {
+ case 0 :
+ fetchSourceContext = new FetchSourceContext(randomBoolean());
+ break;
+ case 1 :
+ fetchSourceContext = new FetchSourceContext(generateRandomStringArray(arraySize, stringSize, true));
+ break;
+ case 2 :
+ fetchSourceContext = new FetchSourceContext(generateRandomStringArray(arraySize, stringSize, true),
+ generateRandomStringArray(arraySize, stringSize, true));
+ break;
+ default:
+ fetchSourceContext = null;
+ break;
+ }
+ return (Item) new Item(index, type, id).routing(routing).fields(fields).version(version).versionType(versionType)
+ .fetchSourceContext(fetchSourceContext);
+ }
+
+ private String ItemToJSON(Item item) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ builder.startArray("docs");
+ item.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endArray();
+ builder.endObject();
+ return XContentHelper.convertToJson(builder.bytes(), false);
+ }
+
+ private MultiGetRequest.Item JSONtoItem(String json) throws Exception {
+ MultiGetRequest request = new MultiGetRequest().add(null, null, null, null, new BytesArray(json), true);
+ return request.getItems().get(0);
+ }
+
+ @Test
+ public void testItemSerialization() throws Exception {
+ int numOfTrials = 100;
+ int maxArraySize = 7;
+ int maxStringSize = 8;
+ for (int i = 0; i < numOfTrials; i++) {
+ Item item1 = generateRandomItem(maxArraySize, maxStringSize);
+ String json = ItemToJSON(item1);
+ MultiGetRequest.Item item2 = JSONtoItem(json);
+ assertEquals(item1, item2);
+ }
+ }
+
+ private List<MultiGetRequest.Item> testItemsFromJSON(String json) throws Exception {
+ MultiGetRequest request = new MultiGetRequest();
+ request.add(null, null, null, null, new BytesArray(json), true);
+ List<MultiGetRequest.Item> items = request.getItems();
+
+ assertEquals(items.size(), 3);
+ for (MultiGetRequest.Item item : items) {
+ assertThat(item.index(), is("test"));
+ assertThat(item.type(), is("type"));
+ FetchSourceContext fetchSource = item.fetchSourceContext();
+ switch (item.id()) {
+ case "1" :
+ assertThat(fetchSource.fetchSource(), is(false));
+ break;
+ case "2" :
+ assertThat(fetchSource.fetchSource(), is(true));
+ assertThat(fetchSource.includes(), is(new String[]{"field3", "field4"}));
+ break;
+ case "3" :
+ assertThat(fetchSource.fetchSource(), is(true));
+ assertThat(fetchSource.includes(), is(new String[]{"user"}));
+ assertThat(fetchSource.excludes(), is(new String[]{"user.location"}));
+ break;
+ default:
+ fail("item with id: " + item.id() + " is not 1, 2 or 3");
+ break;
+ }
+ }
+ return items;
+ }
+
+ @Test
+ public void testSimpleItemSerializationFromFile() throws Exception {
+ // test items from JSON
+ List<MultiGetRequest.Item> itemsFromJSON = testItemsFromJSON(
+ copyToStringFromClasspath("/org/elasticsearch/search/morelikethis/items.json"));
+
+ // create builder from items
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ builder.startArray("docs");
+ for (MultiGetRequest.Item item : itemsFromJSON) {
+ MoreLikeThisQueryBuilder.Item itemForBuilder = (MoreLikeThisQueryBuilder.Item) new MoreLikeThisQueryBuilder.Item(
+ item.index(), item.type(), item.id())
+ .fetchSourceContext(item.fetchSourceContext())
+ .fields(item.fields());
+ itemForBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ }
+ builder.endArray();
+ builder.endObject();
+
+ // verify generated JSON lead to the same items
+ String json = XContentHelper.convertToJson(builder.bytes(), false);
+ testItemsFromJSON(json);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java
new file mode 100644
index 0000000000..63f5586d40
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/morelikethis/MoreLikeThisTests.java
@@ -0,0 +1,591 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.morelikethis;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
+import org.elasticsearch.index.query.MoreLikeThisQueryBuilder.Item;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.client.Requests.*;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class MoreLikeThisTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleMoreLikeThis() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis");
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 1l);
+ }
+
+
+ @Test
+ public void testSimpleMoreLikeOnLongField() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1", "some_long", "type=long"));
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("some_long", 1367484649580l).endObject())).actionGet();
+ client().index(indexRequest("test").type("type2").id("2").source(jsonBuilder().startObject().field("some_long", 0).endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("some_long", -666).endObject())).actionGet();
+
+
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis");
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 0l);
+ }
+
+
+ @Test
+ public void testMoreLikeThisWithAliases() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ logger.info("Creating aliases beta and release");
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("release", termQuery("text", "release"), "test")).actionGet();
+ client().admin().indices().aliases(indexAliasesRequest().addAlias("beta", termQuery("text", "beta"), "test")).actionGet();
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(jsonBuilder().startObject().field("text", "lucene beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(jsonBuilder().startObject().field("text", "lucene release").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("3").source(jsonBuilder().startObject().field("text", "elasticsearch beta").endObject())).actionGet();
+ client().index(indexRequest("test").type("type1").id("4").source(jsonBuilder().startObject().field("text", "elasticsearch release").endObject())).actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running moreLikeThis on index");
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 2l);
+
+ logger.info("Running moreLikeThis on beta shard");
+ response = client().prepareSearch("beta").setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 1l);
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+
+ logger.info("Running moreLikeThis on release shard");
+ response = client().prepareSearch("release").setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 1l);
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+
+ logger.info("Running moreLikeThis on alias with node client");
+ response = internalCluster().clientNodeClient().prepareSearch("beta").setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(response, 1l);
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+
+ }
+
+ @Test
+ public void testMoreLikeThisIssue2197() throws Exception {
+ Client client = client();
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("foo", "bar", "1"))).get();
+ assertNoFailures(response);
+ assertThat(response, notNullValue());
+ response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("foo", "bar", "1"))).get();
+ assertNoFailures(response);
+ assertThat(response, notNullValue());
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/2489
+ public void testMoreLikeWithCustomRouting() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ client().admin().indices().prepareCreate("foo").addMapping("bar", mapping).execute().actionGet();
+ ensureGreen();
+
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("2")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem((Item) new Item("foo", "bar", "1").routing("2"))).get();
+ assertNoFailures(response);
+ assertThat(response, notNullValue());
+ }
+
+ @Test
+ // See issue: https://github.com/elasticsearch/elasticsearch/issues/3039
+ public void testMoreLikeThisIssueRoutingNotSerialized() throws Exception {
+ String mapping = XContentFactory.jsonBuilder().startObject().startObject("bar")
+ .startObject("properties")
+ .endObject()
+ .endObject().endObject().string();
+ assertAcked(prepareCreate("foo", 2,
+ Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put(SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("bar", mapping));
+ ensureGreen();
+
+ client().prepareIndex("foo", "bar", "1")
+ .setSource(jsonBuilder().startObject().startObject("foo").field("bar", "boz").endObject())
+ .setRouting("4000")
+ .execute().actionGet();
+ client().admin().indices().prepareRefresh("foo").execute().actionGet();
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem((Item) new Item("foo", "bar", "1").routing("4000"))).get();
+ assertNoFailures(response);
+ assertThat(response, notNullValue());
+ }
+
+ @Test
+ // See issue https://github.com/elasticsearch/elasticsearch/issues/3252
+ public void testNumericField() throws Exception {
+ final String[] numericTypes = new String[]{"byte", "short", "integer", "long"};
+ prepareCreate("test").addMapping("type", jsonBuilder()
+ .startObject().startObject("type")
+ .startObject("properties")
+ .startObject("int_value").field("type", randomFrom(numericTypes)).endObject()
+ .startObject("string_value").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject()).execute().actionGet();
+ ensureGreen();
+ client().prepareIndex("test", "type", "1")
+ .setSource(jsonBuilder().startObject().field("string_value", "lucene index").field("int_value", 1).endObject())
+ .execute().actionGet();
+ client().prepareIndex("test", "type", "2")
+ .setSource(jsonBuilder().startObject().field("string_value", "elasticsearch index").field("int_value", 42).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ // Implicit list of fields -> ignore numeric fields
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type", "1")).minTermFreq(1).minDocFreq(1)).get();
+ assertHitCount(searchResponse, 1l);
+
+ // Explicit list of fields including numeric fields -> fail
+ assertThrows(client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder("string_value", "int_value").addItem(new Item("test", "type", "1")).minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class);
+
+ // mlt query with no field -> OK
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery().likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with string fields
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value").likeText("index").minTermFreq(1).minDocFreq(1)).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt query with at least a numeric field -> fail by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index")), SearchPhaseExecutionException.class);
+
+ // mlt query with at least a numeric field -> fail by command
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").failOnUnsupportedField(true)), SearchPhaseExecutionException.class);
+
+
+ // mlt query with at least a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("string_value", "int_value").likeText("index").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).get();
+ assertHitCount(searchResponse, 2l);
+
+ // mlt field query on a numeric field -> failure by default
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1)), SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field -> failure by command
+ assertThrows(client().prepareSearch().setQuery(moreLikeThisQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(true)),
+ SearchPhaseExecutionException.class);
+
+ // mlt field query on a numeric field but fail_on_unsupported_field set to false
+ searchResponse = client().prepareSearch().setQuery(moreLikeThisQuery("int_value").likeText("42").minTermFreq(1).minDocFreq(1).failOnUnsupportedField(false)).execute().actionGet();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testSimpleMoreLikeInclude() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ client().index(indexRequest("test").type("type1").id("1").source(
+ jsonBuilder().startObject()
+ .field("text", "Apache Lucene is a free/open source information retrieval software library").endObject()))
+ .actionGet();
+ client().index(indexRequest("test").type("type1").id("2").source(
+ jsonBuilder().startObject()
+ .field("text", "Lucene has been ported to other programming languages").endObject()))
+ .actionGet();
+ client().admin().indices().refresh(refreshRequest()).actionGet();
+
+ logger.info("Running More Like This with include true");
+ SearchResponse response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1).include(true).minimumShouldMatch("0%")).get();
+ assertOrderedSearchHits(response, "1", "2");
+
+ response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "2")).minTermFreq(1).minDocFreq(1).include(true).minimumShouldMatch("0%")).get();
+ assertOrderedSearchHits(response, "2", "1");
+
+ logger.info("Running More Like This with include false");
+ response = client().prepareSearch().setQuery(
+ new MoreLikeThisQueryBuilder().addItem(new Item("test", "type1", "1")).minTermFreq(1).minDocFreq(1).minimumShouldMatch("0%")).get();
+ assertSearchHits(response, "2");
+ }
+
+ public void testSimpleMoreLikeThisIds() throws Exception {
+ logger.info("Creating index test");
+ assertAcked(prepareCreate("test").addMapping("type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex("test", "type1").setSource("text", "lucene").setId("1"));
+ builders.add(client().prepareIndex("test", "type1").setSource("text", "lucene release").setId("2"));
+ builders.add(client().prepareIndex("test", "type1").setSource("text", "apache lucene").setId("3"));
+ indexRandom(true, builders);
+
+ logger.info("Running MoreLikeThis");
+ MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery("text").ids("1").include(true).minTermFreq(1).minDocFreq(1);
+ SearchResponse mltResponse = client().prepareSearch().setTypes("type1").setQuery(queryBuilder).execute().actionGet();
+ assertHitCount(mltResponse, 3l);
+ }
+
+ @Test
+ public void testSimpleMoreLikeThisIdsMultipleTypes() throws Exception {
+ logger.info("Creating index test");
+ int numOfTypes = randomIntBetween(2, 10);
+ CreateIndexRequestBuilder createRequestBuilder = prepareCreate("test");
+ for (int i = 0; i < numOfTypes; i++) {
+ createRequestBuilder.addMapping("type" + i, jsonBuilder().startObject().startObject("type" + i).startObject("properties")
+ .startObject("text").field("type", "string").endObject()
+ .endObject().endObject().endObject());
+ }
+ assertAcked(createRequestBuilder);
+
+ logger.info("Running Cluster Health");
+ assertThat(ensureGreen(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("Indexing...");
+ List<IndexRequestBuilder> builders = new ArrayList<>(numOfTypes);
+ for (int i = 0; i < numOfTypes; i++) {
+ builders.add(client().prepareIndex("test", "type" + i).setSource("text", "lucene" + " " + i).setId(String.valueOf(i)));
+ }
+ indexRandom(true, builders);
+
+ logger.info("Running MoreLikeThis");
+ MoreLikeThisQueryBuilder queryBuilder = QueryBuilders.moreLikeThisQuery("text").include(true).minTermFreq(1).minDocFreq(1)
+ .addItem(new MoreLikeThisQueryBuilder.Item("test", "type0", "0"));
+
+ String[] types = new String[numOfTypes];
+ for (int i = 0; i < numOfTypes; i++) {
+ types[i] = "type"+i;
+ }
+ SearchResponse mltResponse = client().prepareSearch().setTypes(types).setQuery(queryBuilder).execute().actionGet();
+ assertHitCount(mltResponse, numOfTypes);
+ }
+
+ @Test
+ public void testMoreLikeThisMultiValueFields() throws Exception {
+ logger.info("Creating the index ...");
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string,analyzer=keyword")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ ensureGreen();
+
+ logger.info("Indexing ...");
+ String[] values = {"aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff", "gggg", "hhhh", "iiii", "jjjj"};
+ List<IndexRequestBuilder> builders = new ArrayList<>(values.length + 1);
+ // index one document with all the values
+ builders.add(client().prepareIndex("test", "type1", "0").setSource("text", values));
+ // index each document with only one of the values
+ for (int i = 0; i < values.length; i++) {
+ builders.add(client().prepareIndex("test", "type1", String.valueOf(i + 1)).setSource("text", values[i]));
+ }
+ indexRandom(true, builders);
+
+ int maxIters = randomIntBetween(10, 20);
+ for (int i = 0; i < maxIters; i++)
+ {
+ int max_query_terms = randomIntBetween(1, values.length);
+ logger.info("Running More Like This with max_query_terms = {}", max_query_terms);
+ MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery("text").ids("0").minTermFreq(1).minDocFreq(1)
+ .maxQueryTerms(max_query_terms).minimumShouldMatch("0%");
+ SearchResponse response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).execute().actionGet();
+ assertSearchResponse(response);
+ assertHitCount(response, max_query_terms);
+ }
+ }
+
+ @Test
+ public void testMinimumShouldMatch() throws ExecutionException, InterruptedException {
+ logger.info("Creating the index ...");
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ ensureGreen();
+
+ logger.info("Indexing with each doc having one less term ...");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ String text = "";
+ for (int j = 1; j <= 10 - i; j++) {
+ text += j + " ";
+ }
+ builders.add(client().prepareIndex("test", "type1", i + "").setSource("text", text));
+ }
+ indexRandom(true, builders);
+
+ logger.info("Testing each minimum_should_match from 0% - 100% with 10% increment ...");
+ for (int i = 0; i <= 10; i++) {
+ String minimumShouldMatch = (10 * i) + "%";
+ MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery("text")
+ .likeText("1 2 3 4 5 6 7 8 9 10")
+ .minTermFreq(1)
+ .minDocFreq(1)
+ .minimumShouldMatch(minimumShouldMatch);
+ logger.info("Testing with minimum_should_match = " + minimumShouldMatch);
+ SearchResponse response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ if (minimumShouldMatch.equals("0%")) {
+ assertHitCount(response, 10);
+ } else {
+ assertHitCount(response, 11 - i);
+ }
+ }
+ }
+
+ @Test
+ public void testMoreLikeThisArtificialDocs() throws Exception {
+ int numFields = randomIntBetween(5, 10);
+
+ createIndex("test");
+ ensureGreen();
+
+ logger.info("Indexing a single document ...");
+ XContentBuilder doc = jsonBuilder().startObject();
+ for (int i = 0; i < numFields; i++) {
+ doc.field("field"+i, generateRandomStringArray(5, 10, false));
+ }
+ doc.endObject();
+ indexRandom(true, client().prepareIndex("test", "type1", "0").setSource(doc));
+
+ logger.info("Checking the document matches ...");
+ MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery()
+ .like((Item) new Item().doc(doc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .maxQueryTerms(100)
+ .minimumShouldMatch("100%"); // strict all terms must match!
+ SearchResponse response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 1);
+ }
+
+ @Test
+ public void testMoreLikeThisMalformedArtificialDocs() throws Exception {
+ logger.info("Creating the index ...");
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "text", "type=string,analyzer=whitespace", "date", "type=date"));
+ ensureGreen("test");
+
+ logger.info("Creating an index with a single document ...");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("text", "Hello World!")
+ .field("date", "2009-01-01")
+ .endObject()));
+
+ logger.info("Checking with a malformed field value ...");
+ XContentBuilder malformedFieldDoc = jsonBuilder()
+ .startObject()
+ .field("text", "Hello World!")
+ .field("date", "this is not a date!")
+ .endObject();
+ MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery()
+ .docs((Item) new Item().doc(malformedFieldDoc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .minimumShouldMatch("0%");
+ SearchResponse response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 0);
+
+ logger.info("Checking with an empty document ...");
+ XContentBuilder emptyDoc = jsonBuilder().startObject().endObject();
+ mltQuery = moreLikeThisQuery()
+ .docs((Item) new Item().doc(emptyDoc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .minimumShouldMatch("0%");
+ response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 0);
+
+ logger.info("Checking when document is malformed ...");
+ XContentBuilder malformedDoc = jsonBuilder().startObject();
+ mltQuery = moreLikeThisQuery()
+ .docs((Item) new Item().doc(malformedDoc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .minimumShouldMatch("0%");
+ response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 0);
+
+ logger.info("Checking the document matches otherwise ...");
+ XContentBuilder normalDoc = jsonBuilder()
+ .startObject()
+ .field("text", "Hello World!")
+ .field("date", "1000-01-01") // should be properly parsed but ignored ...
+ .endObject();
+ mltQuery = moreLikeThisQuery()
+ .docs((Item) new Item().doc(normalDoc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .minimumShouldMatch("100%"); // strict all terms must match but date is ignored
+ response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 1);
+ }
+
+ @Test
+ public void testMoreLikeThisIgnoreLike() throws ExecutionException, InterruptedException, IOException {
+ createIndex("test");
+ ensureGreen();
+ int numFields = randomIntBetween(5, 10);
+
+ logger.info("Create a document that has all the fields.");
+ XContentBuilder doc = jsonBuilder().startObject();
+ for (int i = 0; i < numFields; i++) {
+ doc.field("field"+i, i+"");
+ }
+ doc.endObject();
+
+ logger.info("Indexing each field value of this document as a single document.");
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < numFields; i++) {
+ builders.add(client().prepareIndex("test", "type1", i+"").setSource("field"+i, i+""));
+ }
+ indexRandom(true, builders);
+
+ logger.info("First check the document matches all indexed docs.");
+ MoreLikeThisQueryBuilder mltQuery = moreLikeThisQuery("field0")
+ .like((Item) new Item().doc(doc).index("test").type("type1"))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .maxQueryTerms(100)
+ .minimumShouldMatch("0%");
+ SearchResponse response = client().prepareSearch("test").setTypes("type1")
+ .setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, numFields);
+
+ logger.info("Now check like this doc, but ignore one doc in the index, then two and so on...");
+ List<Item> docs = new ArrayList<>();
+ for (int i = 0; i < numFields; i++) {
+ docs.add(new Item("test", "type1", i+""));
+ mltQuery = moreLikeThisQuery()
+ .like((Item) new Item().doc(doc).index("test").type("type1"))
+ .ignoreLike(docs.toArray(Item.EMPTY_ARRAY))
+ .minTermFreq(0)
+ .minDocFreq(0)
+ .maxQueryTerms(100)
+ .minimumShouldMatch("0%");
+ response = client().prepareSearch("test").setTypes("type1").setQuery(mltQuery).get();
+ assertSearchResponse(response);
+ assertHitCount(response, numFields - (i + 1));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/morelikethis/items.json b/core/src/test/java/org/elasticsearch/search/morelikethis/items.json
new file mode 100644
index 0000000000..dc56fc3841
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/morelikethis/items.json
@@ -0,0 +1,25 @@
+{
+ "docs" : [
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "1",
+ "_source" : false
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "2",
+ "_source" : ["field3", "field4"]
+ },
+ {
+ "_index" : "test",
+ "_type" : "type",
+ "_id" : "3",
+ "_source" : {
+ "include": ["user"],
+ "exclude": ["user.location"]
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java b/core/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
new file mode 100644
index 0000000000..dc3b1dfe66
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/msearch/SimpleMultiSearchTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.msearch;
+
+import org.elasticsearch.action.search.MultiSearchResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class SimpleMultiSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleMultiSearch() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type", "1").setSource("field", "xxx").execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", "yyy").execute().actionGet();
+ refresh();
+ MultiSearchResponse response = client().prepareMultiSearch()
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "xxx")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field", "yyy")))
+ .add(client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()))
+ .execute().actionGet();
+
+ for (MultiSearchResponse.Item item : response) {
+ assertNoFailures(item.getResponse());
+ }
+ assertThat(response.getResponses().length, equalTo(3));
+ assertHitCount(response.getResponses()[0].getResponse(), 1l);
+ assertHitCount(response.getResponses()[1].getResponse(), 1l);
+ assertHitCount(response.getResponses()[2].getResponse(), 2l);
+ assertFirstHit(response.getResponses()[0].getResponse(), hasId("1"));
+ assertFirstHit(response.getResponses()[1].getResponse(), hasId("2"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
new file mode 100644
index 0000000000..bdfac26722
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.preference;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
+
+ @Test // see #2896
+ public void testStopOneNodePreferenceWithRedState() throws InterruptedException, IOException {
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder().put("index.number_of_shards", cluster().numDataNodes()+2).put("index.number_of_replicas", 0)));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", ""+i).setSource("field1", "value1").execute().actionGet();
+ }
+ refresh();
+ internalCluster().stopRandomDataNode();
+ client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).execute().actionGet();
+ String[] preferences = new String[] {"_primary", "_local", "_primary_first", "_prefer_node:somenode", "_prefer_node:server2"};
+ for (String pref : preferences) {
+ SearchResponse searchResponse = client().prepareSearch().setSize(0).setPreference(pref).execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ searchResponse = client().prepareSearch().setPreference(pref).execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat(pref, searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ }
+
+ //_only_local is a stricter preference, we need to send the request to a data node
+ SearchResponse searchResponse = dataNodeClient().prepareSearch().setSize(0).setPreference("_only_local").execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ searchResponse = dataNodeClient().prepareSearch().setPreference("_only_local").execute().actionGet();
+ assertThat(RestStatus.OK, equalTo(searchResponse.status()));
+ assertThat("_only_local", searchResponse.getFailedShards(), greaterThanOrEqualTo(0));
+ }
+
+ @Test
+ public void noPreferenceRandom() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(
+ //this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
+ settingsBuilder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))
+ ));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ refresh();
+
+ final Client client = internalCluster().smartClient();
+ SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String firstNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+ searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
+ String secondNodeId = searchResponse.getHits().getAt(0).shard().nodeId();
+
+ assertThat(firstNodeId, not(equalTo(secondNodeId)));
+ }
+
+ @Test
+ public void simplePreferenceTests() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_local").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("_primary").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPreference("1234").execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
+ }
+
+ @Test (expected = IllegalArgumentException.class)
+ public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareSearch().setQuery(matchAllQuery()).setPreference("_only_node:DOES-NOT-EXIST").execute().actionGet();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingTests.java b/core/src/test/java/org/elasticsearch/search/query/ExistsMissingTests.java
new file mode 100644
index 0000000000..a33e7c44a9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/query/ExistsMissingTests.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.action.explain.ExplainResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+public class ExistsMissingTests extends ElasticsearchIntegrationTest {
+
+ public void testExistsMissing() throws Exception {
+ XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent)
+ .startObject()
+ .startObject("type")
+ .startObject(FieldNamesFieldMapper.NAME)
+ .field("enabled", randomBoolean())
+ .endObject()
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .endObject()
+ .startObject("bar")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string")
+ .endObject()
+ .startObject("bar")
+ .field("type", "object")
+ .startObject("properties")
+ .startObject("bar")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("baz")
+ .field("type", "long")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+
+ assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", mapping));
+ @SuppressWarnings("unchecked")
+ final Map<String, Object>[] sources = new Map[] {
+ // simple property
+ ImmutableMap.of("foo", "bar"),
+ // object fields
+ ImmutableMap.of("bar", ImmutableMap.of("foo", "bar", "bar", ImmutableMap.of("bar", "foo"))),
+ ImmutableMap.of("bar", ImmutableMap.of("baz", 42)),
+ // empty doc
+ ImmutableMap.of()
+ };
+ List<IndexRequestBuilder> reqs = new ArrayList<IndexRequestBuilder>();
+ for (Map<String, Object> source : sources) {
+ reqs.add(client().prepareIndex("idx", "type").setSource(source));
+ }
+ // We do NOT index dummy documents, otherwise the type for these dummy documents
+ // would have _field_names indexed while the current type might not which might
+ // confuse the exists/missing parser at query time
+ indexRandom(true, false, reqs);
+
+ final Map<String, Integer> expected = new LinkedHashMap<String, Integer>();
+ expected.put("foo", 1);
+ expected.put("f*", 1);
+ expected.put("bar", 2);
+ expected.put("bar.*", 2);
+ expected.put("bar.foo", 1);
+ expected.put("bar.bar", 1);
+ expected.put("bar.bar.bar", 1);
+ expected.put("foobar", 0);
+
+ ensureYellow("idx");
+ final long numDocs = sources.length;
+ SearchResponse allDocs = client().prepareSearch("idx").setSize(sources.length).get();
+ assertSearchResponse(allDocs);
+ assertHitCount(allDocs, numDocs);
+ for (Map.Entry<String, Integer> entry : expected.entrySet()) {
+ final String fieldName = entry.getKey();
+ final int count = entry.getValue();
+ // exists
+ SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.existsQuery(fieldName)).execute().actionGet();
+ assertSearchResponse(resp);
+ try {
+ assertEquals(String.format(Locale.ROOT, "exists(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), count, resp.getHits().totalHits());
+ } catch (AssertionError e) {
+ for (SearchHit searchHit : allDocs.getHits()) {
+ final String index = searchHit.getIndex();
+ final String type = searchHit.getType();
+ final String id = searchHit.getId();
+ final ExplainResponse explanation = client().prepareExplain(index, type, id).setQuery(QueryBuilders.existsQuery(fieldName)).get();
+ logger.info("Explanation for [{}] / [{}] / [{}]: [{}]", fieldName, id, searchHit.getSourceAsString(), explanation.getExplanation());
+ }
+ throw e;
+ }
+
+ // missing
+ resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet();
+ assertSearchResponse(resp);
+ assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits());
+ }
+ }
+
+ public void testNullValueUnset() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed"));
+ indexRandom(true,
+ client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
+ client().prepareIndex("idx", "type", "2").setSource("f", null),
+ client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
+ client().prepareIndex("idx", "type", "4").setSource("f", "bar"));
+
+ SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(true).nullValue(true)).get();
+ assertSearchHits(resp, "2", "3");
+
+ resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(true).nullValue(false)).get();
+ assertSearchHits(resp, "2", "3");
+
+ resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(false).nullValue(true)).get();
+ assertSearchHits(resp);
+
+ try {
+ client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(false).nullValue(false)).get();
+ fail("both existence and null_value can't be false");
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+ }
+
+ public void testNullValueSet() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar"));
+ indexRandom(true,
+ client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
+ client().prepareIndex("idx", "type", "2").setSource("f", null),
+ client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
+ client().prepareIndex("idx", "type", "4").setSource("f", "bar"));
+
+ SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(true).nullValue(true)).get();
+ assertSearchHits(resp, "2", "3", "4");
+
+ resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(true).nullValue(false)).get();
+ assertSearchHits(resp, "3");
+
+ resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(false).nullValue(true)).get();
+ assertSearchHits(resp, "2", "4");
+
+ try {
+ client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f").existence(false).nullValue(false)).get();
+ fail("both existence and null_value can't be false");
+ } catch (SearchPhaseExecutionException e) {
+ // expected
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
new file mode 100644
index 0000000000..d0131cffde
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/query/MultiMatchQueryTests.java
@@ -0,0 +1,618 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.query;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.MultiMatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+public class MultiMatchQueryTests extends ElasticsearchIntegrationTest {
+
+ @Before
+ public void init() throws Exception {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.perfect_match.type", "custom")
+ .put("index.analysis.analyzer.perfect_match.tokenizer", "keyword")
+ .put("index.analysis.analyzer.perfect_match.filter", "lowercase")
+ .put("index.analysis.analyzer.category.type", "custom")
+ .put("index.analysis.analyzer.category.tokenizer", "whitespace")
+ .put("index.analysis.analyzer.category.filter", "lowercase")
+ );
+ assertAcked(builder.addMapping("test", createMapping()));
+ ensureGreen();
+ int numDocs = scaledRandomIntBetween(50, 100);
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ builders.add(client().prepareIndex("test", "test", "theone").setSource(
+ "full_name", "Captain America",
+ "first_name", "Captain",
+ "last_name", "America",
+ "category", "marvel hero",
+ "skill", 15,
+ "int-field", 25));
+ builders.add(client().prepareIndex("test", "test", "theother").setSource(
+ "full_name", "marvel hero",
+ "first_name", "marvel",
+ "last_name", "hero",
+ "category", "bogus",
+ "skill", 5));
+
+ builders.add(client().prepareIndex("test", "test", "ultimate1").setSource(
+ "full_name", "Alpha the Ultimate Mutant",
+ "first_name", "Alpha the",
+ "last_name", "Ultimate Mutant",
+ "category", "marvel hero",
+ "skill", 1));
+ builders.add(client().prepareIndex("test", "test", "ultimate2").setSource(
+ "full_name", "Man the Ultimate Ninja",
+ "first_name", "Man the Ultimate",
+ "last_name", "Ninja",
+ "category", "marvel hero",
+ "skill", 3));
+
+ builders.add(client().prepareIndex("test", "test", "anotherhero").setSource(
+ "full_name", "ultimate",
+ "first_name", "wolferine",
+ "last_name", "",
+ "category", "marvel hero",
+ "skill", 1));
+ List<String> firstNames = new ArrayList<>();
+ fill(firstNames, "Captain", between(15, 25));
+ fill(firstNames, "Ultimate", between(5, 10));
+ fillRandom(firstNames, between(3, 7));
+ List<String> lastNames = new ArrayList<>();
+ fill(lastNames, "Captain", between(3, 7));
+ fillRandom(lastNames, between(30, 40));
+ for (int i = 0; i < numDocs; i++) {
+ String first = RandomPicks.randomFrom(getRandom(), firstNames);
+ String last = randomPickExcept(lastNames, first);
+ builders.add(client().prepareIndex("test", "test", "" + i).setSource(
+ "full_name", first + " " + last,
+ "first_name", first,
+ "last_name", last,
+ "category", randomBoolean() ? "marvel hero" : "bogus",
+ "skill", between(1, 3)));
+ }
+ indexRandom(true, false, builders);
+ }
+
+ private XContentBuilder createMapping() throws IOException {
+ return XContentFactory.jsonBuilder().startObject().startObject("test")
+ .startObject("properties")
+ .startObject("full_name")
+ .field("type", "string")
+ .field("copy_to", "full_name_phrase")
+ .field("analyzer", "perfect_match")
+ .endObject()
+ .startObject("category")
+ .field("type", "string")
+ .field("analyzer", "category")
+ .endObject()
+ .startObject("first_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "first_name_phrase")
+ .endObject()
+ .startObject("last_name")
+ .field("type", "string")
+ .field("omit_norms", "true")
+ .field("copy_to", "last_name_phrase")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ }
+
+ @Test
+ public void testDefaults() throws ExecutionException, InterruptedException {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+ // very likely that we hit a random doc that has the same score so orders are random since
+ // the doc id is the tie-breaker
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), equalTo(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type))).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).type(type))).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).type(type))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+ }
+
+ @Test
+ public void testPhraseType() {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("Man the Ultimate", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE))).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("Captain", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE))).get();
+ assertThat(searchResponse.getHits().getTotalHits(), greaterThan(1l));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("the Ul", "full_name_phrase", "first_name_phrase", "last_name_phrase", "category_phrase")
+ .operator(MatchQueryBuilder.Operator.OR).type(MatchQueryBuilder.Type.PHRASE_PREFIX))).get();
+ assertSearchHits(searchResponse, "ultimate2", "ultimate1");
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void testSingleField() throws NoSuchFieldException, IllegalAccessException {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("15", "skill"))).get();
+ assertNoFailures(searchResponse);
+ assertFirstHit(searchResponse, hasId("theone"));
+ String[] fields = {"full_name", "first_name", "last_name", "last_name_phrase", "first_name_phrase", "category_phrase", "category"};
+
+ String[] query = {"marvel","hero", "captain", "america", "15", "17", "1", "5", "ultimate", "Man",
+ "marvel", "wolferine", "ninja"};
+
+ // check if it's equivalent to a match query.
+ int numIters = scaledRandomIntBetween(10, 100);
+ for (int i = 0; i < numIters; i++) {
+ String field = RandomPicks.randomFrom(getRandom(), fields);
+ int numTerms = randomIntBetween(1, query.length);
+ StringBuilder builder = new StringBuilder();
+ for (int j = 0; j < numTerms; j++) {
+ builder.append(RandomPicks.randomFrom(getRandom(), query)).append(" ");
+ }
+ MultiMatchQueryBuilder multiMatchQueryBuilder = randomizeType(multiMatchQuery(builder.toString(), field));
+ SearchResponse multiMatchResp = client().prepareSearch("test")
+ // _uid sort field is a tie, in case hits have the same score,
+ // the hits will be sorted the same consistently
+ .addSort("_score", SortOrder.DESC)
+ .addSort("_uid", SortOrder.ASC)
+ .setQuery(multiMatchQueryBuilder).get();
+ MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery(field, builder.toString());
+ if (getType(multiMatchQueryBuilder) != null) {
+ matchQueryBuilder.type(MatchQueryBuilder.Type.valueOf(getType(multiMatchQueryBuilder).matchQueryType().toString()));
+ }
+ SearchResponse matchResp = client().prepareSearch("test")
+ // _uid tie sort
+ .addSort("_score", SortOrder.DESC)
+ .addSort("_uid", SortOrder.ASC)
+ .setQuery(matchQueryBuilder).get();
+ assertThat("field: " + field + " query: " + builder.toString(), multiMatchResp.getHits().getTotalHits(), equalTo(matchResp.getHits().getTotalHits()));
+ SearchHits hits = multiMatchResp.getHits();
+ for (int j = 0; j < hits.hits().length; j++) {
+ assertThat(hits.getHits()[j].score(), equalTo(matchResp.getHits().getHits()[j].score()));
+ assertThat(hits.getHits()[j].getId(), equalTo(matchResp.getHits().getHits()[j].getId()));
+ }
+ }
+
+ }
+
+ @Test
+ public void testCutoffFreq() throws ExecutionException, InterruptedException {
+ final long numDocs = client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ Float cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency))).get();
+ Set<String> topNIds = Sets.newHashSet("theone", "theother");
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ topNIds.remove(searchResponse.getHits().getAt(i).getId());
+ // very likely that we hit a random doc that has the same score so orders are random since
+ // the doc id is the tie-breaker
+ }
+ assertThat(topNIds, empty());
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThanOrEqualTo(searchResponse.getHits().hits()[1].getScore()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).cutoffFrequency(cutoffFrequency).type(type))).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+ long size = searchResponse.getHits().getTotalHits();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).useDisMax(false).type(type))).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theone"), hasId("theother")));
+ assertThat("common terms expected to be a way smaller result set", size, lessThan(searchResponse.getHits().getTotalHits()));
+
+ cutoffFrequency = randomBoolean() ? Math.min(1, numDocs * 1.f / between(10, 20)) : 1.f / between(10, 20);
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.OR).cutoffFrequency(cutoffFrequency).type(type))).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency).type(type))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero", "first_name", "last_name", "category")
+ .operator(MatchQueryBuilder.Operator.AND).cutoffFrequency(cutoffFrequency)
+ .analyzer("category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theother"));
+ }
+
+
+ @Test
+ public void testEquivalence() {
+
+ final int numDocs = (int) client().prepareCount("test")
+ .setQuery(matchAllQuery()).get().getCount();
+ int numIters = scaledRandomIntBetween(5, 10);
+ for (int i = 0; i < numIters; i++) {
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category") :
+ multiMatchQuery("marvel hero captain america", "*_name", randomBoolean() ? "category" : "categ*");
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(randomizeType(multiMatchQueryBuilder
+ .operator(MatchQueryBuilder.Operator.OR).type(type))).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(disMaxQuery().
+ add(matchQuery("full_name", "marvel hero captain america"))
+ .add(matchQuery("first_name", "marvel hero captain america"))
+ .add(matchQuery("last_name", "marvel hero captain america"))
+ .add(matchQuery("category", "marvel hero captain america"))
+ ).get();
+ assertEquivalent("marvel hero captain america", left, right);
+ }
+
+ {
+ MatchQueryBuilder.Type type = randomBoolean() ? null : MatchQueryBuilder.Type.BOOLEAN;
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ MultiMatchQueryBuilder multiMatchQueryBuilder = randomBoolean() ? multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category") :
+ multiMatchQuery("captain america", "*_name", randomBoolean() ? "category" : "categ*");
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(randomizeType(multiMatchQueryBuilder
+ .operator(op).useDisMax(false).minimumShouldMatch(minShouldMatch).type(type))).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(randomBoolean() ? termQuery("full_name", "captain america") : matchQuery("full_name", "captain america").operator(op))
+ .should(matchQuery("first_name", "captain america").operator(op))
+ .should(matchQuery("last_name", "captain america").operator(op))
+ .should(matchQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(randomizeType(multiMatchQuery("capta", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE_PREFIX).useDisMax(false).minimumShouldMatch(minShouldMatch))).get();
+
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhrasePrefixQuery("full_name", "capta"))
+ .should(matchPhrasePrefixQuery("first_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("last_name", "capta").operator(op))
+ .should(matchPhrasePrefixQuery("category", "capta").operator(op))
+ ).get();
+ assertEquivalent("capta", left, right);
+ }
+ {
+ String minShouldMatch = randomBoolean() ? null : "" + between(0, 1);
+ MatchQueryBuilder.Operator op = randomBoolean() ? MatchQueryBuilder.Operator.AND : MatchQueryBuilder.Operator.OR;
+ SearchResponse left;
+ if (randomBoolean()) {
+ left = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE).useDisMax(false).minimumShouldMatch(minShouldMatch))).get();
+ } else {
+ left = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .type(MatchQueryBuilder.Type.PHRASE).tieBreaker(1.0f).minimumShouldMatch(minShouldMatch))).get();
+ }
+ SearchResponse right = client().prepareSearch("test").setSize(numDocs)
+ .addSort(SortBuilders.scoreSort()).addSort(SortBuilders.fieldSort("_uid"))
+ .setQuery(boolQuery().minimumShouldMatch(minShouldMatch)
+ .should(matchPhraseQuery("full_name", "captain america"))
+ .should(matchPhraseQuery("first_name", "captain america").operator(op))
+ .should(matchPhraseQuery("last_name", "captain america").operator(op))
+ .should(matchPhraseQuery("category", "captain america").operator(op))
+ ).get();
+ assertEquivalent("captain america", left, right);
+ }
+ }
+ }
+
+ @Test
+ public void testCrossFieldMode() throws ExecutionException, InterruptedException {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero captain america", "full_name", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ assertFirstHit(searchResponse, hasId("theone"));
+ assertSecondHit(searchResponse, hasId("theother"));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("marvel hero", "full_name", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ assertFirstHit(searchResponse, hasId("theother"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america", "full_name", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america 15", "full_name", "first_name", "last_name", "category", "skill")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .analyzer("category")
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america 15", "first_name", "last_name", "skill")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .analyzer("category"))).get();
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("25 15", "int-field", "skill")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .analyzer("category"))).get();
+ assertFirstHit(searchResponse, hasId("theone"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .cutoffFrequency(0.1f)
+ .analyzer("category")
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ assertFirstHit(searchResponse, anyOf(hasId("theother"), hasId("theone")));
+ long numResults = searchResponse.getHits().totalHits();
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .analyzer("category")
+ .operator(MatchQueryBuilder.Operator.OR))).get();
+ assertThat(numResults, lessThan(searchResponse.getHits().getTotalHits()));
+ assertFirstHit(searchResponse, hasId("theone"));
+
+
+ // test group based on analyzer -- all fields are grouped into a cross field search
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .analyzer("category")
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("theone"));
+ // counter example
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
+ .type(randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : null)
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertHitCount(searchResponse, 0l);
+
+ // counter example
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("captain america marvel hero", "first_name", "last_name", "category")
+ .type(randomBoolean() ? MultiMatchQueryBuilder.Type.CROSS_FIELDS : null)
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertHitCount(searchResponse, 0l);
+
+ // test if boosts work
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name^2", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertFirstHit(searchResponse, hasId("ultimate1")); // has ultimate in the last_name and that is boosted
+ assertSecondHit(searchResponse, hasId("ultimate2"));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+
+ // since we try to treat the matching fields as one field scores are very similar but we have a small bias towards the
+ // more frequent field that acts as a tie-breaker internally
+ searchResponse = client().prepareSearch("test")
+ .setQuery(randomizeType(multiMatchQuery("the ultimate", "full_name", "first_name", "last_name", "category")
+ .type(MultiMatchQueryBuilder.Type.CROSS_FIELDS)
+ .operator(MatchQueryBuilder.Operator.AND))).get();
+ assertFirstHit(searchResponse, hasId("ultimate2"));
+ assertSecondHit(searchResponse, hasId("ultimate1"));
+ assertThat(searchResponse.getHits().hits()[0].getScore(), greaterThan(searchResponse.getHits().hits()[1].getScore()));
+ }
+
+ private static final void assertEquivalent(String query, SearchResponse left, SearchResponse right) {
+ assertNoFailures(left);
+ assertNoFailures(right);
+ SearchHits leftHits = left.getHits();
+ SearchHits rightHits = right.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query + " hit: " + i, (double) hits[i].getScore(), closeTo(rHits[i].getScore(), 0.00001d));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length - 1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query, hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+
+ public static List<String> fill(List<String> list, String value, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(value);
+ }
+ return list;
+ }
+
+ public List<String> fillRandom(List<String> list, int times) {
+ for (int i = 0; i < times; i++) {
+ list.add(randomRealisticUnicodeOfCodepointLengthBetween(1, 5));
+ }
+ return list;
+ }
+
+ public <T> T randomPickExcept(List<T> fromList, T butNot) {
+ while (true) {
+ T t = RandomPicks.randomFrom(getRandom(), fromList);
+ if (t.equals(butNot)) {
+ continue;
+ }
+ return t;
+ }
+ }
+
+ public MultiMatchQueryBuilder randomizeType(MultiMatchQueryBuilder builder) {
+ try {
+ MultiMatchQueryBuilder.Type type = getType(builder);
+ if (type == null && randomBoolean()) {
+ return builder;
+ }
+ if (type == null) {
+ type = MultiMatchQueryBuilder.Type.BEST_FIELDS;
+ }
+ if (randomBoolean()) {
+ builder.type(type);
+ } else {
+ Object oType = type;
+ switch (type) {
+ case BEST_FIELDS:
+ if (randomBoolean()) {
+ oType = MatchQueryBuilder.Type.BOOLEAN;
+ }
+ break;
+ case MOST_FIELDS:
+ if (randomBoolean()) {
+ oType = MatchQueryBuilder.Type.BOOLEAN;
+ }
+ break;
+ case CROSS_FIELDS:
+ break;
+ case PHRASE:
+ if (randomBoolean()) {
+ oType = MatchQueryBuilder.Type.PHRASE;
+ }
+ break;
+ case PHRASE_PREFIX:
+ if (randomBoolean()) {
+ oType = MatchQueryBuilder.Type.PHRASE_PREFIX;
+ }
+ break;
+ }
+ builder.type(oType);
+ }
+ return builder;
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ private MultiMatchQueryBuilder.Type getType(MultiMatchQueryBuilder builder) throws NoSuchFieldException, IllegalAccessException {
+ Field field = MultiMatchQueryBuilder.class.getDeclaredField("type");
+ field.setAccessible(true);
+ return (MultiMatchQueryBuilder.Type) field.get(builder);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java
new file mode 100644
index 0000000000..45e866717a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java
@@ -0,0 +1,2576 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.apache.lucene.util.English;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.MatchQueryBuilder.Type;
+import org.elasticsearch.index.query.MultiMatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.WrapperQueryBuilder;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.andQuery;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.commonTermsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery;
+import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery;
+import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.indicesQuery;
+import static org.elasticsearch.index.query.QueryBuilders.limitQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
+import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
+import static org.elasticsearch.index.query.QueryBuilders.notQuery;
+import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
+import static org.elasticsearch.index.query.QueryBuilders.spanMultiTermQueryBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery;
+import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery;
+import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery;
+import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termsLookupQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termsQuery;
+import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
+import static org.elasticsearch.index.query.QueryBuilders.wrapperQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;
+
+@Slow
+public class SearchQueryTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int maximumNumberOfShards() {
+ return 7;
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return Math.min(2, cluster().numDataNodes() - 1);
+ }
+
+ @Test
+ public void testOmitNormsOnAll() throws ExecutionException, InterruptedException, IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("omit_norms", true).endObject()
+ .endObject().endObject())
+ .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)); // only one shard otherwise IDF might be different for comparing scores
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).setExplain(true).get();
+ SearchHit[] hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
+ assertThat(hits[0].score(), allOf(equalTo(hits[1].getScore()), equalTo(hits[2].getScore())));
+ cluster().wipeIndices("test");
+
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+
+ assertHitCount(client().prepareSearch().setQuery(matchQuery("_all", "quick")).get(), 3l);
+ searchResponse = client().prepareSearch().setQuery(matchQuery("_all", "quick")).get();
+ hits = searchResponse.getHits().hits();
+ assertThat(hits.length, equalTo(3));
+ assertThat(hits[0].score(), allOf(greaterThan(hits[1].getScore()), greaterThan(hits[2].getScore())));
+
+ }
+ @Test // see #3952
+ public void testEmptyQueryString() throws ExecutionException, InterruptedException, IOException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumps"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick brown"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "quick"));
+ assertHitCount(client().prepareSearch().setQuery(queryStringQuery("quick")).get(), 3l);
+ assertHitCount(client().prepareSearch().setQuery(queryStringQuery("")).get(), 0l); // return no docs
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3177
+ public void testIssue3177() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3").get();
+ ensureGreen();
+ waitForRelocation();
+ optimize();
+ refresh();
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setPostFilter(
+ andQuery(
+ matchAllQuery(),
+ notQuery(andQuery(termQuery("field1", "value1"),
+ termQuery("field1", "value2"))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch()
+ .setQuery(
+ filteredQuery(
+ boolQuery().should(termQuery("field1", "value1")).should(termQuery("field1", "value2"))
+ .should(termQuery("field1", "value3")),
+ notQuery(andQuery(termQuery("field1", "value1"),
+ termQuery("field1", "value2"))))).get(),
+ 3l);
+ assertHitCount(
+ client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(notQuery(termQuery("field1", "value3"))).get(),
+ 2l);
+ }
+
+ @Test
+ public void passQueryAsStringTest() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery("{ \"term\" : { \"field1\" : \"value1_1\" }}").get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testIndexOptions() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,index_options=docs"));
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+
+ assertFailures(client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(Type.PHRASE).slop(0)),
+ RestStatus.INTERNAL_SERVER_ERROR,
+ containsString("field \"field1\" was indexed without position data; cannot run PhraseQuery"));
+ }
+
+ @Test // see #3521
+ public void testConstantScoreQuery() throws Exception {
+ Random random = getRandom();
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"), client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+ ensureYellow();
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get();
+ assertHitCount(searchResponse, 2l);
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat()))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+ client().prepareSearch("test").setQuery(constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + getRandom().nextFloat())).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ constantScoreQuery(boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery("field1", "quick")).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat()))))).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).score()));
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+
+ int num = scaledRandomIntBetween(100, 200);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[num];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "type", "" + i).setSource("f", English.intToEnglish(i));
+ }
+ createIndex("test_1");
+ indexRandom(true, builders);
+ ensureYellow();
+ int queryRounds = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < queryRounds; i++) {
+ MatchQueryBuilder matchQuery = matchQuery("f", English.intToEnglish(between(0, num)));
+ searchResponse = client().prepareSearch("test_1").setQuery(matchQuery).setSize(num).get();
+ long totalHits = searchResponse.getHits().totalHits();
+ SearchHits hits = searchResponse.getHits();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(1.0f));
+ }
+ searchResponse = client().prepareSearch("test_1").setQuery(
+ boolQuery().must(matchAllQuery()).must(
+ constantScoreQuery(matchQuery).boost(1.0f + (random.nextBoolean()? 0.0f : random.nextFloat())))).setSize(num).get();
+ hits = searchResponse.getHits();
+ assertThat(hits.totalHits(), equalTo(totalHits));
+ if (totalHits > 1) {
+ float expected = hits.getAt(0).score();
+ for (SearchHit searchHit : hits) {
+ assertSearchHit(searchHit, hasScore(expected));
+ }
+ }
+ }
+ }
+
+ @Test // see #3521
+ public void testAllDocsQueryString() throws InterruptedException, ExecutionException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("foo", "bar"),
+ client().prepareIndex("test", "type1", "2").setSource("foo", "bar")
+ );
+ int iters = scaledRandomIntBetween(100, 200);
+ for (int i = 0; i < iters; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("*:*^10.0").boost(10.0f)).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery().must(matchAllQuery()).must(constantScoreQuery(matchAllQuery()))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(Math.sqrt(2), 0.1));
+ assertThat((double)searchResponse.getHits().getAt(1).score(),closeTo(Math.sqrt(2), 0.1));
+ }
+ }
+
+ @Test
+ public void testCommonTermsQueryOnAllField() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "message", "type=string", "comment", "type=string,boost=5.0")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("message", "test message", "comment", "whatever"),
+ client().prepareIndex("test", "type1", "2").setSource("message", "hello world", "comment", "test comment"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("_all", "test")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThat(searchResponse.getHits().getHits()[0].getScore(), greaterThan(searchResponse.getHits().getHits()[1].getScore()));
+ }
+
+ @Test
+ public void testCommonTermsQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the huge fox").lowFreqMinimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("4")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery("{ \"common\" : { \"field1\" : { \"query\" : \"the lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 4 } } } }").get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // stop drops "the" since its a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with match query
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the quick brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // stop drops "the" since its a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with multi match query
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the quick brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThirdHit(searchResponse, hasId("2"));
+ }
+
+ @Test
+ public void testCommonTermsQueryStackedTokens() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS,1)
+ .put("index.analysis.filter.syns.type","synonym")
+ .putArray("index.analysis.filter.syns.synonyms","quick,fast")
+ .put("index.analysis.analyzer.syns.tokenizer","whitespace")
+ .put("index.analysis.analyzer.syns.filter","syns")
+ )
+ .addMapping("type1", "field1", "type=string,analyzer=syns", "field2", "type=string,analyzer=syns"));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1", "3").setSource("field1", "quick lazy huge brown pidgin", "field2", "the quick lazy huge brown fox jumps over the tree"),
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree") );
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast brown").cutoffFrequency(3).lowFreqOperator(Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast brown").cutoffFrequency(3).lowFreqOperator(Operator.AND)).get();
+ assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast brown").cutoffFrequency(3)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast huge fox").lowFreqMinimumShouldMatch("3")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("5")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast lazy fox brown").cutoffFrequency(1).highFreqMinimumShouldMatch("6")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery("{ \"common\" : { \"field1\" : { \"query\" : \"the fast lazy fox brown\", \"cutoff_frequency\" : 1, \"minimum_should_match\" : { \"high_freq\" : 6 } } } }").get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // Default
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the fast lazy fox brown").cutoffFrequency(1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(commonTermsQuery("field1", "the quick brown").cutoffFrequency(3).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // stop drops "the" since its a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ // try the same with match query
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the fast brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the fast brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the fast brown").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND).analyzer("stop")).get();
+ assertHitCount(searchResponse, 3l);
+ // stop drops "the" since its a stopword
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field1", "the fast brown").cutoffFrequency(3).minimumShouldMatch("3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // try the same with multi match query
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery("the fast brown", "field1", "field2").cutoffFrequency(3).operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 3l);
+ assertFirstHit(searchResponse, hasId("3")); // better score due to different query stats
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThirdHit(searchResponse, hasId("2"));
+ }
+
+ @Test
+ public void testOmitTermFreqsAndPositions() throws Exception {
+ cluster().wipeTemplates(); // no randomized template for this test -- we are testing bwc compat and set version explicitly this might cause failures if an unsupported feature
+ // is added randomly via an index template.
+ Version version = Version.CURRENT;
+ int iters = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < iters; i++) {
+ try {
+ // backwards compat test!
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,omit_term_freq_and_positions=true")
+ .setSettings(settings(version).put(SETTING_NUMBER_OF_SHARDS, 1)));
+ assertThat(version.onOrAfter(Version.V_1_0_0_RC2), equalTo(false));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "quick brown fox", "field2", "quick brown fox"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "quick lazy huge brown fox", "field2", "quick lazy huge brown fox"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ assertHitCount(searchResponse, 1l);
+ try {
+ client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue(e.toString().contains("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery"));
+ }
+ cluster().wipeIndices("test");
+ } catch (MapperParsingException ex) {
+ assertThat(version.toString(), version.onOrAfter(Version.V_1_0_0_RC2), equalTo(true));
+ assertThat(ex.getCause().getMessage(), equalTo("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'docs'] instead"));
+ }
+ version = randomVersion(random());
+ }
+ }
+
+ @Test
+ public void queryStringAnalyzedWildcard() throws Exception {
+ createIndex("test");
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("value*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue*").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("*ue_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("val*e_1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("v?l*e?1").analyzeWildcard(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testLowercaseExpandedTerms() {
+ createIndex("test");
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value_1", "field2", "value_2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("VALUE_3~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("ValUE_*").lowercaseExpandedTerms(true)).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("vAl*E_1")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]")).get();
+ assertHitCount(searchResponse, 1l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("[VALUE_1 TO VALUE_3]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3540
+ public void testDateRangeInQueryString() {
+ //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back
+ //as with dynamic mappings some shards might be lacking behind and parse a different query
+ assertAcked(prepareCreate("test").addMapping(
+ "type", "past", "type=date", "future", "type=date"
+ ));
+ ensureGreen();
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("future:[now/d TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 1l);
+
+ try {
+ client().prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lowercaseExpandedTerms(false)).get();
+ fail("expected SearchPhaseExecutionException (total failure)");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+ assertThat(e.toString(), containsString("unit [D] not supported for date math"));
+ }
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/7880
+ public void testDateRangeInQueryStringWithTimeZone_7880() {
+ //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back
+ //as with dynamic mappings some shards might be lacking behind and parse a different query
+ assertAcked(prepareCreate("test").addMapping(
+ "type", "past", "type=date"
+ ));
+ ensureGreen();
+
+ DateTimeZone timeZone = randomDateTimeZone();
+ String now = ISODateTimeFormat.dateTime().print(new DateTime(timeZone));
+ logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getID(), now);
+ client().prepareIndex("test", "type", "1").setSource("past", now).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]")
+ .timeZone(timeZone.getID())).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/10477
+ public void testDateRangeInQueryStringWithTimeZone_10477() {
+ //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back
+ //as with dynamic mappings some shards might be lacking behind and parse a different query
+ assertAcked(prepareCreate("test").addMapping(
+ "type", "past", "type=date"
+ ));
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("past", "2015-04-05T23:00:00+0000").get();
+ client().prepareIndex("test", "type", "2").setSource("past", "2015-04-06T00:00:00+0000").get();
+ refresh();
+
+ // Timezone set with dates
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ // Same timezone set with time_zone
+ searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200"))
+ .get();
+ assertHitCount(searchResponse, 2l);
+
+ // We set a timezone which will give no result
+ searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+
+ // Same timezone set with time_zone but another timezone is set directly within dates which has the precedence
+ searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200"))
+ .get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void typeFilterTypeIndexedTests() throws Exception {
+ typeFilterTests("not_analyzed");
+ }
+
+ @Test
+ public void typeFilterTypeNotIndexedTests() throws Exception {
+ typeFilterTests("no");
+ }
+
+ private void typeFilterTests(String index) throws Exception {
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ assertAcked(prepareCreate("test").setSettings(indexSettings)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject())
+ .addMapping("type2", jsonBuilder().startObject().startObject("type2")
+ .startObject("_type").field("index", index).endObject()
+ .endObject().endObject()));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value1"),
+ client().prepareIndex("test", "type2", "3").setSource("field1", "value1"));
+
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeQuery("type1"))).get(), 2l);
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), typeQuery("type2"))).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1").setQuery(matchAllQuery()).get(), 2l);
+ assertHitCount(client().prepareSearch().setTypes("type2").setQuery(matchAllQuery()).get(), 3l);
+
+ assertHitCount(client().prepareSearch().setTypes("type1", "type2").setQuery(matchAllQuery()).get(), 5l);
+ }
+
+ @Test
+ public void idsQueryTestsIdIndexed() throws Exception {
+ idsQueryTests("not_analyzed");
+ }
+
+ @Test
+ public void idsQueryTestsIdNotIndexed() throws Exception {
+ idsQueryTests("no");
+ }
+
+ private void idsQueryTests(String index) throws Exception {
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(indexSettings)
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1")
+ .startObject("_id").field("index", index).endObject()
+ .endObject().endObject()));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().ids("1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // no type
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "3")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("7", "10")).get();
+ assertHitCount(searchResponse, 0l);
+
+ // repeat..., with terms
+ searchResponse = client().prepareSearch().setTypes("type1").setQuery(constantScoreQuery(termsQuery("_id", "1", "3"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+ }
+
+ @Test
+ public void testLimitFilter() throws Exception {
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value1_2"),
+ client().prepareIndex("test", "type1", "3").setSource("field2", "value2_3"),
+ client().prepareIndex("test", "type1", "4").setSource("field3", "value3_4"));
+
+ assertHitCount(client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), limitQuery(2))).get(), 4l); // no-op
+ }
+
+ @Test
+ public void filterExistsMissingTests() throws Exception {
+ createIndex("test");
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x1", "x_1").field("field1", "value1_1").field("field2", "value2_1").endObject()),
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().startObject("obj1").field("obj1_val", "1").endObject().field("x2", "x_2").field("field1", "value1_2").endObject()),
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y1", "y_1").field("field2", "value2_3").endObject()),
+ client().prepareIndex("test", "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()) );
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(existsQuery("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("_exists_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("field2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("field3"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), existsQuery("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingQuery("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingQuery("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // wildcard check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingQuery("x*"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ // object check
+ searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), missingQuery("obj1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+ }
+
+ @Test
+ public void passQueryOrFilterAsJSONStringTest() throws Exception {
+ createIndex("test");
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1_1", "field2", "value2_1").setRefresh(true).get();
+
+ WrapperQueryBuilder wrapper = new WrapperQueryBuilder("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setQuery(wrapper).get(), 1l);
+
+ BoolQueryBuilder bool = boolQuery().must(wrapper).must(new TermQueryBuilder("field2", "value2_1"));
+ assertHitCount(client().prepareSearch().setQuery(bool).get(), 1l);
+
+ WrapperQueryBuilder wrapperFilter = wrapperQuery("{ \"term\" : { \"field1\" : \"value1_1\" } }");
+ assertHitCount(client().prepareSearch().setPostFilter(wrapperFilter).get(), 1l);
+ }
+
+ @Test
+ public void testFiltersWithCustomCacheKey() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsQuery("field1", "value1"))).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testMatchQueryNumeric() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1", "long", "type=long", "double", "type=double"));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("long", 1l, "double", 1.0d),
+ client().prepareIndex("test", "type1", "2").setSource("long", 2l, "double", 2.0d),
+ client().prepareIndex("test", "type1", "3").setSource("long", 3l, "double", 3.0d));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("long", "1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ searchResponse = client().prepareSearch().setQuery(matchQuery("double", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+ try {
+ client().prepareSearch().setQuery(matchQuery("double", "2 3 4")).get();
+ fail("SearchPhaseExecutionException should have been thrown");
+ } catch (SearchPhaseExecutionException ex) {
+ // number format exception
+ }
+ }
+
+ @Test
+ public void testMultiMatchQuery() throws Exception {
+ createIndex("test");
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value4", "field3", "value3"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2", "field2", "value5", "field3", "value2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value3", "field2", "value6", "field3", "value1") );
+
+ MultiMatchQueryBuilder builder = multiMatchQuery("value1 value2 value4", "field1", "field2");
+ SearchResponse searchResponse = client().prepareSearch().setQuery(builder)
+ .addAggregation(AggregationBuilders.terms("field1").field("field1")).get();
+
+ assertHitCount(searchResponse, 2l);
+ // this uses dismax so scores are equal and the order can be arbitrary
+ assertSearchHits(searchResponse, "1", "2");
+
+ builder.useDisMax(false);
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1", "field1", "field2")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch()
+ .setQuery(builder)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ refresh();
+ builder = multiMatchQuery("value1", "field1", "field3^1.5")
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ client().admin().indices().prepareRefresh("test").get();
+ builder = multiMatchQuery("value1").field("field1").field("field3", 1.5f)
+ .operator(MatchQueryBuilder.Operator.AND); // Operator only applies on terms inside a field! Fields are always OR-ed together.
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "1");
+
+ // Test lenient
+ client().prepareIndex("test", "type1", "3").setSource("field1", "value7", "field2", "value8", "field4", 5).get();
+ refresh();
+
+ builder = multiMatchQuery("value1", "field1", "field2", "field4");
+
+ assertFailures(client().prepareSearch().setQuery(builder),
+ RestStatus.BAD_REQUEST,
+ containsString("NumberFormatException[For input string: \"value1\"]"));
+
+ builder.lenient(true);
+ searchResponse = client().prepareSearch().setQuery(builder).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testMatchQueryZeroTermsQuery() {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value2").get();
+ refresh();
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE));
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(matchQuery("field1", "value1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(matchQuery("field1", "a").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ public void testMultiMatchQueryZeroTermsQuery() {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "field1", "type=string,analyzer=classic", "field2", "type=string,analyzer=classic"));
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2").get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "value3", "field2", "value4").get();
+ refresh();
+
+
+ BoolQueryBuilder boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE))
+ .must(multiMatchQuery("value1", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.NONE)); // Fields are ORed together
+ SearchResponse searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ boolQuery = boolQuery()
+ .must(multiMatchQuery("a", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL))
+ .must(multiMatchQuery("value4", "field1", "field2").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 1l);
+
+ boolQuery = boolQuery().must(multiMatchQuery("a", "field1").zeroTermsQuery(MatchQueryBuilder.ZeroTermsQuery.ALL));
+ searchResponse = client().prepareSearch().setQuery(boolQuery).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ // Verifies minimum_should_match percentages on multi_match in both dis_max and
+ // bool (useDisMax=false) modes: doc 1 holds two of the three query terms in field1,
+ // doc 2 holds one term in field2, so 70% requires >=2 matching terms and 30% >=1.
+ public void testMultiMatchQueryMinShouldMatch() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("field1", new String[]{"value1", "value2", "value3"}).get();
+ client().prepareIndex("test", "type1", "2").setSource("field2", "value1").get();
+ refresh();
+
+ MultiMatchQueryBuilder multiMatchQuery = multiMatchQuery("value1 value2 foo", "field1", "field2");
+
+ // dis_max mode: 70% of 3 terms -> only doc 1 (matches value1+value2) qualifies.
+ multiMatchQuery.useDisMax(true);
+ multiMatchQuery.minimumShouldMatch("70%");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(multiMatchQuery)
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ // 30% -> one matching term suffices; doc 1 still scores higher than doc 2.
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Same expectations with bool combination instead of dis_max.
+ multiMatchQuery.useDisMax(false);
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ multiMatchQuery.minimumShouldMatch("30%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+
+ // Single-field case: "bar" never matches, so 100% fails but 70% (2 of 3) hits doc 1.
+ multiMatchQuery = multiMatchQuery("value1 value2 bar", "field1");
+ multiMatchQuery.minimumShouldMatch("100%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 0l);
+
+ multiMatchQuery.minimumShouldMatch("70%");
+ searchResponse = client().prepareSearch().setQuery(multiMatchQuery).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ // Exercises fuzzy syntax (~) in query_string across field types: string edit-distance
+ // fuzziness, numeric fuzziness (11~1 spans 10..12), and date fuzziness with a unit (~1d).
+ public void testFuzzyQueryString() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ // "kimcy" is within edit distance 1 of "kimchy".
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("str:kimcy~1")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ // Numeric fuzziness: 11 +/- 1 includes 12 (doc 1) but not 20.
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:11~1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ // Date fuzziness: 2012-02-02 +/- 1 day includes 2012-02-01 (doc 1) only.
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("date:2012-02-02~1d")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ // Verifies per-field boosts are applied to a quoted (phrase) query_string: the doc
+ // whose phrase sits in the boosted "important" field must score ~boost times higher,
+ // in both dis_max (default) and bool (useDisMax=false) field combination modes.
+ // Single shard keeps scoring deterministic for the ratio assertion.
+ public void testQuotedQueryStringWithBoost() throws InterruptedException, ExecutionException {
+ float boost = 10.0f;
+ assertAcked(prepareCreate("test").setSettings(SETTING_NUMBER_OF_SHARDS, 1));
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("important", "phrase match", "less_important", "nothing important"),
+ client().prepareIndex("test", "type1", "2").setSource("important", "nothing important", "less_important", "phrase match")
+ );
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ // Score of the boosted-field hit should be roughly boost x the unboosted hit.
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("\"phrase match\"").field("important", boost).field("less_important").useDisMax(false)).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThat((double)searchResponse.getHits().getAt(0).score(), closeTo(boost * searchResponse.getHits().getAt(1).score(), .1));
+ }
+
+ @Test
+ // Verifies query_string shorthand range operators (>, >=, <, <=) on a numeric field,
+ // including combining two required range clauses (+num:>11 +num:<20) into an AND.
+ // Docs: num=12 (id 1) and num=20 (id 2).
+ public void testSpecialRangeSyntaxInQueryString() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("str", "kimchy", "date", "2012-02-01", "num", 12).get();
+ client().prepareIndex("test", "type1", "2").setSource("str", "shay", "date", "2012-02-05", "num", 20).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>19")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // Strictly greater: 20 itself is excluded.
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>20")).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>=20")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:>11")).get();
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("num:<=20")).get();
+ assertHitCount(searchResponse, 2l);
+
+ // Both clauses required: only num=12 falls in (11, 20).
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("+num:>11 +num:<20")).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ // A terms query with an empty value array and an ids query with no ids must both
+ // match nothing (rather than failing or matching everything).
+ // NOTE(review): method name has a casing typo ("Emptyterms"); left as-is since the
+ // test-method name is part of the suite's external surface.
+ public void testEmptytermsQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ // Empty terms array -> zero hits.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("term", new String[0]))).get();
+ assertHitCount(searchResponse, 0l);
+
+ // Empty ids query -> zero hits.
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), idsQuery())).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ // Exercises the terms query with execution("fielddata") across string, long and double
+ // fields, including cross-type value arrays (int[] against long/float/double mappings),
+ // partial matches (some terms absent), and all-absent terms yielding zero hits.
+ public void testFieldDatatermsQuery() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type", "str", "type=string", "lng", "type=long", "dbl", "type=double"));
+ ensureGreen();
+ indexRandom(true,
+ client().prepareIndex("test", "type", "1").setSource("str", "1", "lng", 1l, "dbl", 1.0d),
+ client().prepareIndex("test", "type", "2").setSource("str", "2", "lng", 2l, "dbl", 2.0d),
+ client().prepareIndex("test", "type", "3").setSource("str", "3", "lng", 3l, "dbl", 3.0d),
+ client().prepareIndex("test", "type", "4").setSource("str", "4", "lng", 4l, "dbl", 4.0d));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "1", "4").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "4");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[]{2, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "3");
+
+ // int[] values against a long-mapped field must still coerce and match.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new int[] {1, 3}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // float[] values against a double-mapped field.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new float[] {2, 4}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ // test partial matching
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "2", "5").execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {2, 5}).execution("fielddata"))).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ // test valid type, but no matching terms
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("str", "5", "6").execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("dbl", new double[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsQuery("lng", new long[] {5, 6}).execution("fielddata"))).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ // Verifies the terms-lookup filter: the terms to filter "test" docs by are fetched
+ // from a document in another index ("lookup" holds a flat string array, "lookup2"
+ // holds them nested under arr.term). Covers lookup by flat path, by _id field,
+ // repeated identical lookups, a lookup doc without the path (0 hits), nested-path
+ // lookups, and a nonexistent target field (0 hits).
+ public void testTermsLookupFilter() throws Exception {
+ assertAcked(prepareCreate("lookup").addMapping("type", "terms","type=string", "other", "type=string"));
+ assertAcked(prepareCreate("lookup2").addMapping("type",
+ jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("arr").startObject("properties").startObject("term").field("type", "string")
+ .endObject().endObject().endObject().endObject().endObject().endObject()));
+ assertAcked(prepareCreate("test").addMapping("type", "term", "type=string"));
+
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("lookup", "type", "1").setSource("terms", new String[]{"1", "3"}),
+ client().prepareIndex("lookup", "type", "2").setSource("terms", new String[]{"2"}),
+ client().prepareIndex("lookup", "type", "3").setSource("terms", new String[]{"2", "4"}),
+ client().prepareIndex("lookup", "type", "4").setSource("other", "value"),
+ client().prepareIndex("lookup2", "type", "1").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "1").endObject()
+ .startObject().field("term", "3").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "2").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("lookup2", "type", "3").setSource(XContentFactory.jsonBuilder().startObject()
+ .startArray("arr")
+ .startObject().field("term", "2").endObject()
+ .startObject().field("term", "4").endObject()
+ .endArray()
+ .endObject()),
+ client().prepareIndex("test", "type", "1").setSource("term", "1"),
+ client().prepareIndex("test", "type", "2").setSource("term", "2"),
+ client().prepareIndex("test", "type", "3").setSource("term", "3"),
+ client().prepareIndex("test", "type", "4").setSource("term", "4") );
+
+ // lookup/1 holds terms ["1","3"] -> filter matches test docs 1 and 3.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // same as above, just on the _id...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("_id").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ // another search with same parameters...
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup").lookupType("type").lookupId("1").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup").lookupType("type").lookupId("2").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup").lookupType("type").lookupId("3").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ // lookup/4 has no "terms" field -> empty term set -> zero hits.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup").lookupType("type").lookupId("4").lookupPath("terms"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+
+ // Nested lookup path: terms are extracted from arr[*].term in lookup2.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup2").lookupType("type").lookupId("1").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "3");
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup2").lookupType("type").lookupId("2").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("2"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("term").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "2", "4");
+
+ // Filtering on a field that does not exist in "test" -> zero hits.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(filteredQuery(matchAllQuery(), termsLookupQuery("not_exists").lookupIndex("lookup2").lookupType("type").lookupId("3").lookupPath("arr.term"))
+ ).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ // Verifies the ids query used as a filter (post_filter / constant_score wrapper):
+ // optional type restriction, multiple types, null type, and ids that do not exist
+ // being silently ignored. One doc each of type1 (id 1) and type2 (id 2).
+ public void testBasicFilterById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsQuery("type1").ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1", "type2").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ // No type restriction: matches the id across all types.
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setPostFilter(idsQuery().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().ids("1", "2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ // id 2 belongs to type2, so restricting to type1 drops it.
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1").ids("1", "2"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery().ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ // An explicit null type argument behaves like no type restriction.
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery(null).ids("1"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ // Unknown type "type3" and ids "3","4" are ignored; only the two real docs match.
+ searchResponse = client().prepareSearch().setQuery(constantScoreQuery(idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
+
+ @Test
+ // Same coverage as testBasicFilterById but with the ids query used directly as the
+ // search query (scored context) instead of as a filter.
+ public void testBasicQueryById() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
+ client().prepareIndex("test", "type2", "2").setSource("field1", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2").ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1", "2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+
+ // id 2 is of type2, so the type1 restriction leaves only doc 1.
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1").ids("1", "2")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ searchResponse = client().prepareSearch().setQuery(idsQuery().ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ // Null type argument behaves like no type restriction.
+ searchResponse = client().prepareSearch().setQuery(idsQuery(null).ids("1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+
+ // Unknown type and nonexistent ids are ignored.
+ searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "type3").ids("1", "2", "3", "4")).get();
+ assertHitCount(searchResponse, 2l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+ }
+
+ @Test
+ // Runs term and terms queries/filters against every numeric mapping type
+ // (byte/short/integer/long/float/double), deliberately passing int[]/double[] value
+ // arrays whose Java type differs from the field mapping to check value coercion.
+ // Three docs with all fields set to 1, 2 and 17 respectively.
+ public void testNumericTermsAndRanges() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("num_byte", 1, "num_short", 1, "num_integer", 1,
+ "num_long", 1, "num_float", 1, "num_double", 1).get();
+
+ client().prepareIndex("test", "type1", "2").setSource("num_byte", 2, "num_short", 2, "num_integer", 2,
+ "num_long", 2, "num_float", 2, "num_double", 2).get();
+
+ client().prepareIndex("test", "type1", "17").setSource("num_byte", 17, "num_short", 17, "num_integer", 17,
+ "num_long", 17, "num_float", 17, "num_double", 17).get();
+ refresh();
+
+ SearchResponse searchResponse;
+ logger.info("--> term query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_byte", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_short", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_integer", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_long", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_float", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termQuery("num_double", 1)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms query on 1");
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_byte", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_short", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_integer", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_long", new int[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_float", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(termsQuery("num_double", new double[]{1})).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> term filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_byte", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_short", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_integer", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_long", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_float", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termQuery("num_double", 1))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+
+ logger.info("--> terms filter on 1");
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_byte", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_short", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_integer", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_long", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_float", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ searchResponse = client().prepareSearch("test").setQuery(filteredQuery(matchAllQuery(), termsQuery("num_double", new int[]{1}))).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ // Regression test for issues #2826 and #2979: a bool post_filter with multiple
+ // should range clauses (and, for #2979, an additional must term clause) must combine
+ // correctly. num_long values 1..4 span both ranges, so the OR covers all four docs.
+ public void testNumericRangeFilter_2826() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ "num_byte", "type=byte", "num_short", "type=short",
+ "num_integer", "type=integer", "num_long", "type=long",
+ "num_float", "type=float", "num_double", "type=double"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "test1", "num_long", 1).get();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "test1", "num_long", 2).get();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "test2", "num_long", 3).get();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "test2", "num_long", 4).get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setPostFilter(
+ boolQuery()
+ .should(rangeQuery("num_long").from(1).to(2))
+ .should(rangeQuery("num_long").from(3).to(4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made 2826 fail! (only with bit based filters)
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolQuery()
+ .should(rangeQuery("num_long").from(1).to(2))
+ .should(rangeQuery("num_long").from(3).to(4))
+ ).get();
+ assertHitCount(searchResponse, 4l);
+
+ // This made #2979 fail!
+ searchResponse = client().prepareSearch("test").setPostFilter(
+ boolQuery()
+ .must(termQuery("field1", "test1"))
+ .should(rangeQuery("num_long").from(1).to(2))
+ .should(rangeQuery("num_long").from(3).to(4))
+ ).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ // An empty JSON object "{}" as the top-level post_filter must be treated as a
+ // no-op filter (all docs pass), not as an error.
+ public void testEmptyTopLevelFilter() {
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).get();
+ SearchResponse searchResponse = client().prepareSearch().setPostFilter("{}").get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test // see #2926
+ // Regression test for #2926: a bool must_not match query must exclude the correct
+ // docs under DFS_QUERY_THEN_FETCH. The issue only manifested with >= 2 shards,
+ // hence the explicit shard-count setting. Two of the four docs contain "anything".
+ public void testMustNot() throws IOException, ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test")
+ //issue manifested only with shards>=2
+ .setSettings(SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS)));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ // Sanity check: all four docs are visible.
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 4l);
+
+ // Excluding "anything" must leave docs 3 and 4.
+ searchResponse = client().prepareSearch("test").setQuery(
+ boolQuery()
+ .mustNot(matchQuery("description", "anything").type(Type.BOOLEAN))
+ ).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test // see #2994
+ // Regression test for #2994: basic span queries — span_or over a single span_term,
+ // and span_near with slop 3 requiring "foo" and "other" near each other (docs 1-3).
+ public void testSimpleSpan() throws IOException, ExecutionException, InterruptedException {
+ createIndex("test");
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar"),
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything"),
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other"),
+ client().prepareIndex("test", "test", "4").setSource("description", "foo"));
+
+ // Only doc 1 contains "bar".
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanTermQuery("description", "bar"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ // "foo" and "other" appear within slop 3 in docs 1, 2 and 3.
+ searchResponse = client().prepareSearch("test").setQuery(
+ spanNearQuery()
+ .clause(spanTermQuery("description", "foo"))
+ .clause(spanTermQuery("description", "other"))
+ .slop(3)).get();
+ assertHitCount(searchResponse, 3l);
+ }
+
+ @Test
+ // Verifies span_multi wrapping each multi-term query type (fuzzy, prefix, wildcard,
+ // range, regexp) inside a span_or. Docs 1-3 start with "foo"; doc 4 has "fop",
+ // which only fuzzy and prefix variants reach (4 hits vs 3).
+ public void testSpanMultiTermQuery() throws IOException {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "foo other anything bar", "count", 1).get();
+ client().prepareIndex("test", "test", "2").setSource("description", "foo other anything", "count", 2).get();
+ client().prepareIndex("test", "test", "3").setSource("description", "foo other", "count", 3).get();
+ client().prepareIndex("test", "test", "4").setSource("description", "fop", "count", 4).get();
+ refresh();
+
+ // Fuzzy "fop" matches both "fop" and "foo" -> all 4 docs.
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(fuzzyQuery("description", "fop")))).get();
+ assertHitCount(response, 4);
+
+ // Prefix "fo" matches "foo" and "fop" -> all 4 docs.
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(prefixQuery("description", "fo")))).get();
+ assertHitCount(response, 4);
+
+ // Wildcard "oth*" matches "other", which only docs 1-3 contain.
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(wildcardQuery("description", "oth*")))).get();
+ assertHitCount(response, 3);
+
+ // Term range [ffa, foo] includes "foo" but not "fop" -> docs 1-3.
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(QueryBuilders.rangeQuery("description").from("ffa").to("foo"))))
+ .execute().actionGet();
+ assertHitCount(response, 3);
+
+ // Regexp fo{2} matches exactly "foo" -> docs 1-3.
+ response = client().prepareSearch("test")
+ .setQuery(spanOrQuery().clause(spanMultiTermQueryBuilder(regexpQuery("description", "fo{2}")))).get();
+ assertHitCount(response, 3);
+ }
+
+ @Test
+ // Verifies span_not: excluding a span from a span_near include, with the default
+ // exclusion window, a symmetric dist(), and asymmetric pre()/post() windows.
+ // Also asserts that combining dist with pre/post is rejected with the expected error.
+ public void testSpanNot() throws IOException, ExecutionException, InterruptedException {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("description", "the quick brown fox jumped over the lazy dog").get();
+ client().prepareIndex("test", "test", "2").setSource("description", "the quick black fox leaped over the sleeping dog").get();
+ refresh();
+
+ // Excluding "brown" rejects doc 1's quick..fox span -> only doc 2 matches.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(spanNotQuery().include(spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "quick"))
+ .clause(QueryBuilders.spanTermQuery("description", "fox")).slop(1)).exclude(spanTermQuery("description", "brown"))).get();
+ assertHitCount(searchResponse, 1l);
+
+ // dist(5) extends the exclusion window symmetrically; "sleeping" knocks out doc 2.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(spanNotQuery().include(spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "quick"))
+ .clause(QueryBuilders.spanTermQuery("description", "fox")).slop(1)).exclude(spanTermQuery("description", "sleeping")).dist(5)).get();
+ assertHitCount(searchResponse, 1l);
+
+ // pre(1)/post(1) set asymmetric windows; "jumped" excludes doc 1.
+ searchResponse = client().prepareSearch("test")
+ .setQuery(spanNotQuery().include(spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "quick"))
+ .clause(QueryBuilders.spanTermQuery("description", "fox")).slop(1)).exclude(spanTermQuery("description", "jumped")).pre(1).post(1)).get();
+ assertHitCount(searchResponse, 1l);
+
+ // dist and pre/post are mutually exclusive -> expect a parse failure.
+ try {
+ client().prepareSearch("test")
+ .setQuery(spanNotQuery().include(spanNearQuery()
+ .clause(QueryBuilders.spanTermQuery("description", "quick"))
+ .clause(QueryBuilders.spanTermQuery("description", "fox")).slop(1)).exclude(spanTermQuery("description", "jumped")).dist(2).pre(2)
+ ).get();
+ fail("ElasticsearchIllegalArgumentException should have been caught");
+ } catch (ElasticsearchException e) {
+ assertThat("ElasticsearchIllegalArgumentException should have been caught", e.getDetailedMessage(), containsString("spanNot can either use [dist] or [pre] & [post] (or none)"));
+ }
+ }
+
+ @Test
+ // Smoke test for DFS_QUERY_THEN_FETCH with a nested bool query over two mapping
+ // types (routed "s" docs and unrouted "bs" docs), including _type term clauses,
+ // epoch_millis date ranges, versioning and explain — asserts only that the search
+ // completes without shard failures, not specific hits.
+ public void testSimpleDFSQuery() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("s", jsonBuilder()
+ .startObject()
+ .startObject("s")
+ .startObject("_routing")
+ .field("required", true)
+ .endObject()
+ .startObject("properties")
+ .startObject("online")
+ .field("type", "boolean")
+ .endObject()
+ .startObject("ts")
+ .field("type", "date")
+ .field("ignore_malformed", false)
+ .field("format", "epoch_millis")
+ .endObject()
+ .startObject("bs")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject())
+ .addMapping("bs", "online", "type=boolean", "ts", "type=date,ignore_malformed=false,format=epoch_millis"));
+ ensureGreen();
+
+ // Type "s" requires routing; type "bs" does not.
+ client().prepareIndex("test", "s", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "s", "2").setRouting("X").setSource("online", true, "bs", "X", "ts", System.currentTimeMillis() - 10000000).get();
+ client().prepareIndex("test", "bs", "3").setSource("online", false, "ts", System.currentTimeMillis() - 100).get();
+ client().prepareIndex("test", "bs", "4").setSource("online", true, "ts", System.currentTimeMillis() - 123123).get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(
+ boolQuery()
+ .must(termQuery("online", true))
+ .must(boolQuery()
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "bs"))
+ )
+ .should(boolQuery()
+ .must(rangeQuery("ts").lt(System.currentTimeMillis() - (15 * 1000)))
+ .must(termQuery("_type", "s"))
+ )
+ )
+ )
+ .setVersion(true)
+ .setFrom(0).setSize(100).setExplain(true).get();
+ // Only shard-level success is asserted; hit contents are not checked.
+ assertNoFailures(response);
+ }
+
+ @Test
+ public void testMultiFieldQueryString() {
+ client().prepareIndex("test", "s", "1").setSource("field1", "value1", "field2", "value2").setRefresh(true).get();
+ logger.info("regular");
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value1").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:value1")).get(), 1);
+ logger.info("prefix");
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:value*")).get(), 1);
+ logger.info("wildcard");
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("v?lue*").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:v?lue*")).get(), 1);
+ logger.info("fuzzy");
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("value~").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:value~")).get(), 1);
+ logger.info("regexp");
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("/value[01]/").field("field1").field("field2")).get(), 1);
+ assertHitCount(client().prepareSearch("test").setQuery(queryStringQuery("field\\*:/value[01]/")).get(), 1);
+ }
+
+ // see #3881 - for extensive description of the issue
+ @Test
+ public void testMatchQueryWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fast").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "quick brown").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test
+ public void testMatchQueryWithStackedStems() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "keyword_repeat", "porterStem", "unique_stem")
+ .put("index.analysis.filter.unique_stem.type", "unique")
+ .put("index.analysis.filter.unique_stem.only_on_same_position", true));
+ assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
+ ensureGreen();
+ client().prepareIndex("test", "test", "1").setSource("text", "the fox runs across the street").get();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "run fox run").get();
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchQuery("text", "fox runs").operator(MatchQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test
+ public void testQueryStringWithSynonyms() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.index.type", "custom")
+ .put("index.analysis.analyzer.index.tokenizer", "standard")
+ .put("index.analysis.analyzer.index.filter", "lowercase")
+ .put("index.analysis.analyzer.search.type", "custom")
+ .put("index.analysis.analyzer.search.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.search.filter", "lowercase", "synonym")
+ .put("index.analysis.filter.synonym.type", "synonym")
+ .putArray("index.analysis.filter.synonym.synonyms", "fast, quick"));
+ assertAcked(builder.addMapping("test", "text", "type=string,analyzer=index,search_analyzer=search"));
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("text", "quick brown fox").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("fast").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1);
+
+ client().prepareIndex("test", "test", "2").setSource("text", "fast brown fox").get();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("quick").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ searchResponse = client().prepareSearch("test").setQuery(queryStringQuery("quick brown").defaultField("text").defaultOperator(QueryStringQueryBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 2);
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3898
+ public void testCustomWordDelimiterQueryString() {
+ assertAcked(client().admin().indices().prepareCreate("test")
+ .setSettings("analysis.analyzer.my_analyzer.type", "custom",
+ "analysis.analyzer.my_analyzer.tokenizer", "whitespace",
+ "analysis.analyzer.my_analyzer.filter", "custom_word_delimiter",
+ "analysis.filter.custom_word_delimiter.type", "word_delimiter",
+ "analysis.filter.custom_word_delimiter.generate_word_parts", "true",
+ "analysis.filter.custom_word_delimiter.generate_number_parts", "false",
+ "analysis.filter.custom_word_delimiter.catenate_numbers", "true",
+ "analysis.filter.custom_word_delimiter.catenate_words", "false",
+ "analysis.filter.custom_word_delimiter.split_on_case_change", "false",
+ "analysis.filter.custom_word_delimiter.split_on_numerics", "false",
+ "analysis.filter.custom_word_delimiter.stem_english_possessive", "false")
+ .addMapping("type1", "field1", "type=string,analyzer=my_analyzer", "field2", "type=string,analyzer=my_analyzer"));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "foo bar baz", "field2", "not needed").get();
+ refresh();
+
+ SearchResponse response = client()
+ .prepareSearch("test")
+ .setQuery(
+ queryStringQuery("foo.baz").useDisMax(false).defaultOperator(QueryStringQueryBuilder.Operator.AND)
+ .field("field1").field("field2")).get();
+ assertHitCount(response, 1l);
+ }
+
+ @Test // see https://github.com/elasticsearch/elasticsearch/issues/3797
+ public void testMultiMatchLenientIssue3797() {
+ createIndex("test");
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", 123, "field2", "value2").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(false)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field1^2", "field2").lenient(true).useDisMax(true)).get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(multiMatchQuery("value2", "field2^2").lenient(true)).get();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void testIndicesQuery() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery(matchQuery("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ //default no match query is match_all
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setQuery(indicesQuery(matchQuery("text", "value1"), "index1")
+ .noMatchQuery("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testIndicesFilter() throws Exception {
+ createIndex("index1", "index2", "index3");
+ ensureGreen();
+
+ client().prepareIndex("index1", "type1").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("index2", "type2").setId("2").setSource("text", "value2").get();
+ client().prepareIndex("index3", "type3").setId("3").setSource("text", "value3").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
+ .noMatchQuery(termQuery("text", "value2"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+
+ //default no match filter is "all"
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
+ .noMatchQuery("all")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch("index1", "index2", "index3")
+ .setPostFilter(indicesQuery(termQuery("text", "value1"), "index1")
+ .noMatchQuery("none")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("1"));
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesQuerySkipParsing() throws Exception {
+ createIndex("simple");
+ assertAcked(prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1").get();
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent").get();
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2").get();
+ refresh();
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setQuery(hasChildQuery("child", matchQuery("text", "value"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ //has_child doesn't get parsed for "simple" index
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setQuery(indicesQuery(hasChildQuery("child", matchQuery("text", "value2")), "related")
+ .noMatchQuery(matchQuery("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
+ @Test // https://github.com/elasticsearch/elasticsearch/issues/2416
+ public void testIndicesFilterSkipParsing() throws Exception {
+ createIndex("simple");
+ assertAcked(prepareCreate("related")
+ .addMapping("child", jsonBuilder().startObject().startObject("child").startObject("_parent").field("type", "parent")
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("simple", "lone").setId("1").setSource("text", "value1"),
+ client().prepareIndex("related", "parent").setId("2").setSource("text", "parent"),
+ client().prepareIndex("related", "child").setId("3").setParent("2").setSource("text", "value2"));
+
+ //has_child fails if executed on "simple" index
+ try {
+ client().prepareSearch("simple")
+ .setPostFilter(hasChildQuery("child", termQuery("text", "value1"))).get();
+ fail("Should have failed as has_child query can only be executed against parent-child types");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.shardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.reason(), containsString("No mapping for for type [child]"));
+ }
+ }
+
+ SearchResponse searchResponse = client().prepareSearch("related", "simple")
+ .setPostFilter(indicesQuery(hasChildQuery("child", termQuery("text", "value2")), "related")
+ .noMatchQuery(termQuery("text", "value1"))).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "1", "2");
+ }
+
+ @Test
+ public void testIndicesQueryMissingIndices() throws IOException, ExecutionException, InterruptedException {
+ createIndex("index1");
+ createIndex("index2");
+ ensureGreen();
+ indexRandom(true,
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match"),
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match"),
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match"),
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match"),
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match"),
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match"));
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1", "test2", "test3")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ indicesQuery(termQuery("field", "missing"), "index1", "test1")
+ .noMatchQuery(termQuery("field", "match"))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
+ @Test
+ public void testIndicesFilterMissingIndices() throws IOException, ExecutionException, InterruptedException {
+ createIndex("index1");
+ createIndex("index2");
+ createIndex("index3");
+ ensureGreen();
+ indexRandom(true,
+ client().prepareIndex("index1", "type1", "1").setSource("field", "match"),
+ client().prepareIndex("index1", "type1", "2").setSource("field", "no_match"),
+ client().prepareIndex("index2", "type1", "10").setSource("field", "match"),
+ client().prepareIndex("index2", "type1", "20").setSource("field", "no_match"),
+ client().prepareIndex("index3", "type1", "100").setSource("field", "match"),
+ client().prepareIndex("index3", "type1", "200").setSource("field", "no_match"));
+
+ //all indices are missing
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesQuery(termQuery("field", "missing"), "test1", "test2", "test3")
+ .noMatchQuery(termQuery("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //only one index specified, which is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesQuery(termQuery("field", "missing"), "test1")
+ .noMatchQuery(termQuery("field", "match")))).get();
+
+ assertHitCount(searchResponse, 3l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index1".equals(hit.index())) {
+ assertThat(hit, hasId("1"));
+ } else if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index1, index2 or index3");
+ }
+ }
+
+ //more than one index specified, one of them is missing
+ searchResponse = client().prepareSearch().setQuery(
+ filteredQuery(matchAllQuery(),
+ indicesQuery(termQuery("field", "missing"), "index1", "test1")
+ .noMatchQuery(termQuery("field", "match")))).get();
+
+ assertHitCount(searchResponse, 2l);
+
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ if ("index2".equals(hit.index())) {
+ assertThat(hit, hasId("10"));
+ } else if ("index3".equals(hit.index())) {
+ assertThat(hit, hasId("100"));
+ } else {
+ fail("Returned documents should belong to either index2 or index3");
+ }
+ }
+ }
+
+ @Test
+ public void testMinScore() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ ensureGreen();
+ indexRandom(true,
+ client().prepareIndex("test", "test", "1").setSource("score", 1.5),
+ client().prepareIndex("test", "test", "2").setSource("score", 1.0),
+ client().prepareIndex("test", "test", "3").setSource("score", 2.0),
+ client().prepareIndex("test", "test", "4").setSource("score", 0.5));
+
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(
+functionScoreQuery(scriptFunction(new Script("_doc['score'].value")))).setMinScore(1.5f).get();
+ assertHitCount(searchResponse, 2);
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("1"));
+ }
+
+ @Test
+ public void testQueryStringWithSlopAndFields() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "customer", "1").setSource("desc", "one two three").get();
+ client().prepareIndex("test", "product", "2").setSource("desc", "one two three").get();
+ refresh();
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get();
+ assertHitCount(searchResponse, 2);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("product").setQuery(QueryBuilders.queryStringQuery("\"one two\"").field("desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("product").setQuery(QueryBuilders.queryStringQuery("\"one three\"~5").field("desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("customer").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ {
+ SearchResponse searchResponse = client().prepareSearch("test").setTypes("customer").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get();
+ assertHitCount(searchResponse, 1);
+ }
+ }
+
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11478")
+ @Test
+ public void testDateProvidedAsNumber() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ assertAcked(client().admin().indices().preparePutMapping("test").setType("type").setSource("field", "type=date").get());
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", -1000000000001L),
+ client().prepareIndex("test", "type", "2").setSource("field", -1000000000000L),
+ client().prepareIndex("test", "type", "3").setSource("field", -999999999999L));
+
+ assertHitCount(client().prepareCount("test").setQuery(rangeQuery("field").lte(-1000000000000L)).get(), 2);
+ assertHitCount(client().prepareCount("test").setQuery(rangeQuery("field").lte(-999999999999L)).get(), 3);
+ }
+
+ @Test
+ public void testRangeFilterWithTimeZone() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "date", "type=date", "num", "type=integer"));
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "num", 1),
+ client().prepareIndex("test", "type1", "2").setSource("date", "2013-12-31T23:00:00", "num", 2),
+ client().prepareIndex("test", "type1", "3").setSource("date", "2014-01-01T01:00:00", "num", 3),
+ // Now in UTC+1
+ client().prepareIndex("test", "type1", "4").setSource("date", DateTime.now(DateTimeZone.forOffsetHours(1)).getMillis(), "num", 4));
+
+
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // We define a time zone to be applied to the filter and from/to have no time zone
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00")))
+ .get();
+ fail("A Range Filter using ms since epoch with a TimeZone should raise a QueryParsingException");
+ } catch (SearchPhaseExecutionException e) {
+ // We expect it
+ }
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00")))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("4"));
+
+ // A Range Filter on a numeric field with a TimeZone should raise an exception
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00")))
+ .get();
+ fail("A Range Filter on a numeric field with a TimeZone should raise a QueryParsingException");
+ } catch (SearchPhaseExecutionException e) {
+ // We expect it
+ }
+ }
+
+ @Test
+ public void testRangeQueryWithTimeZone() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", "date", "type=date", "num", "type=integer"));
+ ensureGreen();
+
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("date", "2014-01-01", "num", 1),
+ client().prepareIndex("test", "type1", "2").setSource("date", "2013-12-31T23:00:00", "num", 2),
+ client().prepareIndex("test", "type1", "3").setSource("date", "2014-01-01T01:00:00", "num", 3),
+ // Now in UTC+1
+ client().prepareIndex("test", "type1", "4").setSource("date", DateTime.now(DateTimeZone.forOffsetHours(1)).getMillis(), "num", 4));
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00").to("2013-12-31T23:59:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00").to("2014-01-01T01:59:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // We explicitly define a time zone in the from/to dates so whatever the time zone is, it won't be used
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00Z").to("2014-01-01T00:59:00Z").timeZone("+10:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2013-12-31T23:00:00Z").to("2013-12-31T23:59:00Z").timeZone("+10:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T01:00:00Z").to("2014-01-01T01:59:00Z").timeZone("+10:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // We define a time zone to be applied to the filter and from/to have no time zone
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T03:00:00").to("2014-01-01T03:59:00").timeZone("+03:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("1"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T02:00:00").to("2014-01-01T02:59:00").timeZone("+03:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("2"));
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T04:00:00").to("2014-01-01T04:59:00").timeZone("+03:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00"))
+ .get();
+ fail("A Range Filter using ms since epoch with a TimeZone should raise a QueryParsingException");
+ } catch (SearchPhaseExecutionException e) {
+ // We expect it
+ }
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("3"));
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date").from("now/d-1d").timeZone("+01:00"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+ assertThat(searchResponse.getHits().getAt(0).getId(), is("4"));
+
+ // A Range Filter on a numeric field with a TimeZone should raise an exception
+ try {
+ client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("num").from("0").to("4").timeZone("-01:00"))
+ .get();
+ fail("A Range Filter on a numeric field with a TimeZone should raise a QueryParsingException");
+ } catch (SearchPhaseExecutionException e) {
+ // We expect it
+ }
+ }
+
+ @Test
+ public void testSearchEmptyDoc() {
+ assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}"));
+ client().prepareIndex("test", "type1", "1").setSource("{}").get();
+ refresh();
+ assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
+ }
+
+ @Test // see #5120
+ public void testNGramCopyField() {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.my_ngram_analyzer.type", "custom")
+ .put("index.analysis.analyzer.my_ngram_analyzer.tokenizer", "my_ngram_tokenizer")
+ .put("index.analysis.tokenizer.my_ngram_tokenizer.type", "nGram")
+ .put("index.analysis.tokenizer.my_ngram_tokenizer.min_gram", "1")
+ .put("index.analysis.tokenizer.my_ngram_tokenizer.max_gram", "10")
+ .putArray("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0]));
+ assertAcked(builder.addMapping("test", "origin", "type=string,copy_to=meta", "meta", "type=string,analyzer=my_ngram_analyzer"));
+ // we only have ngrams as the index analyzer so searches will get standard analyzer
+ ensureGreen();
+
+ client().prepareIndex("test", "test", "1").setSource("origin", "C.A1234.5678")
+ .setRefresh(true)
+ .get();
+
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("meta", "1234"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("meta", "1234.56"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("meta", "A1234"))
+ .get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(termQuery("meta", "a1234"))
+ .get();
+ assertHitCount(searchResponse, 0l); // it's upper case
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("meta", "A1234").analyzer("my_ngram_analyzer"))
+ .get(); // force ngram analyzer
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(matchQuery("meta", "a1234").analyzer("my_ngram_analyzer"))
+ .get(); // this one returns a hit since it's default operator is OR
+ assertHitCount(searchResponse, 1l);
+ }
+
+ public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedException {
+ createIndex("test1");
+ indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "Johnnie Walker Black Label"),
+ client().prepareIndex("test1", "type1", "2").setSource("field", "trying out Elasticsearch"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("field", "Johnnie la").slop(between(2,5)).type(Type.PHRASE_PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field", "trying").type(Type.PHRASE_PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "2");
+ searchResponse = client().prepareSearch().setQuery(matchQuery("field", "try").type(Type.PHRASE_PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "2");
+ }
+
+ @Test
+ public void testFilteredQuery() throws Exception {
+ Settings.Builder builder = Settings.settingsBuilder().put(indexSettings());
+ createIndex("test");
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ ensureGreen();
+ int iters = between(1, 100);
+ for (int i = 0; i < iters; i++) {
+ String intToEnglish = English.intToEnglish(between(0, numDocs - 1));
+ String query = intToEnglish.split(" ")[0];
+ String filter = intToEnglish.split(" ")[0];
+
+ SearchResponse one = client().prepareSearch()
+ .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.filteredQuery(QueryBuilders.termQuery("field1", query),
+ QueryBuilders.termQuery("field1", filter)))).setSize(numDocs).execute().actionGet();
+ SearchResponse other = client().prepareSearch()
+ .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.filteredQuery(QueryBuilders.termQuery("field1", filter),
+ QueryBuilders.termQuery("field1", query)))).setSize(numDocs).execute().actionGet();
+
+ Set<String> oneIds = new HashSet<>();
+ for (SearchHit hit : one.getHits().hits()) {
+ oneIds.add(hit.id());
+ }
+ Set<String> otherIds = new HashSet<>();
+ for (SearchHit hit : other.getHits().hits()) {
+ otherIds.add(hit.id());
+ }
+ assertThat(oneIds.size(), equalTo(otherIds.size()));
+ for (String id : oneIds) {
+ assertThat(otherIds.contains(id), is(true));
+ }
+ }
+ }
+
+ @Test // see #7365
+ public void testFilteredQueryWithoutQuery() throws Exception {
+ createIndex("test");
+ ensureYellow("test");
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "value1"));
+ SearchResponse response = client().prepareSearch()
+ .setQuery(QueryBuilders.filteredQuery(null,
+ QueryBuilders.termQuery("field1", "value1"))).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 1l);
+ }
+
+ @Test
+ public void testQueryStringParserCache() throws Exception {
+ createIndex("test");
+ indexRandom(true, false, client().prepareIndex("test", "type", "1").setSource("nameTokens", "xyz"));
+
+ SearchResponse response = client().prepareSearch("test")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100))
+ .get();
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+
+ float first = response.getHits().getAt(0).getScore();
+ for (int i = 0; i < 100; i++) {
+ response = client().prepareSearch("test")
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.queryStringQuery("xyz").boost(100))
+ .get();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ float actual = response.getHits().getAt(0).getScore();
+ assertThat(i + " expected: " + first + " actual: " + actual, Float.compare(first, actual), equalTo(0));
+ }
+ }
+
+ @Test // see #7686.
+ public void testIdsQueryWithInvalidValues() throws Exception {
+ createIndex("test");
+ indexRandom(true, false, client().prepareIndex("test", "type", "1").setSource("body", "foo"));
+ try {
+ client().prepareSearch("test")
+ .setTypes("type")
+ .setQuery("{\n" +
+ " \"ids\": {\n" +
+ " \"values\": [[\"1\"]]\n" +
+ " }\n" +
+ "}")
+ .get();
+ fail("query is invalid and should have produced a parse exception");
+ } catch (Exception e) {
+ assertThat("query could not be parsed due to bad format: " + e.toString(),
+ e.toString().contains("Illegal value for id, expecting a string or number, got: START_ARRAY"),
+ equalTo(true));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringTests.java b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringTests.java
new file mode 100644
index 0000000000..fc14abf27d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/query/SimpleQueryStringTests.java
@@ -0,0 +1,344 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.query;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.SimpleQueryStringBuilder;
+import org.elasticsearch.index.query.SimpleQueryStringFlag;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@code simple_query_string} query
+ */
+public class SimpleQueryStringTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleQueryString() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ indexRandom(true, false,
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo"),
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar"),
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar"),
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant"),
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti"),
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo bar").defaultOperator(SimpleQueryStringBuilder.Operator.AND)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("\"quux baz\" +(eggplant | spaghetti)")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "4", "5");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("eggplants").analyzer("snowball")).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("spaghetti").field("body", 1000.0f).field("otherbody", 2.0f).queryName("myquery")).get();
+ assertHitCount(searchResponse, 2l);
+ assertFirstHit(searchResponse, hasId("5"));
+ assertSearchHits(searchResponse, "5", "6");
+ assertThat(searchResponse.getHits().getAt(0).getMatchedQueries()[0], equalTo("myquery"));
+
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("spaghetti").field("*body")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "5", "6");
+
+ // Have to bypass the builder here because the builder always uses "fields" instead of "field"
+ searchResponse = client().prepareSearch().setQuery("{\"simple_query_string\": {\"query\": \"spaghetti\", \"field\": \"_all\"}}").get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "5", "6");
+ }
+
+ @Test
+ public void testSimpleQueryStringMinimumShouldMatch() throws Exception {
+ createIndex("test");
+ ensureGreen("test");
+ indexRandom(true, false,
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo"),
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar"),
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar"),
+ client().prepareIndex("test", "type1", "4").setSource("body", "foo baz bar"));
+
+
+ logger.info("--> query 1");
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ logger.info("--> query 2");
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ logger.info("--> query 3");
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body").field("body2").minimumShouldMatch("70%")).get();
+ assertHitCount(searchResponse, 2l);
+ assertSearchHits(searchResponse, "3", "4");
+
+ indexRandom(true, false,
+ client().prepareIndex("test", "type1", "5").setSource("body2", "foo", "other", "foo"),
+ client().prepareIndex("test", "type1", "6").setSource("body2", "bar", "other", "foo"),
+ client().prepareIndex("test", "type1", "7").setSource("body2", "foo bar", "other", "foo"),
+ client().prepareIndex("test", "type1", "8").setSource("body2", "foo baz bar", "other", "foo"));
+
+ logger.info("--> query 4");
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 4l);
+ assertSearchHits(searchResponse, "3", "4", "7", "8");
+
+ logger.info("--> query 5");
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get();
+ assertHitCount(searchResponse, 5l);
+ assertSearchHits(searchResponse, "3", "4", "6", "7", "8");
+
+ logger.info("--> query 6");
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body2").field("other").minimumShouldMatch("70%")).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "6", "7", "8");
+ }
+
+ @Test
+ public void testSimpleQueryStringLowercasing() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("body", "Professional").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("Professio*")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("Professio*").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("Professionan~1")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("Professionan~1").lowercaseExpandedTerms(false)).get();
+ assertHitCount(searchResponse, 0l);
+ }
+
+ @Test
+ public void testQueryStringLocale() {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("body", "bılly").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("BILL*")).get();
+ assertHitCount(searchResponse, 0l);
+ searchResponse = client().prepareSearch().setQuery(queryStringQuery("body:BILL*")).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("BILL*").locale(new Locale("tr", "TR"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ searchResponse = client().prepareSearch().setQuery(
+ queryStringQuery("body:BILL*").locale(new Locale("tr", "TR"))).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
+ @Test
+ public void testNestedFieldSimpleQueryString() throws IOException {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("body").field("type", "string")
+ .startObject("fields")
+ .startObject("sub").field("type", "string")
+ .endObject() // sub
+ .endObject() // fields
+ .endObject() // body
+ .endObject() // properties
+ .endObject() // type1
+ .endObject()));
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo bar baz").get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo bar baz").field("body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setTypes("type1").setQuery(
+ simpleQueryStringQuery("foo bar baz").field("body")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo bar baz").field("body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setTypes("type1").setQuery(
+ simpleQueryStringQuery("foo bar baz").field("body.sub")).get();
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
+ @Test
+ public void testSimpleQueryStringFlags() throws ExecutionException, InterruptedException {
+ createIndex("test");
+ indexRandom(true,
+ client().prepareIndex("test", "type1", "1").setSource("body", "foo"),
+ client().prepareIndex("test", "type1", "2").setSource("body", "bar"),
+ client().prepareIndex("test", "type1", "3").setSource("body", "foo bar"),
+ client().prepareIndex("test", "type1", "4").setSource("body", "quux baz eggplant"),
+ client().prepareIndex("test", "type1", "5").setSource("body", "quux baz spaghetti"),
+ client().prepareIndex("test", "type1", "6").setSource("otherbody", "spaghetti"));
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo bar").flags(SimpleQueryStringFlag.ALL)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ // Sending a negative 'flags' value is the same as SimpleQueryStringFlag.ALL
+ searchResponse = client().prepareSearch().setQuery("{\"simple_query_string\": {\"query\": \"foo bar\", \"flags\": -1}}").get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.OR)).get();
+ assertHitCount(searchResponse, 3l);
+ assertSearchHits(searchResponse, "1", "2", "3");
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("foo | bar")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.NONE)).get();
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch().setSource("{\n" +
+ " \"query\": {\n" +
+ " \"simple_query_string\": {\n" +
+ " \"query\": \"foo|bar\",\n" +
+ " \"default_operator\": \"AND\"," +
+ " \"flags\": \"NONE\"\n" +
+ " }\n" +
+ " }\n" +
+ "}").get();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(
+ simpleQueryStringQuery("baz | egg*")
+ .defaultOperator(SimpleQueryStringBuilder.Operator.AND)
+ .flags(SimpleQueryStringFlag.WHITESPACE, SimpleQueryStringFlag.PREFIX)).get();
+ assertHitCount(searchResponse, 1l);
+ assertFirstHit(searchResponse, hasId("4"));
+ }
+
+ @Test
+ public void testSimpleQueryStringLenient() throws ExecutionException, InterruptedException {
+ createIndex("test1", "test2");
+ indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "foo"),
+ client().prepareIndex("test2", "type1", "10").setSource("field", 5));
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo").field("field")).get();
+ assertFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+
+ searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo").field("field").lenient(true)).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
+ @Test // see: https://github.com/elasticsearch/elasticsearch/issues/7967
+ public void testLenientFlagBeingTooLenient() throws Exception {
+ indexRandom(true,
+ client().prepareIndex("test", "doc", "1").setSource("num", 1, "body", "foo bar baz"),
+ client().prepareIndex("test", "doc", "2").setSource("num", 2, "body", "eggplant spaghetti lasagna"));
+
+ BoolQueryBuilder q = boolQuery().should(simpleQueryStringQuery("bar").field("num").field("body").lenient(true));
+ SearchResponse resp = client().prepareSearch("test").setQuery(q).get();
+ assertNoFailures(resp);
+ // the bug is that this would be parsed into basically a match_all
+ // query and this would match both documents
+ assertHitCount(resp, 1);
+ assertSearchHits(resp, "1");
+ }
+
+ @Test
+ public void testSimpleQueryStringAnalyzeWildcard() throws ExecutionException, InterruptedException, IOException {
+ String mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("location")
+ .field("type", "string")
+ .field("analyzer", "german")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().string();
+
+ CreateIndexRequestBuilder mappingRequest = client().admin().indices().prepareCreate("test1").addMapping("type1", mapping);
+ mappingRequest.execute().actionGet();
+ indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("location", "Köln"));
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("Köln*").analyzeWildcard(true).field("location")).get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+ assertSearchHits(searchResponse, "1");
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
new file mode 100644
index 0000000000..8bcf5f4477
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/rescore/QueryRescorerTests.java
@@ -0,0 +1,772 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.rescore;
+
+
+
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.lucene.search.function.CombineFunction;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Comparator;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFourthHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThirdHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ *
+ */
+public class QueryRescorerTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testEnforceWindowSize() {
+ createIndex("test");
+ // index a small random number of docs so the rescore window is exercised across shards
+ int iters = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < iters; i ++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("f", Integer.toString(i)).execute().actionGet();
+ }
+ ensureYellow();
+ refresh();
+
+ int numShards = getNumShards("test").numPrimaries;
+ for (int j = 0 ; j < iters; j++) {
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setRescorer(RescoreBuilder.queryRescorer(
+ QueryBuilders.functionScoreQuery(QueryBuilders.matchAllQuery())
+ .boostMode("replace").add(ScoreFunctionBuilders.factorFunction(100))).setQueryWeight(0.0f).setRescoreQueryWeight(1.0f))
+ .setRescoreWindow(1).setSize(randomIntBetween(2,10)).execute().actionGet();
+ assertSearchResponse(searchResponse);
+ assertFirstHit(searchResponse, hasScore(100.f));
+ int numDocsWith100AsAScore = 0;
+ for (int i = 0; i < searchResponse.getHits().hits().length; i++) {
+ float score = searchResponse.getHits().hits()[i].getScore();
+ if (score == 100f) {
+ numDocsWith100AsAScore += 1;
+ }
+ }
+ // we cannot assert that they are equal since some shards might not have docs at all
+ assertThat(numDocsWith100AsAScore, lessThanOrEqualTo(numShards));
+ }
+ }
+
+ @Test
+ public void testRescorePhrase() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(Settings.settingsBuilder().put(indexSettings()).put("index.number_of_shards", 1)));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree ").get();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").get();
+ ensureYellow();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "quick brown").slop(2).boost(4.0f)).setRescoreQueryWeight(2))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getHits()[0].getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getHits()[1].getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getHits()[2].getId(), equalTo("2"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(3)))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(RescoreBuilder.queryRescorer((QueryBuilders.matchPhraseQuery("field1", "the quick brown"))))
+ .setRescoreWindow(5).execute().actionGet();
+
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+ }
+
+ @Test
+ public void testMoreDocs() throws Exception {
+ Builder builder = Settings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
+ .endObject().endObject().endObject().endObject();
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "massachusetts avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "lexington avenue boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "3").setSource("field1", "boston avenue lexington massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "4").setSource("field1", "boston road lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "5").setSource("field1", "lexington street lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "6").setSource("field1", "massachusetts avenue lexington massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "7").setSource("field1", "bosten street san franciso california").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "8").setSource("field1", "hollywood boulevard los angeles california").execute().actionGet();
+ client().prepareIndex("test", "type1", "9").setSource("field1", "1st street boston massachussetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "10").setSource("field1", "1st street boston massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "11").setSource("field1", "2st street boston massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "12").setSource("field1", "3st street boston massachusetts").execute().actionGet();
+ ensureYellow();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("2"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ // Make sure non-zero from works:
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "lexington avenue massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(2)
+ .setSize(5)
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(20).execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(5));
+ assertHitCount(searchResponse, 9);
+ assertFirstHit(searchResponse, hasId("3"));
+ }
+
+ // Tests a rescore window smaller than number of hits:
+ @Test
+ public void testSmallRescoreWindow() throws Exception {
+ Builder builder = Settings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
+ .endObject().endObject().endObject().endObject();
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
+
+ client().prepareIndex("test", "type1", "3").setSource("field1", "massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "6").setSource("field1", "massachusetts avenue lexington massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "lexington massachusetts avenue").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "lexington avenue boston massachusetts road").execute().actionGet();
+ ensureYellow();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
+ .setFrom(0)
+ .setSize(5).execute().actionGet();
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("1"));
+ assertFourthHit(searchResponse, hasId("2"));
+
+ // Now, rescore only top 2 hits w/ proximity:
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(2).execute().actionGet();
+ // Only top 2 hits were re-ordered:
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("6"));
+ assertSecondHit(searchResponse, hasId("3"));
+ assertThirdHit(searchResponse, hasId("1"));
+ assertFourthHit(searchResponse, hasId("2"));
+
+ // Now, rescore only top 3 hits w/ proximity:
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "massachusetts"))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(0.6f).setRescoreQueryWeight(2.0f)).setRescoreWindow(3).execute().actionGet();
+
+ // Only top 3 hits were re-ordered:
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("6"));
+ assertSecondHit(searchResponse, hasId("1"));
+ assertThirdHit(searchResponse, hasId("3"));
+ assertFourthHit(searchResponse, hasId("2"));
+ }
+
+ // Tests a rescorer that penalizes the scores:
+ @Test
+ public void testRescorerMadeScoresWorse() throws Exception {
+ Builder builder = Settings.builder();
+ builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+ builder.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
+ builder.put("index.analysis.filter.synonym.type", "synonym");
+ builder.putArray("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("field1").field("type", "string").field("analyzer", "whitespace").field("search_analyzer", "synonym")
+ .endObject().endObject().endObject().endObject();
+
+ assertAcked(client().admin().indices().prepareCreate("test").addMapping("type1", mapping).setSettings(builder.put("index.number_of_shards", 1)));
+
+ client().prepareIndex("test", "type1", "3").setSource("field1", "massachusetts").execute().actionGet();
+ client().prepareIndex("test", "type1", "6").setSource("field1", "massachusetts avenue lexington massachusetts").execute().actionGet();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "lexington massachusetts avenue").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "lexington avenue boston massachusetts road").execute().actionGet();
+ ensureYellow();
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5).execute().actionGet();
+ assertThat(searchResponse.getHits().hits().length, equalTo(4));
+ assertHitCount(searchResponse, 4);
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("6"));
+ assertThirdHit(searchResponse, hasId("1"));
+ assertFourthHit(searchResponse, hasId("2"));
+
+ // Now, penalizing rescore (nothing matches the rescore query):
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(QueryBuilders.matchQuery("field1", "massachusetts").operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(5)
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "lexington avenue massachusetts").slop(3))
+ .setQueryWeight(1.0f).setRescoreQueryWeight(-1f)).setRescoreWindow(3).execute().actionGet();
+
+ // 6 and 1 got worse, and then the hit (2) outside the rescore window was sorted ahead:
+ assertFirstHit(searchResponse, hasId("3"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("6"));
+ assertFourthHit(searchResponse, hasId("1"));
+ }
+
+ // Comparator that sorts hits and rescored hits in the same way.
+ // The rescore uses the docId as tie, while regular search uses the slot the hit is in as a tie if score
+ // and shard id are equal during merging shard results.
+ // This comparator uses a custom tie in case the scores are equal, so that both regular hits and rescored hits
+ // are sorted equally. This is fine since tests only care about the fact the scores should be equal, not ordering.
+ private final static Comparator<SearchHit> searchHitsComparator = new Comparator<SearchHit>() {
+ @Override
+ public int compare(SearchHit hit1, SearchHit hit2) {
+ int cmp = Float.compare(hit2.getScore(), hit1.getScore());
+ if (cmp == 0) {
+ return hit1.id().compareTo(hit2.id());
+ } else {
+ return cmp;
+ }
+ }
+ };
+
+ private static void assertEquivalent(String query, SearchResponse plain, SearchResponse rescored) {
+ assertNoFailures(plain);
+ assertNoFailures(rescored);
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] rHits = rightHits.getHits();
+ Arrays.sort(hits, searchHitsComparator);
+ Arrays.sort(rHits, searchHitsComparator);
+ for (int i = 0; i < hits.length; i++) {
+ assertThat("query: " + query, hits[i].getScore(), equalTo(rHits[i].getScore()));
+ }
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat("query: " + query,hits[i].getId(), equalTo(rHits[i].getId()));
+ }
+ }
+
+ private static void assertEquivalentOrSubstringMatch(String query, SearchResponse plain, SearchResponse rescored) {
+ assertNoFailures(plain);
+ assertNoFailures(rescored);
+ SearchHits leftHits = plain.getHits();
+ SearchHits rightHits = rescored.getHits();
+ assertThat(leftHits.getTotalHits(), equalTo(rightHits.getTotalHits()));
+ assertThat(leftHits.getHits().length, equalTo(rightHits.getHits().length));
+ SearchHit[] hits = leftHits.getHits();
+ SearchHit[] otherHits = rightHits.getHits();
+ if (!hits[0].getId().equals(otherHits[0].getId())) {
+ assertThat(((String) otherHits[0].sourceAsMap().get("field1")).contains(query), equalTo(true));
+ } else {
+ Arrays.sort(hits, searchHitsComparator);
+ Arrays.sort(otherHits, searchHitsComparator);
+ for (int i = 0; i < hits.length; i++) {
+ if (hits[i].getScore() == hits[hits.length-1].getScore()) {
+ return; // we need to cut off here since this is the tail of the queue and we might not have fetched enough docs
+ }
+ assertThat(query, hits[i].getId(), equalTo(rightHits.getHits()[i].getId()));
+ }
+ }
+ }
+
+ @Test
+ // forces QUERY_THEN_FETCH because of https://github.com/elasticsearch/elasticsearch/issues/4829
+ public void testEquivalence() throws Exception {
+ // no dummy docs since merges can change scores while we run queries.
+ int numDocs = indexRandomNumbers("whitespace", -1, false);
+
+ final int iters = scaledRandomIntBetween(50, 100);
+ for (int i = 0; i < iters; i++) {
+ int resultSize = numDocs;
+ int rescoreWindow = between(1, 3) * resultSize;
+ String intToEnglish = English.intToEnglish(between(0, numDocs-1));
+ String query = intToEnglish.split(" ")[0];
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(0.0f)) // no weight - so we basically use the same score as the actual query
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+
+ SearchResponse plain = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR)).setFrom(0).setSize(resultSize)
+ .execute().actionGet();
+
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders
+ .constantScoreQuery(QueryBuilders.matchPhraseQuery("field1", "not in the index").slop(3)))
+ .setQueryWeight(1.0f)
+ .setRescoreQueryWeight(1.0f))
+ .setRescoreWindow(rescoreWindow).execute().actionGet();
+ // check equivalence
+ assertEquivalent(query, plain, rescored);
+
+ rescored = client()
+ .prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.matchQuery("field1", query).operator(MatchQueryBuilder.Operator.OR))
+ .setFrom(0)
+ .setSize(resultSize)
+ .setRescorer(
+ RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.matchPhraseQuery("field1", intToEnglish).slop(0))
+ .setQueryWeight(1.0f).setRescoreQueryWeight(1.0f)).setRescoreWindow(2 * rescoreWindow).execute().actionGet();
+ // check equivalence or if the first match differs we check if the phrase is a substring of the top doc
+ assertEquivalentOrSubstringMatch(intToEnglish, plain, rescored);
+ }
+ }
+
+ @Test
+ public void testExplain() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", "whitespace").field("type", "string").endObject().endObject().endObject().endObject())
+ );
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field1", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ client().prepareIndex("test", "type1", "3")
+ .setSource("field1", "quick huge brown", "field2", "the quick lazy huge brown fox jumps over the tree").execute()
+ .actionGet();
+ ensureYellow();
+ refresh();
+
+ {
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(
+ RescoreBuilder.queryRescorer(QueryBuilders.matchPhraseQuery("field1", "the quick brown").slop(2).boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f)).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int i = 0; i < 3; i++) {
+ assertThat(searchResponse.getHits().getAt(i).explanation(), notNullValue());
+ assertThat(searchResponse.getHits().getAt(i).explanation().isMatch(), equalTo(true));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails().length, equalTo(2));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].isMatch(), equalTo(true));
+ if (i == 2) {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getValue(), equalTo(0.5f));
+ } else {
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDescription(), equalTo("sum of:"));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[0].getDetails()[1].getValue(), equalTo(0.5f));
+ assertThat(searchResponse.getHits().getAt(i).explanation().getDetails()[1].getDetails()[1].getValue(), equalTo(0.4f));
+ }
+ }
+ }
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ String[] descriptionModes = new String[]{ "max of:", "min of:", "avg of:", "sum of:", "product of:", "sum of:" };
+ for (int innerMode = 0; innerMode < scoreModes.length; innerMode++) {
+ QueryRescorer innerRescoreQuery = RescoreBuilder.queryRescorer(QueryBuilders.matchQuery("field1", "the quick brown").boost(4.0f))
+ .setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);
+
+ if (!"".equals(scoreModes[innerMode])) {
+ innerRescoreQuery.setScoreMode(scoreModes[innerMode]);
+ }
+
+ SearchResponse searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .setRescorer(innerRescoreQuery).setRescoreWindow(5).setExplain(true).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int j = 0; j < 3; j++) {
+ assertThat(searchResponse.getHits().getAt(j).explanation().getDescription(), equalTo(descriptionModes[innerMode]));
+ }
+
+ for (int outerMode = 0; outerMode < scoreModes.length; outerMode++) {
+ QueryRescorer outerRescoreQuery = RescoreBuilder.queryRescorer(QueryBuilders.matchQuery("field1", "the quick brown")
+ .boost(4.0f)).setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);
+
+ if (!"".equals(scoreModes[outerMode])) {
+ outerRescoreQuery.setScoreMode(scoreModes[outerMode]);
+ }
+
+ searchResponse = client()
+ .prepareSearch()
+ .setSearchType(SearchType.DFS_QUERY_THEN_FETCH)
+ .setQuery(QueryBuilders.matchQuery("field1", "the quick brown").operator(MatchQueryBuilder.Operator.OR))
+ .addRescorer(innerRescoreQuery).setRescoreWindow(5)
+ .addRescorer(outerRescoreQuery).setRescoreWindow(10)
+ .setExplain(true).get();
+ assertHitCount(searchResponse, 3);
+ assertFirstHit(searchResponse, hasId("1"));
+ assertSecondHit(searchResponse, hasId("2"));
+ assertThirdHit(searchResponse, hasId("3"));
+
+ for (int j = 0; j < 3; j++) {
+ Explanation explanation = searchResponse.getHits().getAt(j).explanation();
+ assertThat(explanation.getDescription(), equalTo(descriptionModes[outerMode]));
+ assertThat(explanation.getDetails()[0].getDetails()[0].getDescription(), equalTo(descriptionModes[innerMode]));
+ }
+ }
+ }
+ }
+
+ @Test @Slow
+ public void testScoring() throws Exception {
+ int numDocs = indexRandomNumbers("keyword");
+
+ String[] scoreModes = new String[]{ "max", "min", "avg", "total", "multiply", "" };
+ float primaryWeight = 1.1f;
+ float secondaryWeight = 1.6f;
+
+ for (String scoreMode : scoreModes) {
+ for (int i = 0; i < numDocs - 4; i++) {
+ String[] intToEnglish = new String[] { English.intToEnglish(i), English.intToEnglish(i + 1), English.intToEnglish(i + 2), English.intToEnglish(i + 3) };
+
+ QueryRescorer rescoreQuery = RescoreBuilder
+ .queryRescorer(
+ QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("5.0f"))))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("7.0f"))))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("0.0f")))))
+ .setQueryWeight(primaryWeight)
+ .setRescoreQueryWeight(secondaryWeight);
+
+ if (!"".equals(scoreMode)) {
+ rescoreQuery.setScoreMode(scoreMode);
+ }
+
+ SearchResponse rescored = client()
+ .prepareSearch()
+ .setPreference("test") // ensure we hit the same shards for tie-breaking
+ .setQuery(QueryBuilders.boolQuery()
+ .disableCoord(true)
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[0]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("2.0f"))))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[1]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("3.0f"))))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[2]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("5.0f"))))
+ .should(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", intToEnglish[3]))
+ .boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("0.2f")))))
+ .setFrom(0)
+ .setSize(10)
+ .setRescorer(rescoreQuery)
+ .setRescoreWindow(50).execute().actionGet();
+
+ assertHitCount(rescored, 4);
+
+ if ("total".equals(scoreMode) || "".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight + 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight + 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight + 0.0f * secondaryWeight));
+ } else if ("max".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight));
+ } else if ("min".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 2)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 1)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(3.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(2.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.0f * secondaryWeight));
+ } else if ("avg".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThirdHit(rescored, hasId(String.valueOf(i)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo((3.0f * primaryWeight + 7.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo((2.0f * primaryWeight + 5.0f * secondaryWeight) / 2.0f));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo((0.2f * primaryWeight) / 2.0f));
+ } else if ("multiply".equals(scoreMode)) {
+ assertFirstHit(rescored, hasId(String.valueOf(i + 1)));
+ assertSecondHit(rescored, hasId(String.valueOf(i)));
+ assertThirdHit(rescored, hasId(String.valueOf(i + 2)));
+ assertThat(rescored.getHits().getHits()[0].getScore(), equalTo(3.0f * primaryWeight * 7.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[1].getScore(), equalTo(2.0f * primaryWeight * 5.0f * secondaryWeight));
+ assertThat(rescored.getHits().getHits()[2].getScore(), equalTo(5.0f * primaryWeight));
+ assertThat(rescored.getHits().getHits()[3].getScore(), equalTo(0.2f * primaryWeight * 0.0f * secondaryWeight));
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testMultipleRescores() throws Exception {
+ int numDocs = indexRandomNumbers("keyword", 1, true);
+ QueryRescorer eightIsGreat = RescoreBuilder.queryRescorer(
+ QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", English.intToEnglish(8))).boostMode(CombineFunction.REPLACE)
+.add(ScoreFunctionBuilders.scriptFunction(new Script("1000.0f")))).setScoreMode(
+ "total");
+ QueryRescorer sevenIsBetter = RescoreBuilder.queryRescorer(
+ QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", English.intToEnglish(7))).boostMode(CombineFunction.REPLACE)
+.add(ScoreFunctionBuilders.scriptFunction(new Script("10000.0f"))))
+ .setScoreMode("total");
+
+ // First set the rescore window large enough that both rescores take effect
+ SearchRequestBuilder request = client().prepareSearch().setRescoreWindow(numDocs);
+ request.addRescorer(eightIsGreat).addRescorer(sevenIsBetter);
+ SearchResponse response = request.get();
+ assertFirstHit(response, hasId("7"));
+ assertSecondHit(response, hasId("8"));
+
+ // Now squash the second rescore window so it never gets to see a seven
+ response = request.setSize(1).clearRescorers().addRescorer(eightIsGreat).addRescorer(sevenIsBetter, 1).get();
+ assertFirstHit(response, hasId("8"));
+ // We have no idea what the second hit will be because we didn't get a chance to look for seven
+
+ // Now use one rescore to drag the number we're looking for into the window of another
+ QueryRescorer ninetyIsGood = RescoreBuilder.queryRescorer(
+ QueryBuilders.functionScoreQuery(QueryBuilders.queryStringQuery("*ninety*")).boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("1000.0f")))).setScoreMode("total");
+ QueryRescorer oneToo = RescoreBuilder.queryRescorer(
+ QueryBuilders.functionScoreQuery(QueryBuilders.queryStringQuery("*one*")).boostMode(CombineFunction.REPLACE)
+ .add(ScoreFunctionBuilders.scriptFunction(new Script("1000.0f")))).setScoreMode("total");
+ request.clearRescorers().addRescorer(ninetyIsGood).addRescorer(oneToo, 10);
+ response = request.setSize(2).get();
+ assertFirstHit(response, hasId("91"));
+ assertFirstHit(response, hasScore(2001.0f));
+ assertSecondHit(response, hasScore(1001.0f)); // Not sure which one it is but it is ninety something
+ }
+
+ private int indexRandomNumbers(String analyzer) throws Exception {
+ return indexRandomNumbers(analyzer, -1, true);
+ }
+
+ private int indexRandomNumbers(String analyzer, int shards, boolean dummyDocs) throws Exception {
+ Builder builder = Settings.settingsBuilder().put(indexSettings());
+
+ if (shards > 0) {
+ builder.put(SETTING_NUMBER_OF_SHARDS, shards);
+ }
+
+ assertAcked(prepareCreate("test")
+ .addMapping(
+ "type1",
+ jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("field1")
+ .field("analyzer", analyzer).field("type", "string").endObject().endObject().endObject().endObject())
+ .setSettings(builder));
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, dummyDocs, docs);
+ ensureGreen();
+ return numDocs;
+ }
+
+ // #11277
+ public void testFromSize() throws Exception {
+ Builder settings = Settings.builder();
+ settings.put(SETTING_NUMBER_OF_SHARDS, 1);
+ settings.put(SETTING_NUMBER_OF_REPLICAS, 0);
+ assertAcked(prepareCreate("test").setSettings(settings));
+ for(int i=0;i<5;i++) {
+ client().prepareIndex("test", "type", ""+i).setSource("text", "hello world").get();
+ }
+ refresh();
+
+ SearchRequestBuilder request = client().prepareSearch();
+ request.setQuery(QueryBuilders.termQuery("text", "hello"));
+ request.setFrom(1);
+ request.setSize(4);
+ request.addRescorer(RescoreBuilder.queryRescorer(QueryBuilders.matchAllQuery()));
+ request.setRescoreWindow(50);
+
+ assertEquals(4, request.get().getHits().hits().length);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/scan/ScanContextTests.java b/core/src/test/java/org/elasticsearch/search/scan/ScanContextTests.java
new file mode 100644
index 0000000000..4a43b867ba
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scan/ScanContextTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.elasticsearch.search.scan.ScanContext.MinDocQuery;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+public class ScanContextTests extends ElasticsearchTestCase {
+
+ public void testMinDocQueryBasics() {
+ MinDocQuery query1 = new MinDocQuery(42);
+ MinDocQuery query2 = new MinDocQuery(42);
+ MinDocQuery query3 = new MinDocQuery(43);
+ QueryUtils.check(query1);
+ QueryUtils.checkEqual(query1, query2);
+ QueryUtils.checkUnequal(query1, query3);
+ }
+
+ public void testMinDocQueryRandom() throws IOException {
+ final int numDocs = randomIntBetween(10, 200);
+ final Document doc = new Document();
+ final Directory dir = newDirectory();
+ final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(doc);
+ }
+ final IndexReader reader = w.getReader();
+ final IndexSearcher searcher = newSearcher(reader);
+ for (int i = 0; i <= numDocs; ++i) {
+ assertEquals(numDocs - i, searcher.count(new MinDocQuery(i)));
+ }
+ w.close();
+ reader.close();
+ dir.close();
+ }
+
+ public void testRandom() throws Exception {
+ final int numDocs = randomIntBetween(10, 200);
+ final Document doc1 = new Document();
+ doc1.add(new StringField("foo", "bar", Store.NO));
+ final Document doc2 = new Document();
+ final Directory dir = newDirectory();
+ final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
+ for (int i = 0; i < numDocs; ++i) {
+ w.addDocument(randomBoolean() ? doc1 : doc2);
+ }
+ final IndexReader reader = w.getReader();
+ final IndexSearcher searcher = newSearcher(reader);
+
+ final boolean trackScores = randomBoolean();
+ final int pageSize = randomIntBetween(1, numDocs / 2);
+ Query query = new TermQuery(new Term("foo", "bar"));
+ if (trackScores == false) {
+ query.setBoost(0f);
+ }
+ final ScoreDoc[] expected = searcher.search(query, numDocs, Sort.INDEXORDER, true, true).scoreDocs;
+
+ final List<ScoreDoc> actual = new ArrayList<>();
+ ScanContext context = new ScanContext();
+ while (true) {
+ final ScoreDoc[] page = context.execute(searcher, query, pageSize, trackScores).scoreDocs;
+ assertTrue(page.length <= pageSize);
+ if (page.length == 0) {
+ assertEquals(0, context.execute(searcher, query, pageSize, trackScores).scoreDocs.length);
+ break;
+ }
+ actual.addAll(Arrays.asList(page));
+ }
+ assertEquals(expected.length, actual.size());
+ for (int i = 0; i < expected.length; ++i) {
+ ScoreDoc sd1 = expected[i];
+ ScoreDoc sd2 = actual.get(i);
+ assertEquals(sd1.doc, sd2.doc);
+ assertEquals(sd1.score, sd2.score, 0.001f);
+ }
+ w.close();
+ reader.close();
+ dir.close();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java b/core/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
new file mode 100644
index 0000000000..5a11bd3a04
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scan/SearchScanScrollingTests.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class SearchScanScrollingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testRandomized() throws Exception {
+ testScroll(scaledRandomIntBetween(100, 200), between(1, 300), getRandom().nextBoolean(), getRandom().nextBoolean());
+ }
+
+ private void testScroll(long numberOfDocs, int size, boolean unbalanced, boolean trackScores) throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ for (int i = 0; i < numberOfDocs; i++) {
+ String id = Integer.toString(i);
+ expectedIds.add(id);
+ String routing = null;
+ if (unbalanced) {
+ if (i < (numberOfDocs * 0.6)) {
+ routing = "0";
+ } else if (i < (numberOfDocs * 0.9)) {
+ routing = "1";
+ } else {
+ routing = "2";
+ }
+ }
+ client().prepareIndex("test", "type1", id).setRouting(routing).setSource("field", i).execute().actionGet();
+ // make some segments
+ if (i % 10 == 0) {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setTrackScores(trackScores)
+ .execute().actionGet();
+ try {
+ assertHitCount(searchResponse, numberOfDocs);
+
+ // start scrolling, until we get not results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertHitCount(searchResponse, numberOfDocs);
+
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + "should not exist in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ if (trackScores) {
+ assertThat(hit.getScore(), greaterThan(0.0f));
+ } else {
+ assertThat(hit.getScore(), equalTo(0.0f));
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(expectedIds, equalTo(ids));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java b/core/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
new file mode 100644
index 0000000000..31fd0ae1e9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scan/SearchScanTests.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scan;
+
+import com.google.common.collect.Sets;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+public class SearchScanTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @Slow
+ public void testNarrowingQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ Set<String> ids = Sets.newHashSet();
+ Set<String> expectedIds = Sets.newHashSet();
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[scaledRandomIntBetween(50, 100)];
+ for (int i = 0; i < builders.length/2; i++) {
+ expectedIds.add(Integer.toString(i));
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy1").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+
+ for (int i = builders.length/2; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy2").field("postDate", System.currentTimeMillis()).field("message", "test").endObject());
+ }
+ indexRandom(true, builders);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(termQuery("user", "kimchy1"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+ assertThat(searchResponse.getHits().getHits().length, equalTo(0));
+
+ // start scrolling, until we get not results
+ while (true) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ assertThat(searchResponse.getHits().totalHits(), equalTo((long)builders.length/2));
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.id() + "should not exists in the result set", ids.contains(hit.id()), equalTo(false));
+ ids.add(hit.id());
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+
+ assertThat(expectedIds, equalTo(ids));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java
new file mode 100644
index 0000000000..a37bc269ae
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scriptfilter;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.cache.filter.FilterCacheModule;
+import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ *
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE)
+public class ScriptQuerySearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ // aggressive filter caching so that we can assert on the number of iterations of the script filters
+ .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
+ .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
+ .build();
+ }
+
+ @Test
+ public void testCustomScriptBoost() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject())
+ .execute().actionGet();
+ flush();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject())
+ .execute().actionGet();
+ flush();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject())
+ .execute().actionGet();
+ refresh();
+
+ logger.info("running doc['num1'].value > 1");
+ SearchResponse response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptQuery(new Script("doc['num1'].value > 1")))).addSort("num1", SortOrder.ASC)
+ .addScriptField("sNum1", new Script("doc['num1'].value")).execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ Map<String, Object> params = new HashMap<>();
+ params.put("param1", 2);
+
+ logger.info("running doc['num1'].value > param1");
+ response = client()
+ .prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ scriptQuery(new Script("doc['num1'].value > param1", ScriptType.INLINE, null, params))))
+ .addSort("num1", SortOrder.ASC).addScriptField("sNum1", new Script("doc['num1'].value")).execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ params = new HashMap<>();
+ params.put("param1", -1);
+ logger.info("running doc['num1'].value > param1");
+ response = client()
+ .prepareSearch()
+ .setQuery(
+ filteredQuery(matchAllQuery(),
+ scriptQuery(new Script("doc['num1'].value > param1", ScriptType.INLINE, null, params))))
+ .addSort("num1", SortOrder.ASC).addScriptField("sNum1", new Script("doc['num1'].value")).execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testCustomScriptBoostOldScriptAPI() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 1.0f).endObject()).execute().actionGet();
+ flush();
+ client().prepareIndex("test", "type1", "2")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 2.0f).endObject()).execute().actionGet();
+ flush();
+ client().prepareIndex("test", "type1", "3")
+ .setSource(jsonBuilder().startObject().field("test", "value beck").field("num1", 3.0f).endObject()).execute().actionGet();
+ refresh();
+
+ logger.info("running doc['num1'].value > 1");
+ SearchResponse response = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), scriptQuery("doc['num1'].value > 1")))
+ .addSort("num1", SortOrder.ASC).addScriptField("sNum1", "doc['num1'].value").execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(2l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptQuery("doc['num1'].value > param1").addParam("param1", 2)))
+ .addSort("num1", SortOrder.ASC).addScriptField("sNum1", "doc['num1'].value").execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(1l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(3.0));
+
+ logger.info("running doc['num1'].value > param1");
+ response = client().prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), scriptQuery("doc['num1'].value > param1").addParam("param1", -1)))
+ .addSort("num1", SortOrder.ASC).addScriptField("sNum1", "doc['num1'].value").execute().actionGet();
+
+ assertThat(response.getHits().totalHits(), equalTo(3l));
+ assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+ assertThat((Double) response.getHits().getAt(0).fields().get("sNum1").values().get(0), equalTo(1.0));
+ assertThat(response.getHits().getAt(1).id(), equalTo("2"));
+ assertThat((Double) response.getHits().getAt(1).fields().get("sNum1").values().get(0), equalTo(2.0));
+ assertThat(response.getHits().getAt(2).id(), equalTo("3"));
+ assertThat((Double) response.getHits().getAt(2).fields().get("sNum1").values().get(0), equalTo(3.0));
+ }
+
+ private static AtomicInteger scriptCounter = new AtomicInteger(0);
+
+ public static int incrementScriptCounter() {
+ return scriptCounter.incrementAndGet();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollTests.java b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollTests.java
new file mode 100644
index 0000000000..0dc085dd0a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scroll/DuelScrollTests.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scroll;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.sort.SortBuilder;
+import org.elasticsearch.search.sort.SortBuilders;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+public class DuelScrollTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testDuel_queryThenFetch() throws Exception {
+ TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH);
+
+ SearchResponse control = client().prepareSearch("index")
+ .setSearchType(context.searchType)
+ .addSort(context.sort)
+ .setSize(context.numDocs).get();
+ assertNoFailures(control);
+ SearchHits sh = control.getHits();
+ assertThat(sh.totalHits(), equalTo((long) context.numDocs));
+ assertThat(sh.getHits().length, equalTo(context.numDocs));
+
+ SearchResponse searchScrollResponse = client().prepareSearch("index")
+ .setSearchType(context.searchType)
+ .addSort(context.sort)
+ .setSize(context.scrollRequestSize)
+ .setScroll("10m").get();
+
+ assertNoFailures(searchScrollResponse);
+ assertThat(searchScrollResponse.getHits().getTotalHits(), equalTo((long) context.numDocs));
+ assertThat(searchScrollResponse.getHits().hits().length, equalTo(context.scrollRequestSize));
+
+ int counter = 0;
+ for (SearchHit hit : searchScrollResponse.getHits()) {
+ assertThat(hit.sortValues()[0], equalTo(sh.getAt(counter++).sortValues()[0]));
+ }
+
+ int iter = 1;
+ String scrollId = searchScrollResponse.getScrollId();
+ while (true) {
+ searchScrollResponse = client().prepareSearchScroll(scrollId).setScroll("10m").get();
+ assertNoFailures(searchScrollResponse);
+ assertThat(searchScrollResponse.getHits().getTotalHits(), equalTo((long) context.numDocs));
+ if (searchScrollResponse.getHits().hits().length == 0) {
+ break;
+ }
+
+ int expectedLength;
+ int scrollSlice = ++iter * context.scrollRequestSize;
+ if (scrollSlice <= context.numDocs) {
+ expectedLength = context.scrollRequestSize;
+ } else {
+ expectedLength = context.scrollRequestSize - (scrollSlice - context.numDocs);
+ }
+ assertThat(searchScrollResponse.getHits().hits().length, equalTo(expectedLength));
+ for (SearchHit hit : searchScrollResponse.getHits()) {
+ assertThat(hit.sortValues()[0], equalTo(sh.getAt(counter++).sortValues()[0]));
+ }
+ scrollId = searchScrollResponse.getScrollId();
+ }
+
+ assertThat(counter, equalTo(context.numDocs));
+ clearScroll(scrollId);
+ }
+
+ @Test
+ public void testDuel_queryAndFetch() throws Exception {
+ // *_QUERY_AND_FETCH search types are tricky: the ordering can be incorrect, since they return num_shards * (from + size)
+ // hits, so a subsequent scroll call can return hits that should have been in the hits of the first scroll call.
+
+ TestContext context = create(SearchType.DFS_QUERY_AND_FETCH, SearchType.QUERY_AND_FETCH);
+ SearchResponse searchScrollResponse = client().prepareSearch("index")
+ .setSearchType(context.searchType)
+ .addSort(context.sort)
+ .setSize(context.scrollRequestSize)
+ .setScroll("10m").get();
+
+ assertNoFailures(searchScrollResponse);
+ assertThat(searchScrollResponse.getHits().getTotalHits(), equalTo((long) context.numDocs));
+
+ int counter = searchScrollResponse.getHits().hits().length;
+ String scrollId = searchScrollResponse.getScrollId();
+ while (true) {
+ searchScrollResponse = client().prepareSearchScroll(scrollId).setScroll("10m").get();
+ assertNoFailures(searchScrollResponse);
+ assertThat(searchScrollResponse.getHits().getTotalHits(), equalTo((long) context.numDocs));
+ if (searchScrollResponse.getHits().hits().length == 0) {
+ break;
+ }
+
+ counter += searchScrollResponse.getHits().hits().length;
+ scrollId = searchScrollResponse.getScrollId();
+ }
+
+ assertThat(counter, equalTo(context.numDocs));
+ clearScroll(scrollId);
+ }
+
+
+ private TestContext create(SearchType... searchTypes) throws Exception {
+ assertAcked(prepareCreate("index").addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties")
+ .startObject("field1")
+ .field("type", "long")
+ .endObject()
+ .startObject("field2")
+ .field("type", "string")
+ .endObject()
+ .startObject("nested")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("field3")
+ .field("type", "long")
+ .endObject()
+ .startObject("field4")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject().endObject()));
+
+ int numDocs = 2 + randomInt(512);
+ int scrollRequestSize = randomIntBetween(1, rarely() ? numDocs : numDocs / 2);
+ boolean unevenRouting = randomBoolean();
+
+ int numMissingDocs = scaledRandomIntBetween(0, numDocs / 100);
+ IntHashSet missingDocs = new IntHashSet(numMissingDocs);
+ for (int i = 0; i < numMissingDocs; i++) {
+ while (!missingDocs.add(randomInt(numDocs))) {}
+ }
+
+ for (int i = 1; i <= numDocs; i++) {
+ IndexRequestBuilder indexRequestBuilder = client()
+ .prepareIndex("index", "type", String.valueOf(i));
+ if (missingDocs.contains(i)) {
+ indexRequestBuilder.setSource("x", "y");
+ } else {
+ indexRequestBuilder.setSource(jsonBuilder().startObject()
+ .field("field1", i)
+ .field("field2", String.valueOf(i))
+ .startObject("nested")
+ .field("field3", i)
+ .field("field4", String.valueOf(i))
+ .endObject()
+ .endObject());
+ }
+
+ if (unevenRouting && randomInt(3) <= 2) {
+ indexRequestBuilder.setRouting("a");
+ }
+ indexRandom(false, indexRequestBuilder);
+ }
+ refresh();
+
+ final SortBuilder sort;
+ if (randomBoolean()) {
+ if (randomBoolean()) {
+ sort = SortBuilders.fieldSort("field1").missing(1);
+ } else {
+ sort = SortBuilders.fieldSort("field2")
+ .missing("1");
+ }
+ } else {
+ if (randomBoolean()) {
+ sort = SortBuilders.fieldSort("nested.field3").missing(1);
+ } else {
+ sort = SortBuilders.fieldSort("nested.field4").missing("1");
+ }
+ }
+ sort.order(randomBoolean() ? SortOrder.ASC : SortOrder.DESC);
+
+ SearchType searchType = RandomPicks.randomFrom(getRandom(), Arrays.asList(searchTypes));
+
+ logger.info("numDocs={}, scrollRequestSize={}, sort={}, searchType={}", numDocs, scrollRequestSize, sort, searchType);
+ return new TestContext(numDocs, scrollRequestSize, sort, searchType);
+ }
+
+
+ class TestContext {
+
+ final int numDocs;
+ final int scrollRequestSize;
+ final SortBuilder sort;
+ final SearchType searchType;
+
+ TestContext(int numDocs, int scrollRequestSize, SortBuilder sort, SearchType searchType) {
+ this.numDocs = numDocs;
+ this.scrollRequestSize = scrollRequestSize;
+ this.sort = sort;
+ this.searchType = searchType;
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
new file mode 100644
index 0000000000..6920d015c4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java
@@ -0,0 +1,579 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scroll;
+
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.rest.action.search.RestClearScrollAction;
+import org.elasticsearch.rest.action.search.RestSearchScrollAction;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class SearchScrollTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleScrollQueryThenFetch() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(30));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetchSmallSizeUnevenDistribution() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ String routing = "0";
+ if (i > 90) {
+ routing = "1";
+ } else if (i > 60) {
+ routing = "2";
+ }
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", i).setRouting(routing).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ long counter = 0;
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ for (int i = 0; i < 32; i++) {
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+ }
+
+ // and now the last response should contain exactly one hit
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(1));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ // and the last response should contain zero hits
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(0));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter++));
+ }
+
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testScrollAndUpdateIndex() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 5)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 500; i++) {
+ client().prepareIndex("test", "tweet", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("user", "kimchy").field("postDate", System.currentTimeMillis()).field("message", "test").endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(0l));
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(queryStringQuery("user:kimchy"))
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .addSort("postDate", SortOrder.ASC)
+ .execute().actionGet();
+ try {
+ do {
+ for (SearchHit searchHit : searchResponse.getHits().hits()) {
+ Map<String, Object> map = searchHit.sourceAsMap();
+ map.put("message", "update");
+ client().prepareIndex("test", "tweet", searchHit.id()).setSource(map).execute().actionGet();
+ }
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ } while (searchResponse.getHits().hits().length > 0);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ assertThat(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "test")).execute().actionGet().getCount(), equalTo(0l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ assertThat(client().prepareCount().setQuery(termQuery("message", "update")).execute().actionGet().getCount(), equalTo(500l));
+ } finally {
+ clearScroll(searchResponse.getScrollId());
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .addScrollId(searchResponse1.getScrollId())
+ .addScrollId(searchResponse2.getScrollId())
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), is(true));
+ assertThat(clearResponse.getNumFreed(), greaterThan(0));
+ assertThat(clearResponse.status(), equalTo(RestStatus.OK));
+
+ assertThrows(client().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
+ assertThrows(client().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
+ }
+
+ @Test
+ public void testClearNonExistentScrollId() throws Exception {
+ createIndex("idx");
+ ClearScrollResponse response = client().prepareClearScroll()
+ .addScrollId("cXVlcnlUaGVuRmV0Y2g7MzsyOlpBRC1qOUhrUjhhZ0NtQWUxU2FuWlE7MjpRcjRaNEJ2R1JZV1VEMW02ZGF1LW5ROzI6S0xUal9lZDRTd3lWNUhUU2VSb01CQTswOw==")
+ .get();
+ // We cannot know whether a scroll was actually cleared, since that information is not serialized in the
+ // free-search-context response returned from each node on which we attempt to clear the scroll.
+ assertThat(response.isSucceeded(), is(true));
+ assertThat(response.getNumFreed(), equalTo(0));
+ assertThat(response.status(), equalTo(RestStatus.NOT_FOUND));
+ }
+
+ @Test
+ public void testClearIllegalScrollId() throws Exception {
+ createIndex("idx");
+ try {
+ client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ try {
+ // Fails during base64 decoding (Base64-encoded string must have at least four characters)
+ client().prepareClearScroll().addScrollId("a").get();
+ fail();
+ } catch (IllegalArgumentException e) {
+ }
+ try {
+ client().prepareClearScroll().addScrollId("abcabc").get();
+ fail();
+ // if running without -ea this will also throw ElasticsearchIllegalArgumentException
+ } catch (UncategorizedExecutionException e) {
+ assertThat(e.getRootCause(), instanceOf(AssertionError.class));
+ }
+ }
+
+ @Test
+ public void testSimpleScrollQueryThenFetch_clearAllScrollIds() throws Exception {
+ client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder().put("index.number_of_shards", 3)).execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ for (int i = 0; i < 100; i++) {
+ client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject()).execute().actionGet();
+ }
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ SearchResponse searchResponse1 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ SearchResponse searchResponse2 = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(35)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .setSearchType(SearchType.QUERY_THEN_FETCH)
+ .addSort("field", SortOrder.ASC)
+ .execute().actionGet();
+
+ long counter1 = 0;
+ long counter2 = 0;
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ searchResponse1 = client().prepareSearchScroll(searchResponse1.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ searchResponse2 = client().prepareSearchScroll(searchResponse2.getScrollId())
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+
+ assertThat(searchResponse1.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse1.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse1.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter1++));
+ }
+
+ assertThat(searchResponse2.getHits().getTotalHits(), equalTo(100l));
+ assertThat(searchResponse2.getHits().hits().length, equalTo(35));
+ for (SearchHit hit : searchResponse2.getHits()) {
+ assertThat(((Number) hit.sortValues()[0]).longValue(), equalTo(counter2++));
+ }
+
+ ClearScrollResponse clearResponse = client().prepareClearScroll().addScrollId("_all")
+ .execute().actionGet();
+ assertThat(clearResponse.isSucceeded(), is(true));
+ assertThat(clearResponse.getNumFreed(), greaterThan(0));
+ assertThat(clearResponse.status(), equalTo(RestStatus.OK));
+
+ assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse1.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
+ assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse2.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)), RestStatus.NOT_FOUND);
+ }
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/4156
+ public void testDeepPaginationWithOneDocIndexAndDoNotBlowUp() throws Exception {
+ client().prepareIndex("index", "type", "1")
+ .setSource("field", "value")
+ .setRefresh(true)
+ .execute().get();
+
+ for (SearchType searchType : SearchType.values()) {
+ SearchRequestBuilder builder = client().prepareSearch("index")
+ .setSearchType(searchType)
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(Integer.MAX_VALUE);
+
+ if (searchType == SearchType.SCAN || searchType != SearchType.COUNT && randomBoolean()) {
+ builder.setScroll("1m");
+ }
+
+ SearchResponse response = builder.execute().actionGet();
+ try {
+ ElasticsearchAssertions.assertHitCount(response, 1l);
+ } finally {
+ String scrollId = response.getScrollId();
+ if (scrollId != null) {
+ clearScroll(scrollId);
+ }
+ }
+ }
+ }
+
+ @Test
+ public void testThatNonExistingScrollIdReturnsCorrectException() throws Exception {
+ client().prepareIndex("index", "type", "1").setSource("field", "value").execute().get();
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get();
+ assertThat(searchResponse.getScrollId(), is(notNullValue()));
+
+ ClearScrollResponse clearScrollResponse = client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get();
+ assertThat(clearScrollResponse.isSucceeded(), is(true));
+
+ assertThrows(internalCluster().transportClient().prepareSearchScroll(searchResponse.getScrollId()), RestStatus.NOT_FOUND);
+ }
+
+ @Test
+ public void testStringSortMissingAscTerminates() throws Exception {
+ assertAcked(prepareCreate("test")
+ .setSettings(Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
+ .addMapping("test", "no_field", "type=string", "some_field", "type=string"));
+ client().prepareIndex("test", "test", "1").setSource("some_field", "test").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setTypes("test")
+ .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_last"))
+ .setScroll("1m")
+ .get();
+ assertHitCount(response, 1);
+ assertSearchHits(response, "1");
+
+ response = client().prepareSearchScroll(response.getScrollId()).get();
+ assertSearchResponse(response);
+ assertHitCount(response, 1);
+ assertNoSearchHits(response);
+
+ response = client().prepareSearch("test")
+ .setTypes("test")
+ .addSort(new FieldSortBuilder("no_field").order(SortOrder.ASC).missing("_first"))
+ .setScroll("1m")
+ .get();
+ assertHitCount(response, 1);
+ assertSearchHits(response, "1");
+
+ response = client().prepareSearchScroll(response.getScrollId()).get();
+ assertHitCount(response, 1);
+ assertThat(response.getHits().getHits().length, equalTo(0));
+ }
+
+ @Test
+ public void testParseSearchScrollRequest() throws Exception {
+ BytesReference content = XContentFactory.jsonBuilder()
+ .startObject()
+ .field("scroll_id", "SCROLL_ID")
+ .field("scroll", "1m")
+ .endObject().bytes();
+
+ SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
+ RestSearchScrollAction.buildFromContent(content, searchScrollRequest);
+
+ assertThat(searchScrollRequest.scrollId(), equalTo("SCROLL_ID"));
+ assertThat(searchScrollRequest.scroll().keepAlive(), equalTo(TimeValue.parseTimeValue("1m", null, "scroll")));
+ }
+
+ @Test
+ public void testParseSearchScrollRequestWithInvalidJsonThrowsException() throws Exception {
+ SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
+ BytesReference invalidContent = XContentFactory.jsonBuilder().startObject()
+ .value("invalid_json").endObject().bytes();
+
+ try {
+ RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest);
+ fail("expected parseContent failure");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(IllegalArgumentException.class));
+ assertThat(e.getMessage(), equalTo("Failed to parse request body"));
+ }
+ }
+
+ @Test
+ public void testParseSearchScrollRequestWithUnknownParamThrowsException() throws Exception {
+ SearchScrollRequest searchScrollRequest = new SearchScrollRequest();
+ BytesReference invalidContent = XContentFactory.jsonBuilder().startObject()
+ .field("scroll_id", "value_2")
+ .field("unknown", "keyword")
+ .endObject().bytes();
+
+ try {
+ RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest);
+ fail("expected parseContent failure");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(IllegalArgumentException.class));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
+ }
+ }
+
+ @Test
+ public void testParseClearScrollRequest() throws Exception {
+ BytesReference content = XContentFactory.jsonBuilder().startObject()
+ .array("scroll_id", "value_1", "value_2")
+ .endObject().bytes();
+ ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+ RestClearScrollAction.buildFromContent(content, clearScrollRequest);
+ assertThat(clearScrollRequest.scrollIds(), contains("value_1", "value_2"));
+ }
+
+ @Test
+ public void testParseClearScrollRequestWithInvalidJsonThrowsException() throws Exception {
+ BytesReference invalidContent = XContentFactory.jsonBuilder().startObject()
+ .value("invalid_json").endObject().bytes();
+ ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+
+ try {
+ RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest);
+ fail("expected parseContent failure");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(IllegalArgumentException.class));
+ assertThat(e.getMessage(), equalTo("Failed to parse request body"));
+ }
+ }
+
+ @Test
+ public void testParseClearScrollRequestWithUnknownParamThrowsException() throws Exception {
+ BytesReference invalidContent = XContentFactory.jsonBuilder().startObject()
+ .array("scroll_id", "value_1", "value_2")
+ .field("unknown", "keyword")
+ .endObject().bytes();
+ ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
+
+ try {
+ RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest);
+ fail("expected parseContent failure");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(IllegalArgumentException.class));
+ assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]"));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesTests.java b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesTests.java
new file mode 100644
index 0000000000..836d76b5a9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/scroll/SearchScrollWithFailingNodesTests.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.scroll;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
+public class SearchScrollWithFailingNodesTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfShards() {
+ return 2;
+ }
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ @Test
+ public void testScanScrollWithShardExceptions() throws Exception {
+ internalCluster().startNode();
+ internalCluster().startNode();
+ assertAcked(
+ prepareCreate("test")
+                        // Enforce that only one shard can be allocated to a single node
+ .setSettings(Settings.builder().put(indexSettings()).put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE, 1))
+ );
+
+ List<IndexRequestBuilder> writes = new ArrayList<>();
+ for (int i = 0; i < 100; i++) {
+ writes.add(
+ client().prepareIndex("test", "type1")
+ .setSource(jsonBuilder().startObject().field("field", i).endObject())
+ );
+ }
+ indexRandom(false, writes);
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .setScroll(TimeValue.timeValueMinutes(1))
+ .get();
+ assertAllSuccessful(searchResponse);
+ long numHits = 0;
+ do {
+ numHits += searchResponse.getHits().hits().length;
+ searchResponse = client()
+ .prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1))
+ .get();
+ assertAllSuccessful(searchResponse);
+ } while (searchResponse.getHits().hits().length > 0);
+ assertThat(numHits, equalTo(100l));
+ clearScroll("_all");
+
+ internalCluster().stopRandomNonMasterNode();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .setScroll(TimeValue.timeValueMinutes(1))
+ .get();
+ assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards()));
+ numHits = 0;
+ int numberOfSuccessfulShards = searchResponse.getSuccessfulShards();
+ do {
+ numHits += searchResponse.getHits().hits().length;
+ searchResponse = client()
+ .prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1))
+ .get();
+ assertThat(searchResponse.getSuccessfulShards(), equalTo(numberOfSuccessfulShards));
+ } while (searchResponse.getHits().hits().length > 0);
+ assertThat(numHits, greaterThan(0l));
+
+ clearScroll(searchResponse.getScrollId());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
new file mode 100644
index 0000000000..57964e950b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.simple;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.containsString;
+
+public class SimpleSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSearchNullIndex() {
+ try {
+ client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+
+ }
+
+ try {
+ client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ fail();
+ } catch (IllegalArgumentException e) {
+
+ }
+ }
+
+ @Test
+ public void testSearchRandomPreference() throws InterruptedException, ExecutionException {
+ createIndex("test");
+ indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", "value"),
+ client().prepareIndex("test", "type", "2").setSource("field", "value"),
+ client().prepareIndex("test", "type", "3").setSource("field", "value"),
+ client().prepareIndex("test", "type", "4").setSource("field", "value"),
+ client().prepareIndex("test", "type", "5").setSource("field", "value"),
+ client().prepareIndex("test", "type", "6").setSource("field", "value"));
+
+ int iters = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < iters; i++) {
+ String randomPreference = randomUnicodeOfLengthBetween(0, 4);
+            // randomPreference should not start with '_' (reserved for known preference types, e.g. _shards, _primary)
+ while (randomPreference.startsWith("_")) {
+ randomPreference = randomUnicodeOfLengthBetween(0, 4);
+ }
+            // id is not indexed, but let's see that we automatically support querying by it
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setPreference(randomPreference).get();
+ assertHitCount(searchResponse, 6l);
+
+ }
+ }
+
+ @Test
+ public void simpleIpTests() throws Exception {
+ createIndex("test");
+
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("from").field("type", "ip").endObject()
+ .startObject("to").field("type", "ip").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ client().prepareIndex("test", "type1", "1").setSource("from", "192.168.0.5", "to", "192.168.0.10").setRefresh(true).execute().actionGet();
+
+ SearchResponse search = client().prepareSearch()
+ .setQuery(boolQuery().must(rangeQuery("from").lt("192.168.0.7")).must(rangeQuery("to").gt("192.168.0.7")))
+ .execute().actionGet();
+
+ assertHitCount(search, 1l);
+ }
+
+ @Test
+ public void simpleIdTests() {
+ createIndex("test");
+
+ client().prepareIndex("test", "type", "XXX1").setSource("field", "value").setRefresh(true).execute().actionGet();
+        // id is not indexed, but let's see that we automatically support querying by it
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX1")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+        // id is not indexed, but we can automatically support prefix queries as well
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.prefixQuery("_id", "XXX")).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch().setQuery(QueryBuilders.queryStringQuery("_id:XXX*").lowercaseExpandedTerms(false)).execute().actionGet();
+ assertHitCount(searchResponse, 1l);
+ }
+
+ @Test
+ public void simpleDateRangeTests() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "type1", "1").setSource("field", "2010-01-05T02:00").execute().actionGet();
+ client().prepareIndex("test", "type1", "2").setSource("field", "2010-01-06T02:00").execute().actionGet();
+ ensureGreen();
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-03||+2d").lte("2010-01-04||+2d/d")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lte("2010-01-06T02:00")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 2l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gte("2010-01-05T02:00").lt("2010-01-06T02:00")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.rangeQuery("field").gt("2010-01-05T02:00").lt("2010-01-06T02:00")).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 0l);
+
+ searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("field:[2010-01-03||+2d TO 2010-01-04||+2d/d]")).execute().actionGet();
+ assertHitCount(searchResponse, 2l);
+ }
+
+ @Test
+ public void localeDependentDateTests() throws Exception {
+ assumeFalse("Locals are buggy on JDK9EA", Constants.JRE_IS_MINIMUM_JAVA9 && systemPropertyAsBoolean("tests.security.manager", false));
+ assertAcked(prepareCreate("test")
+ .addMapping("type1",
+ jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("date_field")
+ .field("type", "date")
+ .field("format", "E, d MMM yyyy HH:mm:ss Z")
+ .field("locale", "de")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ client().prepareIndex("test", "type1", "" + i).setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800").execute().actionGet();
+ client().prepareIndex("test", "type1", "" + (10 + i)).setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800").execute().actionGet();
+ }
+
+ refresh();
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 10l);
+
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .execute().actionGet();
+ assertHitCount(searchResponse, 20l);
+
+ }
+ }
+
+ @Test
+ public void simpleTerminateAfterCountTests() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+ int max = randomIntBetween(3, 29);
+ List<IndexRequestBuilder> docbuilders = new ArrayList<>(max);
+
+ for (int i = 1; i <= max; i++) {
+ String id = String.valueOf(i);
+ docbuilders.add(client().prepareIndex("test", "type1", id).setSource("field", i));
+ }
+
+ indexRandom(true, docbuilders);
+ ensureGreen();
+ refresh();
+
+ SearchResponse searchResponse;
+
+ for (int i = 1; i <= max; i++) {
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max))
+ .setTerminateAfter(i).execute().actionGet();
+ assertHitCount(searchResponse, (long)i);
+ assertTrue(searchResponse.isTerminatedEarly());
+ }
+
+ searchResponse = client().prepareSearch("test")
+ .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max))
+ .setTerminateAfter(2 * max).execute().actionGet();
+
+ assertHitCount(searchResponse, max);
+ assertFalse(searchResponse.isTerminatedEarly());
+ }
+
+ @Test
+ public void testInsaneFrom() throws Exception {
+ createIndex("idx");
+ indexRandom(true, client().prepareIndex("idx", "type").setSource("{}"));
+
+ try {
+ client().prepareSearch("idx").setFrom(Integer.MAX_VALUE).get();
+ fail();
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.toString(), containsString("Result window is too large, from + size must be less than or equal to:"));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
new file mode 100644
index 0000000000..8a369d8f5f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java
@@ -0,0 +1,2529 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.sort;
+
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHitField;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction;
+import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+
+/**
+ *
+ */
+public class SimpleSortTests extends ElasticsearchIntegrationTest {
+
+ @TestLogging("action.search.type:TRACE")
+ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9421")
+ public void testIssue8226() {
+ int numIndices = between(5, 10);
+ final boolean useMapping = randomBoolean();
+ for (int i = 0; i < numIndices; i++) {
+ if (useMapping) {
+ assertAcked(prepareCreate("test_" + i).addAlias(new Alias("test")).addMapping("foo", "entry", "type=long"));
+ } else {
+ assertAcked(prepareCreate("test_" + i).addAlias(new Alias("test")));
+ }
+ if (i > 0) {
+ client().prepareIndex("test_" + i, "foo", "" + i).setSource("{\"entry\": " + i + "}").get();
+ }
+ }
+ ensureYellow();
+ refresh();
+ // sort DESC
+ SearchResponse searchResponse = client().prepareSearch()
+ .addSort(new FieldSortBuilder("entry").order(SortOrder.DESC).unmappedType(useMapping ? null : "long"))
+ .setSize(10).get();
+ logClusterState();
+ assertSearchResponse(searchResponse);
+
+ for (int j = 1; j < searchResponse.getHits().hits().length; j++) {
+ Number current = (Number) searchResponse.getHits().hits()[j].getSource().get("entry");
+ Number previous = (Number) searchResponse.getHits().hits()[j-1].getSource().get("entry");
+ assertThat(searchResponse.toString(), current.intValue(), lessThan(previous.intValue()));
+ }
+
+ // sort ASC
+ searchResponse = client().prepareSearch()
+ .addSort(new FieldSortBuilder("entry").order(SortOrder.ASC).unmappedType(useMapping ? null : "long"))
+ .setSize(10).get();
+ logClusterState();
+ assertSearchResponse(searchResponse);
+
+ for (int j = 1; j < searchResponse.getHits().hits().length; j++) {
+ Number current = (Number) searchResponse.getHits().hits()[j].getSource().get("entry");
+ Number previous = (Number) searchResponse.getHits().hits()[j-1].getSource().get("entry");
+ assertThat(searchResponse.toString(), current.intValue(), greaterThan(previous.intValue()));
+ }
+ }
+
+ @LuceneTestCase.BadApple(bugUrl = "simon is working on this")
+ public void testIssue6614() throws ExecutionException, InterruptedException {
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ boolean strictTimeBasedIndices = randomBoolean();
+ final int numIndices = randomIntBetween(2, 25); // at most 25 days in the month
+ for (int i = 0; i < numIndices; i++) {
+ final String indexId = strictTimeBasedIndices ? "idx_" + i : "idx";
+ if (strictTimeBasedIndices || i == 0) {
+ createIndex(indexId);
+ }
+ final int numDocs = randomIntBetween(1, 23); // hour of the day
+ for (int j = 0; j < numDocs; j++) {
+ builders.add(client().prepareIndex(indexId, "type").setSource("foo", "bar", "timeUpdated", "2014/07/" + String.format(Locale.ROOT, "%02d", i+1)+" " + String.format(Locale.ROOT, "%02d", j+1) + ":00:00"));
+ }
+ }
+ int docs = builders.size();
+ indexRandom(true, builders);
+ ensureYellow();
+ SearchResponse allDocsResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(),
+ QueryBuilders.boolQuery().must(QueryBuilders.termQuery("foo", "bar")).must(
+ QueryBuilders.rangeQuery("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01"))))
+ .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date"))
+ .setSize(docs).get();
+ assertSearchResponse(allDocsResponse);
+
+ final int numiters = randomIntBetween(1, 20);
+ for (int i = 0; i < numiters; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(),
+ QueryBuilders.boolQuery().must(QueryBuilders.termQuery("foo", "bar")).must(
+ QueryBuilders.rangeQuery("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01"))))
+ .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date"))
+ .setSize(scaledRandomIntBetween(1, docs)).get();
+ assertSearchResponse(searchResponse);
+ for (int j = 0; j < searchResponse.getHits().hits().length; j++) {
+ assertThat(searchResponse.toString() + "\n vs. \n" + allDocsResponse.toString(), searchResponse.getHits().hits()[j].getId(), equalTo(allDocsResponse.getHits().hits()[j].getId()));
+ }
+ }
+
+ }
+
+ public void testIssue6639() throws ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("$index")
+ .addMapping("$type","{\"$type\": {\"properties\": {\"grantee\": {\"index\": \"not_analyzed\", \"term_vector\": \"with_positions_offsets\", \"type\": \"string\", \"analyzer\": \"snowball\", \"boost\": 1.0, \"store\": \"yes\"}}}}"));
+ indexRandom(true,
+ client().prepareIndex("$index", "$type", "data.activity.5").setSource("{\"django_ct\": \"data.activity\", \"grantee\": \"Grantee 1\"}"),
+ client().prepareIndex("$index", "$type", "data.activity.6").setSource("{\"django_ct\": \"data.activity\", \"grantee\": \"Grantee 2\"}"));
+ ensureYellow();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("grantee", SortOrder.ASC)
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "data.activity.5", "data.activity.6");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("grantee", SortOrder.DESC)
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "data.activity.6", "data.activity.5");
+ }
+
+ @Test
+ public void testTrackScores() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .field("ivalue", 100)
+ .field("dvalue", 0.1)
+ .endObject());
+ index("test", "type1", jsonBuilder().startObject()
+ .field("id", "2")
+ .field("svalue", "bbb")
+ .field("ivalue", 200)
+ .field("dvalue", 0.2)
+ .endObject());
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), equalTo(Float.NaN));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), equalTo(Float.NaN));
+ }
+
+ // now check with score tracking
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("svalue", SortOrder.ASC)
+ .setTrackScores(true)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getMaxScore(), not(equalTo(Float.NaN)));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat(hit.getScore(), not(equalTo(Float.NaN)));
+ }
+ }
+
+ public void testRandomSorting() throws IOException, InterruptedException, ExecutionException {
+ Random random = getRandom();
+ assertAcked(prepareCreate("test")
+ .addMapping("type",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("sparse_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .startObject("dense_bytes")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+
+ TreeMap<BytesRef, String> sparseBytes = new TreeMap<>();
+ TreeMap<BytesRef, String> denseBytes = new TreeMap<>();
+ int numDocs = randomIntBetween(200, 300);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ String docId = Integer.toString(i);
+ BytesRef ref = null;
+ do {
+ ref = new BytesRef(TestUtil.randomRealisticUnicodeString(random));
+ } while (denseBytes.containsKey(ref));
+ denseBytes.put(ref, docId);
+ XContentBuilder src = jsonBuilder().startObject().field("dense_bytes", ref.utf8ToString());
+ if (rarely()) {
+ src.field("sparse_bytes", ref.utf8ToString());
+ sparseBytes.put(ref, docId);
+ }
+ src.endObject();
+ builders[i] = client().prepareIndex("test", "type", docId).setSource(src);
+ }
+ indexRandom(true, builders);
+ {
+ int size = between(1, denseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).setSize(size)
+ .addSort("dense_bytes", SortOrder.ASC).execute().actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) numDocs));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = denseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat("pos: " + i, searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ if (!sparseBytes.isEmpty()) {
+ int size = between(1, sparseBytes.size());
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .setPostFilter(QueryBuilders.existsQuery("sparse_bytes")).setSize(size).addSort("sparse_bytes", SortOrder.ASC).execute()
+ .actionGet();
+ assertNoFailures(searchResponse);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo((long) sparseBytes.size()));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ Set<Entry<BytesRef, String>> entrySet = sparseBytes.entrySet();
+ Iterator<Entry<BytesRef, String>> iterator = entrySet.iterator();
+ for (int i = 0; i < size; i++) {
+ assertThat(iterator.hasNext(), equalTo(true));
+ Entry<BytesRef, String> next = iterator.next();
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(next.getValue()));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(next.getKey().utf8ToString()));
+ }
+ }
+ }
+
+
+ @Test
+ public void test3078() {
+ createIndex("test");
+ ensureGreen();
+
+ for (int i = 1; i < 101; i++) {
+ client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", Integer.toString(i)).execute().actionGet();
+ }
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex and refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ refresh();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // reindex - no refresh
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ // optimize
+ optimize();
+ refresh();
+
+ client().prepareIndex("test", "type", Integer.toString(1)).setSource("field", Integer.toString(1)).execute().actionGet();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+
+ refresh();
+ searchResponse = client().prepareSearch("test").setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC)).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).sortValues()[0].toString(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).sortValues()[0].toString(), equalTo("10"));
+ assertThat(searchResponse.getHits().getAt(2).sortValues()[0].toString(), equalTo("100"));
+ }
+
+ @Test
+ public void testScoreSortDirection() throws Exception {
+ // The score of each hit is produced by a script reading _source.field, so
+ // ids "1","2","3" (field values 2, 1, 0) carry strictly decreasing scores.
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource("field", 2).execute().actionGet();
+ client().prepareIndex("test", "type", "2").setSource("field", 1).execute().actionGet();
+ client().prepareIndex("test", "type", "3").setSource("field", 0).execute().actionGet();
+
+ refresh();
+
+ // No explicit sort: hits must arrive in default (descending score) order.
+ SearchResponse searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.scriptFunction(new Script("_source.field"))))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ // Explicit _score DESC sort must give the same ordering.
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.scriptFunction(new Script("_source.field"))))
+ .addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).score(), Matchers.lessThan(searchResponse.getHits().getAt(0).score()));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).score(), Matchers.lessThan(searchResponse.getHits().getAt(1).score()));
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+
+ // NOTE(review): this third request repeats the _score DESC search above
+ // verbatim and asserts the same ordering — presumably it was meant to
+ // exercise SortOrder.ASC with reversed expectations; confirm.
+ searchResponse = client()
+ .prepareSearch("test")
+ .setQuery(
+ QueryBuilders.functionScoreQuery(matchAllQuery(), ScoreFunctionBuilders.scriptFunction(new Script("_source.field"))))
+ .addSort("_score", SortOrder.DESC).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+
+ @Test
+ public void testScoreSortDirection_withFunctionScore() throws Exception {
+ // Scores come from a script over _source.field; field values 2, 1, 0 for
+ // ids "1", "2", "3" yield a strictly decreasing score order.
+ createIndex("test");
+ ensureGreen();
+
+ for (int value = 2; value >= 0; value--) {
+ client().prepareIndex("test", "type", String.valueOf(3 - value)).setSource("field", value).execute().actionGet();
+ }
+
+ refresh();
+
+ // Without an explicit sort the hits arrive in descending score order.
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(new Script("_source.field"))))
+ .execute().actionGet();
+ assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).score(), Matchers.lessThan(response.getHits().getAt(0).score()));
+ assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(response.getHits().getAt(2).score(), Matchers.lessThan(response.getHits().getAt(1).score()));
+ assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+
+ // Explicit _score DESC sort yields the same ordering and score relations.
+ response = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(new Script("_source.field"))))
+ .addSort("_score", SortOrder.DESC)
+ .execute().actionGet();
+ assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+ assertThat(response.getHits().getAt(1).score(), Matchers.lessThan(response.getHits().getAt(0).score()));
+ assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(response.getHits().getAt(2).score(), Matchers.lessThan(response.getHits().getAt(1).score()));
+ assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+
+ // Repeat of the _score DESC search, checking only the id ordering.
+ response = client().prepareSearch("test")
+ .setQuery(functionScoreQuery(matchAllQuery(), scriptFunction(new Script("_source.field"))))
+ .addSort("_score", SortOrder.DESC)
+ .execute().actionGet();
+ assertThat(response.getHits().getAt(2).getId(), equalTo("3"));
+ assertThat(response.getHits().getAt(1).getId(), equalTo("2"));
+ assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
+ }
+
+ @Test
+ public void testIssue2986() {
+ // Regression test for issue #2986: sorting on a field with track_scores
+ // enabled must still produce real scores, never NaN.
+ createIndex("test");
+
+ for (int i = 1; i <= 3; i++) {
+ client().prepareIndex("test", "post", String.valueOf(i)).setSource("{\"field1\":\"value" + i + "\"}").execute().actionGet();
+ }
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test")
+ .setQuery(matchAllQuery())
+ .setTrackScores(true)
+ .addSort("field1", SortOrder.ASC)
+ .execute().actionGet();
+
+ for (SearchHit hit : response.getHits()) {
+ assertFalse(Float.isNaN(hit.getScore()));
+ }
+ }
+
+ @Test
+ public void testIssue2991() {
+ // Regression test for issue #2991: repeat the scenario several times on a
+ // freshly recreated index to make the original failure reproducible.
+ for (int i = 1; i < 4; i++) {
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore — the index does not exist on the first iteration
+ }
+ createIndex("test");
+ ensureGreen();
+ // Refresh between docs so they end up spread across segments.
+ client().prepareIndex("test", "type", "1").setSource("tag", "alpha").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "3").setSource("tag", "gamma").execute().actionGet();
+ refresh();
+
+ client().prepareIndex("test", "type", "4").setSource("tag", "delta").execute().actionGet();
+
+ refresh();
+ client().prepareIndex("test", "type", "2").setSource("tag", "beta").execute().actionGet();
+
+ refresh();
+ // Page of 2 sorted by tag ASC: alpha, beta -> ids "1", "2".
+ SearchResponse resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.ASC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("1"));
+ assertSecondHit(resp, hasId("2"));
+
+ // Page of 2 sorted by tag DESC: gamma, delta -> ids "3", "4".
+ resp = client().prepareSearch("test").setSize(2).setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("tag").order(SortOrder.DESC)).execute().actionGet();
+ assertHitCount(resp, 4);
+ assertThat(resp.getHits().hits().length, equalTo(2));
+ assertFirstHit(resp, hasId("3"));
+ assertSecondHit(resp, hasId("4"));
+ }
+ }
+
+ @Test
+ public void testSimpleSorts() throws Exception {
+ // Indexes ten docs (ids "0".."9") whose sortable fields all grow with the
+ // doc id (str_value is "aa".."jj", numeric fields equal i or 0.1 * i),
+ // then verifies ASC/DESC ordering and the reported sort values for each
+ // field type: string, script-on-string, byte, short, integer, long,
+ // float and double.
+ Random random = getRandom();
+ // Each field's fielddata format is randomly doc_values or the default.
+ // NOTE(review): the mapping draws from the static random() while the rest
+ // of the test uses the local 'random' — presumably intentional; confirm.
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("boolean_value").field("type", "boolean").endObject()
+ .startObject("byte_value").field("type", "byte").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_value").field("type", "short").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("integer_value").field("type", "integer").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("long_value").field("type", "long").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_value").field("type", "float").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_value").field("type", "double").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder builder = client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject()
+ .field("str_value", new String(new char[]{(char) (97 + i), (char) (97 + i)}))
+ .field("boolean_value", true)
+ .field("byte_value", i)
+ .field("short_value", i)
+ .field("integer_value", i)
+ .field("long_value", i)
+ .field("float_value", 0.1 * i)
+ .field("double_value", 0.1 * i)
+ .endObject());
+ builders.add(builder);
+ }
+ // Index in shuffled order with random refreshes/flushes in between so the
+ // docs land in varying segments.
+ Collections.shuffle(builders, random);
+ for (IndexRequestBuilder builder : builders) {
+ builder.execute().actionGet();
+ if (random.nextBoolean()) {
+ if (random.nextInt(5) != 0) {
+ refresh();
+ } else {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ }
+ refresh();
+
+ // STRING: ASC returns ids "0".."size-1" with sort values "aa", "bb", ...
+ int size = 1 + random.nextInt(10);
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.ASC)
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+
+ // STRING script: same ordering, produced via a script sort on str_value.
+ size = 1 + random.nextInt(10);
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort(new ScriptSortBuilder(new Script("doc['str_value'].value"), "string")).execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[] { (char) (97 + i),
+ (char) (97 + i) })));
+ }
+ size = 1 + random.nextInt(10);
+ // NOTE(review): this DESC leg sorts on the plain field, not the script —
+ // presumably the ScriptSortBuilder was intended here too; confirm.
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[] { (char) (97 + (9 - i)),
+ (char) (97 + (9 - i)) })));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // BYTE: sort values come back as the indexed byte i (9 - i for DESC).
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("byte_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // SHORT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("short_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // INTEGER
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo(i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("integer_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo((9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // LONG
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("long_value", SortOrder.DESC).execute()
+ .actionGet();
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // FLOAT: values were indexed as 0.1 * i, compared with a small tolerance.
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("float_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // DOUBLE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.ASC).execute()
+ .actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("double_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void test2920() throws IOException {
+ // Regression test for issue #2920: a script sort that evaluates to a
+ // constant string ("''") must not fail any shard.
+ assertAcked(prepareCreate("test").addMapping(
+ "test",
+ jsonBuilder().startObject().startObject("test").startObject("properties").startObject("value").field("type", "string")
+ .endObject().endObject().endObject().endObject()));
+ ensureGreen();
+ for (int doc = 0; doc < 10; doc++) {
+ client().prepareIndex("test", "test", String.valueOf(doc))
+ .setSource(jsonBuilder().startObject().field("value", "" + doc).endObject())
+ .execute().actionGet();
+ }
+ refresh();
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort(SortBuilders.scriptSort(new Script("\u0027\u0027"), "string"))
+ .execute().actionGet();
+ assertNoFailures(response);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testSimpleSortsOldScriptAPI() throws Exception {
+ // Same scenario as testSimpleSorts, but exercising the deprecated
+ // string-based script sort API (scheduled for removal in 2.0): ten docs
+ // (ids "0".."9") with monotonically increasing sortable fields, checked
+ // ASC and DESC for every field type.
+ Random random = getRandom();
+ // Each field's fielddata format is randomly doc_values or the default.
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("str_value")
+ .field("type", "string").field("index", "not_analyzed").startObject("fielddata")
+ .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("boolean_value")
+ .field("type", "boolean").endObject().startObject("byte_value").field("type", "byte").startObject("fielddata")
+ .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("short_value")
+ .field("type", "short").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null)
+ .endObject().endObject().startObject("integer_value").field("type", "integer").startObject("fielddata")
+ .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("long_value")
+ .field("type", "long").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null)
+ .endObject().endObject().startObject("float_value").field("type", "float").startObject("fielddata")
+ .field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject().startObject("double_value")
+ .field("type", "double").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null)
+ .endObject().endObject().endObject().endObject().endObject()));
+ ensureGreen();
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder builder = client().prepareIndex("test", "type1", Integer.toString(i)).setSource(
+ jsonBuilder().startObject().field("str_value", new String(new char[] { (char) (97 + i), (char) (97 + i) }))
+ .field("boolean_value", true).field("byte_value", i).field("short_value", i).field("integer_value", i)
+ .field("long_value", i).field("float_value", 0.1 * i).field("double_value", 0.1 * i).endObject());
+ builders.add(builder);
+ }
+ // Index in shuffled order with random refreshes/flushes in between so the
+ // docs land in varying segments.
+ Collections.shuffle(builders, random);
+ for (IndexRequestBuilder builder : builders) {
+ builder.execute().actionGet();
+ if (random.nextBoolean()) {
+ if (random.nextInt(5) != 0) {
+ refresh();
+ } else {
+ client().admin().indices().prepareFlush().execute().actionGet();
+ }
+ }
+
+ }
+ refresh();
+
+ // STRING: ASC returns ids "0".."size-1" with sort values "aa", "bb", ...
+ int size = 1 + random.nextInt(10);
+
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size)
+ .addSort("str_value", SortOrder.ASC).execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[] { (char) (97 + i),
+ (char) (97 + i) })));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size).addSort("str_value", SortOrder.DESC).execute()
+ .actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[] { (char) (97 + (9 - i)),
+ (char) (97 + (9 - i)) })));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // STRING script (old API: script source passed as a plain string)
+ size = 1 + random.nextInt(10);
+
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(size)
+ .addSort(new ScriptSortBuilder("doc['str_value'].value", "string"))
+ .execute().actionGet();
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + i), (char) (97 + i)})));
+ }
+ size = 1 + random.nextInt(10);
+ // NOTE(review): this DESC leg sorts on the plain field, not the script —
+ // confirm whether the script sort was intended here too.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("str_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(searchResponse.getHits().getAt(i).sortValues()[0].toString(), equalTo(new String(new char[]{(char) (97 + (9 - i)), (char) (97 + (9 - i))})));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // BYTE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("byte_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).byteValue(), equalTo((byte) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // SHORT
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) i));
+ }
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("short_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).shortValue(), equalTo((short) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // INTEGER
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo(i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("integer_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).intValue(), equalTo((9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // LONG
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) i));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("long_value", SortOrder.DESC)
+ .execute().actionGet();
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).longValue(), equalTo((long) (9 - i)));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // FLOAT: values were indexed as 0.1 * i, compared with a small tolerance.
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("float_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(10l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+
+ // DOUBLE
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * i, 0.000001d));
+ }
+
+ assertThat(searchResponse.toString(), not(containsString("error")));
+ size = 1 + random.nextInt(10);
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(size)
+ .addSort("double_value", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertHitCount(searchResponse, 10l);
+ assertThat(searchResponse.getHits().hits().length, equalTo(size));
+ for (int i = 0; i < size; i++) {
+ assertThat(searchResponse.getHits().getAt(i).id(), equalTo(Integer.toString(9 - i)));
+ assertThat(((Number) searchResponse.getHits().getAt(i).sortValues()[0]).doubleValue(), closeTo(0.1d * (9 - i), 0.000001d));
+ }
+
+ assertNoFailures(searchResponse);
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void test2920OldScriptAPI() throws IOException {
+ // Regression test for issue #2920 via the deprecated string-based script
+ // API: a constant-string ("''") script sort must not fail any shard.
+ assertAcked(prepareCreate("test").addMapping("test",
+ jsonBuilder().startObject().startObject("test").startObject("properties")
+ .startObject("value").field("type", "string").endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ for (int doc = 0; doc < 10; doc++) {
+ client().prepareIndex("test", "test", String.valueOf(doc))
+ .setSource(jsonBuilder().startObject().field("value", "" + doc).endObject())
+ .execute().actionGet();
+ }
+ refresh();
+ SearchResponse response = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort(SortBuilders.scriptSort("\u0027\u0027", "string"))
+ .execute().actionGet();
+ assertNoFailures(response);
+ }
+
+ // Script fields that fold a multi-valued field down to its minimum: ten docs
+ // (ids 0-9) carry long/double/string/geo_point arrays {i, i+1, i+2}, ten more
+ // (ids 10-19) carry only "ord". Results are sorted by "ord" ascending, so hit
+ // position i corresponds to doc i and the expected minimum is simply i.
+ @Test
+ public void testSortMinValueScript() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("lvalue").field("type", "long").endObject()
+ .startObject("dvalue").field("type", "double").endObject()
+ .startObject("svalue").field("type", "string").endObject()
+ .startObject("gvalue").field("type", "geo_point").endObject()
+ .endObject().endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder req = client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .field("svalue", new String[]{"" + i, "" + (i + 1), "" + (i + 2)})
+ .field("lvalue", new long[]{i, i + 1, i + 2})
+ .field("dvalue", new double[]{i, i + 1, i + 2})
+ .startObject("gvalue")
+ .field("lat", (double) i + 1)
+ .field("lon", (double) i)
+ .endObject()
+ .endObject());
+ req.execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject()
+ .field("ord", i)
+ .endObject()).execute().actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // test the long values
+ // unmappedType("long") presumably guards against shards where "ord" is not
+ // yet mapped -- TODO confirm.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", new Script("retval = Long.MAX_VALUE; for (v in doc['lvalue'].values){ retval = min(v, retval) }; retval"))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ // All 20 docs match; only the first page (size 10, the docs with values) is checked.
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Long) searchResponse.getHits().getAt(i).field("min").value(), equalTo((long) i));
+ }
+ // test the double values
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", new Script("retval = Double.MAX_VALUE; for (v in doc['dvalue'].values){ retval = min(v, retval) }; retval"))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l))
+ ;
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+
+ // test the string values
+ // String values are parsed to ints inside the script before taking the min.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", new Script("retval = Integer.MAX_VALUE; for (v in doc['svalue'].values){ retval = min(Integer.parseInt(v), retval) }; retval"))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Integer) searchResponse.getHits().getAt(i).field("min").value(), equalTo(i));
+ }
+
+ // test the geopoint values
+ // Only the longitude component is folded; lon was indexed as i, so min == i.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min", new Script("retval = Double.MAX_VALUE; for (v in doc['gvalue'].values){ retval = min(v.lon, retval) }; retval"))
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+ }
+
+ // Documents whose sort field is explicitly null must sort after all documents
+ // with values, for both ASC and DESC order. Three docs: id 1 -> "aaa",
+ // id 2 -> null, id 3 -> "bbb"; doc 2 is expected last in every ordering below.
+ @Test
+ public void testDocumentsWithNullValue() throws Exception {
+ // TODO: sort shouldn't fail when sort field is mapped dynamically
+ // We have to specify mapping explicitly because by the time search is performed dynamic mapping might not
+ // be propagated to all nodes yet and sort operation fail when the sort field is not defined
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .endObject()).execute().actionGet();
+
+ // This doc has an explicit null for the sort field.
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .nullField("svalue")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "3")
+ .field("svalue", "bbb")
+ .endObject()).execute().actionGet();
+
+
+ flush();
+ refresh();
+
+ // ASC: "aaa" (1), "bbb" (3), then the null doc (2) last.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", new Script("doc['id'].value"))
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // Same ordering when the script reads via .values[0] instead of .value.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", new Script("doc['id'].values[0]"))
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // DESC: "bbb" (3), "aaa" (1), null doc (2) still last.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", new Script("doc['id'].value"))
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ // Dump shard failures before asserting, to aid debugging on failure.
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // a query with docs just with null values
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("id", "2"))
+ .addScriptField("id", new Script("doc['id'].value"))
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("2"));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Legacy-API twin of testSortMinValueScript above: identical data and
+ // expectations, but scripts are passed as raw strings instead of Script objects.
+ @Test
+ public void testSortMinValueScriptOldScriptAPI() throws IOException {
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties").startObject("lvalue")
+ .field("type", "long").endObject().startObject("dvalue").field("type", "double").endObject().startObject("svalue")
+ .field("type", "string").endObject().startObject("gvalue").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ // Docs 0-9 carry multi-valued fields {i, i+1, i+2}; minimum per doc is i.
+ for (int i = 0; i < 10; i++) {
+ IndexRequestBuilder req = client().prepareIndex("test", "type1", "" + i).setSource(
+ jsonBuilder().startObject().field("ord", i).field("svalue", new String[] { "" + i, "" + (i + 1), "" + (i + 2) })
+ .field("lvalue", new long[] { i, i + 1, i + 2 }).field("dvalue", new double[] { i, i + 1, i + 2 })
+ .startObject("gvalue").field("lat", (double) i + 1).field("lon", (double) i).endObject().endObject());
+ req.execute().actionGet();
+ }
+
+ for (int i = 10; i < 20; i++) { // add some docs that don't have values in those fields
+ client().prepareIndex("test", "type1", "" + i).setSource(jsonBuilder().startObject().field("ord", i).endObject()).execute()
+ .actionGet();
+ }
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // test the long values
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", "retval = Long.MAX_VALUE; for (v in doc['lvalue'].values){ retval = min(v, retval) }; retval")
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(),
+ (Long) searchResponse.getHits().getAt(i).field("min").value(), equalTo((long) i));
+ }
+ // test the double values
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", "retval = Double.MAX_VALUE; for (v in doc['dvalue'].values){ retval = min(v, retval) }; retval")
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(),
+ (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+
+ // test the string values
+ searchResponse = client()
+ .prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("min",
+ "retval = Integer.MAX_VALUE; for (v in doc['svalue'].values){ retval = min(Integer.parseInt(v), retval) }; retval")
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(), (Integer) searchResponse.getHits().getAt(i)
+ .field("min").value(), equalTo(i));
+ }
+
+ // test the geopoint values
+ // Only the longitude is folded; lon was indexed as i.
+ searchResponse = client().prepareSearch().setQuery(matchAllQuery())
+ .addScriptField("min", "retval = Double.MAX_VALUE; for (v in doc['gvalue'].values){ retval = min(v.lon, retval) }; retval")
+ .addSort(SortBuilders.fieldSort("ord").order(SortOrder.ASC).unmappedType("long")).setSize(10).execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(20l));
+ for (int i = 0; i < 10; i++) {
+ assertThat("res: " + i + " id: " + searchResponse.getHits().getAt(i).getId(),
+ (Double) searchResponse.getHits().getAt(i).field("min").value(), equalTo((double) i));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ // Legacy-API twin of testDocumentsWithNullValue above: identical data and
+ // expectations, but script fields are passed as raw strings.
+ @Test
+ public void testDocumentsWithNullValueOldScriptAPI() throws Exception {
+ // TODO: sort shouldn't fail when sort field is mapped dynamically
+ // We have to specify mapping explicitly because by the time search is performed dynamic mapping might not
+ // be propagated to all nodes yet and sort operation fail when the sort field is not defined
+ String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("svalue").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject().string();
+ assertAcked(prepareCreate("test").addMapping("type1", mapping));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("svalue", "aaa")
+ .endObject()).execute().actionGet();
+
+ // Explicit null in the sort field; this doc must always sort last.
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .nullField("svalue")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject()
+ .field("id", "3")
+ .field("svalue", "bbb")
+ .endObject()).execute().actionGet();
+
+
+ flush();
+ refresh();
+
+ // ASC: "aaa" (1), "bbb" (3), null doc (2) last.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // Same ordering when reading via .values[0].
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].values[0]")
+ .addSort("svalue", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // DESC: "bbb" (3), "aaa" (1), null doc (2) still last.
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ // Dump shard failures before asserting, to aid debugging on failure.
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("3"));
+ assertThat((String) searchResponse.getHits().getAt(1).field("id").value(), equalTo("1"));
+ assertThat((String) searchResponse.getHits().getAt(2).field("id").value(), equalTo("2"));
+
+ // a query with docs just with null values
+ searchResponse = client().prepareSearch()
+ .setQuery(termQuery("id", "2"))
+ .addScriptField("id", "doc['id'].value")
+ .addSort("svalue", SortOrder.DESC)
+ .execute().actionGet();
+
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Failed shards:");
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ logger.warn("-> {}", shardSearchFailure);
+ }
+ }
+ assertThat(searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
+ assertThat((String) searchResponse.getHits().getAt(0).field("id").value(), equalTo("2"));
+ }
+
+ // Missing-value handling for numeric sorts: doc 2 has no i_value/d_value.
+ // Default and missing("_last") put it last; missing("_first") puts it first.
+ @Test
+ public void testSortMissingNumbers() throws Exception {
+ assertAcked(prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("i_value")
+ .field("type", "integer")
+ .startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject()
+ .endObject()
+ .startObject("d_value")
+ .field("type", "float")
+ .startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ // Doc 2 deliberately omits both numeric fields.
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", 2)
+ .field("d_value", 2.2)
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("i_value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
+ // Missing-value handling for string sorts: doc 2 has no "value" field.
+ // Covers default (_last), explicit _last, _first, and a concrete substitute
+ // value "b" that slots the missing doc between "a" (doc 1) and "c" (doc 3).
+ @Test @Slow
+ public void testSortMissingStrings() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("type1",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("value")
+ .field("type", "string")
+ .field("index", "not_analyzed")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "a")
+ .endObject()).execute().actionGet();
+
+ // Doc 2 deliberately omits the sort field.
+ client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject()
+ .field("id", "2")
+ .endObject()).execute().actionGet();
+
+ client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("value", "c")
+ .endObject()).execute().actionGet();
+
+ flush();
+ refresh();
+
+ // TODO: WTF?
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // Restore the interrupt flag and keep the cause; the original code
+ // threw a bare RuntimeException, losing both.
+ Thread.currentThread().interrupt();
+ throw new RuntimeException(e);
+ }
+
+ logger.info("--> sort with no missing (same as missing _last)");
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _last");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_last"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("3"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("2"));
+
+ logger.info("--> sort with missing _first");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("_first"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+
+ logger.info("--> sort with missing b");
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("value").order(SortOrder.ASC).missing("b"))
+ .execute().actionGet();
+ assertThat(Arrays.toString(searchResponse.getShardFailures()), searchResponse.getFailedShards(), equalTo(0));
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2"));
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));
+ }
+
+ // Sorting on a field with no mapping must fail with a clear message unless the
+ // caller supplies unmappedType, in which case the search succeeds.
+ @Test
+ public void testIgnoreUnmapped() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
+ .field("id", "1")
+ .field("i_value", -1)
+ .field("d_value", -1.1)
+ .endObject()).execute().actionGet();
+
+ logger.info("--> sort with an unmapped field, verify it fails");
+ try {
+ SearchResponse result = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk"))
+ .execute().actionGet();
+ assertThat("Expected exception but returned with", result, nullValue());
+ } catch (SearchPhaseExecutionException e) {
+ //we check that it's a parse failure rather than a different shard failure
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.toString(), containsString("[No mapping found for [kkk] in order to sort on]"));
+ }
+ }
+
+ // With unmappedType("string") the same sort must not produce failures.
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(SortBuilders.fieldSort("kkk").unmappedType("string"))
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ }
+
+ @Test
+ public void testSortMVField() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("long_values").field("type", "long").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("int_values").field("type", "integer").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("short_values").field("type", "short").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("byte_values").field("type", "byte").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("float_values").field("type", "float").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("double_values").field("type", "double").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()
+ .endObject().endObject().endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("long_values", 1l, 5l, 10l, 8l)
+ .array("int_values", 1, 5, 10, 8)
+ .array("short_values", 1, 5, 10, 8)
+ .array("byte_values", 1, 5, 10, 8)
+ .array("float_values", 1f, 5f, 10f, 8f)
+ .array("double_values", 1d, 5d, 10d, 8d)
+ .array("string_values", "01", "05", "10", "08")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("long_values", 11l, 15l, 20l, 7l)
+ .array("int_values", 11, 15, 20, 7)
+ .array("short_values", 11, 15, 20, 7)
+ .array("byte_values", 11, 15, 20, 7)
+ .array("float_values", 11f, 15f, 20f, 7f)
+ .array("double_values", 11d, 15d, 20d, 7d)
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("long_values", 2l, 1l, 3l, -4l)
+ .array("int_values", 2, 1, 3, -4)
+ .array("short_values", 2, 1, 3, -4)
+ .array("byte_values", 2, 1, 3, -4)
+ .array("float_values", 2f, 1f, 3f, -4f)
+ .array("double_values", 2d, 1d, 3d, -4d)
+ .array("string_values", "02", "01", "03", "!4")
+ .endObject()).execute().actionGet();
+
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(-4l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(1l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(7l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("long_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(20l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(10l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(3l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort(SortBuilders.fieldSort("long_values").order(SortOrder.DESC).sortMode("sum"))
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).longValue(), equalTo(53l));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).longValue(), equalTo(24l));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).longValue(), equalTo(2l));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("int_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("short_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(-4));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(1));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(7));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("byte_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).intValue(), equalTo(20));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).intValue(), equalTo(10));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).intValue(), equalTo(3));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(-4f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(1f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(7f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("float_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).floatValue(), equalTo(20f));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).floatValue(), equalTo(10f));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).floatValue(), equalTo(3f));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(-4d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(1d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(7d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("double_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(20d));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(10d));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(3d));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.ASC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("!4"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("01"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("07"));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(10)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().getTotalHits(), equalTo(3l));
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
+ @Test
+ public void testSortOnRareField() throws IOException { // DESC string sort must stay correct while the sort field grows increasingly sparse across the index
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() // randomly exercise doc_values vs default fielddata
+ .endObject().endObject().endObject()));
+ ensureGreen();
+ client().prepareIndex("test", "type1", Integer.toString(1)).setSource(jsonBuilder().startObject()
+ .array("string_values", "01", "05", "10", "08") // multi-valued: DESC picks the max value "10"
+ .endObject()).execute().actionGet();
+
+
+ refresh();
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(1)); // only one doc exists so far
+
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("10"));
+
+ client().prepareIndex("test", "type1", Integer.toString(2)).setSource(jsonBuilder().startObject()
+ .array("string_values", "11", "15", "20", "07")
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < 15; i++) { // pad the index with docs that lack the sort field, making it "rare"
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(2)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(2));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2))); // doc 2's max "20" sorts before doc 1's max "10"
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+
+ client().prepareIndex("test", "type1", Integer.toString(3)).setSource(jsonBuilder().startObject()
+ .array("string_values", "02", "01", "03", "!4") // "!4" sorts below digits, so "03" is doc 3's max
+ .endObject()).execute().actionGet();
+ for (int i = 0; i < 15; i++) { // re-index the filler docs to dilute the field further
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ }
+ refresh();
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3));
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+
+ for (int i = 0; i < 15; i++) { // refresh after every filler doc to create many small segments
+ client().prepareIndex("test", "type1", Integer.toString(300 + i)).setSource(jsonBuilder().startObject()
+ .array("some_other_field", "foobar")
+ .endObject()).execute().actionGet();
+ refresh();
+ }
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(3)
+ .addSort("string_values", SortOrder.DESC)
+ .execute().actionGet();
+
+ assertThat(searchResponse.getHits().hits().length, equalTo(3)); // ordering must survive the segment churn
+
+ assertThat(searchResponse.getHits().getAt(0).id(), equalTo(Integer.toString(2)));
+ assertThat(((Text) searchResponse.getHits().getAt(0).sortValues()[0]).string(), equalTo("20"));
+
+ assertThat(searchResponse.getHits().getAt(1).id(), equalTo(Integer.toString(1)));
+ assertThat(((Text) searchResponse.getHits().getAt(1).sortValues()[0]).string(), equalTo("10"));
+
+ assertThat(searchResponse.getHits().getAt(2).id(), equalTo(Integer.toString(3)));
+ assertThat(((Text) searchResponse.getHits().getAt(2).sortValues()[0]).string(), equalTo("03"));
+ }
+
+ public void testSortMetaField() throws Exception { // sorting on meta fields: _uid must be strictly ordered, _timestamp monotonic per the chosen order
+ final boolean idDocValues = random().nextBoolean(); // NOTE(review): read but not used below — presumably left over from a removed _id section; confirm
+ final boolean timestampDocValues = random().nextBoolean();
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+ .startObject("_timestamp").field("enabled", true).field("store", true).field("index", !timestampDocValues || randomBoolean() ? "not_analyzed" : "no").field("doc_values", timestampDocValues).endObject() // randomize doc_values vs indexed timestamp
+ .endObject().endObject();
+ assertAcked(prepareCreate("test")
+ .addMapping("type", mapping));
+ ensureGreen();
+ final int numDocs = randomIntBetween(10, 20);
+ IndexRequestBuilder[] indexReqs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ indexReqs[i] = client().prepareIndex("test", "type", Integer.toString(i)).setTimestamp(Integer.toString(randomInt(1000))).setSource(); // empty source; only the meta fields matter
+ }
+ indexRandom(true, indexReqs);
+
+ SortOrder order = randomFrom(SortOrder.values());
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_uid", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ SearchHit[] hits = searchResponse.getHits().hits();
+ BytesRef previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM; // sentinels: empty term for ASC, max term for DESC
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef uid = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(uid) : greaterThan(uid)); // _uid values are unique, so strict inequality
+ previous = uid;
+ }
+
+ /*
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_id", order)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ previous = order == SortOrder.ASC ? new BytesRef() : UnicodeUtil.BIG_TERM;
+ for (int i = 0; i < hits.length; ++i) {
+ final BytesRef id = new BytesRef(Uid.createUid(hits[i].type(), hits[i].id()));
+ assertThat(previous, order == SortOrder.ASC ? lessThan(id) : greaterThan(id));
+ previous = id;
+ }*/
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .setSize(randomIntBetween(1, numDocs + 5))
+ .addSort("_timestamp", order)
+ .addField("_timestamp") // fetch the stored timestamp so it can be compared against the sort
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ Long previousTs = order == SortOrder.ASC ? 0 : Long.MAX_VALUE;
+ for (int i = 0; i < hits.length; ++i) {
+ SearchHitField timestampField = hits[i].getFields().get("_timestamp");
+ Long timestamp = timestampField.<Long>getValue();
+ assertThat(previousTs, order == SortOrder.ASC ? lessThanOrEqualTo(timestamp) : greaterThanOrEqualTo(timestamp)); // timestamps may repeat, so non-strict comparison
+ previousTs = timestamp;
+ }
+ }
+
+ /**
+ * Test case for issue 6150: https://github.com/elasticsearch/elasticsearch/issues/6150
+ */
+ @Test
+ public void testNestedSort() throws IOException, InterruptedException, ExecutionException { // sorting on a field inside a nested object, both via the analyzed field and its not_analyzed sub-field
+ assertAcked(prepareCreate("test")
+ .addMapping("type",
+ XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type")
+ .startObject("properties")
+ .startObject("nested")
+ .field("type", "nested")
+ .startObject("properties")
+ .startObject("foo")
+ .field("type", "string") // analyzed: "bar bar" is tokenized into two "bar" terms
+ .startObject("fields")
+ .startObject("sub")
+ .field("type", "string")
+ .field("index", "not_analyzed") // sub-field keeps the raw value intact
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()));
+ ensureGreen();
+
+ client().prepareIndex("test", "type", "1").setSource(jsonBuilder().startObject()
+ .startObject("nested")
+ .field("foo", "bar bar")
+ .endObject()
+ .endObject()).execute().actionGet();
+ refresh();
+
+ // We sort on nested field
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("nested.foo", SortOrder.DESC)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ SearchHit[] hits = searchResponse.getHits().hits();
+ for (int i = 0; i < hits.length; ++i) {
+ assertThat(hits[i].getSortValues().length, is(1));
+ Object o = hits[i].getSortValues()[0];
+ assertThat(o, notNullValue());
+ assertThat(o instanceof StringAndBytesText, is(true));
+ StringAndBytesText text = (StringAndBytesText) o;
+ assertThat(text.string(), is("bar")); // analyzed field sorts on a single token, not the full value
+ }
+
+
+ // We sort on nested sub field
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort("nested.foo.sub", SortOrder.DESC)
+ .execute().actionGet();
+ assertNoFailures(searchResponse);
+ hits = searchResponse.getHits().hits();
+ for (int i = 0; i < hits.length; ++i) {
+ assertThat(hits[i].getSortValues().length, is(1));
+ Object o = hits[i].getSortValues()[0];
+ assertThat(o, notNullValue());
+ assertThat(o instanceof StringAndBytesText, is(true));
+ StringAndBytesText text = (StringAndBytesText) o;
+ assertThat(text.string(), is("bar bar")); // not_analyzed sub-field preserves the whole original string
+ }
+ }
+
+ @Test
+ public void testSortDuelBetweenSingleShardAndMultiShardIndex() throws Exception { // duel: identical data sorted on a 1-shard vs multi-shard index must return identical hits and sort values
+ String sortField = "sortField";
+ assertAcked(prepareCreate("test1")
+ .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(2, maximumNumberOfShards())) // multi-shard index
+ .addMapping("type", sortField, "type=long").get());
+ assertAcked(prepareCreate("test2")
+ .setSettings(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) // single-shard reference index
+ .addMapping("type", sortField, "type=long").get());
+
+ for (String index : new String[]{"test1", "test2"}) { // same 256 docs in both indices
+ List<IndexRequestBuilder> docs = new ArrayList<>();
+ for (int i = 0; i < 256; i++) {
+ docs.add(client().prepareIndex(index, "type", Integer.toString(i)).setSource(sortField, i));
+ }
+ indexRandom(true, docs);
+ }
+
+ ensureSearchable("test1", "test2");
+ SortOrder order = randomBoolean() ? SortOrder.ASC : SortOrder.DESC;
+ int from = between(0, 256); // randomized paging exercises shard-level merge of sorted results
+ int size = between(0, 256);
+ SearchResponse multiShardResponse = client().prepareSearch("test1").setFrom(from).setSize(size).addSort(sortField, order).get();
+ assertNoFailures(multiShardResponse);
+ SearchResponse singleShardResponse = client().prepareSearch("test2").setFrom(from).setSize(size).addSort(sortField, order).get();
+ assertNoFailures(singleShardResponse);
+
+ assertThat(multiShardResponse.getHits().totalHits(), equalTo(singleShardResponse.getHits().totalHits()));
+ assertThat(multiShardResponse.getHits().getHits().length, equalTo(singleShardResponse.getHits().getHits().length));
+ for (int i = 0; i < multiShardResponse.getHits().getHits().length; i++) { // per-position comparison of sort value and id
+ assertThat(multiShardResponse.getHits().getAt(i).sortValues()[0], equalTo(singleShardResponse.getHits().getAt(i).sortValues()[0]));
+ assertThat(multiShardResponse.getHits().getAt(i).id(), equalTo(singleShardResponse.getHits().getAt(i).id()));
+ }
+ }
+
+ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException { // geo-distance sort: multiple query points vs multi-valued geo_point docs, min/max modes in both orders
+ /**
+ * | q | d1 | d2
+ * | | |
+ * | | |
+ * | | |
+ * |2 o| x | x
+ * | | |
+ * |1 o| x | x
+ * |___________________________
+ * 1 2 3 4 5 6 7
+ */
+ assertAcked(prepareCreate("index").addMapping("type", "location", "type=geo_point"));
+ XContentBuilder d1Builder = jsonBuilder();
+ GeoPoint[] d1Points = {new GeoPoint(3, 2), new GeoPoint(4, 1)};
+ createShuffeldJSONArray(d1Builder, d1Points); // order in the doc is randomized; sort must not depend on it
+
+ XContentBuilder d2Builder = jsonBuilder();
+ GeoPoint[] d2Points = {new GeoPoint(5, 1), new GeoPoint(6, 2)};
+ createShuffeldJSONArray(d2Builder, d2Points);
+
+ logger.info(d1Builder.string());
+ logger.info(d2Builder.string());
+ indexRandom(true,
+ client().prepareIndex("index", "type", "d1").setSource(d1Builder),
+ client().prepareIndex("index", "type", "d2").setSource(d2Builder));
+ ensureYellow();
+ GeoPoint[] q = new GeoPoint[2];
+ if (randomBoolean()) { // query-point order is randomized too; results must be identical either way
+ q[0] = new GeoPoint(2, 1);
+ q[1] = new GeoPoint(2, 2);
+ } else {
+ q[1] = new GeoPoint(2, 2);
+ q[0] = new GeoPoint(2, 1);
+ }
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d1", "d2"); // min distance: d1's closest point wins
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS)));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS)));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("min").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d2", "d1"); // DESC flips the order, same distances
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS)));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS)));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d1", "d2"); // max distance: farthest point of each doc is compared
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS)));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS)));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("max").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d2", "d1");
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS)));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], equalTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS)));
+ }
+
+ protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] pointsArray) throws IOException { // writes a "location" array with the given points in random order (name typo "Shuffeld" kept — callers depend on it)
+ List<GeoPoint> points = new ArrayList<>();
+ points.addAll(Arrays.asList(pointsArray));
+ builder.startObject();
+ builder.startArray("location");
+ int numPoints = points.size();
+ for (int i = 0; i < numPoints; i++) {
+ builder.value(points.remove(randomInt(points.size() - 1))); // draw without replacement at a random index
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+
+ public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException { // geo-distance sort must accept geohash, "lat,lon" string, object and [lon,lat] array point formats interchangeably
+ /** q d1 d2
+ * |4 o| x | x
+ * | | |
+ * |3 o| x | x
+ * | | |
+ * |2 o| x | x
+ * | | |
+ * |1 o|x |x
+ * |______________________
+ * 1 2 3 4 5 6
+ */
+ assertAcked(prepareCreate("index").addMapping("type", "location", "type=geo_point"));
+ XContentBuilder d1Builder = jsonBuilder();
+ GeoPoint[] d1Points = {new GeoPoint(2.5, 1), new GeoPoint(2.75, 2), new GeoPoint(3, 3), new GeoPoint(3.25, 4)};
+ createShuffeldJSONArray(d1Builder, d1Points);
+
+ XContentBuilder d2Builder = jsonBuilder();
+ GeoPoint[] d2Points = {new GeoPoint(4.5, 1), new GeoPoint(4.75, 2), new GeoPoint(5, 3), new GeoPoint(5.25, 4)};
+ createShuffeldJSONArray(d2Builder, d2Points);
+
+ indexRandom(true,
+ client().prepareIndex("index", "type", "d1").setSource(d1Builder),
+ client().prepareIndex("index", "type", "d2").setSource(d2Builder));
+ ensureYellow();
+
+ List<String> qHashes = new ArrayList<>();
+ List<GeoPoint> qPoints = new ArrayList<>();
+ createQPoints(qHashes, qPoints); // parallel lists: hash i encodes point i
+
+ GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
+ for (int i = 0; i < 4; i++) { // consume the query points in random order, randomly as geohash or GeoPoint
+ int at = randomInt(3 - i);
+ if (randomBoolean()) {
+ geoDistanceSortBuilder.geohashes(qHashes.get(at));
+ } else {
+ geoDistanceSortBuilder.points(qPoints.get(at));
+ }
+ qHashes.remove(at); // keep the two lists in lockstep
+ qPoints.remove(at);
+ }
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d1", "d2");
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-5)); // closeTo: geohash decoding is approximate
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(4.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-5));
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(geoDistanceSortBuilder.sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d1", "d2");
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(3.25, 4, 2, 1, DistanceUnit.KILOMETERS), 1.e-5));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(5.25, 4, 2, 1, DistanceUnit.KILOMETERS), 1.e-5));
+
+ //test all the different formats in one
+ createQPoints(qHashes, qPoints); // lists were emptied above; refill them
+ XContentBuilder searchSourceBuilder = jsonBuilder();
+ searchSourceBuilder.startObject().startArray("sort").startObject().startObject("_geo_distance").startArray("location");
+
+ for (int i = 0; i < 4; i++) { // each query point rendered in a randomly chosen wire format
+ int at = randomInt(qPoints.size() - 1);
+ int format = randomInt(3);
+ switch (format) {
+ case 0: { // geohash string
+ searchSourceBuilder.value(qHashes.get(at));
+ break;
+ }
+ case 1: { // "lat,lon" string
+ searchSourceBuilder.value(qPoints.get(at).lat() + "," + qPoints.get(at).lon());
+ break;
+ }
+ case 2: { // object with lat/lon fields
+ searchSourceBuilder.value(qPoints.get(at));
+ break;
+ }
+ case 3: { // GeoJSON-style [lon, lat] array
+ searchSourceBuilder.startArray().value(qPoints.get(at).lon()).value(qPoints.get(at).lat()).endArray();
+ break;
+ }
+ }
+ qHashes.remove(at);
+ qPoints.remove(at);
+ }
+
+ searchSourceBuilder.endArray();
+ searchSourceBuilder.field("order", "asc");
+ searchSourceBuilder.field("unit", "km");
+ searchSourceBuilder.field("sort_mode", "min");
+ searchSourceBuilder.field("distance_type", "plane");
+ searchSourceBuilder.endObject();
+ searchSourceBuilder.endObject();
+ searchSourceBuilder.endArray();
+ searchSourceBuilder.endObject();
+
+ searchResponse = client().prepareSearch().setSource(searchSourceBuilder).execute().actionGet();
+ assertOrderedSearchHits(searchResponse, "d1", "d2"); // same expected order regardless of point encodings
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-5));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(4.5, 1, 2, 1, DistanceUnit.KILOMETERS), 1.e-5));
+ }
+
+ public void testSinglePointGeoDistanceSort() throws ExecutionException, InterruptedException, IOException { // one query point, expressed via builder API and raw JSON in every supported format, must sort d2 before d1
+ assertAcked(prepareCreate("index").addMapping("type", "location", "type=geo_point"));
+ indexRandom(true,
+ client().prepareIndex("index", "type", "d1").setSource(jsonBuilder().startObject().startObject("location").field("lat", 1).field("lon", 1).endObject().endObject()),
+ client().prepareIndex("index", "type", "d2").setSource(jsonBuilder().startObject().startObject("location").field("lat", 1).field("lon", 2).endObject().endObject()));
+ ensureYellow();
+
+ String hashPoint = "s037ms06g7h0"; // geohash of (2, 2) — same query point as the other variants below
+
+ GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
+ geoDistanceSortBuilder.geohashes(hashPoint); // variant 1: builder + geohash
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+
+ geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
+ geoDistanceSortBuilder.points(new GeoPoint(2, 2)); // variant 2: builder + GeoPoint
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+
+ geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
+ geoDistanceSortBuilder.point(2, 2); // variant 3: builder + raw lat/lon
+
+ searchResponse = client().prepareSearch()
+ .setQuery(matchAllQuery())
+ .addSort(geoDistanceSortBuilder.sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+
+ String geoSortRequest = jsonBuilder().startObject().startArray("sort").startObject()
+ .startObject("_geo_distance")
+ .startArray("location").value(2f).value(2f).endArray() // variant 4: JSON [lon, lat] array
+ .field("unit", "km")
+ .field("distance_type", "plane")
+ .endObject()
+ .endObject().endArray().string();
+ searchResponse = client().prepareSearch().setSource(geoSortRequest)
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+
+ geoSortRequest = jsonBuilder().startObject().startArray("sort").startObject()
+ .startObject("_geo_distance")
+ .field("location", "s037ms06g7h0") // variant 5: JSON geohash string
+ .field("unit", "km")
+ .field("distance_type", "plane")
+ .endObject()
+ .endObject().endArray().string();
+ searchResponse = client().prepareSearch().setSource(geoSortRequest)
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+
+ geoSortRequest = jsonBuilder().startObject().startArray("sort").startObject()
+ .startObject("_geo_distance")
+ .startObject("location") // variant 6: JSON lat/lon object
+ .field("lat", 2)
+ .field("lon", 2)
+ .endObject()
+ .field("unit", "km")
+ .field("distance_type", "plane")
+ .endObject()
+ .endObject().endArray().string();
+ searchResponse = client().prepareSearch().setSource(geoSortRequest)
+ .execute().actionGet();
+ checkCorrectSortOrderForGeoSort(searchResponse);
+ }
+
+ private void checkCorrectSortOrderForGeoSort(SearchResponse searchResponse) { // shared assertion: from query point (2,2), d2 at (1,2) is nearer than d1 at (1,1)
+ assertOrderedSearchHits(searchResponse, "d2", "d1");
+ assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 2, DistanceUnit.KILOMETERS), 1.e-5));
+ assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 1, 1, DistanceUnit.KILOMETERS), 1.e-5));
+ }
+
+ protected void createQPoints(List<String> qHashes, List<GeoPoint> qPoints) { // fills two parallel lists: qHashes.get(i) is the geohash encoding of qPoints.get(i)
+ GeoPoint[] qp = {new GeoPoint(2, 1), new GeoPoint(2, 2), new GeoPoint(2, 3), new GeoPoint(2, 4)};
+ qPoints.addAll(Arrays.asList(qp));
+ String[] qh = {"s02equ04ven0", "s037ms06g7h0", "s065kk0dc540", "s06g7h0dyg00"};
+ qHashes.addAll(Arrays.asList(qh));
+ }
+
+ public void testCrossIndexIgnoreUnmapped() throws Exception { // cross-index sort with unmappedType: missing fields fall back to type-specific min/max sentinels instead of failing
+ assertAcked(prepareCreate("test1").addMapping(
+ "type", "str_field1", "type=string",
+ "long_field", "type=long",
+ "double_field", "type=double").get());
+ assertAcked(prepareCreate("test2").get()); // test2 has no mapping at all — every sort field is unmapped there
+
+ indexRandom(true,
+ client().prepareIndex("test1", "type").setSource("str_field", "bcd", "long_field", 3, "double_field", 0.65),
+ client().prepareIndex("test2", "type").setSource());
+
+ ensureYellow("test1", "test2");
+
+ SearchResponse resp = client().prepareSearch("test1", "test2")
+ .addSort(fieldSort("str_field").order(SortOrder.ASC).unmappedType("string"))
+ .addSort(fieldSort("str_field2").order(SortOrder.DESC).unmappedType("string")).get(); // str_field2 exists nowhere: always null
+
+ final StringAndBytesText maxTerm = new StringAndBytesText(IndexFieldData.XFieldComparatorSource.MAX_TERM.utf8ToString()); // sentinel for a missing string under ASC
+ assertSortValues(resp,
+ new Object[] {new StringAndBytesText("bcd"), null},
+ new Object[] {maxTerm, null});
+
+ resp = client().prepareSearch("test1", "test2")
+ .addSort(fieldSort("long_field").order(SortOrder.ASC).unmappedType("long"))
+ .addSort(fieldSort("long_field2").order(SortOrder.DESC).unmappedType("long")).get();
+ assertSortValues(resp,
+ new Object[] {3L, Long.MIN_VALUE}, // missing long under DESC → Long.MIN_VALUE
+ new Object[] {Long.MAX_VALUE, Long.MIN_VALUE}); // missing long under ASC → Long.MAX_VALUE
+
+ resp = client().prepareSearch("test1", "test2")
+ .addSort(fieldSort("double_field").order(SortOrder.ASC).unmappedType("double"))
+ .addSort(fieldSort("double_field2").order(SortOrder.DESC).unmappedType("double")).get();
+ assertSortValues(resp,
+ new Object[] {0.65, Double.NEGATIVE_INFINITY}, // doubles use the infinities as sentinels
+ new Object[] {Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY});
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java
new file mode 100644
index 0000000000..cad9db7085
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/sort/SortParserTests.java
@@ -0,0 +1,149 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.search.sort;
+
+
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.TestSearchContext;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public class SortParserTests extends ElasticsearchSingleNodeTest {
+
+ @Test
+ public void testGeoDistanceSortParserManyPointsNoException() throws Exception {
+ XContentBuilder mapping = jsonBuilder();
+ mapping.startObject().startObject("type").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject().endObject();
+ IndexService indexService = createIndex("testidx", Settings.settingsBuilder().build(), "type", mapping);
+ TestSearchContext context = (TestSearchContext) createSearchContext(indexService);
+ context.setTypes("type");
+
+ XContentBuilder sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.startArray().value(1.2).value(3).endArray().startArray().value(5).value(6).endArray();
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ XContentParser parser = XContentHelper.createParser(sortBuilder.bytes());
+ parser.nextToken();
+ GeoDistanceSortParser geoParser = new GeoDistanceSortParser();
+ geoParser.parse(parser, context);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.value(new GeoPoint(1.2, 3)).value(new GeoPoint(1.2, 3));
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.value("1,2").value("3,4");
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.value("s3y0zh7w1z0g").value("s6wjr4et3f8v");
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.value(1.2).value(3);
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.field("location", new GeoPoint(1, 2));
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.field("location", "1,2");
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.field("location", "s3y0zh7w1z0g");
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+
+ sortBuilder = jsonBuilder();
+ sortBuilder.startObject();
+ sortBuilder.startArray("location");
+ sortBuilder.value(new GeoPoint(1, 2)).value("s3y0zh7w1z0g").startArray().value(1).value(2).endArray().value("1,2");
+ sortBuilder.endArray();
+ sortBuilder.field("order", "desc");
+ sortBuilder.field("unit", "km");
+ sortBuilder.field("sort_mode", "max");
+ sortBuilder.endObject();
+ parse(context, sortBuilder);
+ }
+
+ protected void parse(TestSearchContext context, XContentBuilder sortBuilder) throws Exception {
+ XContentParser parser = XContentHelper.createParser(sortBuilder.bytes());
+ parser.nextToken();
+ GeoDistanceSortParser geoParser = new GeoDistanceSortParser();
+ geoParser.parse(parser, context);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java b/core/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
new file mode 100644
index 0000000000..ce8b3a534b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/source/SourceFetchingTests.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.source;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.core.IsEqual.equalTo;
+
+public class SourceFetchingTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSourceDefaultBehavior() {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "field", "value");
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").addField("bla").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").addField("_source").get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ }
+
+ @Test
+ public void testSourceFiltering() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value", "field2", "value2").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").setFetchSource(false).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), nullValue());
+
+ response = client().prepareSearch("test").setFetchSource(true).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+
+ response = client().prepareSearch("test").setFetchSource("field1", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
+ response = client().prepareSearch("test").setFetchSource("hello", null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(0));
+
+ response = client().prepareSearch("test").setFetchSource(new String[]{"*"}, new String[]{"field2"}).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field1"), equalTo("value"));
+
+ }
+
+ /**
+ * Test Case for #5132: Source filtering with wildcards broken when given multiple patterns
+ * https://github.com/elasticsearch/elasticsearch/issues/5132
+ */
+ @Test
+ public void testSourceWithWildcardFiltering() {
+ createIndex("test");
+ ensureGreen();
+
+ client().prepareIndex("test", "type1", "1").setSource("field", "value").get();
+ refresh();
+
+ SearchResponse response = client().prepareSearch("test").setFetchSource(new String[]{"*.notexisting","field"}, null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field"), equalTo("value"));
+
+ response = client().prepareSearch("test").setFetchSource(new String[]{"field.notexisting.*","field"}, null).get();
+ assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
+ assertThat(response.getHits().getAt(0).getSource().size(), equalTo(1));
+ assertThat((String) response.getHits().getAt(0).getSource().get("field"), equalTo("value"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
new file mode 100644
index 0000000000..a4d17090ed
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.stats;
+
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.GroupShardsIterator;
+import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.search.stats.SearchStats.Stats;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class SearchStatsTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected int numberOfReplicas() {
+ return 0;
+ }
+
+ @Test
+ public void testSimpleStats() throws Exception {
+ // clear all stats first
+ client().admin().indices().prepareStats().clear().execute().actionGet();
+ final int numNodes = cluster().numDataNodes();
+ assertThat(numNodes, greaterThanOrEqualTo(2));
+ final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard...
+ final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10));
+ assertThat(numNodes, lessThanOrEqualTo(shardsIdx1 + shardsIdx2));
+ assertAcked(prepareCreate("test1").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, shardsIdx1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)));
+ int docsTest1 = scaledRandomIntBetween(3*shardsIdx1, 5*shardsIdx1);
+ for (int i = 0; i < docsTest1; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (rarely()) {
+ refresh();
+ }
+ }
+ assertAcked(prepareCreate("test2").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, shardsIdx2)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)));
+ int docsTest2 = scaledRandomIntBetween(3*shardsIdx2, 5*shardsIdx2);
+ for (int i = 0; i < docsTest2; i++) {
+ client().prepareIndex("test2", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ if (rarely()) {
+ refresh();
+ }
+ }
+ assertThat(shardsIdx1+shardsIdx2, equalTo(numAssignedShards("test1", "test2")));
+ assertThat(numAssignedShards("test1", "test2"), greaterThanOrEqualTo(2));
+ // THERE WILL BE AT LEAST 2 NODES HERE SO WE CAN WAIT FOR GREEN
+ ensureGreen();
+ refresh();
+ int iters = scaledRandomIntBetween(100, 150);
+ for (int i = 0; i < iters; i++) {
+ SearchResponse searchResponse = internalCluster().clientNodeClient().prepareSearch()
+ .setQuery(QueryBuilders.termQuery("field", "value")).setStats("group1", "group2")
+ .addHighlightedField("field")
+ .addScriptField("scrip1", new Script("_source.field"))
+ .setSize(100)
+ .execute().actionGet();
+ assertHitCount(searchResponse, docsTest1 + docsTest2);
+ assertAllSuccessful(searchResponse);
+ }
+
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ logger.debug("###### indices search stats: " + indicesStats.getTotal().getSearch());
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getQueryTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getTotal().getFetchTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), nullValue());
+
+ indicesStats = client().admin().indices().prepareStats().setGroups("group1").execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats(), notNullValue());
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getQueryTimeInMillis(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchCount(), greaterThan(0l));
+ assertThat(indicesStats.getTotal().getSearch().getGroupStats().get("group1").getFetchTimeInMillis(), greaterThan(0l));
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
+ NodeStats[] nodes = nodeStats.getNodes();
+ Set<String> nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2");
+ int num = 0;
+ for (NodeStats stat : nodes) {
+ Stats total = stat.getIndices().getSearch().getTotal();
+ if (nodeIdsWithIndex.contains(stat.getNode().getId())) {
+ assertThat(total.getQueryCount(), greaterThan(0l));
+ assertThat(total.getQueryTimeInMillis(), greaterThan(0l));
+ num++;
+ } else {
+ assertThat(total.getQueryCount(), equalTo(0l));
+ assertThat(total.getQueryTimeInMillis(), equalTo(0l));
+ }
+ }
+
+ assertThat(num, greaterThan(0));
+
+ }
+
+ private Set<String> nodeIdsWithIndex(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ Set<String> nodes = new HashSet<>();
+ for (ShardIterator shardIterator : allAssignedShardsGrouped) {
+ for (ShardRouting routing : shardIterator.asUnordered()) {
+ if (routing.active()) {
+ nodes.add(routing.currentNodeId());
+ }
+
+ }
+ }
+ return nodes;
+ }
+
+ @Test
+ public void testOpenContexts() {
+ createIndex("test1");
+ ensureGreen("test1");
+ final int docs = scaledRandomIntBetween(20, 50);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("test1", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ IndicesStatsResponse indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(5)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+ assertSearchResponse(searchResponse);
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo((long)numAssignedShards("test1")));
+
+ // scroll, but with no timeout (so no context)
+ searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).execute().actionGet();
+
+ indicesStats = client().admin().indices().prepareStats().execute().actionGet();
+ assertThat(indicesStats.getTotal().getSearch().getOpenContexts(), equalTo(0l));
+ }
+
+ protected int numAssignedShards(String... indices) {
+ ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
+ GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
+ return allAssignedShardsGrouped.size();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java
new file mode 100644
index 0000000000..a9f134b1be
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/stats/SearchStatsUnitTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.stats;
+
+import org.elasticsearch.index.search.stats.SearchStats;
+import org.elasticsearch.index.search.stats.SearchStats.Stats;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class SearchStatsUnitTests extends ElasticsearchTestCase {
+
+ @Test
+ // https://github.com/elasticsearch/elasticsearch/issues/7644
+ public void testShardLevelSearchGroupStats() throws Exception {
+ // let's create two dummy search stats with groups
+ Map<String, Stats> groupStats1 = new HashMap<>();
+ Map<String, Stats> groupStats2 = new HashMap<>();
+ groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1));
+ SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1), 0, groupStats1);
+ SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1), 0, groupStats2);
+
+ // adding these two search stats and checking group stats are correct
+ searchStats1.add(searchStats2);
+ assertStats(groupStats1.get("group1"), 1);
+
+ // another call, adding again ...
+ searchStats1.add(searchStats2);
+ assertStats(groupStats1.get("group1"), 2);
+
+ // making sure stats2 was not affected (this would previously return 2!)
+ assertStats(groupStats2.get("group1"), 1);
+
+ // adding again would then return wrong search stats (would return 4! instead of 3)
+ searchStats1.add(searchStats2);
+ assertStats(groupStats1.get("group1"), 3);
+ }
+
+ private void assertStats(Stats stats, long equalTo) {
+ assertEquals(equalTo, stats.getQueryCount());
+ assertEquals(equalTo, stats.getQueryTimeInMillis());
+ assertEquals(equalTo, stats.getQueryCurrent());
+ assertEquals(equalTo, stats.getFetchCount());
+ assertEquals(equalTo, stats.getFetchTimeInMillis());
+ assertEquals(equalTo, stats.getFetchCurrent());
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
new file mode 100644
index 0000000000..a87c66aa67
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java
@@ -0,0 +1,1170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.carrotsearch.hppc.ObjectLongHashMap;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.collect.Lists;
+
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.MapperException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.percolator.PercolatorService;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionStats;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.*;
+
+@SuppressCodecs("*") // requires custom completion format
+public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest {
+
+ private final String INDEX = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String TYPE = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final String FIELD = RandomStrings.randomAsciiOfLength(getRandom(), 10).toLowerCase(Locale.ROOT);
+ private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+
+ @Test
+ public void testSimple() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ refresh();
+
+ assertSuggestionsNotInOrder("f", "Foo Fighters", "Firestarter", "Foo Fighters Generator", "Foo Fighters Learn to Fly");
+ assertSuggestionsNotInOrder("t", "The Prodigy", "Turbonegro", "Turbonegro Get it on", "The Prodigy Firestarter");
+ }
+
+ @Test
+ public void testSuggestFieldWithPercolateApi() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+ String[][] input = {{"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"}, {"Foo Fighters"},
+ {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"},
+ {"The Prodigy"}, {"The Prodigy"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"},
+ {"Turbonegro"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}}; // work with frequencies
+ for (int i = 0; i < input.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .endObject()
+ .endObject()
+ )
+ .execute().actionGet();
+ }
+
+ client().prepareIndex(INDEX, PercolatorService.TYPE_NAME, "4")
+ .setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ PercolateResponse response = client().preparePercolate().setIndices(INDEX).setDocumentType(TYPE)
+ .setGetRequest(Requests.getRequest(INDEX).type(TYPE).id("1"))
+ .execute().actionGet();
+ assertThat(response.getCount(), equalTo(1l));
+ }
+
+ @Test
+ public void testBasicPrefixSuggestion() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+ for (int i = 0; i < 2; i++) {
+ createData(i == 0);
+ assertSuggestions("f", "Firestarter - The Prodigy", "Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("ge", "Generator - Foo Fighters", "Get it on - Turbonegro");
+ assertSuggestions("t", "The Prodigy", "Firestarter - The Prodigy", "Get it on - Turbonegro", "Turbonegro");
+ }
+ }
+
+ @Test
+ public void testThatWeightsAreWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ List<String> similarNames = Lists.newArrayList("the", "The Prodigy", "The Verve", "The the");
+ // the weight is 1000 divided by string length, so the results are easy to to check
+ for (String similarName : similarNames) {
+ client().prepareIndex(INDEX, TYPE, similarName).setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(similarName).endArray()
+ .field("weight", 1000 / similarName.length())
+ .endObject().endObject()
+ ).get();
+ }
+
+ refresh();
+
+ assertSuggestions("the", "the", "The the", "The Verve", "The Prodigy");
+ }
+
+ @Test
+ public void testThatWeightMustBeAnInteger() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ try {
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("sth").endArray()
+ .field("weight", 2.5)
+ .endObject().endObject()
+ ).get();
+ fail("Indexing with a float weight was successful, but should not be");
+ } catch (MapperParsingException e) {
+ assertThat(e.toString(), containsString("2.5"));
+ }
+ }
+
+ @Test
+ public void testThatWeightCanBeAString() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("testing").endArray()
+ .field("weight", "10")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("test").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "testing");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+
+ assertThat(prefixOption.getText().string(), equalTo("testing"));
+ assertThat((long) prefixOption.getScore(), equalTo(10l));
+ }
+
+
+ @Test
+ public void testThatWeightMustNotBeANonNumberString() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ try {
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("sth").endArray()
+ .field("weight", "thisIsNotValid")
+ .endObject().endObject()
+ ).get();
+ fail("Indexing with a non-number representing string as weight was successful, but should not be");
+ } catch (MapperParsingException e) {
+ assertThat(e.toString(), containsString("thisIsNotValid"));
+ }
+ }
+
+ @Test
+ public void testThatWeightAsStringMustBeInt() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ String weight = String.valueOf(Long.MAX_VALUE - 4);
+ try {
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("testing").endArray()
+ .field("weight", weight)
+ .endObject().endObject()
+ ).get();
+ fail("Indexing with weight string representing value > Int.MAX_VALUE was successful, but should not be");
+ } catch (MapperParsingException e) {
+ assertThat(e.toString(), containsString(weight));
+ }
+ }
+
+ @Test
+ public void testThatInputCanBeAStringInsteadOfAnArray() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .field("input", "Foo Fighters")
+ .field("output", "Boo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("f", "Boo Fighters");
+ }
+
+ @Test
+ public void testThatPayloadsAreArbitraryJsonObjects() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startObject("payload").field("foo", "bar").startArray("test").value("spam").value("eggs").endArray().endObject()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ // parse JSON
+ Map<String, Object> jsonMap = prefixOption.getPayloadAsMap();
+ assertThat(jsonMap.size(), is(2));
+ assertThat(jsonMap.get("foo").toString(), is("bar"));
+ assertThat(jsonMap.get("test"), is(instanceOf(List.class)));
+ List<String> listValues = (List<String>) jsonMap.get("test");
+ assertThat(listValues, hasItems("spam", "eggs"));
+ }
+
+ @Test
+ public void testPayloadAsNumeric() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", 1)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ assertThat(prefixOption.getPayloadAsLong(), equalTo(1l));
+ }
+
+ @Test
+ public void testPayloadAsString() throws Exception {
+ completionMappingBuilder.payloads(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .field("payload", "test")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("testSuggestions").field(FIELD).text("foo").size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, "testSuggestions", "Boo Fighters");
+ Suggest.Suggestion.Entry.Option option = suggestResponse.getSuggest().getSuggestion("testSuggestions").getEntries().get(0).getOptions().get(0);
+ assertThat(option, is(instanceOf(CompletionSuggestion.Entry.Option.class)));
+ CompletionSuggestion.Entry.Option prefixOption = (CompletionSuggestion.Entry.Option) option;
+ assertThat(prefixOption.getPayload(), is(notNullValue()));
+
+ assertThat(prefixOption.getPayloadAsString(), equalTo("test"));
+ }
+
+ @Test(expected = MapperException.class)
+ public void testThatExceptionIsThrownWhenPayloadsAreDisabledButInIndexRequest() throws Exception {
+ completionMappingBuilder.payloads(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("output", "Boo Fighters")
+ .startArray("payload").value("spam").value("eggs").endArray()
+ .endObject().endObject()
+ ).get();
+ }
+
+ @Test
+ public void testDisabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .field("weight", 10)
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .field("weight", 20)
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foof", "Foof", "Foo Fighters");
+ }
+
+ @Test
+ public void testEnabledPreserveSeparators() throws Exception {
+ completionMappingBuilder.preserveSeparators(true);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foof").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foof", "Foof");
+ }
+
+ @Test
+ public void testThatMultipleInputsAreSupported() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").value("Fu Fighters").endArray()
+ .field("output", "The incredible Foo Fighters")
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("foo", "The incredible Foo Fighters");
+ assertSuggestions("fu", "The incredible Foo Fighters");
+ }
+
+ @Test
+ public void testThatShortSyntaxIsWorking() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startArray(FIELD)
+ .value("The Prodigy Firestarter").value("Firestarter")
+ .endArray().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("t", "The Prodigy Firestarter");
+ assertSuggestions("f", "Firestarter");
+ }
+
+ @Test
+ public void testThatDisablingPositionIncrementsWorkForStopwords() throws Exception {
+ // analyzer which removes stopwords... so may not be the simple one
+ completionMappingBuilder.searchAnalyzer("classic").indexAnalyzer("classic").preservePositionIncrements(false);
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("b", "The Beatles");
+ }
+
+ @Test
+ public void testThatSynonymsWork() throws Exception {
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put("analysis.analyzer.suggest_analyzer_synonyms.type", "custom")
+ .put("analysis.analyzer.suggest_analyzer_synonyms.tokenizer", "standard")
+ .putArray("analysis.analyzer.suggest_analyzer_synonyms.filter", "standard", "lowercase", "my_synonyms")
+ .put("analysis.filter.my_synonyms.type", "synonym")
+ .putArray("analysis.filter.my_synonyms.synonyms", "foo,renamed");
+ completionMappingBuilder.searchAnalyzer("suggest_analyzer_synonyms").indexAnalyzer("suggest_analyzer_synonyms");
+ createIndexAndMappingAndSettings(settingsBuilder.build(), completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Foo Fighters").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // get suggestions for renamed
+ assertSuggestions("r", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldTypeWorks() throws Exception {
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping));
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject(FIELD).field("type", "string").endObject()
+ .startObject("suggest").field("type", "completion").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ new CompletionSuggestionBuilder("suggs").field(FIELD + ".suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatUpgradeToMultiFieldsWorks() throws Exception {
+ final XContentBuilder mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping));
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "string")
+ .startObject("fields")
+ .startObject("suggest").field("type", "completion").field("analyzer", "simple").endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, "suggs");
+
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").endObject()).get();
+ ensureGreen(INDEX);
+
+ SuggestResponse afterReindexingResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.completionSuggestion("suggs").field(FIELD + ".suggest").text("f").size(10)
+ ).execute().actionGet();
+ assertSuggestions(afterReindexingResponse, "suggs", "Foo Fighters");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterWorks() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nirv").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nirw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsEditDistances() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // edit distance 1
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Norw").size(10)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ // edit distance 2
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Norw").size(10).setFuzziness(Fuzziness.TWO)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsTranspositions() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(false).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nriv").size(10).setFuzzyTranspositions(true).setFuzziness(Fuzziness.ONE)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsMinPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nriva").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nrivan").size(10).setFuzzyMinLength(6)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterSupportsNonPrefixLength() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nirw").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("Nirvo").size(10).setFuzzyPrefixLength(4)
+ ).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "Nirvana");
+ }
+
+ @Test
+ public void testThatFuzzySuggesterIsUnicodeAware() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("ööööö").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+
+ // suggestion with a character, which needs unicode awareness
+ CompletionSuggestionFuzzyBuilder completionSuggestionBuilder =
+ SuggestBuilders.fuzzyCompletionSuggestion("foo").field(FIELD).text("öööи").size(10).setUnicodeAware(true);
+
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+
+ // removing unicode awareness leads to no result
+ completionSuggestionBuilder.setUnicodeAware(false);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo");
+
+ // increasing edit distance instead of unicode awareness works again, as this is only a single character
+ completionSuggestionBuilder.setFuzziness(Fuzziness.TWO);
+ suggestResponse = client().prepareSuggest(INDEX).addSuggestion(completionSuggestionBuilder).execute().actionGet();
+ assertSuggestions(suggestResponse, false, "foo", "ööööö");
+ }
+
+ @Test
+ public void testThatStatsAreWorking() throws Exception {
+ String otherField = "testOtherField";
+
+ createIndex(INDEX);
+
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD.toString())
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .startObject(otherField)
+ .field("type", "completion").field("analyzer", "simple")
+ .endObject()
+ .endObject().endObject().endObject())
+ .get();
+ assertThat(putMappingResponse.isAcknowledged(), is(true));
+
+ // Index two entities
+ client().prepareIndex(INDEX, TYPE, "1").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Foo Fighters").field(otherField, "WHATEVER").endObject()).get();
+ client().prepareIndex(INDEX, TYPE, "2").setRefresh(true).setSource(jsonBuilder().startObject().field(FIELD, "Bar Fighters").field(otherField, "WHATEVER2").endObject()).get();
+
+ // Get all stats
+ IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).get();
+ CompletionStats completionStats = indicesStatsResponse.getIndex(INDEX).getPrimaries().completion;
+ assertThat(completionStats, notNullValue());
+ long totalSizeInBytes = completionStats.getSizeInBytes();
+ assertThat(totalSizeInBytes, is(greaterThan(0L)));
+
+ IndicesStatsResponse singleFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(FIELD).get();
+ long singleFieldSizeInBytes = singleFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(FIELD);
+ IndicesStatsResponse otherFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields(otherField).get();
+ long otherFieldSizeInBytes = otherFieldStats.getIndex(INDEX).getPrimaries().completion.getFields().get(otherField);
+ assertThat(singleFieldSizeInBytes + otherFieldSizeInBytes, is(totalSizeInBytes));
+
+ // regexes
+ IndicesStatsResponse regexFieldStats = client().admin().indices().prepareStats(INDEX).setIndices(INDEX).setCompletion(true).setCompletionFields("*").get();
+ ObjectLongHashMap<String> fields = regexFieldStats.getIndex(INDEX).getPrimaries().completion.getFields();
+ long regexSizeInBytes = fields.get(FIELD) + fields.get(otherField);
+ assertThat(regexSizeInBytes, is(totalSizeInBytes));
+ }
+
+ @Test
+ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exception {
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Nirvana").endArray()
+ .endObject().endObject()
+ ).get();
+
+ refresh();
+ try {
+ client().prepareSearch(INDEX).setTypes(TYPE).addSort(new FieldSortBuilder(FIELD)).execute().actionGet();
+ fail("Expected an exception due to trying to sort on completion field, but did not happen");
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status().getStatus(), is(400));
+ assertThat(e.toString(), containsString("Sorting not supported for field[" + FIELD + "]"));
+ }
+ }
+
+ // Uses a custom search analyzer with a stop filter (remove_trailing=false) while
+ // indexing with "simple": stopwords typed mid-query must still match, but a
+ // stopword followed by further input interacts with position increments.
+ @Test
+ public void testThatSuggestStopFilterWorks() throws Exception {
+ Settings.Builder settingsBuilder = settingsBuilder()
+ .put("index.analysis.analyzer.stoptest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.stoptest.filter", "standard", "suggest_stop_filter")
+ .put("index.analysis.filter.suggest_stop_filter.type", "stop")
+ .put("index.analysis.filter.suggest_stop_filter.remove_trailing", false);
+
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ completionMappingBuilder.preserveSeparators(true).preservePositionIncrements(true);
+ completionMappingBuilder.searchAnalyzer("stoptest");
+ completionMappingBuilder.indexAnalyzer("simple");
+ createIndexAndMappingAndSettings(settingsBuilder.build(), completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Feed trolls").endArray()
+ .field("weight", 5).endObject().endObject()
+ ).get();
+
+ // Higher weight so it's ranked first:
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("Feed the trolls").endArray()
+ .field("weight", 10).endObject().endObject()
+ ).get();
+
+ refresh();
+
+ assertSuggestions("f", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fe", "Feed the trolls", "Feed trolls");
+ assertSuggestions("fee", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed t", "Feed the trolls", "Feed trolls");
+ assertSuggestions("feed the", "Feed the trolls");
+ // stop word complete, gets ignored on query time, makes it "feed" only
+ assertSuggestions("feed the ", "Feed the trolls", "Feed trolls");
+ // stopword gets removed, but position increment kicks in, which doesn't work for the prefix suggester
+ assertSuggestions("feed the t");
+ }
+
+ // An unknown sub-field inside a completion object ("FRIGGININVALID") must be
+ // rejected at parse time with a MapperParsingException.
+ @Test(expected = MapperParsingException.class)
+ public void testThatIndexingInvalidFieldsInCompletionFieldResultsInException() throws Exception {
+ CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder();
+ createIndexAndMapping(completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("FRIGGININVALID").value("Nirvana").endArray()
+ .endObject().endObject()).get();
+ }
+
+
+ // Runs a completion suggestion for `suggestion` on FIELD (under a random suggestion
+ // name) and asserts the returned options match `suggestions` in the given order.
+ public void assertSuggestions(String suggestion, String... suggestions) {
+ String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, suggestionName, suggestions);
+ }
+
+ // Like assertSuggestions(String, String...) but only requires each expected
+ // suggestion to be present, regardless of ranking order.
+ public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) {
+ String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
+ SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10)
+ ).execute().actionGet();
+
+ assertSuggestions(suggestResponse, false, suggestionName, suggestions);
+ }
+
+ // Convenience overload: order-strict assertion against a prepared response.
+ private void assertSuggestions(SuggestResponse suggestResponse, String name, String... suggestions) {
+ assertSuggestions(suggestResponse, true, name, suggestions);
+ }
+
+ // Core assertion: the response must contain a suggestion named `name` whose first
+ // entry has exactly `suggestions` as options — in order when suggestionOrderStrict,
+ // otherwise as an unordered set. Failure messages include the actual option list.
+ private void assertSuggestions(SuggestResponse suggestResponse, boolean suggestionOrderStrict, String name, String... suggestions) {
+ assertAllSuccessful(suggestResponse);
+
+ // collect all returned suggestion names so the "missing suggestion" message is informative
+ List<String> suggestionNames = Lists.newArrayList();
+ for (Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestion : Lists.newArrayList(suggestResponse.getSuggest().iterator())) {
+ suggestionNames.add(suggestion.getName());
+ }
+ String expectFieldInResponseMsg = String.format(Locale.ROOT, "Expected suggestion named %s in response, got %s", name, suggestionNames);
+ assertThat(expectFieldInResponseMsg, suggestResponse.getSuggest().getSuggestion(name), is(notNullValue()));
+
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> suggestion = suggestResponse.getSuggest().getSuggestion(name);
+
+ List<String> suggestionList = getNames(suggestion.getEntries().get(0));
+ List<Suggest.Suggestion.Entry.Option> options = suggestion.getEntries().get(0).getOptions();
+
+ String assertMsg = String.format(Locale.ROOT, "Expected options %s length to be %s, but was %s", suggestionList, suggestions.length, options.size());
+ assertThat(assertMsg, options.size(), is(suggestions.length));
+ if (suggestionOrderStrict) {
+ // positional comparison, with score included in the error message for debugging ranking issues
+ for (int i = 0; i < suggestions.length; i++) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s in list %s to be [%s] score: %s", i, suggestionList, suggestions[i], options.get(i).getScore());
+ assertThat(errMsg, options.get(i).getText().toString(), is(suggestions[i]));
+ }
+ } else {
+ // membership-only comparison (size already checked above)
+ for (String expectedSuggestion : suggestions) {
+ String errMsg = String.format(Locale.ROOT, "Expected elem %s to be in list %s", expectedSuggestion, suggestionList);
+ assertThat(errMsg, suggestionList, hasItem(expectedSuggestion));
+ }
+ }
+ }
+
+ // Extracts the option texts (surface forms) of a suggestion entry, in ranking order.
+ private List<String> getNames(Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> suggestEntry) {
+ List<String> names = Lists.newArrayList();
+ for (Suggest.Suggestion.Entry.Option entry : suggestEntry.getOptions()) {
+ names.add(entry.getText().string());
+ }
+ return names;
+ }
+
+ // Creates INDEX with the given settings (merged over the test's default index
+ // settings) and a completion mapping for FIELD built from completionMappingBuilder,
+ // then waits for yellow health so indexing can proceed.
+ private void createIndexAndMappingAndSettings(Settings settings, CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ assertAcked(client().admin().indices().prepareCreate(INDEX)
+ .setSettings(Settings.settingsBuilder().put(indexSettings()).put(settings))
+ .addMapping(TYPE, jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("analyzer", completionMappingBuilder.indexAnalyzer)
+ .field("search_analyzer", completionMappingBuilder.searchAnalyzer)
+ .field("payloads", completionMappingBuilder.payloads)
+ .field("preserve_separators", completionMappingBuilder.preserveSeparators)
+ .field("preserve_position_increments", completionMappingBuilder.preservePositionIncrements)
+ .endObject()
+ .endObject().endObject()
+ .endObject())
+ .get());
+ ensureYellow();
+ }
+
+ // Shorthand for createIndexAndMappingAndSettings with no extra index settings.
+ private void createIndexAndMapping(CompletionMappingBuilder completionMappingBuilder) throws IOException {
+ createIndexAndMappingAndSettings(Settings.EMPTY, completionMappingBuilder);
+ }
+
+ // Indexes a fixed corpus of band/song suggestions twice: first with a bogus
+ // weight of 1 (ids "0".."6"), then again with the real weights (ids "n0".."n6")
+ // to verify deduplication on the surface form keeps the higher-weight entry.
+ // When `optimize` is true the index is also flushed and force-merged so merging
+ // of completion data is exercised.
+ private void createData(boolean optimize) throws IOException, InterruptedException, ExecutionException {
+ String[][] input = {{"Foo Fighters"}, {"Generator", "Foo Fighters Generator"}, {"Learn to Fly", "Foo Fighters Learn to Fly"}, {"The Prodigy"}, {"Firestarter", "The Prodigy Firestarter"}, {"Turbonegro"}, {"Get it on", "Turbonegro Get it on"}};
+ String[] surface = {"Foo Fighters", "Generator - Foo Fighters", "Learn to Fly - Foo Fighters", "The Prodigy", "Firestarter - The Prodigy", "Turbonegro", "Get it on - Turbonegro"};
+ int[] weight = {10, 9, 8, 12, 11, 6, 7};
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[input.length];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", 1) // WE FORCEFULLY INDEX A BOGUS WEIGHT
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ for (int i = 0; i < builders.length; i++) { // add them again to make sure we deduplicate on the surface form
+ builders[i] = client().prepareIndex(INDEX, TYPE, "n" + i)
+ .setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(input[i]).endArray()
+ .field("output", surface[i])
+ .startObject("payload").field("id", i).endObject()
+ .field("weight", weight[i])
+ .endObject()
+ .endObject()
+ );
+ }
+ indexRandom(false, builders);
+
+ client().admin().indices().prepareRefresh(INDEX).execute().actionGet();
+ if (optimize) {
+ // make sure merging works just fine
+ client().admin().indices().prepareFlush(INDEX).execute().actionGet();
+ client().admin().indices().prepareOptimize(INDEX).setMaxNumSegments(randomIntBetween(1, 5)).get();
+ }
+ }
+
+ // After force-merging away the only document carrying a completion value, the
+ // merged segment has no entry for FIELD; the suggester must cope with such
+ // "pruned" segments instead of failing (regression test for #3555).
+ @Test // see #3555
+ public void testPrunedSegments() throws IOException {
+ createIndexAndMappingAndSettings(settingsBuilder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build(), completionMappingBuilder);
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value("The Beatles").endArray()
+ .endObject().endObject()
+ ).get();
+ client().prepareIndex(INDEX, TYPE, "2").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get(); // we have 2 docs in a segment...
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertAllSuccessful(actionGet);
+ refresh();
+ // update the first one and then merge.. the target segment will have no value in FIELD
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject()
+ .field("somefield", "somevalue")
+ .endObject()
+ ).get();
+ actionGet = client().admin().indices().prepareOptimize().setFlush(true).setMaxNumSegments(1).execute().actionGet();
+ assertAllSuccessful(actionGet);
+ refresh();
+
+ assertSuggestions("b");
+ // assertThat's signature is (actual, matcher): pass the live count as the actual
+ // value (was reversed as assertThat(2l, equalTo(count)), which produces a
+ // misleading "expected <count> but was <2>" failure message).
+ assertThat(client().prepareCount(INDEX).get().getCount(), equalTo(2L));
+ for (IndexShardSegments seg : client().admin().indices().prepareSegments().get().getIndices().get(INDEX)) {
+ ShardSegments[] shards = seg.getShards();
+ for (ShardSegments shardSegments : shards) {
+ assertThat(shardSegments.getSegments().size(), equalTo(1));
+ }
+ }
+ }
+
+ // Repeatedly remaps FIELD with a random max_input_length, indexes an input longer
+ // than the limit, and verifies that a prefix within the (surrogate-corrected)
+ // truncated length still suggests while a prefix past the limit does not.
+ @Test
+ public void testMaxFieldLength() throws IOException {
+ client().admin().indices().prepareCreate(INDEX).get();
+ ensureGreen();
+ int iters = scaledRandomIntBetween(10, 20);
+ for (int i = 0; i < iters; i++) {
+ int maxInputLen = between(3, 50);
+ // input is guaranteed longer than maxInputLen; reserved chars are replaced so they don't trip parsing
+ String str = replaceReservedChars(randomRealisticUnicodeOfCodepointLengthBetween(maxInputLen + 1, maxInputLen + scaledRandomIntBetween(2, 50)), (char) 0x01);
+ assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .field("max_input_length", maxInputLen)
+ // upgrade mapping each time
+ .field("analyzer", "keyword")
+ .endObject()
+ .endObject().endObject()
+ .endObject()));
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(str).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ // need to flush and refresh, because we keep changing the same document
+ // we have to make sure that segments without any live documents are deleted
+ flushAndRefresh();
+ // correctSubStringLen avoids splitting a surrogate pair mid-character
+ int prefixLen = CompletionFieldMapper.correctSubStringLen(str, between(1, maxInputLen - 1));
+ assertSuggestions(str.substring(0, prefixLen), "foobar");
+ if (maxInputLen + 1 < str.length()) {
+ // step past the truncation point (2 chars if we'd land on a high surrogate)
+ int offset = Character.isHighSurrogate(str.charAt(maxInputLen - 1)) ? 2 : 1;
+ int correctSubStringLen = CompletionFieldMapper.correctSubStringLen(str, maxInputLen + offset);
+ String shortenedSuggestion = str.substring(0, correctSubStringLen);
+ assertSuggestions(shortenedSuggestion);
+ }
+ }
+ }
+
+ // Indexing a very long input (5k-10k chars) must succeed thanks to the default
+ // max_input_length; without it this could overflow the stack (regression for #3596).
+ @Test
+ // see #3596
+ public void testVeryLongInput() throws IOException {
+ assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()).get());
+ ensureYellow();
+ // can cause stack overflow without the default max_input_length
+ String longString = replaceReservedChars(randomRealisticUnicodeOfLength(randomIntBetween(5000, 10000)), (char) 0x01);
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(longString).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+
+ }
+
+ // An input containing a reserved character (0x00 is used internally by the
+ // suggester) must be rejected with a MapperParsingException (regression for #3648).
+ // see #3648
+ @Test(expected = MapperParsingException.class)
+ public void testReservedChars() throws IOException {
+ assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()).get());
+ ensureYellow();
+ // 0x00 is a reserved char for the completion field and must not be indexable
+ String string = "foo" + (char) 0x00 + "bar";
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject().startObject(FIELD)
+ .startArray("input").value(string).endArray()
+ .field("output", "foobar")
+ .endObject().endObject()
+ ).setRefresh(true).get();
+ }
+
+ // Running a terms aggregation over a completion field is unsupported: the search
+ // must fail with a "no fielddata type" error rather than succeed (regression for #5930).
+ @Test // see #5930
+ public void testIssue5930() throws IOException {
+ assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, jsonBuilder().startObject()
+ .startObject(TYPE).startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject().endObject()
+ .endObject()).get());
+ ensureYellow();
+ String string = "foo bar";
+ client().prepareIndex(INDEX, TYPE, "1").setSource(jsonBuilder()
+ .startObject()
+ .field(FIELD, string)
+ .endObject()
+ ).setRefresh(true).get();
+
+ try {
+ client().prepareSearch(INDEX).addAggregation(AggregationBuilders.terms("suggest_agg").field(FIELD)
+ .collectMode(randomFrom(SubAggCollectionMode.values()))).execute().actionGet();
+ // was assertFalse(true): use fail() so the intent — and the failure message — is explicit
+ fail("Expected SearchPhaseExecutionException when aggregating on a completion field, but the search succeeded");
+ } catch (SearchPhaseExecutionException e) {
+ assertTrue(e.toString().contains("found no fielddata type for field [" + FIELD + "]"));
+ }
+ }
+
+ // A null value for the completion field must be rejected with a parse error that
+ // names the field, while null-free documents index fine (regression for #6399).
+ // see issue #6399
+ @Test
+ public void testIndexingUnrelatedNullValue() throws Exception {
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).get());
+ ensureGreen();
+
+ // a document without nulls indexes fine
+ client().prepareIndex(INDEX, TYPE, "1").setSource(FIELD, "strings make me happy", FIELD + "_1", "nulls make me sad")
+ .setRefresh(true).get();
+
+ try {
+ client().prepareIndex(INDEX, TYPE, "2").setSource(FIELD, null, FIELD + "_1", "nulls make me sad")
+ .setRefresh(true).get();
+ fail("Expected MapperParsingException for null value");
+ } catch (MapperParsingException e) {
+ // make sure that the exception has the name of the field causing the error
+ assertTrue(e.getDetailedMessage().contains(FIELD));
+ }
+
+ }
+
+ // Replaces every character reserved by the completion field mapper with
+ // `replacement`, so randomly generated inputs cannot trigger reserved-char errors.
+ private static String replaceReservedChars(String input, char replacement) {
+ char[] charArray = input.toCharArray();
+ for (int i = 0; i < charArray.length; i++) {
+ if (CompletionFieldMapper.isReservedChar(charArray[i])) {
+ charArray[i] = replacement;
+ }
+ }
+ return new String(charArray);
+ }
+
+ // Fluent holder for the completion-field mapping options used by
+ // createIndexAndMappingAndSettings. Boolean options default to random values so
+ // every run exercises a different mapping combination.
+ private static class CompletionMappingBuilder {
+ private String searchAnalyzer = "simple";
+ private String indexAnalyzer = "simple";
+ // randomized defaults — tests that need a fixed value must set it explicitly
+ private Boolean payloads = getRandom().nextBoolean();
+ private Boolean preserveSeparators = getRandom().nextBoolean();
+ private Boolean preservePositionIncrements = getRandom().nextBoolean();
+
+ public CompletionMappingBuilder searchAnalyzer(String searchAnalyzer) {
+ this.searchAnalyzer = searchAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder indexAnalyzer(String indexAnalyzer) {
+ this.indexAnalyzer = indexAnalyzer;
+ return this;
+ }
+ public CompletionMappingBuilder payloads(Boolean payloads) {
+ this.payloads = payloads;
+ return this;
+ }
+ public CompletionMappingBuilder preserveSeparators(Boolean preserveSeparators) {
+ this.preserveSeparators = preserveSeparators;
+ return this;
+ }
+ public CompletionMappingBuilder preservePositionIncrements(Boolean preservePositionIncrements) {
+ this.preservePositionIncrements = preservePositionIncrements;
+ return this;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
new file mode 100644
index 0000000000..af36c5739f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.core.SimpleAnalyzer;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.analysis.synonym.SynonymMap;
+import org.apache.lucene.analysis.synonym.SynonymMap.Builder;
+import org.apache.lucene.analysis.tokenattributes.*;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream;
+import org.elasticsearch.search.suggest.completion.CompletionTokenStream.ByteTermAttribute;
+import org.elasticsearch.test.ElasticsearchTokenStreamTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+// Unit tests for CompletionTokenStream: payload propagation, synonym expansion,
+// the 256-path expansion limit, and attribute delegation to the wrapped stream.
+public class CompletionTokenStreamTest extends ElasticsearchTokenStreamTestCase {
+
+ final XAnalyzingSuggester suggester = new XAnalyzingSuggester(new SimpleAnalyzer());
+
+ // Single keyword in, single token out, with the payload attached.
+ @Test
+ public void testSuggestTokenFilter() throws Exception {
+ Tokenizer tokenStream = new MockTokenizer(MockTokenizer.WHITESPACE, true);
+ tokenStream.setReader(new StringReader("mykeyword"));
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10"}, new int[] { 1 }, null, null);
+ }
+
+ // A synonym expands to two finite strings; both carry the same payload.
+ @Test
+ public void testSuggestTokenFilterWithSynonym() throws Exception {
+ Builder builder = new SynonymMap.Builder(true);
+ builder.add(new CharsRef("mykeyword"), new CharsRef("mysynonym"), true);
+
+ Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
+ tokenizer.setReader(new StringReader("mykeyword"));
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(filter, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ assertTokenStreamContents(suggestTokenStream, new String[] {"mysynonym", "mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10", "Surface keyword|friggin payload|10"}, new int[] { 2, 0 }, null, null);
+ }
+
+ // 8 tokens, each with one synonym → 2^8 = 256 expansions: exactly at the limit,
+ // so every expansion must be emitted and positions must line up with the count.
+ @Test
+ public void testValidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+ for (int i = 0 ; i < 8 ; i++) {
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
+ MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
+ tokenizer.setReader(new StringReader(valueBuilder.toString()));
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ ByteTermAttribute attr = suggestTokenStream.addAttribute(ByteTermAttribute.class);
+ PositionIncrementAttribute posAttr = suggestTokenStream.addAttribute(PositionIncrementAttribute.class);
+ int maxPos = 0;
+ int count = 0;
+ while(suggestTokenStream.incrementToken()) {
+ count++;
+ assertNotNull(attr.getBytesRef());
+ assertTrue(attr.getBytesRef().length > 0);
+ maxPos += posAttr.getPositionIncrement();
+ }
+ suggestTokenStream.close();
+ assertEquals(count, 256);
+ assertEquals(count, maxPos);
+
+ }
+
+ // 9 tokens → 512 expansions: exceeds the limit and must throw on first increment.
+ @Test(expected = IllegalArgumentException.class)
+ public void testInValidNumberOfExpansions() throws IOException {
+ Builder builder = new SynonymMap.Builder(true);
+ for (int i = 0; i < 256; i++) {
+ builder.add(new CharsRef("" + (i+1)), new CharsRef("" + (1000 + (i+1))), true);
+ }
+ StringBuilder valueBuilder = new StringBuilder();
+ for (int i = 0 ; i < 9 ; i++) { // 9 -> expands to 512
+ valueBuilder.append(i+1);
+ valueBuilder.append(" ");
+ }
+ MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
+ tokenizer.setReader(new StringReader(valueBuilder.toString()));
+ SynonymFilter filter = new SynonymFilter(tokenizer, builder.build(), true);
+
+ TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ Set<IntsRef> finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ return finiteStrings;
+ }
+ });
+
+ suggestTokenStream.reset();
+ suggestTokenStream.incrementToken();
+ suggestTokenStream.close();
+
+ }
+
+ // The TermToBytesRefAttribute obtained before reset() must stay valid and be
+ // filled by the wrapped stream on every increment.
+ @Test
+ public void testSuggestTokenFilterProperlyDelegateInputStream() throws Exception {
+ Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true);
+ tokenizer.setReader(new StringReader("mykeyword"));
+ BytesRef payload = new BytesRef("Surface keyword|friggin payload|10");
+ TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenizer, payload, new CompletionTokenStream.ToFiniteStrings() {
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream);
+ }
+ }));
+ TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class);
+ BytesRef ref = termAtt.getBytesRef();
+ assertNotNull(ref);
+ suggestTokenStream.reset();
+
+ while (suggestTokenStream.incrementToken()) {
+ termAtt.fillBytesRef();
+ assertThat(ref.utf8ToString(), equalTo("mykeyword"));
+ }
+ suggestTokenStream.end();
+ suggestTokenStream.close();
+ }
+
+
+ // Test helper: copies the payload into the type attribute so
+ // assertTokenStreamContents can assert on it via the "types" argument.
+ // NOTE(review): charTermAttribute and the local bytesRef below appear unused —
+ // registering CharTermAttribute may be intentional to force a char-term view
+ // of the byte term; confirm before removing.
+ public final static class ByteTermAttrToCharTermAttrFilter extends TokenFilter {
+ private ByteTermAttribute byteAttr = addAttribute(ByteTermAttribute.class);
+ private PayloadAttribute payload = addAttribute(PayloadAttribute.class);
+ private TypeAttribute type = addAttribute(TypeAttribute.class);
+ private CharTermAttribute charTermAttribute = addAttribute(CharTermAttribute.class);
+ protected ByteTermAttrToCharTermAttrFilter(TokenStream input) {
+ super(input);
+ }
+
+ @Override
+ public boolean incrementToken() throws IOException {
+ if (input.incrementToken()) {
+ BytesRef bytesRef = byteAttr.getBytesRef();
+ // we move them over so we can assert them more easily in the tests
+ type.setType(payload.getPayload().utf8ToString());
+ return true;
+ }
+ return false;
+ }
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java b/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java
new file mode 100644
index 0000000000..95bbcbba03
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java
@@ -0,0 +1,1057 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestRequest;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.search.suggest.Suggest.Suggestion;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry;
+import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry.Option;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder;
+import org.elasticsearch.search.suggest.context.ContextBuilder;
+import org.elasticsearch.search.suggest.context.ContextMapping;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions.assertDistance;
+import static org.hamcrest.Matchers.containsString;
+
+@SuppressCodecs("*") // requires custom completion format
+public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest {
+
+ private static final String INDEX = "test";
+ private static final String TYPE = "testType";
+ private static final String FIELD = "testField";
+
+ private static final String[][] HEROS = {
+ { "Afari, Jamal", "Jamal Afari", "Jamal" },
+ { "Allerdyce, St. John", "Allerdyce, John", "St. John", "St. John Allerdyce" },
+ { "Beaubier, Jean-Paul", "Jean-Paul Beaubier", "Jean-Paul" },
+ { "Beaubier, Jeanne-Marie", "Jeanne-Marie Beaubier", "Jeanne-Marie" },
+ { "Braddock, Elizabeth \"Betsy\"", "Betsy", "Braddock, Elizabeth", "Elizabeth Braddock", "Elizabeth" },
+ { "Cody Mushumanski gun Man", "the hunter", "gun man", "Cody Mushumanski" },
+ { "Corbo, Adrian", "Adrian Corbo", "Adrian" },
+ { "Corbo, Jared", "Jared Corbo", "Jared" },
+ { "Creel, Carl \"Crusher\"", "Creel, Carl", "Crusher", "Carl Creel", "Carl" },
+ { "Crichton, Lady Jacqueline Falsworth", "Lady Jacqueline Falsworth Crichton", "Lady Jacqueline Falsworth",
+ "Jacqueline Falsworth" }, { "Crichton, Kenneth", "Kenneth Crichton", "Kenneth" },
+ { "MacKenzie, Al", "Al MacKenzie", "Al" },
+ { "MacPherran, Mary \"Skeeter\"", "Mary MacPherran \"Skeeter\"", "MacPherran, Mary", "Skeeter", "Mary MacPherran" },
+ { "MacTaggert, Moira", "Moira MacTaggert", "Moira" }, { "Rasputin, Illyana", "Illyana Rasputin", "Illyana" },
+ { "Rasputin, Mikhail", "Mikhail Rasputin", "Mikhail" }, { "Rasputin, Piotr", "Piotr Rasputin", "Piotr" },
+ { "Smythe, Alistair", "Alistair Smythe", "Alistair" }, { "Smythe, Spencer", "Spencer Smythe", "Spencer" },
+ { "Whitemane, Aelfyre", "Aelfyre Whitemane", "Aelfyre" }, { "Whitemane, Kofi", "Kofi Whitemane", "Kofi" } };
+
+ @Test
+ public void testBasicGeo() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.location("st").precision("5km").neighbors(true))));
+ ensureYellow();
+
+ XContentBuilder source1 = jsonBuilder()
+ .startObject()
+ .startObject(FIELD)
+ .array("input", "Hotel Amsterdam", "Amsterdam")
+ .field("output", "Hotel Amsterdam in Berlin")
+ .startObject("context").latlon("st", 52.529172, 13.407333).endObject()
+ .endObject()
+ .endObject();
+ client().prepareIndex(INDEX, TYPE, "1").setSource(source1).execute().actionGet();
+
+ XContentBuilder source2 = jsonBuilder()
+ .startObject()
+ .startObject(FIELD)
+ .array("input", "Hotel Berlin", "Berlin")
+ .field("output", "Hotel Berlin in Amsterdam")
+ .startObject("context").latlon("st", 52.363389, 4.888695).endObject()
+ .endObject()
+ .endObject();
+ client().prepareIndex(INDEX, TYPE, "2").setSource(source2).execute().actionGet();
+
+ client().admin().indices().prepareRefresh(INDEX).get();
+
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text("h").size(10)
+ .addGeoLocation("st", 52.52, 13.4);
+
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+
+ assertEquals(suggestResponse.getSuggest().size(), 1);
+ assertEquals("Hotel Amsterdam in Berlin", suggestResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string());
+ }
+
+ @Test
+ public void testMultiLevelGeo() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.location("st")
+ .precision(1)
+ .precision(2)
+ .precision(3)
+ .precision(4)
+ .precision(5)
+ .precision(6)
+ .precision(7)
+ .precision(8)
+ .precision(9)
+ .precision(10)
+ .precision(11)
+ .precision(12)
+ .neighbors(true))));
+ ensureYellow();
+
+ XContentBuilder source1 = jsonBuilder()
+ .startObject()
+ .startObject(FIELD)
+ .array("input", "Hotel Amsterdam", "Amsterdam")
+ .field("output", "Hotel Amsterdam in Berlin")
+ .startObject("context").latlon("st", 52.529172, 13.407333).endObject()
+ .endObject()
+ .endObject();
+ client().prepareIndex(INDEX, TYPE, "1").setSource(source1).execute().actionGet();
+
+ client().admin().indices().prepareRefresh(INDEX).get();
+
+ for (int precision = 1; precision <= 12; precision++) {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = new CompletionSuggestionBuilder(suggestionName).field(FIELD).text("h").size(10)
+ .addGeoLocation("st", 52.529172, 13.407333, precision);
+
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+ assertEquals(suggestResponse.getSuggest().size(), 1);
+ assertEquals("Hotel Amsterdam in Berlin", suggestResponse.getSuggest().getSuggestion(suggestionName).iterator().next()
+ .getOptions().iterator().next().getText().string());
+ }
+ }
+
+ @Test
+ public void testMappingIdempotency() throws Exception {
+ List<Integer> precisions = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(4, 12); i++) {
+ precisions.add(i+1);
+ }
+ Collections.shuffle(precisions, getRandom());
+ XContentBuilder mapping = jsonBuilder().startObject().startObject(TYPE)
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .startObject("context")
+ .startObject("location")
+ .field("type", "geo")
+ .array("precision", precisions.toArray(new Integer[precisions.size()]))
+ .endObject()
+ .endObject().endObject()
+ .endObject().endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping.string()));
+ ensureYellow();
+
+ Collections.shuffle(precisions, getRandom());
+ mapping = jsonBuilder().startObject().startObject(TYPE)
+ .startObject("properties").startObject("completion")
+ .field("type", "completion")
+ .startObject("context")
+ .startObject("location")
+ .field("type", "geo")
+ .array("precision", precisions.toArray(new Integer[precisions.size()]))
+ .endObject()
+ .endObject().endObject()
+ .endObject().endObject();
+ assertAcked(client().admin().indices().preparePutMapping(INDEX).setType(TYPE).setSource(mapping.string()).get());
+ }
+
+
+ @Test
+ public void testGeoField() throws Exception {
+
+ XContentBuilder mapping = jsonBuilder();
+ mapping.startObject();
+ mapping.startObject(TYPE);
+ mapping.startObject("properties");
+ mapping.startObject("pin");
+ mapping.field("type", "geo_point");
+ mapping.endObject();
+ mapping.startObject(FIELD);
+ mapping.field("type", "completion");
+ mapping.field("analyzer", "simple");
+
+ mapping.startObject("context");
+ mapping.value(ContextBuilder.location("st", 5, true).field("pin").build());
+ mapping.endObject();
+
+ mapping.endObject();
+ mapping.endObject();
+ mapping.endObject();
+ mapping.endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping));
+ ensureYellow();
+
+ XContentBuilder source1 = jsonBuilder()
+ .startObject()
+ .latlon("pin", 52.529172, 13.407333)
+ .startObject(FIELD)
+ .array("input", "Hotel Amsterdam", "Amsterdam")
+ .field("output", "Hotel Amsterdam in Berlin")
+ .startObject("context").endObject()
+ .endObject()
+ .endObject();
+ client().prepareIndex(INDEX, TYPE, "1").setSource(source1).execute().actionGet();
+
+ XContentBuilder source2 = jsonBuilder()
+ .startObject()
+ .latlon("pin", 52.363389, 4.888695)
+ .startObject(FIELD)
+ .array("input", "Hotel Berlin", "Berlin")
+ .field("output", "Hotel Berlin in Amsterdam")
+ .startObject("context").endObject()
+ .endObject()
+ .endObject();
+ client().prepareIndex(INDEX, TYPE, "2").setSource(source2).execute().actionGet();
+
+ refresh();
+
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text("h").size(10)
+ .addGeoLocation("st", 52.52, 13.4);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+
+ assertEquals(suggestResponse.getSuggest().size(), 1);
+ assertEquals("Hotel Amsterdam in Berlin", suggestResponse.getSuggest().getSuggestion(suggestionName).iterator().next().getOptions().iterator().next().getText().string());
+ }
+
+ @Test
+ public void testSimpleGeo() throws Exception {
+ String reinickendorf = "u337p3mp11e2";
+ String pankow = "u33e0cyyjur4";
+ String koepenick = "u33dm4f7fn40";
+ String bernau = "u33etnjf1yjn";
+ String berlin = "u33dc1v0xupz";
+ String mitte = "u33dc0cpke4q";
+ String steglitz = "u336m36rjh2p";
+ String wilmersdorf = "u336wmw0q41s";
+ String spandau = "u336uqek7gh6";
+ String tempelhof = "u33d91jh3by0";
+ String schoeneberg = "u336xdrkzbq7";
+ String treptow = "u33d9unn7fp7";
+
+ double precision = 100.0; // meters
+
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.location("st").precision(precision).neighbors(true))));
+ ensureYellow();
+
+ String[] locations = { reinickendorf, pankow, koepenick, bernau, berlin, mitte, steglitz, wilmersdorf, spandau, tempelhof,
+ schoeneberg, treptow };
+
+ String[][] input = { { "pizza - reinickendorf", "pizza", "food" }, { "pizza - pankow", "pizza", "food" },
+ { "pizza - koepenick", "pizza", "food" }, { "pizza - bernau", "pizza", "food" }, { "pizza - berlin", "pizza", "food" },
+ { "pizza - mitte", "pizza - berlin mitte", "pizza", "food" },
+ { "pizza - steglitz", "pizza - Berlin-Steglitz", "pizza", "food" }, { "pizza - wilmersdorf", "pizza", "food" },
+ { "pizza - spandau", "spandau bei berlin", "pizza", "food" },
+ { "pizza - tempelhof", "pizza - berlin-tempelhof", "pizza", "food" },
+ { "pizza - schoeneberg", "pizza - schöneberg", "pizza - berlin schoeneberg", "pizza", "food" },
+ { "pizza - treptow", "pizza", "food" } };
+
+ for (int i = 0; i < locations.length; i++) {
+ XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).startArray("input").value(input[i]).endArray()
+ .startObject("context").field("st", locations[i]).endObject().field("payload", locations[i]).endObject().endObject();
+ client().prepareIndex(INDEX, TYPE, "" + i).setSource(source).execute().actionGet();
+ }
+
+ refresh();
+
+ assertGeoSuggestionsInRange(berlin, "pizza", precision);
+ assertGeoSuggestionsInRange(reinickendorf, "pizza", precision);
+ assertGeoSuggestionsInRange(spandau, "pizza", precision);
+ assertGeoSuggestionsInRange(koepenick, "pizza", precision);
+ assertGeoSuggestionsInRange(schoeneberg, "pizza", precision);
+ assertGeoSuggestionsInRange(tempelhof, "pizza", precision);
+ assertGeoSuggestionsInRange(bernau, "pizza", precision);
+ assertGeoSuggestionsInRange(pankow, "pizza", precision);
+ assertGeoSuggestionsInRange(mitte, "pizza", precision);
+ assertGeoSuggestionsInRange(steglitz, "pizza", precision);
+ assertGeoSuggestionsInRange(mitte, "pizza", precision);
+ assertGeoSuggestionsInRange(wilmersdorf, "pizza", precision);
+ assertGeoSuggestionsInRange(treptow, "pizza", precision);
+ }
+
+ @Test
+ public void testSimplePrefix() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.category("st"))));
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ XContentBuilder source = jsonBuilder().startObject().startObject(FIELD).startArray("input").value(HEROS[i]).endArray()
+ .startObject("context").field("st", i%3).endObject()
+ .startObject("payload").field("group", i % 3).field("id", i).endObject()
+ .endObject().endObject();
+ client().prepareIndex(INDEX, TYPE, "" + i).setSource(source).execute().actionGet();
+ }
+
+ refresh();
+
+ assertPrefixSuggestions(0, "a", "Afari, Jamal", "Adrian Corbo", "Adrian");
+ assertPrefixSuggestions(0, "b", "Beaubier, Jeanne-Marie");
+ assertPrefixSuggestions(0, "c", "Corbo, Adrian", "Crichton, Lady Jacqueline Falsworth");
+ assertPrefixSuggestions(0, "mary", "Mary MacPherran \"Skeeter\"", "Mary MacPherran");
+ assertPrefixSuggestions(0, "s", "Skeeter", "Smythe, Spencer", "Spencer Smythe", "Spencer");
+ assertPrefixSuggestions(1, "s", "St. John", "St. John Allerdyce");
+ assertPrefixSuggestions(2, "s", "Smythe, Alistair");
+ assertPrefixSuggestions(1, "w", "Whitemane, Aelfyre");
+ assertPrefixSuggestions(2, "w", "Whitemane, Kofi");
+ }
+
+ @Test
+ public void testTypeCategoryIsActuallyCalledCategory() throws Exception {
+ XContentBuilder mapping = jsonBuilder();
+ mapping.startObject().startObject(TYPE).startObject("properties")
+ .startObject("suggest_field").field("type", "completion")
+ .startObject("context").startObject("color").field("type", "category").endObject().endObject()
+ .endObject()
+ .endObject().endObject().endObject();
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, mapping));
+ ensureYellow();
+ XContentBuilder doc1 = jsonBuilder();
+ doc1.startObject().startObject("suggest_field")
+ .field("input", "backpack_red")
+ .startObject("context").field("color", "red", "all_colors").endObject()
+ .endObject().endObject();
+ XContentBuilder doc2 = jsonBuilder();
+ doc2.startObject().startObject("suggest_field")
+ .field("input", "backpack_green")
+ .startObject("context").field("color", "green", "all_colors").endObject()
+ .endObject().endObject();
+
+ client().prepareIndex(INDEX, TYPE, "1")
+ .setSource(doc1).execute()
+ .actionGet();
+ client().prepareIndex(INDEX, TYPE, "2")
+ .setSource(doc2).execute()
+ .actionGet();
+
+ refresh();
+ getBackpackSuggestionAndCompare("all_colors", "backpack_red", "backpack_green");
+ getBackpackSuggestionAndCompare("red", "backpack_red");
+ getBackpackSuggestionAndCompare("green", "backpack_green");
+ getBackpackSuggestionAndCompare("not_existing_color");
+
+ }
+
+ private void getBackpackSuggestionAndCompare(String contextValue, String... expectedText) {
+ Set<String> expected = Sets.newHashSet(expectedText);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion("suggestion").field("suggest_field").text("back").size(10).addContextField("color", contextValue);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+ Suggest suggest = suggestResponse.getSuggest();
+ assertEquals(suggest.size(), 1);
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ assertEquals(options.size(), expectedText.length);
+ for (CompletionSuggestion.Entry.Option option : options) {
+ assertTrue(expected.contains(option.getText().string()));
+ expected.remove(option.getText().string());
+ }
+ }
+ }
+ }
+
+
+ @Test
+ public void testBasic() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, false, ContextBuilder.reference("st", "_type"), ContextBuilder.reference("nd", "_type"))));
+ ensureYellow();
+
+ client().prepareIndex(INDEX, TYPE, "1")
+ .setSource(
+ jsonBuilder().startObject().startObject(FIELD).startArray("input").value("my hotel").value("this hotel").endArray()
+ .startObject("context").endObject()
+ .field("payload", TYPE + "|" + TYPE).endObject().endObject()).execute()
+ .actionGet();
+
+ refresh();
+
+ assertDoubleFieldSuggestions(TYPE, TYPE, "m", "my hotel");
+ }
+
+ @Test
+ public void testSimpleField() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.reference("st", "category"))));
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(
+ jsonBuilder().startObject().field("category", Integer.toString(i % 3)).startObject(FIELD).startArray("input")
+ .value(HEROS[i]).endArray().startObject("context").endObject().field("payload", Integer.toString(i % 3))
+ .endObject().endObject()).execute().actionGet();
+ }
+
+ refresh();
+
+ assertFieldSuggestions("0", "a", "Afari, Jamal", "Adrian Corbo", "Adrian");
+ assertFieldSuggestions("0", "b", "Beaubier, Jeanne-Marie");
+ assertFieldSuggestions("0", "c", "Corbo, Adrian", "Crichton, Lady Jacqueline Falsworth");
+ assertFieldSuggestions("0", "mary", "Mary MacPherran \"Skeeter\"", "Mary MacPherran");
+ assertFieldSuggestions("0", "s", "Skeeter", "Smythe, Spencer", "Spencer Smythe", "Spencer");
+ assertFieldSuggestions("1", "s", "St. John", "St. John Allerdyce");
+ assertFieldSuggestions("2", "s", "Smythe, Alistair");
+ assertFieldSuggestions("1", "w", "Whitemane, Aelfyre");
+ assertFieldSuggestions("2", "w", "Whitemane, Kofi");
+
+ }
+
+ @Test // see issue #10987
+ public void testEmptySuggestion() throws Exception {
+ String mapping = jsonBuilder()
+ .startObject()
+ .startObject(TYPE)
+ .startObject("properties")
+ .startObject(FIELD)
+ .field("type", "completion")
+ .startObject("context")
+ .startObject("type_context")
+ .field("path", "_type")
+ .field("type", "category")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ assertAcked(client().admin().indices().prepareCreate(INDEX).addMapping(TYPE, mapping).get());
+ ensureGreen();
+
+ client().prepareIndex(INDEX, TYPE, "1").setSource(FIELD, "")
+ .setRefresh(true).get();
+
+ }
+
+ @Test
+ public void testMultiValueField() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.reference("st", "category"))));
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(
+ jsonBuilder().startObject().startArray("category").value(Integer.toString(i % 3)).value("other").endArray()
+ .startObject(FIELD).startArray("input").value(HEROS[i]).endArray().startObject("context").endObject()
+ .field("payload", Integer.toString(i % 3)).endObject().endObject()).execute().actionGet();
+ }
+
+ refresh();
+
+ assertFieldSuggestions("0", "a", "Afari, Jamal", "Adrian Corbo", "Adrian");
+ assertFieldSuggestions("0", "b", "Beaubier, Jeanne-Marie");
+ assertFieldSuggestions("0", "c", "Corbo, Adrian", "Crichton, Lady Jacqueline Falsworth");
+ assertFieldSuggestions("0", "mary", "Mary MacPherran \"Skeeter\"", "Mary MacPherran");
+ assertFieldSuggestions("0", "s", "Skeeter", "Smythe, Spencer", "Spencer Smythe", "Spencer");
+ assertFieldSuggestions("1", "s", "St. John", "St. John Allerdyce");
+ assertFieldSuggestions("2", "s", "Smythe, Alistair");
+ assertFieldSuggestions("1", "w", "Whitemane, Aelfyre");
+ assertFieldSuggestions("2", "w", "Whitemane, Kofi");
+ }
+
+ @Test
+ public void testMultiContext() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.reference("st", "categoryA"), ContextBuilder.reference("nd", "categoryB"))));
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ client().prepareIndex(INDEX, TYPE, "" + i)
+ .setSource(
+ jsonBuilder().startObject().field("categoryA").value("" + (char) ('0' + (i % 3))).field("categoryB")
+ .value("" + (char) ('A' + (i % 3))).startObject(FIELD).startArray("input").value(HEROS[i]).endArray()
+ .startObject("context").endObject().field("payload", ((char) ('0' + (i % 3))) + "" + (char) ('A' + (i % 3)))
+ .endObject().endObject()).execute().actionGet();
+ }
+
+ refresh();
+
+ assertMultiContextSuggestions("0", "A", "a", "Afari, Jamal", "Adrian Corbo", "Adrian");
+ assertMultiContextSuggestions("0", "A", "b", "Beaubier, Jeanne-Marie");
+ assertMultiContextSuggestions("0", "A", "c", "Corbo, Adrian", "Crichton, Lady Jacqueline Falsworth");
+ assertMultiContextSuggestions("0", "A", "mary", "Mary MacPherran \"Skeeter\"", "Mary MacPherran");
+ assertMultiContextSuggestions("0", "A", "s", "Skeeter", "Smythe, Spencer", "Spencer Smythe", "Spencer");
+ assertMultiContextSuggestions("1", "B", "s", "St. John", "St. John Allerdyce");
+ assertMultiContextSuggestions("2", "C", "s", "Smythe, Alistair");
+ assertMultiContextSuggestions("1", "B", "w", "Whitemane, Aelfyre");
+ assertMultiContextSuggestions("2", "C", "w", "Whitemane, Kofi");
+ }
+
+ @Test
+ public void testMultiContextWithFuzzyLogic() throws Exception {
+ assertAcked(prepareCreate(INDEX).addMapping(TYPE, createMapping(TYPE, ContextBuilder.reference("st", "categoryA"), ContextBuilder.reference("nd", "categoryB"))));
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ String source = jsonBuilder().startObject().field("categoryA", "" + (char) ('0' + (i % 3)))
+ .field("categoryB", "" + (char) ('a' + (i % 3))).startObject(FIELD).array("input", HEROS[i])
+ .startObject("context").endObject().startObject("payload").field("categoryA", "" + (char) ('0' + (i % 3)))
+ .field("categoryB", "" + (char) ('a' + (i % 3))).endObject().endObject().endObject().string();
+ client().prepareIndex(INDEX, TYPE, "" + i).setSource(source).execute().actionGet();
+ }
+
+ refresh();
+
+ String[] prefix1 = { "0", "1", "2" };
+ String[] prefix2 = { "a", "b", "c" };
+ String[] prefix3 = { "0", "1" };
+ String[] prefix4 = { "a", "b" };
+
+ assertContextWithFuzzySuggestions(prefix1, prefix2, "mary", "MacKenzie, Al", "MacPherran, Mary", "MacPherran, Mary \"Skeeter\"",
+ "MacTaggert, Moira", "Mary MacPherran", "Mary MacPherran \"Skeeter\"");
+ assertContextWithFuzzySuggestions(prefix1, prefix2, "mac", "Mikhail", "Mary MacPherran \"Skeeter\"", "MacTaggert, Moira",
+ "Moira MacTaggert", "Moira", "MacKenzie, Al", "Mary MacPherran", "Mikhail Rasputin", "MacPherran, Mary",
+ "MacPherran, Mary \"Skeeter\"");
+ assertContextWithFuzzySuggestions(prefix3, prefix4, "mary", "MacPherran, Mary", "MacPherran, Mary \"Skeeter\"",
+ "MacTaggert, Moira", "Mary MacPherran", "Mary MacPherran \"Skeeter\"");
+ assertContextWithFuzzySuggestions(prefix3, prefix4, "mac", "MacPherran, Mary", "MacPherran, Mary \"Skeeter\"", "MacTaggert, Moira",
+ "Mary MacPherran", "Mary MacPherran \"Skeeter\"", "Mikhail", "Mikhail Rasputin", "Moira", "Moira MacTaggert");
+ }
+
+ @Test
+ public void testSimpleType() throws Exception {
+ String[] types = { TYPE + "A", TYPE + "B", TYPE + "C" };
+
+ CreateIndexRequestBuilder createIndexRequestBuilder = prepareCreate(INDEX);
+ for (String type : types) {
+ createIndexRequestBuilder.addMapping(type, createMapping(type, ContextBuilder.reference("st", "_type")));
+ }
+ assertAcked(createIndexRequestBuilder);
+ ensureYellow();
+
+ for (int i = 0; i < HEROS.length; i++) {
+ String type = types[i % types.length];
+ client().prepareIndex(INDEX, type, "" + i)
+ .setSource(
+ jsonBuilder().startObject().startObject(FIELD).startArray("input").value(HEROS[i]).endArray()
+ .startObject("context").endObject().field("payload", type).endObject().endObject()).execute().actionGet();
+ }
+
+ refresh();
+
+ assertFieldSuggestions(types[0], "a", "Afari, Jamal", "Adrian Corbo", "Adrian");
+ assertFieldSuggestions(types[0], "b", "Beaubier, Jeanne-Marie");
+ assertFieldSuggestions(types[0], "c", "Corbo, Adrian", "Crichton, Lady Jacqueline Falsworth");
+ assertFieldSuggestions(types[0], "mary", "Mary MacPherran \"Skeeter\"", "Mary MacPherran");
+ assertFieldSuggestions(types[0], "s", "Skeeter", "Smythe, Spencer", "Spencer Smythe", "Spencer");
+ assertFieldSuggestions(types[1], "s", "St. John", "St. John Allerdyce");
+ assertFieldSuggestions(types[2], "s", "Smythe, Alistair");
+ assertFieldSuggestions(types[1], "w", "Whitemane, Aelfyre");
+ assertFieldSuggestions(types[2], "w", "Whitemane, Kofi");
+ }
+
+ @Test // issue 5525, default location didnt work with lat/lon map, and did not set default location appropriately
+ public void testGeoContextDefaultMapping() throws Exception {
+ GeoPoint berlinAlexanderplatz = GeoHashUtils.decode("u33dc1");
+
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("poi").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", "500m")
+ .startObject("default").field("lat", berlinAlexanderplatz.lat()).field("lon", berlinAlexanderplatz.lon()).endObject()
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("poi", xContentBuilder));
+ ensureYellow();
+
+ index(INDEX, "poi", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Alexanderplatz").endObject().endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("b").size(10).addGeoLocation("location", berlinAlexanderplatz.lat(), berlinAlexanderplatz.lon());
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Berlin Alexanderplatz");
+ }
+
+ @Test // issue 5525, setting the path of a category context and then indexing a document without that field returned an error
+ public void testThatMissingPrefixesForContextReturnException() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("service").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("color")
+ .field("type", "category")
+ .field("path", "color")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("service", xContentBuilder));
+ ensureYellow();
+
+ // now index a document with color field
+ index(INDEX, "service", "1", jsonBuilder().startObject().field("color", "red").startObject("suggest").field("input", "backback").endObject().endObject());
+
+ // now index a document without a color field
+ try {
+ index(INDEX, "service", "2", jsonBuilder().startObject().startObject("suggest").field("input", "backback").endObject().endObject());
+ fail("index operation was not supposed to be successful");
+ } catch (IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("one or more prefixes needed"));
+ }
+ }
+
+ @Test // issue 5525, the geo point parser did not work when the lat/lon values were inside of a value object
+ public void testThatLocationVenueCanBeParsedAsDocumented() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("poi").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", "1m")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("poi", xContentBuilder));
+ ensureYellow();
+
+ SuggestRequest suggestRequest = new SuggestRequest(INDEX);
+ XContentBuilder builder = jsonBuilder().startObject()
+ .startObject("suggest")
+ .field("text", "m")
+ .startObject("completion")
+ .field("field", "suggest")
+ .startObject("context").startObject("location").startObject("value").field("lat", 0).field("lon", 0).endObject().field("precision", "1km").endObject().endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ suggestRequest.suggest(builder.bytes());
+
+ SuggestResponse suggestResponse = client().suggest(suggestRequest).get();
+ assertNoFailures(suggestResponse);
+ }
+
+ @Test
+ public void testThatCategoryDefaultWorks() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("color")
+ .field("type", "category").field("default", "red")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ ensureYellow();
+
+ index(INDEX, "item", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Hoodie red").endObject().endObject());
+ index(INDEX, "item", "2", jsonBuilder().startObject().startObject("suggest").field("input", "Hoodie blue").startObject("context").field("color", "blue").endObject().endObject().endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("h").size(10).addContextField("color", "red");
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Hoodie red");
+ }
+
+ @Test
+ public void testThatDefaultCategoryAndPathWorks() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("color")
+ .field("type", "category")
+ .field("default", "red")
+ .field("path", "color")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ ensureYellow();
+
+ index(INDEX, "item", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Hoodie red").endObject().endObject());
+ index(INDEX, "item", "2", jsonBuilder().startObject().startObject("suggest").field("input", "Hoodie blue").endObject().field("color", "blue").endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("h").size(10).addContextField("color", "red");
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Hoodie red");
+ }
+
+ @Test
+ public void testThatGeoPrecisionIsWorking() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", 4) // this means geo hashes with a length of four are used, like u345
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ ensureYellow();
+
+ // lets create some locations by geohashes in different cells with the precision 4
+        // this means, that poelchaustr is not a neighbour to alexanderplatz, but they share the same prefix until the fourth char!
+ GeoPoint alexanderplatz = GeoHashUtils.decode("u33dc1");
+ GeoPoint poelchaustr = GeoHashUtils.decode("u33du5");
+ GeoPoint dahlem = GeoHashUtils.decode("u336q"); // berlin dahlem, should be included with that precision
+        GeoPoint middleOfNoWhere = GeoHashUtils.decode("u334"); // location west of berlin, should not be included in any suggestions
+
+ index(INDEX, "item", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Alexanderplatz").field("weight", 3).startObject("context").startObject("location").field("lat", alexanderplatz.lat()).field("lon", alexanderplatz.lon()).endObject().endObject().endObject().endObject());
+ index(INDEX, "item", "2", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Poelchaustr.").field("weight", 2).startObject("context").startObject("location").field("lat", poelchaustr.lat()).field("lon", poelchaustr.lon()).endObject().endObject().endObject().endObject());
+ index(INDEX, "item", "3", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Far Away").field("weight", 1).startObject("context").startObject("location").field("lat", middleOfNoWhere.lat()).field("lon", middleOfNoWhere.lon()).endObject().endObject().endObject().endObject());
+ index(INDEX, "item", "4", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Dahlem").field("weight", 1).startObject("context").startObject("location").field("lat", dahlem.lat()).field("lon", dahlem.lon()).endObject().endObject().endObject().endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("b").size(10).addGeoLocation("location", alexanderplatz.lat(), alexanderplatz.lon());
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Berlin Alexanderplatz", "Berlin Poelchaustr.", "Berlin Dahlem");
+ }
+
+ @Test
+ public void testThatNeighborsCanBeExcluded() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", 6)
+ .field("neighbors", false)
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ ensureYellow();
+
+ GeoPoint alexanderplatz = GeoHashUtils.decode("u33dc1");
+ // does not look like it, but is a direct neighbor
+        // this test would fail, if the precision was set to 4, as then both cells would be the same, u33d
+ GeoPoint cellNeighbourOfAlexanderplatz = GeoHashUtils.decode("u33dbc");
+
+ index(INDEX, "item", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Alexanderplatz").field("weight", 3).startObject("context").startObject("location").field("lat", alexanderplatz.lat()).field("lon", alexanderplatz.lon()).endObject().endObject().endObject().endObject());
+ index(INDEX, "item", "2", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Hackescher Markt").field("weight", 2).startObject("context").startObject("location").field("lat", cellNeighbourOfAlexanderplatz.lat()).field("lon", cellNeighbourOfAlexanderplatz.lon()).endObject().endObject().endObject().endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("b").size(10).addGeoLocation("location", alexanderplatz.lat(), alexanderplatz.lon());
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Berlin Alexanderplatz");
+ }
+
+ @Test
+ public void testThatGeoPathCanBeSelected() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", "5m")
+ .field("path", "loc")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ ensureYellow();
+
+ GeoPoint alexanderplatz = GeoHashUtils.decode("u33dc1");
+ index(INDEX, "item", "1", jsonBuilder().startObject().startObject("suggest").field("input", "Berlin Alexanderplatz").endObject().startObject("loc").field("lat", alexanderplatz.lat()).field("lon", alexanderplatz.lon()).endObject().endObject());
+ refresh();
+
+ CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion("suggestion").field("suggest").text("b").size(10).addGeoLocation("location", alexanderplatz.lat(), alexanderplatz.lon());
+ SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(suggestionBuilder).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Berlin Alexanderplatz");
+ }
+
+ @Test(expected = MapperParsingException.class)
+ public void testThatPrecisionIsRequired() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("item").startObject("properties").startObject("suggest")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("path", "loc")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject();
+
+ assertAcked(prepareCreate(INDEX).addMapping("item", xContentBuilder));
+ }
+
+ @Test
+ public void testThatLatLonParsingFromSourceWorks() throws Exception {
+ XContentBuilder xContentBuilder = jsonBuilder().startObject()
+ .startObject("mappings").startObject("test").startObject("properties").startObject("suggest_geo")
+ .field("type", "completion")
+ .startObject("context").startObject("location")
+ .field("type", "geo")
+ .field("precision", "1km")
+ .endObject().endObject()
+ .endObject().endObject().endObject()
+ .endObject().endObject();
+
+ assertAcked(prepareCreate("test").setSource(xContentBuilder.bytes()));
+
+ double latitude = 52.22;
+ double longitude = 4.53;
+ String geohash = GeoHashUtils.encode(latitude, longitude);
+
+ XContentBuilder doc1 = jsonBuilder().startObject().startObject("suggest_geo").field("input", "Hotel Marriot in Amsterdam").startObject("context").startObject("location").field("lat", latitude).field("lon", longitude).endObject().endObject().endObject().endObject();
+ index("test", "test", "1", doc1);
+ XContentBuilder doc2 = jsonBuilder().startObject().startObject("suggest_geo").field("input", "Hotel Marriot in Berlin").startObject("context").startObject("location").field("lat", 53.31).field("lon", 13.24).endObject().endObject().endObject().endObject();
+ index("test", "test", "2", doc2);
+ refresh();
+
+ XContentBuilder source = jsonBuilder().startObject().startObject("suggestion").field("text", "h").startObject("completion").field("field", "suggest_geo").startObject("context").field("location", geohash).endObject().endObject().endObject().endObject();
+ SuggestRequest suggestRequest = new SuggestRequest(INDEX).suggest(source.bytes());
+ SuggestResponse suggestResponse = client().suggest(suggestRequest).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Hotel Marriot in Amsterdam");
+
+        // this is exactly the same request, but using lat/lon instead of geohash
+ source = jsonBuilder().startObject().startObject("suggestion").field("text", "h").startObject("completion").field("field", "suggest_geo").startObject("context").startObject("location").field("lat", latitude).field("lon", longitude).endObject().endObject().endObject().endObject().endObject();
+ suggestRequest = new SuggestRequest(INDEX).suggest(source.bytes());
+ suggestResponse = client().suggest(suggestRequest).get();
+ assertSuggestion(suggestResponse.getSuggest(), 0, "suggestion", "Hotel Marriot in Amsterdam");
+ }
+
+ public void assertGeoSuggestionsInRange(String location, String suggest, double precision) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggest).size(10)
+ .addGeoLocation("st", location);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+
+ Suggest suggest2 = suggestResponse.getSuggest();
+ assertTrue(suggest2.iterator().hasNext());
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ assertTrue(suggestion.iterator().hasNext());
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ assertTrue(options.iterator().hasNext());
+ for (CompletionSuggestion.Entry.Option option : options) {
+ String target = option.getPayloadAsString();
+ assertDistance(location, target, Matchers.lessThanOrEqualTo(precision));
+ }
+ }
+ }
+ }
+
+ public void assertPrefixSuggestions(long prefix, String suggest, String... hits) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggest)
+ .size(hits.length + 1).addCategory("st", Long.toString(prefix));
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+ ArrayList<String> suggestions = new ArrayList<>();
+ Suggest suggest2 = suggestResponse.getSuggest();
+ assertTrue(suggest2.iterator().hasNext());
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ for (CompletionSuggestion.Entry.Option option : options) {
+ Map<String, Object> payload = option.getPayloadAsMap();
+ int group = (Integer) payload.get("group");
+ String text = option.getText().string();
+ assertEquals(prefix, group);
+ suggestions.add(text);
+ }
+ }
+ }
+ assertSuggestionsMatch(suggestions, hits);
+ }
+
+ public void assertContextWithFuzzySuggestions(String[] prefix1, String[] prefix2, String suggest, String... hits) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionFuzzyBuilder context = SuggestBuilders.fuzzyCompletionSuggestion(suggestionName).field(FIELD).text(suggest)
+ .size(hits.length + 10).addContextField("st", prefix1).addContextField("nd", prefix2).setFuzziness(Fuzziness.TWO);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+
+ ArrayList<String> suggestions = new ArrayList<>();
+
+ Suggest suggest2 = suggestResponse.getSuggest();
+ assertTrue(suggest2.iterator().hasNext());
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ for (CompletionSuggestion.Entry.Option option : options) {
+ Map<String, Object> payload = option.getPayloadAsMap();
+ String text = option.getText().string();
+ assertThat(prefix1, Matchers.hasItemInArray(payload.get("categoryA")));
+ assertThat(prefix2, Matchers.hasItemInArray(payload.get("categoryB")));
+ suggestions.add(text);
+ }
+ }
+ }
+
+ assertSuggestionsMatch(suggestions, hits);
+ }
+
+ public void assertFieldSuggestions(String value, String suggest, String... hits) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggest).size(10)
+ .addContextField("st", value);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+
+ ArrayList<String> suggestions = new ArrayList<>();
+
+ Suggest suggest2 = suggestResponse.getSuggest();
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ for (CompletionSuggestion.Entry.Option option : options) {
+ String payload = option.getPayloadAsString();
+ String text = option.getText().string();
+ assertEquals(value, payload);
+ suggestions.add(text);
+ }
+ }
+ }
+ assertSuggestionsMatch(suggestions, hits);
+ }
+
+ public void assertDoubleFieldSuggestions(String field1, String field2, String suggest, String... hits) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggest).size(10)
+ .addContextField("st", field1).addContextField("nd", field2);
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+ ArrayList<String> suggestions = new ArrayList<>();
+
+ Suggest suggest2 = suggestResponse.getSuggest();
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ for (CompletionSuggestion.Entry.Option option : options) {
+ String payload = option.getPayloadAsString();
+ String text = option.getText().string();
+ assertEquals(field1 + "|" + field2, payload);
+ suggestions.add(text);
+ }
+ }
+ }
+ assertSuggestionsMatch(suggestions, hits);
+ }
+
+ public void assertMultiContextSuggestions(String value1, String value2, String suggest, String... hits) throws IOException {
+ String suggestionName = randomAsciiOfLength(10);
+ CompletionSuggestionBuilder context = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggest).size(10)
+ .addContextField("st", value1).addContextField("nd", value2);
+
+ SuggestRequestBuilder suggestionRequest = client().prepareSuggest(INDEX).addSuggestion(context);
+ SuggestResponse suggestResponse = suggestionRequest.execute().actionGet();
+ ArrayList<String> suggestions = new ArrayList<>();
+
+ Suggest suggest2 = suggestResponse.getSuggest();
+ for (Suggestion<? extends Entry<? extends Option>> s : suggest2) {
+ CompletionSuggestion suggestion = (CompletionSuggestion) s;
+ for (CompletionSuggestion.Entry entry : suggestion) {
+ List<CompletionSuggestion.Entry.Option> options = entry.getOptions();
+ for (CompletionSuggestion.Entry.Option option : options) {
+ String payload = option.getPayloadAsString();
+ String text = option.getText().string();
+ assertEquals(value1 + value2, payload);
+ suggestions.add(text);
+ }
+ }
+ }
+ assertSuggestionsMatch(suggestions, hits);
+ }
+
+ private void assertSuggestionsMatch(List<String> suggestions, String... hits) {
+ boolean[] suggested = new boolean[hits.length];
+ Arrays.sort(hits);
+ Arrays.fill(suggested, false);
+ int numSuggestions = 0;
+
+ for (String suggestion : suggestions) {
+ int hitpos = Arrays.binarySearch(hits, suggestion);
+
+ assertEquals(hits[hitpos], suggestion);
+ assertTrue(hitpos >= 0);
+ assertTrue(!suggested[hitpos]);
+
+ suggested[hitpos] = true;
+ numSuggestions++;
+
+ }
+ assertEquals(hits.length, numSuggestions);
+ }
+
+ private XContentBuilder createMapping(String type, ContextBuilder<?>... context) throws IOException {
+ return createMapping(type, false, context);
+ }
+
+ private XContentBuilder createMapping(String type, boolean preserveSeparators, ContextBuilder<?>... context) throws IOException {
+ return createMapping(type, "simple", "simple", true, preserveSeparators, true, context);
+ }
+
+ private XContentBuilder createMapping(String type, String indexAnalyzer, String searchAnalyzer, boolean payloads, boolean preserveSeparators,
+ boolean preservePositionIncrements, ContextBuilder<?>... contexts) throws IOException {
+ XContentBuilder mapping = jsonBuilder();
+ mapping.startObject();
+ mapping.startObject(type);
+ mapping.startObject("properties");
+ mapping.startObject(FIELD);
+ mapping.field("type", "completion");
+ mapping.field("analyzer", indexAnalyzer);
+ mapping.field("search_analyzer", searchAnalyzer);
+ mapping.field("payloads", payloads);
+ mapping.field("preserve_separators", preserveSeparators);
+ mapping.field("preserve_position_increments", preservePositionIncrements);
+
+ mapping.startObject("context");
+ for (ContextBuilder<? extends ContextMapping> context : contexts) {
+ mapping.value(context.build());
+ }
+ mapping.endObject();
+
+ mapping.endObject();
+ mapping.endObject();
+ mapping.endObject();
+ mapping.endObject();
+ return mapping;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
new file mode 100644
index 0000000000..6e57390a16
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.util.CharsRefBuilder;
+import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ *
+ */
+public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> {
+
+
+ // This is a pretty dumb implementation which returns the original text + fieldName + custom config option + 12 or 123
+ @Override
+ public Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(String name, CustomSuggestionsContext suggestion, IndexSearcher searcher, CharsRefBuilder spare) throws IOException {
+ // Get the suggestion context
+ String text = suggestion.getText().utf8ToString();
+
+ // create two suggestions with 12 and 123 appended
+ Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> response = new Suggest.Suggestion<>(name, suggestion.getSize());
+
+ String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<>(new StringText(firstSuggestion), 0, text.length() + 2);
+ response.addTerm(resultEntry12);
+
+ String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123");
+ Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<>(new StringText(secondSuggestion), 0, text.length() + 3);
+ response.addTerm(resultEntry123);
+
+ return response;
+ }
+
+ @Override
+ public String[] names() {
+ return new String[] {"custom"};
+ }
+
+ @Override
+ public SuggestContextParser getContextParser() {
+ return new SuggestContextParser() {
+ @Override
+ public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException {
+ Map<String, Object> options = parser.map();
+ CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options);
+ suggestionContext.setField((String) options.get("field"));
+ return suggestionContext;
+ }
+ };
+ }
+
+ public static class CustomSuggestionsContext extends SuggestionSearchContext.SuggestionContext {
+
+ public Map<String, Object> options;
+
+ public CustomSuggestionsContext(Suggester suggester, Map<String, Object> options) {
+ super(suggester);
+ this.options = options;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
new file mode 100644
index 0000000000..a54421cb1f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterPlugin.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import org.elasticsearch.plugins.AbstractPlugin;
+
+/**
+ *
+ */
+public class CustomSuggesterPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-plugin-custom-suggester";
+ }
+
+ @Override
+ public String description() {
+ return "Custom suggester to test pluggable implementation";
+ }
+
+ public void onModule(SuggestModule suggestModule) {
+ suggestModule.registerSuggester(CustomSuggester.class);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
new file mode 100644
index 0000000000..c6f48e7600
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggesterSearchTests.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope= Scope.SUITE, numDataNodes =1)
+public class CustomSuggesterSearchTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("plugin.types", CustomSuggesterPlugin.class.getName()).build();
+ }
+
+ @Test
+ public void testThatCustomSuggestersCanBeRegisteredAndWork() throws Exception {
+ createIndex("test");
+ client().prepareIndex("test", "test", "1").setSource(jsonBuilder()
+ .startObject()
+ .field("name", "arbitrary content")
+ .endObject())
+ .setRefresh(true).execute().actionGet();
+ ensureYellow();
+
+ String randomText = randomAsciiOfLength(10);
+ String randomField = randomAsciiOfLength(10);
+ String randomSuffix = randomAsciiOfLength(10);
+ SearchRequestBuilder searchRequestBuilder = client().prepareSearch("test").setTypes("test").setFrom(0).setSize(1);
+ XContentBuilder query = jsonBuilder().startObject()
+ .startObject("suggest")
+ .startObject("someName")
+ .field("text", randomText)
+ .startObject("custom")
+ .field("field", randomField)
+ .field("suffix", randomSuffix)
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ searchRequestBuilder.setExtraSource(query.bytes());
+
+ SearchResponse searchResponse = searchRequestBuilder.execute().actionGet();
+
+ // TODO: infer type once JI-9019884 is fixed
+ List<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> suggestions = Lists.<Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>newArrayList(searchResponse.getSuggest().getSuggestion("someName").iterator());
+ assertThat(suggestions, hasSize(2));
+ assertThat(suggestions.get(0).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-12", randomText, randomField, randomSuffix)));
+ assertThat(suggestions.get(1).getText().string(), is(String.format(Locale.ROOT, "%s-%s-%s-123", randomText, randomField, randomSuffix)));
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
new file mode 100644
index 0000000000..b5c114ab38
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java
@@ -0,0 +1,1285 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Resources;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.action.suggest.SuggestRequestBuilder;
+import org.elasticsearch.action.suggest.SuggestResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.DirectCandidateGenerator;
+import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.search.suggest.SuggestBuilders.phraseSuggestion;
+import static org.elasticsearch.search.suggest.SuggestBuilders.termSuggestion;
+import static org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.candidateGenerator;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * Integration tests for term and phrase suggestions. Many of these tests make many requests that vary only slightly from one another.
+ * Where possible these tests should declare the configuration for the first request, make the request, modify the configuration for the
+ * next request, make that request, modify again, request again, etc. This makes it very obvious what changes between requests.
+ */
+public class SuggestSearchTests extends ElasticsearchIntegrationTest {
+
+ @Test // see #3196
+ public void testSuggestAcrossMultipleIndices() throws IOException {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ logger.info("--> run suggestions with one index");
+ searchSuggest( termSuggest);
+ createIndex("test_1");
+ ensureGreen();
+
+ index("test_1", "type1", "1", "text", "ab cd");
+ index("test_1", "type1", "2", "text", "aa cd");
+ index("test_1", "type1", "3", "text", "ab bd");
+ index("test_1", "type1", "4", "text", "ab cc");
+ refresh();
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with two indices");
+ searchSuggest( termSuggest);
+
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("text").field("type", "string").field("analyzer", "keyword").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(prepareCreate("test_2").addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test_2", "type1", "1", "text", "ab cd");
+ index("test_2", "type1", "2", "text", "aa cd");
+ index("test_2", "type1", "3", "text", "ab bd");
+ index("test_2", "type1", "4", "text", "ab cc");
+ index("test_2", "type1", "1", "text", "abcd");
+ index("test_2", "type1", "2", "text", "aacd");
+ index("test_2", "type1", "3", "text", "abbd");
+ index("test_2", "type1", "4", "text", "abcc");
+ refresh();
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ab cd")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with three indices");
+ try {
+ searchSuggest( termSuggest);
+ fail(" can not suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(IllegalStateException.class));
+ assertThat(ex.getCause().getMessage(),
+ anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ } catch (IllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"),
+ endsWith("Suggest entries have different sizes actual [2] expected [1]")));
+ }
+
+
+ termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("ABCD")
+ .minWordLength(1)
+ .field("text");
+ logger.info("--> run suggestions with four indices");
+ try {
+ searchSuggest( termSuggest);
+ fail(" can not suggest across multiple indices with different analysis chains");
+ } catch (ReduceSearchPhaseException ex) {
+ assertThat(ex.getCause(), instanceOf(IllegalStateException.class));
+ assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ } catch (IllegalStateException ex) {
+ assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"),
+ endsWith("Suggest entries have different text actual [abcd] expected [ABCD]")));
+ }
+ }
+
+ @Test // see #3037
+ public void testSuggestModes() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("shingled")
+ .field("type", "string")
+ .field("analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+
+ index("test", "type1", "1", "name", "I like iced tea");
+ index("test", "type1", "2", "name", "I like tea.");
+ index("test", "type1", "3", "name", "I like ice cream.");
+ refresh();
+
+ DirectCandidateGenerator generator = candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2);
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name.shingled")
+ .addCandidateGenerator(generator)
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest( "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "did_you_mean", "iced tea");
+
+ generator.suggestMode(null);
+ searchSuggest = searchSuggest( "ice tea", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "did_you_mean");
+ }
+
+ @Test // see #2729
+ public void testSizeOneShard() throws Exception {
+ prepareCreate("test").setSettings(
+ SETTING_NUMBER_OF_SHARDS, 1,
+ SETTING_NUMBER_OF_REPLICAS, 0).get();
+ ensureGreen();
+
+ for (int i = 0; i < 15; i++) {
+ index("test", "type1", Integer.toString(i), "text", "abc" + i);
+ }
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellchecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggestion = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text")
+ .size(10);
+ Suggest suggest = searchSuggest( termSuggestion);
+ assertSuggestion(suggest, 0, "test", 10, "abc0");
+
+ termSuggestion.text("abcd").shardSize(5);
+ suggest = searchSuggest( termSuggestion);
+ assertSuggestion(suggest, 0, "test", 5, "abc0");
+ }
+
+ @Test
+ public void testUnmappedField() throws IOException, InterruptedException, ExecutionException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.biword.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.biword.filter", "shingler", "lowercase")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 3));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .endObject()
+ .startObject("shingled")
+ .field("type", "string")
+ .field("analyzer", "biword")
+ .field("search_analyzer", "standard")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ indexRandom(true, client().prepareIndex("test", "type1").setSource("name", "I like iced tea"),
+ client().prepareIndex("test", "type1").setSource("name", "I like tea."),
+ client().prepareIndex("test", "type1").setSource("name", "I like ice cream."));
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("did_you_mean").field("name.shingled")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("name").prefixLength(0).minWordLength(0).suggestMode("always").maxEdits(2))
+ .gramSize(3);
+ Suggest searchSuggest = searchSuggest( "ice tea", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, 0, "did_you_mean", "iced tea");
+
+ phraseSuggestion.field("nosuchField");
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSize(0);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ {
+ SearchRequestBuilder suggestBuilder = client().prepareSearch().setSize(0);
+ suggestBuilder.setSuggestText("tetsting sugestion");
+ suggestBuilder.addSuggestion(phraseSuggestion);
+ assertThrows(suggestBuilder, SearchPhaseExecutionException.class);
+ }
+ }
+
+ @Test
+ public void testSimple() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "text", "abcd");
+ index("test", "type1", "2", "text", "aacd");
+ index("test", "type1", "3", "text", "abbd");
+ index("test", "type1", "4", "text", "abcc");
+ refresh();
+
+ SearchResponse search = client().prepareSearch().setQuery(matchQuery("text", "spellcecker")).get();
+ assertThat("didn't ask for suggestions but got some", search.getSuggest(), nullValue());
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest( termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd", "abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest( termSuggest);
+ assertSuggestion(suggest, 0, "test", "aacd","abbd", "abcc");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testEmpty() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "type1", "1", "foo", "bar");
+ refresh();
+
+ TermSuggestionBuilder termSuggest = termSuggestion("test")
+ .suggestMode("always") // Always, otherwise the results can vary between requests.
+ .text("abcd")
+ .field("text");
+ Suggest suggest = searchSuggest( termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+
+ suggest = searchSuggest( termSuggest);
+ assertSuggestionSize(suggest, 0, 0, "test");
+ assertThat(suggest.getSuggestion("test").getEntries().get(0).getText().string(), equalTo("abcd"));
+ }
+
+ @Test
+ public void testWithMultipleCommands() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ index("test", "typ1", "1", "field1", "prefix_abcd", "field2", "prefix_efgh");
+ index("test", "typ1", "2", "field1", "prefix_aacd", "field2", "prefix_eeeh");
+ index("test", "typ1", "3", "field1", "prefix_abbd", "field2", "prefix_efff");
+ index("test", "typ1", "4", "field1", "prefix_abcc", "field2", "prefix_eggg");
+ refresh();
+
+ Suggest suggest = searchSuggest(
+ termSuggestion("size1")
+ .size(1).text("prefix_abcd").maxTermFreq(10).prefixLength(1).minDocFreq(0)
+ .field("field1").suggestMode("always"),
+ termSuggestion("field2")
+ .field("field2").text("prefix_eeeh prefix_efgh")
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"),
+ termSuggestion("accuracy")
+ .field("field2").text("prefix_efgh").setAccuracy(1f)
+ .maxTermFreq(10).minDocFreq(0).suggestMode("always"));
+ assertSuggestion(suggest, 0, "size1", "prefix_aacd");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(0).getText().string(), equalTo("prefix_eeeh"));
+ assertSuggestion(suggest, 0, "field2", "prefix_efgh");
+ assertThat(suggest.getSuggestion("field2").getEntries().get(1).getText().string(), equalTo("prefix_efgh"));
+ assertSuggestion(suggest, 1, "field2", "prefix_eeeh", "prefix_efff", "prefix_eggg");
+ assertSuggestionSize(suggest, 0, 0, "accuracy");
+ }
+
+ @Test
+ public void testSizeAndSort() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ Map<String, Integer> termsAndDocCount = new HashMap<>();
+ termsAndDocCount.put("prefix_aaad", 20);
+ termsAndDocCount.put("prefix_abbb", 18);
+ termsAndDocCount.put("prefix_aaca", 16);
+ termsAndDocCount.put("prefix_abba", 14);
+ termsAndDocCount.put("prefix_accc", 12);
+ termsAndDocCount.put("prefix_addd", 10);
+ termsAndDocCount.put("prefix_abaa", 8);
+ termsAndDocCount.put("prefix_dbca", 6);
+ termsAndDocCount.put("prefix_cbad", 4);
+ termsAndDocCount.put("prefix_aacd", 1);
+ termsAndDocCount.put("prefix_abcc", 1);
+ termsAndDocCount.put("prefix_accd", 1);
+
+ for (Map.Entry<String, Integer> entry : termsAndDocCount.entrySet()) {
+ for (int i = 0; i < entry.getValue(); i++) {
+ index("test", "type1", entry.getKey() + i, "field1", entry.getKey());
+ }
+ }
+ refresh();
+
+ Suggest suggest = searchSuggest( "prefix_abcd",
+ termSuggestion("size3SortScoreFirst")
+ .size(3).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortScoreFirst")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always").shardSize(50),
+ termSuggestion("size3SortScoreFirstMaxEdits1")
+ .maxEdits(1)
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"),
+ termSuggestion("size10SortFrequencyFirst")
+ .size(10).sort("frequency").shardSize(1000)
+ .minDocFreq(0).field("field1").suggestMode("always"));
+
+ // The commented out assertions fail sometimes because suggestions are based off of shard frequencies instead of index frequencies.
+ assertSuggestion(suggest, 0, "size3SortScoreFirst", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortScoreFirst", 10, "prefix_aacd", "prefix_abcc", "prefix_accd" /*, "prefix_aaad" */);
+ assertSuggestion(suggest, 0, "size3SortScoreFirstMaxEdits1", "prefix_aacd", "prefix_abcc", "prefix_accd");
+ assertSuggestion(suggest, 0, "size10SortFrequencyFirst", "prefix_aaad", "prefix_abbb", "prefix_aaca", "prefix_abba",
+ "prefix_accc", "prefix_addd", "prefix_abaa", "prefix_dbca", "prefix_cbad", "prefix_aacd");
+
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_abcc"));
+ // assertThat(suggest.get(3).getSuggestedWords().get("prefix_abcd").get(4).getTerm(), equalTo("prefix_accd"));
+ }
+
+ @Test // see #2817
+ public void testStopwordsOnlyPhraseSuggest() throws IOException {
+ assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=string,analyzer=stopwd").setSettings(
+ settingsBuilder()
+ .put("index.analysis.analyzer.stopwd.tokenizer", "whitespace")
+ .putArray("index.analysis.analyzer.stopwd.filter", "stop")
+ ));
+ ensureGreen();
+ index("test", "typ1", "1", "body", "this is a test");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest( "a an the",
+ phraseSuggestion("simple_phrase").field("body").gramSize(1)
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1));
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+ }
+
+ @Test
+ public void testPrefixLength() throws IOException { // Stopped here
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("body_reverse").field("type", "string").field("analyzer", "reverse").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ index("test", "type1", "1", "body", "hello world");
+ index("test", "type1", "2", "body", "hello world");
+ index("test", "type1", "3", "body", "hello words");
+ refresh();
+
+ Suggest searchSuggest = searchSuggest( "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(4).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello words");
+
+ searchSuggest = searchSuggest( "hello word",
+ phraseSuggestion("simple_phrase").field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").prefixLength(2).minWordLength(1).suggestMode("always"))
+ .size(1).confidence(1.0f));
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "hello world");
+ }
+
+ @Test
+ @Slow
+ @Nightly
+ public void testMarvelHerosPhraseSuggest() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body").
+ field("type", "string").
+ field("analyzer", "body")
+ .endObject()
+ .startObject("body_reverse").
+ field("type", "string").
+ field("analyzer", "reverse")
+ .endObject()
+ .startObject("bigram").
+ field("type", "string").
+ field("analyzer", "bigram")
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "body_reverse", line, "bigram", line);
+ }
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggest = phraseSuggestion("simple_phrase")
+ .field("bigram").gramSize(2).analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .size(1);
+ Suggest searchSuggest = searchSuggest( "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("american ame"));
+
+ phraseSuggest.realWordErrorLikelihood(0.95f);
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ // Check the "text" field this one time.
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel"));
+
+ // Ask for highlighting
+ phraseSuggest.highlight("<em>", "</em>");
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getOptions().get(0).getHighlighted().string(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+
+ // pass in a correct phrase
+ phraseSuggest.highlight(null, null).confidence(0f).size(1).maxErrors(0.5f);
+ searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // pass in a correct phrase - set confidence to 2
+ phraseSuggest.confidence(2f);
+ searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // pass in a correct phrase - set confidence to 0.99
+ phraseSuggest.confidence(0.99f);
+ searchSuggest = searchSuggest( "Xorr the God-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ //test reverse suggestions with pre & post filter
+ phraseSuggest
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .addCandidateGenerator(candidateGenerator("body_reverse").minWordLength(1).suggestMode("always").preFilter("reverse").postFilter("reverse"));
+ searchSuggest = searchSuggest( "xor the yod-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // set all mass to trigrams (not indexed)
+ phraseSuggest.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"))
+ .smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(1,0,0));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // set all mass to bigrams
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0,1,0));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // distribute mass
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ searchSuggest = searchSuggest( "american ame", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "american ace");
+
+ // try all smoothing methods
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.LinearInterpolation(0.4,0.4,0.2));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.Laplace(0.2));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ phraseSuggest.smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+
+ // check tokenLimit
+ phraseSuggest.smoothingModel(null).tokenLimit(4);
+ searchSuggest = searchSuggest( "Xor the Got-Jewel", phraseSuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ phraseSuggest.tokenLimit(15).smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1));
+ searchSuggest = searchSuggest( "Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel", phraseSuggest);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel xorr the god jewel xorr the god jewel");
+ // Check the name this time because we're repeating it which is funky
+ assertThat(searchSuggest.getSuggestion("simple_phrase").getEntries().get(0).getText().string(), equalTo("Xor the Got-Jewel Xor the Got-Jewel Xor the Got-Jewel"));
+ }
+
+ @Test
+ public void testSizePararm() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(SETTING_NUMBER_OF_SHARDS, 1)
+ .put("index.analysis.analyzer.reverse.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.reverse.filter", "lowercase", "reverse")
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .startObject("body_reverse")
+ .field("type", "string")
+ .field("analyzer", "reverse")
+ .endObject()
+ .startObject("bigram")
+ .field("type", "string")
+ .field("analyzer", "bigram")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ String line = "xorr the god jewel";
+ index("test", "type1", "1", "body", line, "body_reverse", line, "bigram", line);
+ line = "I got it this time";
+ index("test", "type1", "2", "body", line, "body_reverse", line, "bigram", line);
+ refresh();
+
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .realWordErrorLikelihood(0.95f)
+ .field("bigram")
+ .gramSize(2)
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(1).accuracy(0.1f))
+ .smoothingModel(new PhraseSuggestionBuilder.StupidBackoff(0.1))
+ .maxErrors(1.0f)
+ .size(5);
+ Suggest searchSuggest = searchSuggest( "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ // we allow a size of 2 now on the shard generator level so "god" will be found since it's LD2
+ phraseSuggestion.clearCandidateGenerators()
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).prefixLength(1).suggestMode("always").size(2).accuracy(0.1f));
+ searchSuggest = searchSuggest( "Xorr the Gut-Jewel", phraseSuggestion);
+ assertSuggestion(searchSuggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ @Nightly
+ public void testPhraseBoundaryCases() throws IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) // to get reliable statistics we should put this all into one shard
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase")
+ .put("index.analysis.analyzer.bigram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.bigram.filter", "my_shingle", "lowercase")
+ .put("index.analysis.analyzer.ngram.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.ngram.filter", "my_shingle2", "lowercase")
+ .put("index.analysis.analyzer.myDefAnalyzer.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.myDefAnalyzer.filter", "shingle", "lowercase")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", false)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.type", "shingle")
+ .put("index.analysis.filter.my_shingle2.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle2.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle2.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject().startObject("type1")
+ .startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject()
+ .startObject("properties")
+ .startObject("body").field("type", "string").field("analyzer", "body").endObject()
+ .startObject("bigram").field("type", "string").field("analyzer", "bigram").endObject()
+ .startObject("ngram").field("type", "string").field("analyzer", "ngram").endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ for (String line: Resources.readLines(SuggestSearchTests.class.getResource("/config/names.txt"), Charsets.UTF_8)) {
+ index("test", "type1", line, "body", line, "bigram", line, "ngram", line);
+ }
+ refresh();
+
+ NumShards numShards = getNumShards("test");
+
+ // Lets make sure some things throw exceptions
+ PhraseSuggestionBuilder phraseSuggestion = phraseSuggestion("simple_phrase")
+ .field("bigram")
+ .analyzer("body")
+ .addCandidateGenerator(candidateGenerator("does_not_exist").minWordLength(1).suggestMode("always"))
+ .realWordErrorLikelihood(0.95f)
+ .maxErrors(0.5f)
+ .size(1);
+ try {
+ searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion);
+ fail("field does not exists");
+ } catch (SearchPhaseExecutionException e) {}
+
+ phraseSuggestion.clearCandidateGenerators().analyzer(null);
+ try {
+ searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion);
+ fail("analyzer does only produce ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ phraseSuggestion.analyzer("bigram");
+ try {
+ searchSuggest( "Xor the Got-Jewel", numShards.numPrimaries, phraseSuggestion);
+ fail("analyzer does only produce ngrams");
+ } catch (SearchPhaseExecutionException e) {
+ }
+
+ // Now we'll make sure some things don't
+ phraseSuggestion.forceUnigrams(false);
+ searchSuggest( "Xor the Got-Jewel", phraseSuggestion);
+
+ // Field doesn't produce unigrams but the analyzer does
+ phraseSuggestion.forceUnigrams(true).field("bigram").analyzer("ngram");
+ searchSuggest( "Xor the Got-Jewel",
+ phraseSuggestion);
+
+ phraseSuggestion.field("ngram").analyzer("myDefAnalyzer")
+ .addCandidateGenerator(candidateGenerator("body").minWordLength(1).suggestMode("always"));
+ Suggest suggest = searchSuggest( "Xor the Got-Jewel", phraseSuggestion);
+
+ // "xorr the god jewel" and "xorn the god jewel" have identical scores (we are only using unigrams to score), so we tie break by
+ // earlier term (xorn):
+ assertSuggestion(suggest, 0, "simple_phrase", "xorn the god jewel");
+
+ phraseSuggestion.analyzer(null);
+ suggest = searchSuggest( "Xor the Got-Jewel", phraseSuggestion);
+
+ // In this case xorr has a better score than xorn because we set the field back to the default (my_shingle2) analyzer, so the
+ // probability that the term is not in the dictionary but is NOT a misspelling is relatively high in this case compared to the
+ // others that have no n-gram with the other terms in the phrase :) you can set this via realWordErrorLikelihood
+ assertSuggestion(suggest, 0, "simple_phrase", "xorr the god jewel");
+ }
+
+ @Test
+ public void testDifferentShardSize() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ indexRandom(true, client().prepareIndex("test", "type1", "1").setSource("field1", "foobar1").setRouting("1"),
+ client().prepareIndex("test", "type1", "2").setSource("field1", "foobar2").setRouting("2"),
+ client().prepareIndex("test", "type1", "3").setSource("field1", "foobar3").setRouting("3"));
+
+ Suggest suggest = searchSuggest( "foobar",
+ termSuggestion("simple")
+ .size(10).minDocFreq(0).field("field1").suggestMode("always"));
+ ElasticsearchAssertions.assertSuggestionSize(suggest, 0, 3, "simple");
+ }
+
+ @Test // see #3469
+ public void testShardFailures() throws IOException, InterruptedException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type2")
+ .startObject("properties")
+ .startObject("name")
+ .field("type", "multi_field")
+ .startObject("fields")
+ .startObject("name")
+ .field("type", "string")
+ .field("analyzer", "suggest")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject().endObject();
+ assertAcked(builder.addMapping("type2", mapping));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type2", "3", "foo", "bar");
+ index("test", "type2", "4", "foo", "bar");
+ index("test", "type2", "5", "foo", "bar");
+ index("test", "type2", "1", "name", "Just testing the suggestions api");
+ index("test", "type2", "2", "name", "An other title about equal length");
+ // Note that the last document has to have about the same length as the other or cutoff rechecking will remove the useful suggestion.
+ refresh();
+
+ // When searching on a shard with a non existing mapping, we should fail
+ SearchRequestBuilder request = client().prepareSearch().setSize(0)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("fielddoesnotexist").maxErrors(5.0f));
+ assertThrows(request, SearchPhaseExecutionException.class);
+
+ // When searching on a shard which does not hold yet any document of an existing type, we should not fail
+ SearchResponse searchResponse = client().prepareSearch().setSize(0)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+ ElasticsearchAssertions.assertNoFailures(searchResponse);
+ ElasticsearchAssertions.assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ @Test // see #3469
+ public void testEmptyShards() throws IOException, InterruptedException {
+ XContentBuilder mappingBuilder = XContentFactory.jsonBuilder().
+ startObject().
+ startObject("type1").
+ startObject("properties").
+ startObject("name").
+ field("type", "multi_field").
+ startObject("fields").
+ startObject("name").
+ field("type", "string").
+ field("analyzer", "suggest").
+ endObject().
+ endObject().
+ endObject().
+ endObject().
+ endObject().
+ endObject();
+ assertAcked(prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.suggest.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.suggest.filter", "standard", "lowercase", "shingler")
+ .put("index.analysis.filter.shingler.type", "shingle")
+ .put("index.analysis.filter.shingler.min_shingle_size", 2)
+ .put("index.analysis.filter.shingler.max_shingle_size", 5)
+ .put("index.analysis.filter.shingler.output_unigrams", true)).addMapping("type1", mappingBuilder));
+ ensureGreen();
+
+ index("test", "type2", "1", "foo", "bar");
+ index("test", "type2", "2", "foo", "bar");
+ index("test", "type1", "1", "name", "Just testing the suggestions api");
+ index("test", "type1", "2", "name", "An other title about equal length");
+ refresh();
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSize(0)
+ .setSuggestText("tetsting sugestion")
+ .addSuggestion(phraseSuggestion("did_you_mean").field("name").maxErrors(5.0f))
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertSuggestion(searchResponse.getSuggest(), 0, 0, "did_you_mean", "testing suggestions");
+ }
+
+ /**
+ * Searching for a rare phrase shouldn't provide any suggestions if confidence > 1. This was possible before we rechecked the cutoff
+ * score during the reduce phase. Failures don't occur every time - maybe two out of five tries but we don't repeat it to save time.
+ */
+ @Test
+ public void testSearchForRarePhrase() throws IOException {
+ // If there isn't enough chaff per shard then shards can become unbalanced, making the cutoff recheck this is testing do more harm than good.
+ int chafPerShard = 100;
+
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put("index.analysis.analyzer.body.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.body.filter", "lowercase", "my_shingle")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 2));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_all")
+ .field("store", "yes")
+ .field("termVector", "with_positions_offsets")
+ .endObject()
+ .startObject("properties")
+ .startObject("body")
+ .field("type", "string")
+ .field("analyzer", "body")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ NumShards test = getNumShards("test");
+
+ List<String> phrases = new ArrayList<>();
+ Collections.addAll(phrases, "nobel prize", "noble gases", "somethingelse prize", "pride and joy", "notes are fun");
+ for (int i = 0; i < 8; i++) {
+ phrases.add("noble somethingelse" + i);
+ }
+ for (int i = 0; i < test.numPrimaries * chafPerShard; i++) {
+ phrases.add("chaff" + i);
+ }
+ for (String phrase: phrases) {
+ index("test", "type1", phrase, "body", phrase);
+ }
+ refresh();
+
+ Suggest searchSuggest = searchSuggest("nobel prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
+ assertSuggestionSize(searchSuggest, 0, 0, "simple_phrase");
+
+ searchSuggest = searchSuggest("noble prize", phraseSuggestion("simple_phrase")
+ .field("body")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("body").minWordLength(1).suggestMode("always").maxTermFreq(.99f))
+ .confidence(2f)
+ .maxErrors(5f)
+ .size(1));
+ assertSuggestion(searchSuggest, 0, 0, "simple_phrase", "nobel prize");
+ }
+
+ /**
+ * If the suggester finds tons of options then picking the right one is slow without <<<INSERT SOLUTION HERE>>>.
+ */
+ @Test
+ @Nightly
+ public void suggestWithManyCandidates() throws InterruptedException, ExecutionException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable.
+ .put("index.analysis.analyzer.text.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.text.filter", "lowercase", "my_shingle")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 3));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .field("analyzer", "text")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ ImmutableList.Builder<String> titles = ImmutableList.<String>builder();
+
+ // We're going to be searching for:
+ // united states house of representatives elections in washington 2006
+ // But we need to make sure we generate a ton of suggestions so we add a bunch of candidates.
+ // Many of these candidates are drawn from page names on English Wikipedia.
+
+ // Tons of different options very near the exact query term
+ titles.add("United States House of Representatives Elections in Washington 1789");
+ for (int year = 1790; year < 2014; year+= 2) {
+ titles.add("United States House of Representatives Elections in Washington " + year);
+ }
+ // Six of these are near enough to be viable suggestions, just not the top one
+
+ // But we can't stop there! Titles that are just a year are pretty common so lets just add one per year
+ // since 0. Why not?
+ for (int year = 0; year < 2015; year++) {
+ titles.add(Integer.toString(year));
+ }
+ // That ought to provide more less-good candidates for the last term
+
+ // Now remove or add plural copies of every term we can
+ titles.add("State");
+ titles.add("Houses of Parliament");
+ titles.add("Representative Government");
+ titles.add("Election");
+
+ // Now some possessive
+ titles.add("Washington's Birthday");
+
+ // And some conjugation
+ titles.add("Unified Modeling Language");
+ titles.add("Unite Against Fascism");
+ titles.add("Stated Income Tax");
+ titles.add("Media organizations housed within colleges");
+
+ // And other stuff
+ titles.add("Untied shoelaces");
+ titles.add("Unit circle");
+ titles.add("Untitled");
+ titles.add("Unicef");
+ titles.add("Unrated");
+ titles.add("UniRed");
+ titles.add("Jalan Uniten–Dengkil"); // Highway in Malaysia
+ titles.add("UNITAS");
+ titles.add("UNITER");
+ titles.add("Un-Led-Ed");
+ titles.add("STATS LLC");
+ titles.add("Staples");
+ titles.add("Skates");
+ titles.add("Statues of the Liberators");
+ titles.add("Staten Island");
+ titles.add("Statens Museum for Kunst");
+ titles.add("Hause"); // The last name or the German word, whichever.
+ titles.add("Hose");
+ titles.add("Hoses");
+ titles.add("Howse Peak");
+ titles.add("The Hoose-Gow");
+ titles.add("Hooser");
+ titles.add("Electron");
+ titles.add("Electors");
+ titles.add("Evictions");
+ titles.add("Coronal mass ejection");
+ titles.add("Wasington"); // A film?
+ titles.add("Warrington"); // A town in England
+ titles.add("Waddington"); // Lots of places have this name
+ titles.add("Watlington"); // Ditto
+ titles.add("Waplington"); // Yup, also a town
+ titles.add("Washing of the Spears"); // Book
+
+ for (char c = 'A'; c <= 'Z'; c++) {
+ // Can't forget lists, glorious lists!
+ titles.add("List of former members of the United States House of Representatives (" + c + ")");
+
+ // Lots of people are named Washington <Middle Initial>. LastName
+ titles.add("Washington " + c + ". Lastname");
+
+ // Lets just add some more to be evil
+ titles.add("United " + c);
+ titles.add("States " + c);
+ titles.add("House " + c);
+ titles.add("Elections " + c);
+ titles.add("2006 " + c);
+ titles.add(c + " United");
+ titles.add(c + " States");
+ titles.add(c + " House");
+ titles.add(c + " Elections");
+ titles.add(c + " 2006");
+ }
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (String title: titles.build()) {
+ builders.add(client().prepareIndex("test", "type1").setSource("title", title));
+ }
+ indexRandom(true, builders);
+
+ PhraseSuggestionBuilder suggest = phraseSuggestion("title")
+ .field("title")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title")
+ .suggestMode("always")
+ .maxTermFreq(.99f)
+ .size(1000) // Setting a silly high size helps to generate a larger list of candidates for testing.
+ .maxInspections(1000) // This too
+ )
+ .confidence(0f)
+ .maxErrors(2f)
+ .shardSize(30000)
+ .size(30000);
+ Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest);
+ assertSuggestion(searchSuggest, 0, 0, "title", "united states house of representatives elections in washington 2006");
+ assertSuggestionSize(searchSuggest, 0, 25480, "title"); // Just to prove that we've run through a ton of options
+
+ suggest.size(1);
+ long start = System.currentTimeMillis();
+ searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest);
+ long total = System.currentTimeMillis() - start;
+ assertSuggestion(searchSuggest, 0, 0, "title", "united states house of representatives elections in washington 2006");
+ // assertThat(total, lessThan(1000L)); // Takes many seconds without fix - just for debugging
+ }
+
+ @Test
+ public void testPhraseSuggesterCollate() throws InterruptedException, ExecutionException, IOException {
+ CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable.
+ .put("index.analysis.analyzer.text.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.text.filter", "lowercase", "my_shingle")
+ .put("index.analysis.filter.my_shingle.type", "shingle")
+ .put("index.analysis.filter.my_shingle.output_unigrams", true)
+ .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+ .put("index.analysis.filter.my_shingle.max_shingle_size", 3));
+
+ XContentBuilder mapping = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("title")
+ .field("type", "string")
+ .field("analyzer", "text")
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject();
+ assertAcked(builder.addMapping("type1", mapping));
+ ensureGreen();
+
+ ImmutableList.Builder<String> titles = ImmutableList.<String>builder();
+
+ titles.add("United States House of Representatives Elections in Washington 2006");
+ titles.add("United States House of Representatives Elections in Washington 2005");
+ titles.add("State");
+ titles.add("Houses of Parliament");
+ titles.add("Representative Government");
+ titles.add("Election");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+ for (String title: titles.build()) {
+ builders.add(client().prepareIndex("test", "type1").setSource("title", title));
+ }
+ indexRandom(true, builders);
+
+ // suggest without collate
+ PhraseSuggestionBuilder suggest = phraseSuggestion("title")
+ .field("title")
+ .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title")
+ .suggestMode("always")
+ .maxTermFreq(.99f)
+ .size(10)
+ .maxInspections(200)
+ )
+ .confidence(0f)
+ .maxErrors(2f)
+ .shardSize(30000)
+ .size(10);
+ Suggest searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", suggest);
+ assertSuggestionSize(searchSuggest, 0, 10, "title");
+
+ // suggest with collate
+ String filterString = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("match_phrase")
+ .field("title", "{{suggestion}}")
+ .endObject()
+ .endObject()
+ .string();
+ PhraseSuggestionBuilder filteredQuerySuggest = suggest.collateQuery(filterString);
+ searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", filteredQuerySuggest);
+ assertSuggestionSize(searchSuggest, 0, 2, "title");
+
+ // collate suggest with no result (boundary case)
+ searchSuggest = searchSuggest("Elections of Representatives Parliament", filteredQuerySuggest);
+ assertSuggestionSize(searchSuggest, 0, 0, "title");
+
+ NumShards numShards = getNumShards("test");
+
+ // collate suggest with bad query
+ String incorrectFilterString = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("test")
+ .field("title", "{{suggestion}}")
+ .endObject()
+ .endObject()
+ .string();
+ PhraseSuggestionBuilder incorrectFilteredSuggest = suggest.collateQuery(incorrectFilterString);
+ try {
+ searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, incorrectFilteredSuggest);
+ fail("Post query error has been swallowed");
+ } catch(ElasticsearchException e) {
+ // expected
+ }
+
+ // suggest with collation
+ String filterStringAsFilter = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("query")
+ .startObject("match_phrase")
+ .field("title", "{{suggestion}}")
+ .endObject()
+ .endObject()
+ .endObject()
+ .string();
+
+ PhraseSuggestionBuilder filteredFilterSuggest = suggest.collateQuery(filterStringAsFilter);
+ searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", filteredFilterSuggest);
+ assertSuggestionSize(searchSuggest, 0, 2, "title");
+
+ // collate suggest with bad query
+ String filterStr = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("pprefix")
+ .field("title", "{{suggestion}}")
+ .endObject()
+ .endObject()
+ .string();
+
+ PhraseSuggestionBuilder in = suggest.collateQuery(filterStr);
+ try {
+ searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, in);
+ fail("Post filter error has been swallowed");
+ } catch(ElasticsearchException e) {
+ //expected
+ }
+
+ // collate script failure due to no additional params
+ String collateWithParams = XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("{{query_type}}")
+ .field("{{query_field}}", "{{suggestion}}")
+ .endObject()
+ .endObject()
+ .string();
+
+
+ PhraseSuggestionBuilder phraseSuggestWithNoParams = suggest.collateQuery(collateWithParams);
+ try {
+ searchSuggest("united states house of representatives elections in washington 2006", numShards.numPrimaries, phraseSuggestWithNoParams);
+ fail("Malformed query (lack of additional params) should fail");
+ } catch (ElasticsearchException e) {
+ // expected
+ }
+
+ // collate script with additional params
+ Map<String, Object> params = new HashMap<>();
+ params.put("query_type", "match_phrase");
+ params.put("query_field", "title");
+
+ PhraseSuggestionBuilder phraseSuggestWithParams = suggest.collateQuery(collateWithParams).collateParams(params);
+ searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", phraseSuggestWithParams);
+ assertSuggestionSize(searchSuggest, 0, 2, "title");
+
+ // collate query request with prune set to true
+ PhraseSuggestionBuilder phraseSuggestWithParamsAndReturn = suggest.collateQuery(collateWithParams).collateParams(params).collatePrune(true);
+ searchSuggest = searchSuggest("united states house of representatives elections in washington 2006", phraseSuggestWithParamsAndReturn);
+ assertSuggestionSize(searchSuggest, 0, 10, "title");
+ assertSuggestionPhraseCollateMatchExists(searchSuggest, "title", 2);
+ }
+
+ protected Suggest searchSuggest(SuggestionBuilder<?>... suggestion) {
+ return searchSuggest(null, suggestion);
+ }
+
+ protected Suggest searchSuggest(String suggestText, SuggestionBuilder<?>... suggestions) {
+ return searchSuggest(suggestText, 0, suggestions);
+ }
+
+ protected Suggest searchSuggest(String suggestText, int expectShardsFailed, SuggestionBuilder<?>... suggestions) {
+ if (randomBoolean()) {
+ SearchRequestBuilder builder = client().prepareSearch().setSize(0);
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+ SearchResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ return actionGet.getSuggest();
+ } else {
+ SuggestRequestBuilder builder = client().prepareSuggest();
+ if (suggestText != null) {
+ builder.setSuggestText(suggestText);
+ }
+ for (SuggestionBuilder<?> suggestion : suggestions) {
+ builder.addSuggestion(suggestion);
+ }
+
+ SuggestResponse actionGet = builder.execute().actionGet();
+ assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(expectShardsFailed));
+ if (expectShardsFailed > 0) {
+ throw new SearchPhaseExecutionException("suggest", "Suggest execution failed", new ShardSearchFailure[0]);
+ }
+ return actionGet.getSuggest();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
new file mode 100644
index 0000000000..a2867abcd8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.carrotsearch.hppc.ObjectLongHashMap;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XFuzzySuggester;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.Accountables;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.fst.ByteSequenceOutputs;
+import org.apache.lucene.util.fst.FST;
+import org.apache.lucene.util.fst.PairOutputs;
+import org.apache.lucene.util.fst.PairOutputs.Pair;
+import org.apache.lucene.util.fst.PositiveIntOutputs;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.CompletionLookupProvider;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.search.suggest.context.ContextMapping.ContextQuery;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import static org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester.HOLE_CHARACTER;
+
+/**
+ * This is an older implementation of the AnalyzingCompletionLookupProvider class
+ * We use this to test for backwards compatibility in our tests, namely
+ * CompletionPostingsFormatTest
+ * This ensures upgrades between versions work smoothly
+ */
+public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvider {
+
+ // for serialization
+ public static final int SERIALIZE_PRESERVE_SEPARATORS = 1;
+ public static final int SERIALIZE_HAS_PAYLOADS = 2;
+ public static final int SERIALIZE_PRESERVE_POSITION_INCREMENTS = 4;
+
+ private static final int MAX_SURFACE_FORMS_PER_ANALYZED_FORM = 256;
+ private static final int MAX_GRAPH_EXPANSIONS = -1;
+
+ public static final String CODEC_NAME = "analyzing";
+ public static final int CODEC_VERSION = 1;
+
+ private boolean preserveSep;
+ private boolean preservePositionIncrements;
+ private int maxSurfaceFormsPerAnalyzedForm;
+ private int maxGraphExpansions;
+ private boolean hasPayloads;
+ private final XAnalyzingSuggester prototype;
+
+ // important, these are the settings from the old xanalyzingsuggester
+ public static final int SEP_LABEL = 0xFF;
+ public static final int END_BYTE = 0x0;
+ public static final int PAYLOAD_SEP = '\u001f';
+
+ public AnalyzingCompletionLookupProviderV1(boolean preserveSep, boolean exactFirst, boolean preservePositionIncrements, boolean hasPayloads) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.hasPayloads = hasPayloads;
+ this.maxSurfaceFormsPerAnalyzedForm = MAX_SURFACE_FORMS_PER_ANALYZED_FORM;
+ this.maxGraphExpansions = MAX_GRAPH_EXPANSIONS;
+ int options = preserveSep ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+ // needs to be fixed in the suggester first before it can be supported
+ //options |= exactFirst ? XAnalyzingSuggester.EXACT_FIRST : 0;
+ prototype = new XAnalyzingSuggester(null, null, null, options, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions, preservePositionIncrements,
+ null, false, 1, SEP_LABEL, PAYLOAD_SEP, END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ }
+
+ @Override
+ public String getName() {
+ return "analyzing";
+ }
+
+ @Override
+ public FieldsConsumer consumer(final IndexOutput output) throws IOException {
+ // TODO write index header?
+ CodecUtil.writeHeader(output, CODEC_NAME, CODEC_VERSION);
+ return new FieldsConsumer() {
+ private Map<String, Long> fieldOffsets = new HashMap<>();
+
+ @Override
+ public void close() throws IOException {
+ try { /*
+ * write the offsets per field such that we know where
+ * we need to load the FSTs from
+ */
+ long pointer = output.getFilePointer();
+ output.writeVInt(fieldOffsets.size());
+ for (Map.Entry<String, Long> entry : fieldOffsets.entrySet()) {
+ output.writeString(entry.getKey());
+ output.writeVLong(entry.getValue());
+ }
+ output.writeLong(pointer);
+ } finally {
+ IOUtils.close(output);
+ }
+ }
+
+ @Override
+ public void write(Fields fields) throws IOException {
+ for (String field : fields) {
+ Terms terms = fields.terms(field);
+ if (terms == null) {
+ continue;
+ }
+ TermsEnum termsEnum = terms.iterator();
+ PostingsEnum docsEnum = null;
+ final SuggestPayload spare = new SuggestPayload();
+ int maxAnalyzedPathsForOneInput = 0;
+ final XAnalyzingSuggester.XBuilder builder = new XAnalyzingSuggester.XBuilder(maxSurfaceFormsPerAnalyzedForm, hasPayloads, XAnalyzingSuggester.PAYLOAD_SEP);
+ int docCount = 0;
+ while (true) {
+ BytesRef term = termsEnum.next();
+ if (term == null) {
+ break;
+ }
+ docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS);
+ builder.startTerm(term);
+ int docFreq = 0;
+ while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ for (int i = 0; i < docsEnum.freq(); i++) {
+ final int position = docsEnum.nextPosition();
+ AnalyzingCompletionLookupProviderV1.this.parsePayload(docsEnum.getPayload(), spare);
+ builder.addSurface(spare.surfaceForm.get(), spare.payload.get(), spare.weight);
+ // multi fields have the same surface form so we sum up here
+ maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, position + 1);
+ }
+ docFreq++;
+ docCount = Math.max(docCount, docsEnum.docID() + 1);
+ }
+ builder.finishTerm(docFreq);
+ }
+ /*
+ * Here we are done processing the field and we can
+ * build the FST and write it to disk.
+ */
+ FST<Pair<Long, BytesRef>> build = builder.build();
+ assert build != null || docCount == 0 : "the FST is null but docCount is != 0 actual value: [" + docCount + "]";
+ /*
+ * it's possible that the FST is null if we have 2 segments that get merged
+ * and all docs that have a value in this field are deleted. This will cause
+ * a consumer to be created but it doesn't consume any values causing the FSTBuilder
+ * to return null.
+ */
+ if (build != null) {
+ fieldOffsets.put(field, output.getFilePointer());
+ build.save(output);
+ /* write some more meta-info */
+ output.writeVInt(maxAnalyzedPathsForOneInput);
+ output.writeVInt(maxSurfaceFormsPerAnalyzedForm);
+ output.writeInt(maxGraphExpansions); // can be negative
+ int options = 0;
+ options |= preserveSep ? SERIALIZE_PRESERVE_SEPARATORS : 0;
+ options |= hasPayloads ? SERIALIZE_HAS_PAYLOADS : 0;
+ options |= preservePositionIncrements ? SERIALIZE_PRESERVE_POSITION_INCREMENTS : 0;
+ output.writeVInt(options);
+ }
+ }
+ }
+ };
+ }
+
+ @Override
+ public LookupFactory load(IndexInput input) throws IOException {
+ CodecUtil.checkHeader(input, CODEC_NAME, CODEC_VERSION, CODEC_VERSION);
+ final Map<String, AnalyzingSuggestHolder> lookupMap = new HashMap<>();
+ input.seek(input.length() - 8);
+ long metaPointer = input.readLong();
+ input.seek(metaPointer);
+ int numFields = input.readVInt();
+
+ Map<Long, String> meta = new TreeMap<>();
+ for (int i = 0; i < numFields; i++) {
+ String name = input.readString();
+ long offset = input.readVLong();
+ meta.put(offset, name);
+ }
+ long sizeInBytes = 0;
+ for (Map.Entry<Long, String> entry : meta.entrySet()) {
+ input.seek(entry.getKey());
+ FST<Pair<Long, BytesRef>> fst = new FST<>(input, new PairOutputs<>(
+ PositiveIntOutputs.getSingleton(), ByteSequenceOutputs.getSingleton()));
+ int maxAnalyzedPathsForOneInput = input.readVInt();
+ int maxSurfaceFormsPerAnalyzedForm = input.readVInt();
+ int maxGraphExpansions = input.readInt();
+ int options = input.readVInt();
+ boolean preserveSep = (options & SERIALIZE_PRESERVE_SEPARATORS) != 0;
+ boolean hasPayloads = (options & SERIALIZE_HAS_PAYLOADS) != 0;
+ boolean preservePositionIncrements = (options & SERIALIZE_PRESERVE_POSITION_INCREMENTS) != 0;
+ sizeInBytes += fst.ramBytesUsed();
+ lookupMap.put(entry.getValue(), new AnalyzingSuggestHolder(preserveSep, preservePositionIncrements, maxSurfaceFormsPerAnalyzedForm, maxGraphExpansions,
+ hasPayloads, maxAnalyzedPathsForOneInput, fst));
+ }
+ final long ramBytesUsed = sizeInBytes;
+ return new LookupFactory() {
+ @Override
+ public Lookup getLookup(CompletionFieldMapper mapper, CompletionSuggestionContext suggestionContext) {
+ AnalyzingSuggestHolder analyzingSuggestHolder = lookupMap.get(mapper.fieldType().names().indexName());
+ if (analyzingSuggestHolder == null) {
+ return null;
+ }
+ int flags = analyzingSuggestHolder.getPreserveSeparator() ? XAnalyzingSuggester.PRESERVE_SEP : 0;
+
+ final Automaton queryPrefix = mapper.requiresContext() ? ContextQuery.toAutomaton(analyzingSuggestHolder.getPreserveSeparator(), suggestionContext.getContextQueries()) : null;
+
+ XAnalyzingSuggester suggester;
+ if (suggestionContext.isFuzzy()) {
+ suggester = new XFuzzySuggester(mapper.fieldType().indexAnalyzer(), queryPrefix, mapper.fieldType().searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ suggestionContext.getFuzzyEditDistance(), suggestionContext.isFuzzyTranspositions(),
+ suggestionContext.getFuzzyPrefixLength(), suggestionContext.getFuzzyMinLength(), false,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+ } else {
+ suggester = new XAnalyzingSuggester(mapper.fieldType().indexAnalyzer(), queryPrefix, mapper.fieldType().searchAnalyzer(), flags,
+ analyzingSuggestHolder.maxSurfaceFormsPerAnalyzedForm, analyzingSuggestHolder.maxGraphExpansions,
+ analyzingSuggestHolder.preservePositionIncrements,
+ analyzingSuggestHolder.fst, analyzingSuggestHolder.hasPayloads,
+ analyzingSuggestHolder.maxAnalyzedPathsForOneInput, SEP_LABEL, PAYLOAD_SEP, END_BYTE, HOLE_CHARACTER);
+ }
+ return suggester;
+ }
+
+ @Override
+ public CompletionStats stats(String... fields) {
+ long sizeInBytes = 0;
+ ObjectLongHashMap<String> completionFields = null;
+ if (fields != null && fields.length > 0) {
+ completionFields = new ObjectLongHashMap<>(fields.length);
+ }
+
+ for (Map.Entry<String, AnalyzingSuggestHolder> entry : lookupMap.entrySet()) {
+ sizeInBytes += entry.getValue().fst.ramBytesUsed();
+ if (fields == null || fields.length == 0) {
+ continue;
+ }
+ for (String field : fields) {
+ // support for getting fields by regex as in fielddata
+ if (Regex.simpleMatch(field, entry.getKey())) {
+ long fstSize = entry.getValue().fst.ramBytesUsed();
+ completionFields.addTo(field, fstSize);
+ }
+ }
+ }
+
+ return new CompletionStats(sizeInBytes, completionFields);
+ }
+
+ @Override
+ AnalyzingSuggestHolder getAnalyzingSuggestHolder(CompletionFieldMapper mapper) {
+ return lookupMap.get(mapper.fieldType().names().indexName());
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return ramBytesUsed;
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Accountables.namedAccountables("field", lookupMap);
+ }
+ };
+ }
+
+ /*
+ // might be re-added when we change the current impl, right now not needed
+ static class AnalyzingSuggestHolder {
+ final boolean preserveSep;
+ final boolean preservePositionIncrements;
+ final int maxSurfaceFormsPerAnalyzedForm;
+ final int maxGraphExpansions;
+ final boolean hasPayloads;
+ final int maxAnalyzedPathsForOneInput;
+ final FST<Pair<Long, BytesRef>> fst;
+
+ public AnalyzingSuggestHolder(boolean preserveSep, boolean preservePositionIncrements, int maxSurfaceFormsPerAnalyzedForm, int maxGraphExpansions,
+ boolean hasPayloads, int maxAnalyzedPathsForOneInput, FST<Pair<Long, BytesRef>> fst) {
+ this.preserveSep = preserveSep;
+ this.preservePositionIncrements = preservePositionIncrements;
+ this.maxSurfaceFormsPerAnalyzedForm = maxSurfaceFormsPerAnalyzedForm;
+ this.maxGraphExpansions = maxGraphExpansions;
+ this.hasPayloads = hasPayloads;
+ this.maxAnalyzedPathsForOneInput = maxAnalyzedPathsForOneInput;
+ this.fst = fst;
+ }
+
+ }
+ */
+
+ @Override
+ public Set<IntsRef> toFiniteStrings(TokenStream stream) throws IOException {
+ return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream);
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
new file mode 100644
index 0000000000..d77d6308b6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java
@@ -0,0 +1,545 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.suggest.completion;
+
+import com.google.common.collect.Lists;
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.FilterCodec;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.suggest.InputIterator;
+import org.apache.lucene.search.suggest.Lookup;
+import org.apache.lucene.search.suggest.Lookup.LookupResult;
+import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
+import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LineFileDocs;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.index.codec.postingsformat.Elasticsearch090PostingsFormat;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MappedFieldType.Names;
+import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.search.suggest.SuggestUtils;
+import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat.LookupFactory;
+import org.elasticsearch.search.suggest.context.ContextMapping;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
+
+ Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
+ static final MappedFieldType FIELD_TYPE = CompletionFieldMapper.Defaults.FIELD_TYPE.clone();
+ static final NamedAnalyzer analyzer = new NamedAnalyzer("foo", new StandardAnalyzer());
+ static {
+ FIELD_TYPE.setNames(new Names("foo"));
+ FIELD_TYPE.setIndexAnalyzer(analyzer);
+ FIELD_TYPE.setSearchAnalyzer(analyzer);
+ FIELD_TYPE.freeze();
+ }
+
+ @Test
+ public void testCompletionPostingsFormat() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ List<Completion090PostingsFormat.CompletionLookupProvider> providers = Lists.newArrayList(providerV1, currentProvider);
+
+ Completion090PostingsFormat.CompletionLookupProvider randomProvider = providers.get(getRandom().nextInt(providers.size()));
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, randomProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormat format = PostingsFormat.forName(Lucene.LATEST_POSTINGS_FORMAT);
+ Lookup lookup = load.getLookup(new CompletionFieldMapper(FIELD_TYPE, format, true, true, true, Integer.MAX_VALUE, indexSettings, AbstractFieldMapper.MultiFields.empty(), null, ContextMapping.EMPTY_MAPPING), new CompletionSuggestionContext(null));
+ List<LookupResult> result = lookup.lookup("ge", false, 10);
+ assertThat(result.get(0).key.toString(), equalTo("Generator - Foo Fighters"));
+ assertThat(result.get(0).payload.utf8ToString(), equalTo("id:10"));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderBackwardCompatibilityForVersion1() throws IOException {
+ AnalyzingCompletionLookupProviderV1 providerV1 = new AnalyzingCompletionLookupProviderV1(true, false, true, true);
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, providerV1);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormat format = new Elasticsearch090PostingsFormat();
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(FIELD_TYPE, format, true, true, true, Integer.MAX_VALUE, indexSettings, AbstractFieldMapper.MultiFields.empty(), null, ContextMapping.EMPTY_MAPPING));
+ assertThat(analyzingSuggestHolder.sepLabel, is(AnalyzingCompletionLookupProviderV1.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(AnalyzingCompletionLookupProviderV1.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(AnalyzingCompletionLookupProviderV1.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testProviderVersion2() throws IOException {
+ AnalyzingCompletionLookupProvider currentProvider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+
+ RAMDirectory dir = new RAMDirectory();
+ writeData(dir, currentProvider);
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = currentProvider.load(input);
+ PostingsFormat format = new Elasticsearch090PostingsFormat();
+ AnalyzingCompletionLookupProvider.AnalyzingSuggestHolder analyzingSuggestHolder = load.getAnalyzingSuggestHolder(new CompletionFieldMapper(FIELD_TYPE, format, true, true, true, Integer.MAX_VALUE, indexSettings, AbstractFieldMapper.MultiFields.empty(), null, ContextMapping.EMPTY_MAPPING));
+ assertThat(analyzingSuggestHolder.sepLabel, is(XAnalyzingSuggester.SEP_LABEL));
+ assertThat(analyzingSuggestHolder.payloadSep, is(XAnalyzingSuggester.PAYLOAD_SEP));
+ assertThat(analyzingSuggestHolder.endByte, is(XAnalyzingSuggester.END_BYTE));
+ dir.close();
+ }
+
+ @Test
+ public void testDuellCompletions() throws IOException, NoSuchFieldException, SecurityException, IllegalArgumentException,
+ IllegalAccessException {
+ final boolean preserveSeparators = getRandom().nextBoolean();
+ final boolean preservePositionIncrements = getRandom().nextBoolean();
+ final boolean usePayloads = getRandom().nextBoolean();
+ final int options = preserveSeparators ? AnalyzingSuggester.PRESERVE_SEP : 0;
+
+ XAnalyzingSuggester reference = new XAnalyzingSuggester(new StandardAnalyzer(), null, new StandardAnalyzer(),
+ options, 256, -1, preservePositionIncrements, null, false, 1, XAnalyzingSuggester.SEP_LABEL, XAnalyzingSuggester.PAYLOAD_SEP, XAnalyzingSuggester.END_BYTE, XAnalyzingSuggester.HOLE_CHARACTER);
+ LineFileDocs docs = new LineFileDocs(getRandom());
+ int num = scaledRandomIntBetween(150, 300);
+ final String[] titles = new String[num];
+ final long[] weights = new long[num];
+ for (int i = 0; i < titles.length; i++) {
+ Document nextDoc = docs.nextDoc();
+ IndexableField field = nextDoc.getField("title");
+ titles[i] = field.stringValue();
+ weights[i] = between(0, 100);
+
+ }
+ docs.close();
+ final InputIterator primaryIter = new InputIterator() {
+ int index = 0;
+ long currentWeight = -1;
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (index < titles.length) {
+ currentWeight = weights[index];
+ return new BytesRef(titles[index++]);
+ }
+ return null;
+ }
+
+ @Override
+ public long weight() {
+ return currentWeight;
+ }
+
+ @Override
+ public BytesRef payload() {
+ return null;
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return false;
+ }
+
+ @Override
+ public Set<BytesRef> contexts() {
+ return null;
+ }
+
+ @Override
+ public boolean hasContexts() {
+ return false;
+ }
+
+ };
+ InputIterator iter;
+ if (usePayloads) {
+ iter = new InputIterator() {
+ @Override
+ public long weight() {
+ return primaryIter.weight();
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ return primaryIter.next();
+ }
+
+ @Override
+ public BytesRef payload() {
+ return new BytesRef(Long.toString(weight()));
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ return true;
+ }
+
+ @Override
+ public Set<BytesRef> contexts() {
+ return null;
+ }
+
+ @Override
+ public boolean hasContexts() {
+ return false;
+ }
+ };
+ } else {
+ iter = primaryIter;
+ }
+ reference.build(iter);
+ PostingsFormat provider = PostingsFormat.forName(Lucene.LATEST_POSTINGS_FORMAT);
+
+ final CompletionFieldMapper mapper = new CompletionFieldMapper(FIELD_TYPE, provider, usePayloads,
+ preserveSeparators, preservePositionIncrements, Integer.MAX_VALUE, indexSettings, AbstractFieldMapper.MultiFields.empty(), null, ContextMapping.EMPTY_MAPPING);
+ Lookup buildAnalyzingLookup = buildAnalyzingLookup(mapper, titles, titles, weights);
+ Field field = buildAnalyzingLookup.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ field.setAccessible(true);
+ Field refField = reference.getClass().getDeclaredField("maxAnalyzedPathsForOneInput");
+ refField.setAccessible(true);
+ assertThat(refField.get(reference), equalTo(field.get(buildAnalyzingLookup)));
+
+ for (int i = 0; i < titles.length; i++) {
+ int res = between(1, 10);
+ final StringBuilder builder = new StringBuilder();
+ SuggestUtils.analyze(analyzer.tokenStream("foo", titles[i]), new SuggestUtils.TokenConsumer() {
+ @Override
+ public void nextToken() throws IOException {
+ if (builder.length() == 0) {
+ builder.append(this.charTermAttr.toString());
+ }
+ }
+ });
+ String firstTerm = builder.toString();
+ String prefix = firstTerm.isEmpty() ? "" : firstTerm.substring(0, between(1, firstTerm.length()));
+ List<LookupResult> refLookup = reference.lookup(prefix, false, res);
+ List<LookupResult> lookup = buildAnalyzingLookup.lookup(prefix, false, res);
+ assertThat(refLookup.toString(),lookup.size(), equalTo(refLookup.size()));
+ for (int j = 0; j < refLookup.size(); j++) {
+ assertThat(lookup.get(j).key, equalTo(refLookup.get(j).key));
+ assertThat("prefix: " + prefix + " " + j + " -- missmatch cost: " + lookup.get(j).key + " - " + lookup.get(j).value + " | " + refLookup.get(j).key + " - " + refLookup.get(j).value ,
+ lookup.get(j).value, equalTo(refLookup.get(j).value));
+ assertThat(lookup.get(j).payload, equalTo(refLookup.get(j).payload));
+ if (usePayloads) {
+ assertThat(lookup.get(j).payload.utf8ToString(), equalTo(Long.toString(lookup.get(j).value)));
+ }
+ }
+ }
+ }
+
+ public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces, long[] weights)
+ throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ FilterCodec filterCodec = new FilterCodec("filtered", Codec.getDefault()) {
+ @Override
+ public PostingsFormat postingsFormat() {
+ final PostingsFormat in = super.postingsFormat();
+ return mapper.postingsFormat(in);
+ }
+ };
+ IndexWriterConfig indexWriterConfig = new IndexWriterConfig(mapper.fieldType().indexAnalyzer());
+
+ indexWriterConfig.setCodec(filterCodec);
+ IndexWriter writer = new IndexWriter(dir, indexWriterConfig);
+ for (int i = 0; i < weights.length; i++) {
+ Document doc = new Document();
+ BytesRef payload = mapper.buildPayload(new BytesRef(surfaces[i]), weights[i], new BytesRef(Long.toString(weights[i])));
+ doc.add(mapper.getCompletionField(ContextMapping.EMPTY_CONTEXT, terms[i], payload));
+ if (randomBoolean()) {
+ writer.commit();
+ }
+ writer.addDocument(doc);
+ }
+ writer.commit();
+ writer.forceMerge(1, true);
+ writer.commit();
+ DirectoryReader reader = DirectoryReader.open(writer, true);
+ assertThat(reader.leaves().size(), equalTo(1));
+ assertThat(reader.leaves().get(0).reader().numDocs(), equalTo(weights.length));
+ LeafReaderContext atomicReaderContext = reader.leaves().get(0);
+ Terms luceneTerms = atomicReaderContext.reader().terms(mapper.fieldType().names().fullName());
+ Lookup lookup = ((Completion090PostingsFormat.CompletionTerms) luceneTerms).getLookup(mapper, new CompletionSuggestionContext(null));
+ reader.close();
+ writer.close();
+ dir.close();
+ return lookup;
+ }
+ @Test
+ public void testNoDocs() throws IOException {
+ AnalyzingCompletionLookupProvider provider = new AnalyzingCompletionLookupProvider(true, false, true, true);
+ RAMDirectory dir = new RAMDirectory();
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ consumer.write(new Fields() {
+ @Override
+ public Iterator<String> iterator() {
+ return Arrays.asList("foo").iterator();
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return 1;
+ }
+ });
+ consumer.close();
+ output.close();
+
+ IndexInput input = dir.openInput("foo.txt", IOContext.DEFAULT);
+ LookupFactory load = provider.load(input);
+ PostingsFormat format = new Elasticsearch090PostingsFormat();
+ assertNull(load.getLookup(new CompletionFieldMapper(FIELD_TYPE, format, true, true, true, Integer.MAX_VALUE, indexSettings, AbstractFieldMapper.MultiFields.empty(), null, ContextMapping.EMPTY_MAPPING), new CompletionSuggestionContext(null)));
+ dir.close();
+ }
+
+ // TODO ADD more unittests
+ private void writeData(Directory dir, Completion090PostingsFormat.CompletionLookupProvider provider) throws IOException {
+ IndexOutput output = dir.createOutput("foo.txt", IOContext.DEFAULT);
+ FieldsConsumer consumer = provider.consumer(output);
+ final List<TermPosAndPayload> terms = new ArrayList<>();
+ terms.add(new TermPosAndPayload("foofightersgenerator", 256 - 2, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10"))));
+ terms.add(new TermPosAndPayload("generator", 256 - 1, provider.buildPayload(new BytesRef("Generator - Foo Fighters"), 9, new BytesRef("id:10"))));
+ Fields fields = new Fields() {
+ @Override
+ public Iterator<String> iterator() {
+ return Arrays.asList("foo").iterator();
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ if (field.equals("foo")) {
+ return new Terms() {
+ @Override
+ public TermsEnum iterator() throws IOException {
+ final Iterator<TermPosAndPayload> iterator = terms.iterator();
+ return new TermsEnum() {
+ private TermPosAndPayload current = null;
+ @Override
+ public SeekStatus seekCeil(BytesRef text) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void seekExact(long ord) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef term() throws IOException {
+ return current == null ? null : current.term;
+ }
+
+ @Override
+ public long ord() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int docFreq() throws IOException {
+ return current == null ? 0 : 1;
+ }
+
+ @Override
+ public long totalTermFreq() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ final TermPosAndPayload data = current;
+ return new PostingsEnum() {
+ boolean done = false;
+ @Override
+ public int nextPosition() throws IOException {
+ return current.pos;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return current.payload;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 1;
+ }
+
+ @Override
+ public int docID() {
+ if (done) {
+ return NO_MORE_DOCS;
+ }
+ return 0;
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ if (done) {
+ return NO_MORE_DOCS;
+ }
+ done = true;
+ return 0;
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ if (done) {
+ return NO_MORE_DOCS;
+ }
+ done = true;
+ return 0;
+ }
+
+ @Override
+ public long cost() {
+ return 0;
+ }
+ };
+ }
+
+ @Override
+ public BytesRef next() throws IOException {
+ if (iterator.hasNext()) {
+ current = iterator.next();
+ return current.term;
+ }
+ current = null;
+ return null;
+ }
+ };
+ }
+
+ @Override
+ public long size() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getSumTotalTermFreq() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long getSumDocFreq() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getDocCount() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasFreqs() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasOffsets() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasPositions() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasPayloads() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return 0;
+ }
+ };
+ consumer.write(fields);
+ consumer.close();
+ output.close();
+
+ }
+
+ private static class TermPosAndPayload {
+ final BytesRef term;
+ final int pos;
+ final BytesRef payload;
+
+
+ private TermPosAndPayload(String term, int pos, BytesRef payload) {
+ this.term = new BytesRef(term);
+ this.pos = pos;
+ this.payload = payload;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/context/GeoLocationContextMappingTest.java b/core/src/test/java/org/elasticsearch/search/suggest/context/GeoLocationContextMappingTest.java
new file mode 100644
index 0000000000..1f6bd5eea7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/context/GeoLocationContextMappingTest.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.context;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoHashUtils;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.suggest.context.ContextMapping.ContextConfig;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+/**
+ *
+ */
+public class GeoLocationContextMappingTest extends ElasticsearchTestCase {
+
+ @Test
+ public void testThatParsingGeoPointsWorksWithCoercion() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", "52").field("lon", "4").endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", 12);
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseQuery("foo", parser);
+ }
+
+
+ @Test
+ public void testUseWithDefaultGeoHash() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", 52d).field("lon", 4d).endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+
+ String geohash = GeoHashUtils.encode(randomIntBetween(-90, +90), randomIntBetween(-180, +180));
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", 12);
+ config.put("default", geohash);
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseQuery("foo", parser);
+ }
+
+ @Test
+ public void testUseWithDefaultLatLon() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", 52d).field("lon", 4d).endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", 12);
+ HashMap<String, Double> pointAsMap = new HashMap<>();
+ pointAsMap.put("lat", 51d);
+ pointAsMap.put("lon", 0d);
+ config.put("default", pointAsMap);
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseQuery("foo", parser);
+ }
+
+ @Test
+ public void testUseWithDefaultBadLatLon() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", 52d).field("lon", 4d).endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", 12);
+ HashMap<String, Double> pointAsMap = new HashMap<>();
+ pointAsMap.put("latitude", 51d); // invalid field names
+ pointAsMap.put("longitude", 0d); // invalid field names
+ config.put("default", pointAsMap);
+ ElasticsearchParseException expected = null;
+ try {
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseQuery("foo", parser);
+
+ } catch (ElasticsearchParseException e) {
+ expected = e;
+ }
+ assertNotNull(expected);
+ }
+
+ @Test
+ public void testUseWithMultiplePrecisions() throws Exception {
+ XContentBuilder builder = jsonBuilder().startObject().field("lat", 52d).field("lon", 4d).endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken();
+
+ HashMap<String, Object> config = new HashMap<>();
+ int numElements = randomIntBetween(1, 12);
+ ArrayList<Integer> precisions = new ArrayList<>();
+ for (int i = 0; i < numElements; i++) {
+ precisions.add(randomIntBetween(1, 12));
+ }
+ config.put("precision", precisions);
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseQuery("foo", parser);
+ }
+
+ @Test
+ public void testHashcode() throws Exception {
+ HashMap<String, Object> config = new HashMap<>();
+ if (randomBoolean()) {
+ config.put("precision", Arrays.asList(1, 2, 3, 4));
+ } else {
+ config.put("precision", randomIntBetween(1, 12));
+ }
+ if (randomBoolean()) {
+ HashMap<String, Double> pointAsMap = new HashMap<>();
+ pointAsMap.put("lat", 51d);
+ pointAsMap.put("lon", 0d);
+ config.put("default", pointAsMap);
+ }
+ HashMap<String, Object> config2 = new HashMap<>(config);
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ GeolocationContextMapping mapping2 = GeolocationContextMapping.load("foo", config2);
+
+ assertEquals(mapping, mapping2);
+ assertEquals(mapping.hashCode(), mapping2.hashCode());
+ }
+
+ @Test
+ public void testUseWithBadGeoContext() throws Exception {
+ double lon = 4d;
+ String badLat = "W";
+ XContentBuilder builder = jsonBuilder().startObject().startArray("location").value(4d).value(badLat).endArray().endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken(); // start of object
+ parser.nextToken(); // "location" field name
+ parser.nextToken(); // array
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", randomIntBetween(1, 12));
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ ElasticsearchParseException expected = null;
+ try {
+ ContextConfig geoconfig = mapping.parseContext(null, parser);
+ } catch (ElasticsearchParseException e) {
+ expected = e;
+ }
+ assertNotNull(expected);
+ }
+
+ @Test
+ public void testUseWithLonLatGeoContext() throws Exception {
+ double lon = 4d;
+ double lat = 52d;
+ XContentBuilder builder = jsonBuilder().startObject().startArray("location").value(lon).value(lat).endArray().endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken(); // start of object
+ parser.nextToken(); // "location" field name
+ parser.nextToken(); // array
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", randomIntBetween(1, 12));
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ mapping.parseContext(null, parser);
+ }
+
+ public void testUseWithMultiGeoHashGeoContext() throws Exception {
+ String geohash1 = GeoHashUtils.encode(randomIntBetween(-90, +90), randomIntBetween(-180, +180));
+ String geohash2 = GeoHashUtils.encode(randomIntBetween(-90, +90), randomIntBetween(-180, +180));
+ XContentBuilder builder = jsonBuilder().startObject().startArray("location").value(geohash1).value(geohash2).endArray().endObject();
+ XContentParser parser = XContentHelper.createParser(builder.bytes());
+ parser.nextToken(); // start of object
+ parser.nextToken(); // "location" field name
+ parser.nextToken(); // array
+
+ HashMap<String, Object> config = new HashMap<>();
+ config.put("precision", randomIntBetween(1, 12));
+ GeolocationContextMapping mapping = GeolocationContextMapping.load("foo", config);
+ ContextConfig parsedContext = mapping.parseContext(null, parser);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
new file mode 100644
index 0000000000..c4d4b48e28
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/suggest/phrase/NoisyChannelSpellCheckerTests.java
@@ -0,0 +1,401 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.suggest.phrase;
+
+import com.google.common.base.Charsets;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.LowerCaseFilter;
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
+import org.apache.lucene.analysis.shingle.ShingleFilter;
+import org.apache.lucene.analysis.standard.StandardTokenizer;
+import org.apache.lucene.analysis.synonym.SolrSynonymParser;
+import org.apache.lucene.analysis.synonym.SynonymFilter;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.search.spell.DirectSpellChecker;
+import org.apache.lucene.search.spell.SuggestMode;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.Result;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+// Unit tests for NoisyChannelSpellChecker. Each test builds a small in-memory Lucene
+// index (RAMDirectory) from the /config/names.txt resource, with a "body" field for
+// candidate generation and a "body_ngram" shingle field for language-model scoring,
+// then checks the corrected phrases and their <em>...</em> highlighting.
+public class NoisyChannelSpellCheckerTests extends ElasticsearchTestCase{
+ // shared separators/markup used when joining corrections back into a phrase
+ private final BytesRef space = new BytesRef(" ");
+ private final BytesRef preTag = new BytesRef("<em>");
+ private final BytesRef postTag = new BytesRef("</em>");
+
+ // Laplace-smoothed bigram scorer over single-field candidates, including
+ // confidence/cutoff behaviour, highlighting corner cases, and a synonym analyzer.
+ @Test
+ public void testMarvelHeros() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<>();
+ // shingles of size 2-3 (no unigrams) feed the n-gram language model
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t, new LowerCaseFilter(t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ // NOTE(review): reader/writer/ir are never closed in this test — a resource leak,
+ // though tolerable for a short-lived in-memory test index.
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Result result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2);
+ Correction[] corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ace"));
+ // only the changed token is wrapped in highlight tags
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american <em>ace</em>"));
+ assertThat(result.cutoffScore, greaterThan(0d));
+
+ // with maxErrors 0 the input comes back unchanged and unhighlighted
+ result = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 0, 1);
+ corrections = result.corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("american ame"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("american ame"));
+ assertThat(result.cutoffScore, equalTo(Double.MIN_VALUE));
+
+ // lower real-word likelihood (0.85) admits multiple ranked candidates
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the <em>god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor the <em>god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn</em> the <em>god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr</em> the got jewel"));
+
+ // same query with maxErrors 1 produces the same ranking
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xorr the got jewel"));
+
+ // Test some of the highlighting corner cases
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor teh Got-Jewel"), generator, 4f, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(space).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(space).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(space).utf8ToString(), equalTo("xor teh god jewel"));
+ // adjacent changed tokens are merged under a single highlight span
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorr the god</em> jewel"));
+ assertThat(corrections[1].join(space, preTag, postTag).utf8ToString(), equalTo("xor <em>the god</em> jewel"));
+ assertThat(corrections[2].join(space, preTag, postTag).utf8ToString(), equalTo("<em>xorn the god</em> jewel"));
+ assertThat(corrections[3].join(space, preTag, postTag).utf8ToString(), equalTo("xor teh <em>god</em> jewel"));
+
+ // test synonyms
+
+ // query-side analyzer expanding usa/ursa to {usa, america, american}
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ TokenFilter filter = new LowerCaseFilter(t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
+ ((SolrSynonymParser) parser).parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.85);
+ wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5f);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(space).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("<em>captain america</em>"));
+
+ // Make sure that user supplied text is not marked as highlighted in the presence of a synonym filter
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.85, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captain usw"), generator, 2, 4, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+ assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
+ }
+
+ // Combines a forward generator with a reversed-field generator via
+ // MultiCandidateGeneratorWrapper, so suffix typos ("cae" -> "ace") become
+ // reachable; the forward generator alone cannot correct them (asserted below).
+ @Test
+ public void testMarvelHerosMultiGenerator() throws IOException {
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t, new LowerCaseFilter(t));
+ }
+
+ });
+ // extra field indexing each term reversed, used by the reverse generator
+ mapping.put("body_reverse", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t, new ReverseStringFilter(new LowerCaseFilter(t)));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_reverse", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ LaplaceScorer wordScorer = new LaplaceScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5f);
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator forward = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10);
+ DirectCandidateGenerator reverse = new DirectCandidateGenerator(spellchecker, "body_reverse", SuggestMode.SUGGEST_ALWAYS, ir, 0.95, 10, wrapper, wrapper, MultiFields.getTerms(ir, "body_reverse"));
+ CandidateGenerator generator = new MultiCandidateGeneratorWrapper(10, forward, reverse);
+
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ generator = new MultiCandidateGeneratorWrapper(5, forward, reverse);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american cae"), forward, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(0)); // only use forward with constant prefix
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("america cae"), generator, 2, 1, ir, "body", wordScorer, 1, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 2).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("zorr the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("four the god jewel"));
+
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Zorr the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 1.5f, 2).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+ }
+
+ // Same corpus scored with trigram models: linear interpolation and, at the end,
+ // stupid backoff. NOTE(review): "LinearInterpoatingScorer" [sic] is used as spelled;
+ // presumably the production class is declared with this misspelling — do not "fix"
+ // the name here without renaming the class itself.
+ @Test
+ public void testMarvelHerosTrigram() throws IOException {
+
+
+ RAMDirectory dir = new RAMDirectory();
+ Map<String, Analyzer> mapping = new HashMap<>();
+ mapping.put("body_ngram", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ ShingleFilter tf = new ShingleFilter(t, 2, 3);
+ tf.setOutputUnigrams(false);
+ return new TokenStreamComponents(t, new LowerCaseFilter(tf));
+ }
+
+ });
+
+ mapping.put("body", new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ return new TokenStreamComponents(t, new LowerCaseFilter(t));
+ }
+
+ });
+ PerFieldAnalyzerWrapper wrapper = new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(), mapping);
+
+ IndexWriterConfig conf = new IndexWriterConfig(wrapper);
+ IndexWriter writer = new IndexWriter(dir, conf);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), Charsets.UTF_8));
+ String line = null;
+ while ((line = reader.readLine()) != null) {
+ Document doc = new Document();
+ doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
+ doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
+ writer.addDocument(doc);
+ }
+
+ DirectoryReader ir = DirectoryReader.open(writer, false);
+ WordScorer wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+
+ NoisyChannelSpellChecker suggester = new NoisyChannelSpellChecker();
+ DirectSpellChecker spellchecker = new DirectSpellChecker();
+ spellchecker.setMinQueryLength(1);
+ DirectCandidateGenerator generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 5);
+ Correction[] corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ace"));
+
+ // gramSize 1 yields no correction here
+ corrections = suggester.getCorrections(wrapper, new BytesRef("american ame"), generator, 1, 1, ir, "body", wordScorer, 1, 1).corrections;
+ assertThat(corrections.length, equalTo(0));
+// assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("american ape"));
+
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+
+
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections.length, equalTo(4));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ assertThat(corrections[2].join(new BytesRef(" ")).utf8ToString(), equalTo("xorn the god jewel"));
+ assertThat(corrections[3].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the got jewel"));
+
+
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 1, ir, "body", wordScorer, 100, 3).corrections;
+ assertThat(corrections.length, equalTo(1));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+
+
+ // test synonyms
+
+ Analyzer analyzer = new Analyzer() {
+
+ @Override
+ protected TokenStreamComponents createComponents(String fieldName) {
+ Tokenizer t = new StandardTokenizer();
+ TokenFilter filter = new LowerCaseFilter(t);
+ try {
+ SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
+ ((SolrSynonymParser) parser).parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+ filter = new SynonymFilter(filter, parser.build(), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ return new TokenStreamComponents(t, filter);
+ }
+ };
+
+ spellchecker.setAccuracy(0.0f);
+ spellchecker.setMinPrefix(1);
+ spellchecker.setMinQueryLength(1);
+ suggester = new NoisyChannelSpellChecker(0.95);
+ wordScorer = new LinearInterpoatingScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.95d, new BytesRef(" "), 0.5, 0.4, 0.1);
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usa"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+ generator = new DirectCandidateGenerator(spellchecker, "body", SuggestMode.SUGGEST_MORE_POPULAR, ir, 0.95, 10, null, analyzer, MultiFields.getTerms(ir, "body"));
+ corrections = suggester.getCorrections(analyzer, new BytesRef("captian usw"), generator, 2, 4, ir, "body", wordScorer, 1, 3).corrections;
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("captain america"));
+
+
+ // stupid-backoff variant of the trigram scorer over the same query
+ wordScorer = new StupidBackoffScorer(ir, MultiFields.getTerms(ir, "body_ngram"), "body_ngram", 0.85d, new BytesRef(" "), 0.4);
+ corrections = suggester.getCorrections(wrapper, new BytesRef("Xor the Got-Jewel"), generator, 0.5f, 2, ir, "body", wordScorer, 0, 3).corrections;
+ assertThat(corrections.length, equalTo(2));
+ assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));
+ assertThat(corrections[1].join(new BytesRef(" ")).utf8ToString(), equalTo("xor the god jewel"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java b/core/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
new file mode 100644
index 0000000000..3025752f46
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.timeout;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Integration test verifying that a search request with a very short timeout
+ * against a deliberately slow (scripted sleep) query reports
+ * {@code isTimedOut() == true} in its response.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE)
+public class SearchTimeoutTests extends ElasticsearchIntegrationTest {
+
+ // No extra node settings required; kept as an explicit extension point.
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Test
+ public void simpleTimeoutTest() throws Exception {
+ client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
+
+ // The script sleeps 500ms per evaluation while the request timeout is 10ms,
+ // so the search must report a timeout.
+ SearchResponse searchResponse = client().prepareSearch("test")
+ .setTimeout("10ms")
+ .setQuery(filteredQuery(matchAllQuery(), scriptQuery(new Script("Thread.sleep(500); return true;"))))
+ .execute().actionGet();
+ assertThat(searchResponse.isTimedOut(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
new file mode 100644
index 0000000000..8dd58e2307
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/similarity/SimilarityTests.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.similarity;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+// Integration test: an index-level custom BM25 similarity (k1=2.0, b=1.5) applied to
+// "field1" must score the same document/query differently than the default similarity
+// applied to the otherwise-identical "field2".
+public class SimilarityTests extends ElasticsearchIntegrationTest {
+
+
+ @Test
+ public void testCustomBM25Similarity() throws Exception {
+ // best-effort cleanup in case a previous run left the index behind
+ try {
+ client().admin().indices().prepareDelete("test").execute().actionGet();
+ } catch (Exception e) {
+ // ignore
+ }
+
+ // field1 uses the "custom" similarity defined in the index settings below;
+ // field2 uses the default similarity.
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject()
+ .startObject("type1")
+ .startObject("properties")
+ .startObject("field1")
+ .field("similarity", "custom")
+ .field("type", "string")
+ .endObject()
+ .startObject("field2")
+ .field("similarity", "default")
+ .field("type", "string")
+ .endObject()
+ .endObject()
+ .endObject())
+ .setSettings(Settings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .put("index.number_of_replicas", 0)
+ .put("similarity.custom.type", "BM25")
+ .put("similarity.custom.k1", 2.0f)
+ .put("similarity.custom.b", 1.5f)
+ ).execute().actionGet();
+
+ // identical content in both fields so any score difference comes from similarity
+ client().prepareIndex("test", "type1", "1").setSource("field1", "the quick brown fox jumped over the lazy dog",
+ "field2", "the quick brown fox jumped over the lazy dog")
+ .setRefresh(true).execute().actionGet();
+
+ SearchResponse bm25SearchResponse = client().prepareSearch().setQuery(matchQuery("field1", "quick brown fox")).execute().actionGet();
+ assertThat(bm25SearchResponse.getHits().totalHits(), equalTo(1l));
+ float bm25Score = bm25SearchResponse.getHits().hits()[0].score();
+
+ SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet();
+ assertThat(defaultSearchResponse.getHits().totalHits(), equalTo(1l));
+ float defaultScore = defaultSearchResponse.getHits().hits()[0].score();
+
+ assertThat(bm25Score, not(equalTo(defaultScore)));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
new file mode 100644
index 0000000000..a6a401b4a4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotTests.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepository;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+
+/**
+ */
+@Ignore
+public abstract class AbstractSnapshotTests extends ElasticsearchIntegrationTest {
+
+ public static long getFailureCount(String repository) {
+ long failureCount = 0;
+ for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ failureCount += mockRepository.getFailureCount();
+ }
+ return failureCount;
+ }
+
+ public static int numberOfFiles(Path dir) throws IOException {
+ final AtomicInteger count = new AtomicInteger();
+ Files.walkFileTree(dir, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ count.incrementAndGet();
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ return count.get();
+ }
+
+ public static void stopNode(final String node) throws IOException {
+ internalCluster().stopRandomNode(new Predicate<Settings>() {
+ @Override
+ public boolean apply(Settings settings) {
+ return settings.get("name").equals(node);
+ }
+ });
+ }
+
+ public void waitForBlock(String node, String repository, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, node);
+ MockRepository mockRepository = (MockRepository) repositoriesService.repository(repository);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ if (mockRepository.blocked()) {
+ return;
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ }
+
+ public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
+ long start = System.currentTimeMillis();
+ SnapshotId snapshotId = new SnapshotId(repository, snapshot);
+ while (System.currentTimeMillis() - start < timeout.millis()) {
+ List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
+ assertThat(snapshotInfos.size(), equalTo(1));
+ if (snapshotInfos.get(0).state().completed()) {
+ // Make sure that snapshot clean up operations are finished
+ ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
+ SnapshotMetaData snapshotMetaData = stateResponse.getState().getMetaData().custom(SnapshotMetaData.TYPE);
+ if (snapshotMetaData == null || snapshotMetaData.snapshot(snapshotId) == null) {
+ return snapshotInfos.get(0);
+ }
+ }
+ Thread.sleep(100);
+ }
+ fail("Timeout!!!");
+ return null;
+ }
+
+ public static String blockNodeWithIndex(String index) {
+ for(String node : internalCluster().nodesInclude(index)) {
+ ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
+ return node;
+ }
+ fail("No nodes for the index " + index + " found");
+ return null;
+ }
+
+ public static void unblockNode(String node) {
+ ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
+ }
+
+ protected void assertBusyPendingTasks(final String taskPrefix, final int expectedCount) throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ PendingClusterTasksResponse tasks = client().admin().cluster().preparePendingClusterTasks().get();
+ int count = 0;
+ for(PendingClusterTask task : tasks) {
+ if (task.getSource().toString().startsWith(taskPrefix)) {
+ count++;
+ }
+ }
+ assertThat(count, greaterThanOrEqualTo(expectedCount));
+ }
+ }, 1, TimeUnit.MINUTES);
+ }
+
+ /**
+ * Cluster state task that blocks waits for the blockOn task to show up and then blocks execution not letting
+ * any cluster state update task to be performed unless they have priority higher then passThroughPriority.
+ *
+ * This class is useful to testing of cluster state update task batching for lower priority tasks.
+ */
+ protected class BlockingClusterStateListener implements ClusterStateListener {
+
+ private final Predicate<ClusterChangedEvent> blockOn;
+ private final Predicate<ClusterChangedEvent> countOn;
+ private final ClusterService clusterService;
+ private final CountDownLatch latch;
+ private final Priority passThroughPriority;
+ private int count;
+ private boolean timedOut;
+ private final TimeValue timeout;
+ private long stopWaitingAt = -1;
+
+ public BlockingClusterStateListener(ClusterService clusterService, String blockOn, String countOn, Priority passThroughPriority) {
+ this(clusterService, blockOn, countOn, passThroughPriority, TimeValue.timeValueMinutes(1));
+ }
+
+ public BlockingClusterStateListener(ClusterService clusterService, final String blockOn, final String countOn, Priority passThroughPriority, TimeValue timeout) {
+ this.clusterService = clusterService;
+ this.blockOn = new Predicate<ClusterChangedEvent>() {
+ @Override
+ public boolean apply(ClusterChangedEvent clusterChangedEvent) {
+ return clusterChangedEvent.source().startsWith(blockOn);
+ }
+ };
+ this.countOn = new Predicate<ClusterChangedEvent>() {
+ @Override
+ public boolean apply(ClusterChangedEvent clusterChangedEvent) {
+ return clusterChangedEvent.source().startsWith(countOn);
+ }
+ };
+ this.latch = new CountDownLatch(1);
+ this.passThroughPriority = passThroughPriority;
+ this.timeout = timeout;
+
+ }
+
+ public void unblock() {
+ latch.countDown();
+ }
+
+ @Override
+ public void clusterChanged(ClusterChangedEvent event) {
+ if (blockOn.apply(event)) {
+ logger.info("blocking cluster state tasks on [{}]", event.source());
+ assert stopWaitingAt < 0; // Make sure we are the first time here
+ stopWaitingAt = System.currentTimeMillis() + timeout.getMillis();
+ addBlock();
+ }
+ if (countOn.apply(event)) {
+ count++;
+ }
+ }
+
+ private void addBlock() {
+ // We should block after this task - add blocking cluster state update task
+ clusterService.submitStateUpdateTask("test_block", passThroughPriority, new ClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ while(System.currentTimeMillis() < stopWaitingAt) {
+ for (PendingClusterTask task : clusterService.pendingTasks()) {
+ if (task.getSource().string().equals("test_block") == false && passThroughPriority.sameOrAfter(task.getPriority())) {
+ // There are other higher priority tasks in the queue and let them pass through and then set the block again
+ logger.info("passing through cluster state task {}", task.getSource());
+ addBlock();
+ return currentState;
+ }
+ }
+ try {
+ logger.info("waiting....");
+ if (latch.await(Math.min(100, timeout.millis()), TimeUnit.MILLISECONDS)){
+ // Done waiting - unblock
+ logger.info("unblocked");
+ return currentState;
+ }
+ logger.info("done waiting....");
+ } catch (InterruptedException ex) {
+ logger.info("interrupted....");
+ Thread.currentThread().interrupt();
+ return currentState;
+ }
+ }
+ timedOut = true;
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.warn("failed to execute [{}]", t, source);
+ }
+ });
+
+ }
+
+ public int count() {
+ return count;
+ }
+
+ public boolean timedOut() {
+ return timedOut;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000000..968d433790
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java
@@ -0,0 +1,1118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntSet;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.ListenableFuture;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
+import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
+import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask;
+import org.elasticsearch.cluster.metadata.MetaData.Custom;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestResponse;
+import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
+import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryPlugin;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.rest.FakeRestRequest;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
+public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Test
+ public void restorePersistentSettingsTest() throws Exception {
+ logger.info("--> start 2 nodes");
+ Settings nodeSettings = settingsBuilder()
+ .put("discovery.type", "zen")
+ .put("discovery.zen.ping_timeout", "200ms")
+ .put("discovery.initial_state_timeout", "500ms")
+ .build();
+ internalCluster().startNode(nodeSettings);
+ Client client = client();
+ String secondNode = internalCluster().startNode(nodeSettings);
+ logger.info("--> wait for the second node to join the cluster");
+ assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut(), equalTo(false));
+
+ int random = randomIntBetween(10, 42);
+
+ logger.info("--> set test persistent setting");
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(
+ Settings.settingsBuilder()
+ .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)
+ .put(IndicesTTLService.INDICES_TTL_INTERVAL, random, TimeUnit.MINUTES))
+ .execute().actionGet();
+
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis()));
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), equalTo(2));
+
+ logger.info("--> create repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> clean the test persistent setting");
+ client.admin().cluster().prepareUpdateSettings().setPersistentSettings(
+ Settings.settingsBuilder()
+ .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 1)
+ .put(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)))
+ .execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis()));
+
+ stopNode(secondNode);
+ assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut(), equalTo(false));
+
+ logger.info("--> restore snapshot");
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet();
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis()));
+
+ logger.info("--> ensure that zen discovery minimum master nodes wasn't restored");
+ assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState()
+ .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), not(equalTo(2)));
+ }
+
+ @Test
+ public void restoreCustomMetadata() throws Exception {
+ Path tempDir = randomRepoPath();
+
+ logger.info("--> start node");
+ internalCluster().startNode();
+ Client client = client();
+ createIndex("test-idx");
+ ensureYellow();
+ logger.info("--> add custom persistent metadata");
+ updateClusterState(new ClusterStateUpdater() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ ClusterState.Builder builder = ClusterState.builder(currentState);
+ MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
+ metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("before_snapshot_s"));
+ metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("before_snapshot_ns"));
+ metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("before_snapshot_s_gw"));
+ metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("before_snapshot_ns_gw"));
+ metadataBuilder.putCustom(SnapshotableGatewayNoApiMetadata.TYPE, new SnapshotableGatewayNoApiMetadata("before_snapshot_s_gw_noapi"));
+ builder.metaData(metadataBuilder);
+ return builder.build();
+ }
+ });
+
+ logger.info("--> create repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", tempDir)).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> start snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> change custom persistent metadata");
+ updateClusterState(new ClusterStateUpdater() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ ClusterState.Builder builder = ClusterState.builder(currentState);
+ MetaData.Builder metadataBuilder = MetaData.builder(currentState.metaData());
+ if (randomBoolean()) {
+ metadataBuilder.putCustom(SnapshottableMetadata.TYPE, new SnapshottableMetadata("after_snapshot_s"));
+ } else {
+ metadataBuilder.removeCustom(SnapshottableMetadata.TYPE);
+ }
+ metadataBuilder.putCustom(NonSnapshottableMetadata.TYPE, new NonSnapshottableMetadata("after_snapshot_ns"));
+ if (randomBoolean()) {
+ metadataBuilder.putCustom(SnapshottableGatewayMetadata.TYPE, new SnapshottableGatewayMetadata("after_snapshot_s_gw"));
+ } else {
+ metadataBuilder.removeCustom(SnapshottableGatewayMetadata.TYPE);
+ }
+ metadataBuilder.putCustom(NonSnapshottableGatewayMetadata.TYPE, new NonSnapshottableGatewayMetadata("after_snapshot_ns_gw"));
+ metadataBuilder.removeCustom(SnapshotableGatewayNoApiMetadata.TYPE);
+ builder.metaData(metadataBuilder);
+ return builder.build();
+ }
+ });
+
+ logger.info("--> delete repository");
+ assertAcked(client.admin().cluster().prepareDeleteRepository("test-repo"));
+
+ logger.info("--> create repository");
+ putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", tempDir)).execute().actionGet();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> restore snapshot");
+ client.admin().cluster().prepareRestoreSnapshot("test-repo-2", "test-snap").setRestoreGlobalState(true).setIndices("-*").setWaitForCompletion(true).execute().actionGet();
+
+ logger.info("--> make sure old repository wasn't restored");
+ assertThrows(client.admin().cluster().prepareGetRepositories("test-repo"), RepositoryMissingException.class);
+ assertThat(client.admin().cluster().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1));
+
+ logger.info("--> check that custom persistent metadata was restored");
+ ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+ logger.info("Cluster state: {}", clusterState);
+ MetaData metaData = clusterState.getMetaData();
+ assertThat(((SnapshottableMetadata) metaData.custom(SnapshottableMetadata.TYPE)).getData(), equalTo("before_snapshot_s"));
+ assertThat(((NonSnapshottableMetadata) metaData.custom(NonSnapshottableMetadata.TYPE)).getData(), equalTo("after_snapshot_ns"));
+ assertThat(((SnapshottableGatewayMetadata) metaData.custom(SnapshottableGatewayMetadata.TYPE)).getData(), equalTo("before_snapshot_s_gw"));
+ assertThat(((NonSnapshottableGatewayMetadata) metaData.custom(NonSnapshottableGatewayMetadata.TYPE)).getData(), equalTo("after_snapshot_ns_gw"));
+
+ logger.info("--> restart all nodes");
+ internalCluster().fullRestart();
+ ensureYellow();
+
+ logger.info("--> check that gateway-persistent custom metadata survived full cluster restart");
+ clusterState = client().admin().cluster().prepareState().get().getState();
+ logger.info("Cluster state: {}", clusterState);
+ metaData = clusterState.getMetaData();
+ assertThat(metaData.custom(SnapshottableMetadata.TYPE), nullValue());
+ assertThat(metaData.custom(NonSnapshottableMetadata.TYPE), nullValue());
+ assertThat(((SnapshottableGatewayMetadata) metaData.custom(SnapshottableGatewayMetadata.TYPE)).getData(), equalTo("before_snapshot_s_gw"));
+ assertThat(((NonSnapshottableGatewayMetadata) metaData.custom(NonSnapshottableGatewayMetadata.TYPE)).getData(), equalTo("after_snapshot_ns_gw"));
+ // Shouldn't be returned as part of API response
+ assertThat(metaData.custom(SnapshotableGatewayNoApiMetadata.TYPE), nullValue());
+ // But should still be in state
+ metaData = internalCluster().getInstance(ClusterService.class).state().metaData();
+ assertThat(((SnapshotableGatewayNoApiMetadata) metaData.custom(SnapshotableGatewayNoApiMetadata.TYPE)).getData(), equalTo("before_snapshot_s_gw_noapi"));
+ }
+
+ private void updateClusterState(final ClusterStateUpdater updater) throws InterruptedException {
+ final CountDownLatch countDownLatch = new CountDownLatch(1);
+ final ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
+ clusterService.submitStateUpdateTask("test", new ProcessedClusterStateUpdateTask() {
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ return updater.execute(currentState);
+ }
+
+ @Override
+ public void onFailure(String source, @Nullable Throwable t) {
+ countDownLatch.countDown();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ countDownLatch.countDown();
+ }
+ });
+ countDownLatch.await();
+ }
+
+ private static interface ClusterStateUpdater {
+ public ClusterState execute(ClusterState currentState) throws Exception;
+ }
+
+ @Test
+ public void snapshotDuringNodeShutdownTest() throws Exception {
+ logger.info("--> start 2 nodes");
+ Client client = client();
+
+ assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0)));
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> create repository");
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode);
+ unblockNode(blockedNode);
+
+ logger.info("--> stopping node [{}]", blockedNode);
+ stopNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(60));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+ }
+
+ @Test
+ public void snapshotWithStuckNodeTest() throws Exception {
+ logger.info("--> start 2 nodes");
+ ArrayList<String> nodes = newArrayList();
+ nodes.add(internalCluster().startNode());
+ nodes.add(internalCluster().startNode());
+ Client client = client();
+
+ assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", 2).put("number_of_replicas", 0)));
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> creating repository");
+ Path repo = randomRepoPath();
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repo)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+ // Remove it from the list of available nodes
+ nodes.remove(blockedNode);
+
+ int numberOfFilesBeforeSnapshot = numberOfFiles(repo);
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], aborting snapshot", blockedNode);
+
+ ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
+ // Make sure that abort makes some progress
+ Thread.sleep(100);
+ unblockNode(blockedNode);
+ logger.info("--> stopping node [{}]", blockedNode);
+ stopNode(blockedNode);
+ try {
+ DeleteSnapshotResponse deleteSnapshotResponse = deleteSnapshotResponseFuture.actionGet();
+ assertThat(deleteSnapshotResponse.isAcknowledged(), equalTo(true));
+ } catch (SnapshotMissingException ex) {
+ // When master node is closed during this test, it sometime manages to delete the snapshot files before
+ // completely stopping. In this case the retried delete snapshot operation on the new master can fail
+ // with SnapshotMissingException
+ }
+
+ logger.info("--> making sure that snapshot no longer exists");
+ assertThrows(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute(), SnapshotMissingException.class);
+ // Subtract index file from the count
+ assertThat("not all files were deleted during snapshot cancellation", numberOfFilesBeforeSnapshot, equalTo(numberOfFiles(repo) - 1));
+ logger.info("--> done");
+ }
+
+    /**
+     * Verifies snapshot and restore behavior when some of the snapshotted indices are not fully
+     * allocated: an index with all shards allocated, one with only some shards allocated (a data
+     * node is stopped), one with no allocatable shards at all, and a closed index. A default
+     * (non-partial) snapshot must be blocked/failed, a partial snapshot must complete with only
+     * the reachable shards, and restores must succeed/fail per index accordingly.
+     */
+    @Test
+    public void restoreIndexWithMissingShards() throws Exception {
+        logger.info("--> start 2 nodes");
+        internalCluster().startNode();
+        internalCluster().startNode();
+        cluster().wipeIndices("_all");
+
+        logger.info("--> create an index that will have some unallocated shards");
+        assertAcked(prepareCreate("test-idx-some", 2, settingsBuilder().put("number_of_shards", 6)
+                .put("number_of_replicas", 0)));
+        ensureGreen();
+
+        logger.info("--> indexing some data into test-idx-some");
+        for (int i = 0; i < 100; i++) {
+            index("test-idx-some", "doc", Integer.toString(i), "foo", "bar" + i);
+        }
+        refresh();
+        assertThat(client().prepareCount("test-idx-some").get().getCount(), equalTo(100L));
+
+        logger.info("--> shutdown one of the nodes");
+        internalCluster().stopRandomDataNode();
+        assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("<2").execute().actionGet().isTimedOut(), equalTo(false));
+
+        logger.info("--> create an index that will have all allocated shards");
+        assertAcked(prepareCreate("test-idx-all", 1, settingsBuilder().put("number_of_shards", 6)
+                .put("number_of_replicas", 0)));
+        ensureGreen("test-idx-all");
+
+        logger.info("--> create an index that will be closed");
+        assertAcked(prepareCreate("test-idx-closed", 1, settingsBuilder().put("number_of_shards", 4).put("number_of_replicas", 0)));
+        ensureGreen("test-idx-closed");
+
+        logger.info("--> indexing some data into test-idx-all");
+        for (int i = 0; i < 100; i++) {
+            index("test-idx-all", "doc", Integer.toString(i), "foo", "bar" + i);
+            index("test-idx-closed", "doc", Integer.toString(i), "foo", "bar" + i);
+        }
+        refresh();
+        assertThat(client().prepareCount("test-idx-all").get().getCount(), equalTo(100L));
+        assertAcked(client().admin().indices().prepareClose("test-idx-closed"));
+
+        // Allocation filter "nowhere" matches no node tag, so no shard of this index can be assigned.
+        logger.info("--> create an index that will have no allocated shards");
+        assertAcked(prepareCreate("test-idx-none", 1, settingsBuilder().put("number_of_shards", 6)
+                .put("index.routing.allocation.include.tag", "nowhere")
+                .put("number_of_replicas", 0)));
+
+        logger.info("--> creating repository");
+        PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
+        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+        logger.info("--> start snapshot with default settings and closed index - should be blocked");
+        assertBlocked(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
+                .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed")
+                .setWaitForCompletion(true), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+
+
+        logger.info("--> start snapshot with default settings without a closed index - should fail");
+        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
+                .setIndices("test-idx-all", "test-idx-none", "test-idx-some")
+                .setWaitForCompletion(true).execute().actionGet();
+        assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+        assertThat(createSnapshotResponse.getSnapshotInfo().reason(), containsString("Indices don't have primary shards"));
+
+        // Randomly verify partial-snapshot completion either via the status API or via wait_for_completion.
+        if (randomBoolean()) {
+            logger.info("checking snapshot completion using status");
+            client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2")
+                    .setIndices("test-idx-all", "test-idx-none", "test-idx-some")
+                    .setWaitForCompletion(false).setPartial(true).execute().actionGet();
+            awaitBusy(new Predicate<Object>() {
+                @Override
+                public boolean apply(Object o) {
+                    SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
+                    ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
+                    if (snapshotStatuses.size() == 1) {
+                        logger.trace("current snapshot status [{}]", snapshotStatuses.get(0));
+                        return snapshotStatuses.get(0).getState().completed();
+                    }
+                    return false;
+                }
+            });
+            SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get();
+            ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots();
+            assertThat(snapshotStatuses.size(), equalTo(1));
+            SnapshotStatus snapshotStatus = snapshotStatuses.get(0);
+            logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
+            // 18 shards total = 6 (all) + 6 (none) + 6 (some); only test-idx-all plus the reachable
+            // part of test-idx-some can complete, hence strictly between 6 and 12 done shards.
+            assertThat(snapshotStatus.getShardsStats().getTotalShards(), equalTo(18));
+            assertThat(snapshotStatus.getShardsStats().getDoneShards(), lessThan(12));
+            assertThat(snapshotStatus.getShardsStats().getDoneShards(), greaterThan(6));
+
+            // There is slight delay between snapshot being marked as completed in the cluster state and on the file system
+            // After it was marked as completed in the cluster state - we need to check if it's completed on the file system as well
+            awaitBusy(new Predicate<Object>() {
+                @Override
+                public boolean apply(Object o) {
+                    GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").get();
+                    assertThat(response.getSnapshots().size(), equalTo(1));
+                    SnapshotInfo snapshotInfo = response.getSnapshots().get(0);
+                    if (snapshotInfo.state().completed()) {
+                        assertThat(snapshotInfo.state(), equalTo(SnapshotState.PARTIAL));
+                        return true;
+                    }
+                    return false;
+                }
+            });
+        } else {
+            logger.info("checking snapshot completion using wait_for_completion flag");
+            createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2")
+                    .setIndices("test-idx-all", "test-idx-none", "test-idx-some")
+                    .setWaitForCompletion(true).setPartial(true).execute().actionGet();
+            logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
+            assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(18));
+            assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(12));
+            assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(6));
+            assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.PARTIAL));
+        }
+
+        // Close the indices so restore can replace them in place.
+        assertAcked(client().admin().indices().prepareClose("test-idx-some", "test-idx-all").execute().actionGet());
+
+        logger.info("--> restore incomplete snapshot - should fail");
+        assertThrows(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setWaitForCompletion(true).execute(), SnapshotRestoreException.class);
+
+        logger.info("--> restore snapshot for the index that was snapshotted completely");
+        RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setRestoreGlobalState(false).setIndices("test-idx-all").setWaitForCompletion(true).execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+        assertThat(client().prepareCount("test-idx-all").get().getCount(), equalTo(100L));
+
+        logger.info("--> restore snapshot for the partial index");
+        cluster().wipeIndices("test-idx-some");
+        restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2")
+                .setRestoreGlobalState(false).setIndices("test-idx-some").setPartial(true).setWaitForCompletion(true).get();
+        assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), lessThan(6)));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
+
+        assertThat(client().prepareCount("test-idx-some").get().getCount(), allOf(greaterThan(0L), lessThan(100L)));
+
+        logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully");
+        cluster().wipeIndices("test-idx-none");
+        restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2")
+                .setRestoreGlobalState(false).setIndices("test-idx-none").setPartial(true).setWaitForCompletion(true).get();
+        assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue());
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(0));
+        assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(6));
+
+        // NOTE(review): this re-asserts the "test-idx-some" count already checked above;
+        // presumably the intent was to verify "test-idx-none" after its fully-failed restore — confirm.
+        assertThat(client().prepareCount("test-idx-some").get().getCount(), allOf(greaterThan(0L), lessThan(100L)));
+    }
+
+    /**
+     * Verifies that restoring a snapshot over a closed index whose local shard data was wiped on
+     * one node still succeeds, and that the surviving node's intact shard copies are reused
+     * (incremental recovery) for at least half of the shards.
+     */
+    @Test
+    public void restoreIndexWithShardsMissingInLocalGateway() throws Exception {
+        logger.info("--> start 2 nodes");
+        // Disable rebalancing so shards stay where they were initially allocated.
+        Settings nodeSettings = settingsBuilder()
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)
+                .build();
+
+        internalCluster().startNode(nodeSettings);
+        internalCluster().startNode(nodeSettings);
+        cluster().wipeIndices("_all");
+
+        logger.info("--> create repository");
+        PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).execute().actionGet();
+        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        int numberOfShards = 6;
+        logger.info("--> create an index that will have some unallocated shards");
+        assertAcked(prepareCreate("test-idx", 2, settingsBuilder().put("number_of_shards", numberOfShards)
+                .put("number_of_replicas", 0)));
+        ensureGreen();
+
+        logger.info("--> indexing some data into test-idx");
+        for (int i = 0; i < 100; i++) {
+            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        }
+        refresh();
+        assertThat(client().prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+        logger.info("--> start snapshot");
+        assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx").setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+
+        logger.info("--> close the index");
+        assertAcked(client().admin().indices().prepareClose("test-idx"));
+
+        logger.info("--> shutdown one of the nodes that should make half of the shards unavailable");
+        // Restart a random data node with clearData=true, wiping its on-disk shard copies.
+        internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {
+            @Override
+            public boolean clearData(String nodeName) {
+                return true;
+            }
+        });
+
+        assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setTimeout("1m").setWaitForNodes("2").execute().actionGet().isTimedOut(), equalTo(false));
+
+        logger.info("--> restore index snapshot");
+        assertThat(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setRestoreGlobalState(false).setWaitForCompletion(true).get().getRestoreInfo().successfulShards(), equalTo(6));
+
+        ensureGreen("test-idx");
+        assertThat(client().prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+        // Collect shards whose recovery reused existing local bytes instead of copying everything.
+        IntSet reusedShards = new IntHashSet();
+        for (ShardRecoveryResponse response : client().admin().indices().prepareRecoveries("test-idx").get().shardResponses().get("test-idx")) {
+            if (response.recoveryState().getIndex().reusedBytes() > 0) {
+                reusedShards.add(response.getShardId());
+            }
+        }
+        logger.info("--> check that at least half of the shards had some reuse: [{}]", reusedShards);
+        // The surviving node kept roughly half of the shards intact, so those should show reuse.
+        assertThat(reusedShards.size(), greaterThanOrEqualTo(numberOfShards / 2));
+    }
+
+
+    /**
+     * Registers several unverified mock repositories (only the master has the mock plugin
+     * loaded) and then checks that a properly configured "fs" repository can still be
+     * registered on all nodes afterwards.
+     * NOTE(review): despite the name, nothing here asserts that a registration fails —
+     * the mock registrations are only expected not to break subsequent ones; confirm intent.
+     */
+    @Test
+    public void registrationFailureTest() {
+        logger.info("--> start first node");
+        internalCluster().startNode(settingsBuilder().put("plugin.types", MockRepositoryPlugin.class.getName()));
+        logger.info("--> start second node");
+        // Make sure the first node is elected as master
+        internalCluster().startNode(settingsBuilder().put("node.master", false));
+        // Register mock repositories
+        for (int i = 0; i < 5; i++) {
+            client().admin().cluster().preparePutRepository("test-repo" + i)
+                    .setType("mock").setSettings(Settings.settingsBuilder()
+                            .put("location", randomRepoPath())).setVerify(false).get();
+        }
+        logger.info("--> make sure that properly setup repository can be registered on all nodes");
+        client().admin().cluster().preparePutRepository("test-repo-0")
+                .setType("fs").setSettings(Settings.settingsBuilder()
+                        .put("location", randomRepoPath())).get();
+
+    }
+
+    /**
+     * Verifies that repository settings under the "secret." namespace are filtered out of REST
+     * responses: the mock username must be visible in both the get-repositories and cluster-state
+     * output while the mock password must not appear in either.
+     */
+    @Test
+    public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception {
+        Settings nodeSettings = settingsBuilder().put("plugin.types", MockRepositoryPlugin.class.getName()).build();
+        logger.info("--> start two nodes");
+        internalCluster().startNodesAsync(2, nodeSettings).get();
+        // Register mock repositories
+        client().admin().cluster().preparePutRepository("test-repo")
+                .setType("mock").setSettings(Settings.settingsBuilder()
+                        .put("location", randomRepoPath())
+                        .put("secret.mock.username", "notsecretusername")
+                        .put("secret.mock.password", "verysecretpassword")
+                ).get();
+
+        // Invoke the REST handler directly with a fake request; assertions run on the response
+        // thread, so failures are captured in an AtomicReference and rethrown on the test thread.
+        RestGetRepositoriesAction getRepoAction = internalCluster().getInstance(RestGetRepositoriesAction.class);
+        RestRequest getRepoRequest = new FakeRestRequest();
+        getRepoRequest.params().put("repository", "test-repo");
+        final CountDownLatch getRepoLatch = new CountDownLatch(1);
+        final AtomicReference<AssertionError> getRepoError = new AtomicReference<>();
+        getRepoAction.handleRequest(getRepoRequest, new RestChannel(getRepoRequest, true) {
+            @Override
+            public void sendResponse(RestResponse response) {
+                try {
+                    assertThat(response.content().toUtf8(), containsString("notsecretusername"));
+                    assertThat(response.content().toUtf8(), not(containsString("verysecretpassword")));
+                } catch (AssertionError ex) {
+                    getRepoError.set(ex);
+                }
+                getRepoLatch.countDown();
+            }
+        });
+        assertTrue(getRepoLatch.await(1, TimeUnit.SECONDS));
+        if (getRepoError.get() != null) {
+            throw getRepoError.get();
+        }
+
+        // Repeat the same check against the cluster-state REST endpoint, which also renders
+        // repository metadata and must apply the same filtering.
+        RestClusterStateAction clusterStateAction = internalCluster().getInstance(RestClusterStateAction.class);
+        RestRequest clusterStateRequest = new FakeRestRequest();
+        final CountDownLatch clusterStateLatch = new CountDownLatch(1);
+        final AtomicReference<AssertionError> clusterStateError = new AtomicReference<>();
+        clusterStateAction.handleRequest(clusterStateRequest, new RestChannel(clusterStateRequest, true) {
+            @Override
+            public void sendResponse(RestResponse response) {
+                try {
+                    assertThat(response.content().toUtf8(), containsString("notsecretusername"));
+                    assertThat(response.content().toUtf8(), not(containsString("verysecretpassword")));
+                } catch (AssertionError ex) {
+                    clusterStateError.set(ex);
+                }
+                clusterStateLatch.countDown();
+            }
+        });
+        assertTrue(clusterStateLatch.await(1, TimeUnit.SECONDS));
+        if (clusterStateError.get() != null) {
+            throw clusterStateError.get();
+        }
+
+    }
+
+    /**
+     * Chaos test (currently {@code @Ignore}d): starts a partial snapshot while randomly
+     * creating/deleting indices, shutting down data nodes, and starting new nodes, then only
+     * asserts that the snapshot eventually finishes in some terminal state — it does not
+     * assert success or failure.
+     */
+    @Test
+    @Ignore
+    public void chaosSnapshotTest() throws Exception {
+        // CopyOnWriteArrayList: mutated concurrently by the async index-creation threads below.
+        final List<String> indices = new CopyOnWriteArrayList<>();
+        Settings settings = settingsBuilder().put("action.write_consistency", "one").build();
+        int initialNodes = between(1, 3);
+        logger.info("--> start {} nodes", initialNodes);
+        for (int i = 0; i < initialNodes; i++) {
+            internalCluster().startNode(settings);
+        }
+
+        logger.info("--> creating repository");
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(Settings.settingsBuilder()
+                        .put("location", randomRepoPath())
+                        .put("compress", randomBoolean())
+                        .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+        int initialIndices = between(1, 3);
+        logger.info("--> create {} indices", initialIndices);
+        for (int i = 0; i < initialIndices; i++) {
+            createTestIndex("test-" + i);
+            indices.add("test-" + i);
+        }
+
+        int asyncNodes = between(0, 5);
+        logger.info("--> start {} additional nodes asynchronously", asyncNodes);
+        ListenableFuture<List<String>> asyncNodesFuture = internalCluster().startNodesAsync(asyncNodes, settings);
+
+        int asyncIndices = between(0, 10);
+        logger.info("--> create {} additional indices asynchronously", asyncIndices);
+        Thread[] asyncIndexThreads = new Thread[asyncIndices];
+        for (int i = 0; i < asyncIndices; i++) {
+            final int cur = i;
+            asyncIndexThreads[i] = new Thread(new Runnable() {
+                @Override
+                public void run() {
+                    createTestIndex("test-async-" + cur);
+                    indices.add("test-async-" + cur);
+
+                }
+            });
+            asyncIndexThreads[i].start();
+        }
+
+        logger.info("--> snapshot");
+
+        ListenableActionFuture<CreateSnapshotResponse> snapshotResponseFuture = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-*").setPartial(true).execute();
+
+        long start = System.currentTimeMillis();
+        // Produce chaos for 30 sec or until snapshot is done whatever comes first
+        int randomIndices = 0;
+        while (System.currentTimeMillis() - start < 30000 && !snapshotIsDone("test-repo", "test-snap")) {
+            Thread.sleep(100);
+            // Weighted chaos actions: ~40% delete index, ~20% stop node, ~20% create index, ~20% noop.
+            int chaosType = randomInt(10);
+            if (chaosType < 4) {
+                // Randomly delete an index
+                if (indices.size() > 0) {
+                    String index = indices.remove(randomInt(indices.size() - 1));
+                    logger.info("--> deleting random index [{}]", index);
+                    internalCluster().wipeIndices(index);
+                }
+            } else if (chaosType < 6) {
+                // Randomly shutdown a node
+                if (cluster().size() > 1) {
+                    logger.info("--> shutting down random node");
+                    internalCluster().stopRandomDataNode();
+                }
+            } else if (chaosType < 8) {
+                // Randomly create an index
+                String index = "test-rand-" + randomIndices;
+                logger.info("--> creating random index [{}]", index);
+                createTestIndex(index);
+                randomIndices++;
+            } else {
+                // Take a break
+                logger.info("--> noop");
+            }
+        }
+
+        logger.info("--> waiting for async indices creation to finish");
+        for (int i = 0; i < asyncIndices; i++) {
+            asyncIndexThreads[i].join();
+        }
+
+        // createTestIndex() enabled store throttling; switch it back to node-level defaults.
+        logger.info("--> update index settings to back to normal");
+        assertAcked(client().admin().indices().prepareUpdateSettings("test-*").setSettings(Settings.builder()
+                .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node")
+        ));
+
+        // Make sure that snapshot finished - doesn't matter if it failed or succeeded
+        try {
+            CreateSnapshotResponse snapshotResponse = snapshotResponseFuture.get();
+            SnapshotInfo snapshotInfo = snapshotResponse.getSnapshotInfo();
+            assertNotNull(snapshotInfo);
+            logger.info("--> snapshot is done with state [{}], total shards [{}], successful shards [{}]", snapshotInfo.state(), snapshotInfo.totalShards(), snapshotInfo.successfulShards());
+        } catch (Exception ex) {
+            // Best-effort: the chaos may have prevented the snapshot from starting at all.
+            logger.info("--> snapshot didn't start properly", ex);
+        }
+
+        asyncNodesFuture.get();
+        logger.info("--> done");
+    }
+
+    /**
+     * Verifies that a snapshot completes successfully even when the elected master node is
+     * stopped while snapshot cluster-state updates are still pending: the newly elected
+     * master is expected to pick up and finish the snapshot.
+     */
+    @Test
+    public void masterShutdownDuringSnapshotTest() throws Exception {
+
+        Settings masterSettings = settingsBuilder().put("node.data", false).build();
+        Settings dataSettings = settingsBuilder().put("node.master", false).build();
+
+        logger.info("--> starting two master nodes and two data nodes");
+        internalCluster().startNode(masterSettings);
+        internalCluster().startNode(masterSettings);
+        internalCluster().startNode(dataSettings);
+        internalCluster().startNode(dataSettings);
+
+        final Client client = client();
+
+        logger.info("--> creating repository");
+        assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+                .setType("fs").setSettings(Settings.settingsBuilder()
+                        .put("location", randomRepoPath())
+                        .put("compress", randomBoolean())
+                        .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+        assertAcked(prepareCreate("test-idx", 0, settingsBuilder().put("number_of_shards", between(1, 20))
+                .put("number_of_replicas", 0)));
+        ensureGreen();
+
+        logger.info("--> indexing some data");
+        final int numdocs = randomIntBetween(10, 100);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
+        }
+        indexRandom(true, builders);
+        flushAndRefresh();
+
+        final int numberOfShards = getNumShards("test-idx").numPrimaries;
+        logger.info("number of shards: {}", numberOfShards);
+
+        // Block snapshot state updates on the current master so we can stop it mid-snapshot.
+        final ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName());
+        BlockingClusterStateListener snapshotListener = new BlockingClusterStateListener(clusterService, "update_snapshot [", "update snapshot state", Priority.HIGH);
+        try {
+            clusterService.addFirst(snapshotListener);
+            logger.info("--> snapshot");
+            // Issue the request through a data node client so it survives the master shutdown below.
+            dataNodeClient().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+            // Await until some updates are in pending state.
+            assertBusyPendingTasks("update snapshot state", 1);
+
+            logger.info("--> stopping master node");
+            internalCluster().stopCurrentMasterNode();
+
+            logger.info("--> unblocking snapshot execution");
+            snapshotListener.unblock();
+
+        } finally {
+            clusterService.remove(snapshotListener);
+        }
+
+        logger.info("--> wait until the snapshot is done");
+
+        assertBusy(new Runnable() {
+            @Override
+            public void run() {
+                GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get();
+                SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
+                assertTrue(snapshotInfo.state().completed());
+            }
+        }, 1, TimeUnit.MINUTES);
+
+        // Fixed log-message typo: "succesful" -> "successful".
+        logger.info("--> verify that snapshot was successful");
+
+        GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get();
+        SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
+        assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
+        assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards());
+        assertEquals(0, snapshotInfo.failedShards());
+    }
+
+
+ private boolean snapshotIsDone(String repository, String snapshot) {
+ try {
+ SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus(repository).setSnapshots(snapshot).get();
+ if (snapshotsStatusResponse.getSnapshots().isEmpty()) {
+ return false;
+ }
+ for (SnapshotStatus snapshotStatus : snapshotsStatusResponse.getSnapshots()) {
+ if (snapshotStatus.getState().completed()) {
+ return true;
+ }
+ }
+ return false;
+ } catch (SnapshotMissingException ex) {
+ return false;
+ }
+ }
+
+ private void createTestIndex(String name) {
+ assertAcked(prepareCreate(name, 0, settingsBuilder().put("number_of_shards", between(1, 6))
+ .put("number_of_replicas", between(1, 6))));
+
+ ensureYellow(name);
+
+ logger.info("--> indexing some data into {}", name);
+ for (int i = 0; i < between(10, 500); i++) {
+ index(name, "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+
+ assertAcked(client().admin().indices().prepareUpdateSettings(name).setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all")
+ .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, between(100, 50000))
+ ));
+ }
+
+    /**
+     * Base class for the custom cluster-state metadata types used by these tests. Wraps a single
+     * string payload and implements stream and XContent (de)serialization for it; subclasses
+     * supply the concrete type name, prototype, and XContent context.
+     */
+    public static abstract class TestCustomMetaData extends AbstractDiffable<Custom> implements MetaData.Custom {
+        // The opaque string payload carried through snapshots / cluster state.
+        private final String data;
+
+        protected TestCustomMetaData(String data) {
+            this.data = data;
+        }
+
+        public String getData() {
+            return data;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+
+            TestCustomMetaData that = (TestCustomMetaData) o;
+
+            if (!data.equals(that.data)) return false;
+
+            return true;
+        }
+
+        @Override
+        public int hashCode() {
+            return data.hashCode();
+        }
+
+        // Factory hook so deserialization below can produce the correct concrete subclass.
+        protected abstract TestCustomMetaData newTestCustomMetaData(String data);
+
+        @Override
+        public Custom readFrom(StreamInput in) throws IOException {
+            return newTestCustomMetaData(in.readString());
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(getData());
+        }
+
+        /**
+         * Parses {@code {"data": "<string>"}}; anything else (unknown field, non-string value,
+         * or a missing "data" field) is rejected with ElasticsearchParseException.
+         */
+        @Override
+        public Custom fromXContent(XContentParser parser) throws IOException {
+            XContentParser.Token token;
+            String data = null;
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME) {
+                    String currentFieldName = parser.currentName();
+                    if ("data".equals(currentFieldName)) {
+                        if (parser.nextToken() != XContentParser.Token.VALUE_STRING) {
+                            throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type");
+                        }
+                        data = parser.text();
+                    } else {
+                        throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]");
+                    }
+                } else {
+                    throw new ElasticsearchParseException("failed to parse snapshottable metadata");
+                }
+            }
+            if (data == null) {
+                throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found");
+            }
+            return newTestCustomMetaData(data);
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+            builder.field("data", getData());
+            return builder;
+        }
+    }
+
+
+    // Register prototypes for all custom metadata types once per JVM so the cluster-state
+    // serialization machinery can look them up by type name during these tests.
+    static {
+        MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO);
+        MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO);
+        MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO);
+        MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO);
+        MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO);
+    }
+
+    /** Custom metadata that is exposed through the API and included in snapshots. */
+    public static class SnapshottableMetadata extends TestCustomMetaData {
+        public static final String TYPE = "test_snapshottable";
+
+        public static final SnapshottableMetadata PROTO = new SnapshottableMetadata("");
+
+        public SnapshottableMetadata(String data) {
+            super(data);
+        }
+
+        @Override
+        public String type() {
+            return TYPE;
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return MetaData.API_AND_SNAPSHOT;
+        }
+
+        @Override
+        protected TestCustomMetaData newTestCustomMetaData(String data) {
+            return new SnapshottableMetadata(data);
+        }
+    }
+
+    /** Custom metadata that is exposed through the API only and never snapshotted. */
+    public static class NonSnapshottableMetadata extends TestCustomMetaData {
+        public static final String TYPE = "test_non_snapshottable";
+
+        public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata("");
+
+        public NonSnapshottableMetadata(String data) {
+            super(data);
+        }
+
+        @Override
+        public String type() {
+            return TYPE;
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return MetaData.API_ONLY;
+        }
+
+        @Override
+        protected NonSnapshottableMetadata newTestCustomMetaData(String data) {
+            return new NonSnapshottableMetadata(data);
+        }
+    }
+
+    /** Custom metadata exposed through the API, included in snapshots, and persisted to the gateway. */
+    public static class SnapshottableGatewayMetadata extends TestCustomMetaData {
+        public static final String TYPE = "test_snapshottable_gateway";
+
+        public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata("");
+
+        public SnapshottableGatewayMetadata(String data) {
+            super(data);
+        }
+
+        @Override
+        public String type() {
+            return TYPE;
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY);
+        }
+
+        @Override
+        protected TestCustomMetaData newTestCustomMetaData(String data) {
+            return new SnapshottableGatewayMetadata(data);
+        }
+    }
+
+    /** Custom metadata exposed through the API and persisted to the gateway, but never snapshotted. */
+    public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData {
+        public static final String TYPE = "test_non_snapshottable_gateway";
+
+        public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata("");
+
+        public NonSnapshottableGatewayMetadata(String data) {
+            super(data);
+        }
+
+        @Override
+        public String type() {
+            return TYPE;
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return MetaData.API_AND_GATEWAY;
+        }
+
+        @Override
+        protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) {
+            return new NonSnapshottableGatewayMetadata(data);
+        }
+
+    }
+
+    /** Custom metadata included in snapshots and persisted to the gateway, but hidden from the API. */
+    public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData {
+        public static final String TYPE = "test_snapshottable_gateway_no_api";
+
+        public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata("");
+
+        public SnapshotableGatewayNoApiMetadata(String data) {
+            super(data);
+        }
+
+        @Override
+        public String type() {
+            return TYPE;
+        }
+
+        @Override
+        public EnumSet<MetaData.XContentContext> context() {
+            return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT);
+        }
+
+        @Override
+        protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) {
+            return new SnapshotableGatewayNoApiMetadata(data);
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
new file mode 100644
index 0000000000..74e99c995a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.repositories.RepositoryException;
+import org.elasticsearch.repositories.RepositoryVerificationException;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.nio.file.Path;
+import java.util.List;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
+public class RepositoriesTests extends AbstractSnapshotTests {
+
+ @Test
+ public void testRepositoryCreation() throws Exception {
+ Client client = client();
+
+ Path location = randomRepoPath();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", location)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> verify the repository");
+ int numberOfFiles = FileSystemUtils.files(location).length;
+ VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get();
+ assertThat(verifyRepositoryResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes()));
+
+ logger.info("--> verify that we didn't leave any files as a result of verification");
+ assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles));
+
+ logger.info("--> check that repository is really there");
+ ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ MetaData metaData = clusterStateResponse.getState().getMetaData();
+ RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+
+ logger.info("--> creating another repository");
+ putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> check that both repositories are in cluster state");
+ clusterStateResponse = client.admin().cluster().prepareState().clear().setMetaData(true).get();
+ metaData = clusterStateResponse.getState().getMetaData();
+ repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE);
+ assertThat(repositoriesMetaData, notNullValue());
+ assertThat(repositoriesMetaData.repositories().size(), equalTo(2));
+ assertThat(repositoriesMetaData.repository("test-repo-1"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-1").type(), equalTo("fs"));
+ assertThat(repositoriesMetaData.repository("test-repo-2"), notNullValue());
+ assertThat(repositoriesMetaData.repository("test-repo-2").type(), equalTo("fs"));
+
+ logger.info("--> check that both repositories can be retrieved by getRepositories query");
+ GetRepositoriesResponse repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(2));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-1");
+ client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(1));
+ assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
+
+ logger.info("--> delete repository test-repo-2");
+ client.admin().cluster().prepareDeleteRepository("test-repo-2").get();
+ repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
+ assertThat(repositoriesResponse.repositories().size(), equalTo(0));
+ }
+
+ private RepositoryMetaData findRepository(List<RepositoryMetaData> repositories, String name) {
+ for (RepositoryMetaData repository : repositories) {
+ if (repository.name().equals(name)) {
+ return repository;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testMisconfiguredRepository() throws Exception {
+ Client client = client();
+
+ logger.info("--> trying creating repository with incorrect settings");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo").setType("fs").get();
+ fail("Shouldn't be here");
+ } catch (RepositoryException ex) {
+ assertThat(ex.toString(), containsString("missing location"));
+ }
+
+ logger.info("--> trying creating repository with location that is not registered in path.repo setting");
+ String location = createTempDir().toAbsolutePath().toString();
+ try {
+ client().admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", location))
+ .get();
+ fail("Shouldn't be here");
+ } catch (RepositoryException ex) {
+ assertThat(ex.toString(), containsString("location [" + location + "] doesn't match any of the locations specified by path.repo"));
+ }
+ }
+
+ @Test
+ public void repositoryAckTimeoutTest() throws Exception {
+ logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
+ PutRepositoryResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-1")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES)
+ )
+ .setTimeout("0s").get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
+ putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
+ DeleteRepositoryResponse deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-2")
+ .setTimeout("0s").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
+
+ logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
+ deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-1").get();
+ assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
+ }
+
+ @Test
+ public void repositoryVerificationTest() throws Exception {
+ Client client = client();
+
+ Settings settings = Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("random_control_io_exception_rate", 1.0).build();
+ logger.info("--> creating repository that cannot write any files - should fail");
+ assertThrows(client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(settings),
+ RepositoryVerificationException.class);
+
+ logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(settings).setVerify(false));
+
+ logger.info("--> verifying repository");
+ assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class);
+
+ Path location = randomRepoPath();
+
+ logger.info("--> creating repository");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName())
+ .setSettings(Settings.settingsBuilder()
+ .put("location", location)
+ .put("localize_location", true)
+ ).get();
+ fail("RepositoryVerificationException wasn't generated");
+ } catch (RepositoryVerificationException ex) {
+ assertThat(ex.getMessage(), containsString("is not shared"));
+ }
+ }
+
+ @Test
+ public void repositoryVerificationTimeoutTest() throws Exception {
+ Client client = client();
+
+ Settings settings = Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("random_control_io_exception_rate", 1.0).build();
+ logger.info("--> creating repository that cannot write any files - should fail");
+ assertThrows(client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(settings),
+ RepositoryVerificationException.class);
+
+ logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(settings).setVerify(false));
+
+ logger.info("--> verifying repository");
+ assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class);
+
+ Path location = randomRepoPath();
+
+ logger.info("--> creating repository");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo-1")
+ .setType(MockRepositoryModule.class.getCanonicalName())
+ .setSettings(Settings.settingsBuilder()
+ .put("location", location)
+ .put("localize_location", true)
+ ).get();
+ fail("RepositoryVerificationException wasn't generated");
+ } catch (RepositoryVerificationException ex) {
+ assertThat(ex.getMessage(), containsString("is not shared"));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
new file mode 100644
index 0000000000..b9fb2bf1fe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java
@@ -0,0 +1,1897 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ListenableActionFuture;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.status.*;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.metadata.*;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData.Entry;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData.ShardSnapshotStatus;
+import org.elasticsearch.cluster.metadata.SnapshotMetaData.State;
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.mockstore.MockRepositoryModule;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.Test;
+
+import java.nio.channels.SeekableByteChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.index.shard.IndexShard.INDEX_REFRESH_INTERVAL;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@Slow
+public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
+
+ @Test
+ public void basicWorkFlowTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+ index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+ }
+ refresh();
+ assertHitCount(client.prepareCount("test-idx-1").get(), 100L);
+ assertHitCount(client.prepareCount("test-idx-2").get(), 100L);
+ assertHitCount(client.prepareCount("test-idx-3").get(), 100L);
+
+ ListenableActionFuture<FlushResponse> flushResponseFuture = null;
+ if (randomBoolean()) {
+ ArrayList<String> indicesToFlush = newArrayList();
+ for (int i = 1; i < 4; i++) {
+ if (randomBoolean()) {
+ indicesToFlush.add("test-idx-" + i);
+ }
+ }
+ if (!indicesToFlush.isEmpty()) {
+ String[] indices = indicesToFlush.toArray(new String[indicesToFlush.size()]);
+ logger.info("--> starting asynchronous flush for indices {}", Arrays.toString(indices));
+ flushResponseFuture = client.admin().indices().prepareFlush(indices).execute();
+ }
+ }
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete some data");
+ for (int i = 0; i < 50; i++) {
+ client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 50; i < 100; i++) {
+ client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
+ }
+ for (int i = 0; i < 100; i += 2) {
+ client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
+ }
+ assertAllSuccessful(refresh());
+ assertHitCount(client.prepareCount("test-idx-1").get(), 50L);
+ assertHitCount(client.prepareCount("test-idx-2").get(), 50L);
+ assertHitCount(client.prepareCount("test-idx-3").get(), 50L);
+
+ logger.info("--> close indices");
+ client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ for (int i=0; i<5; i++) {
+ assertHitCount(client.prepareCount("test-idx-1").get(), 100L);
+ assertHitCount(client.prepareCount("test-idx-2").get(), 100L);
+ assertHitCount(client.prepareCount("test-idx-3").get(), 50L);
+ }
+
+ // Test restore after index deletion
+ logger.info("--> delete indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-2");
+ logger.info("--> restore one index after deletion");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ for (int i=0; i<5; i++) {
+ assertHitCount(client.prepareCount("test-idx-1").get(), 100L);
+ }
+ ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
+ assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
+
+ if (flushResponseFuture != null) {
+ // Finish flush
+ flushResponseFuture.actionGet();
+ }
+ }
+
+
+ @Test
+ public void singleGetAfterRestoreTest() throws Exception {
+ String indexName = "testindex";
+ String repoName = "test-restore-snapshot-repo";
+ String snapshotName = "test-restore-snapshot";
+ String absolutePath = randomRepoPath().toAbsolutePath().toString();
+ logger.info("Path [{}]", absolutePath);
+ String restoredIndexName = indexName + "-restored";
+ String typeName = "actions";
+ String expectedValue = "expected";
+
+ Client client = client();
+ // Write a document
+ String docId = Integer.toString(randomInt());
+ index(indexName, typeName, docId, "value", expectedValue);
+
+ // TODO: Remove after dynamic mapping flushing is implemented
+ waitForConcreteMappingsOnAll(indexName, typeName, "value");
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository(repoName)
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", absolutePath)
+ ));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName)
+ .setWaitForCompletion(true)
+ .setRenamePattern(indexName)
+ .setRenameReplacement(restoredIndexName)
+ .get();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareGet(restoredIndexName, typeName, docId).get().isExists(), equalTo(true));
+ }
+
+ @Test
+ public void testFreshIndexUUID() {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test");
+ String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+ assertTrue(originalIndexUUID, originalIndexUUID != null);
+ assertFalse(originalIndexUUID, originalIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE));
+ ensureGreen();
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ NumShards numShards = getNumShards("test");
+
+ cluster().wipeIndices("test");
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries)));
+ ensureGreen();
+ String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+ assertTrue(newIndexUUID, newIndexUUID != null);
+ assertFalse(newIndexUUID, newIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE));
+ assertFalse(newIndexUUID, newIndexUUID.equals(originalIndexUUID));
+ logger.info("--> close index");
+ client.admin().indices().prepareClose("test").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureGreen();
+ String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+ assertTrue("UUID has changed after restore: " + newIndexUUID + " vs. " + newAfterRestoreIndexUUID, newIndexUUID.equals(newAfterRestoreIndexUUID));
+
+ logger.info("--> restore indices with different names");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_UUID);
+ assertFalse("UUID has been reused on restore: " + copyRestoreUUID + " vs. " + originalIndexUUID, copyRestoreUUID.equals(originalIndexUUID));
+ }
+
+ @Test
+ public void restoreWithDifferentMappingsAndSettingsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ logger.info("--> create index with foo type");
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder()
+ .put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 10, TimeUnit.SECONDS)));
+
+ NumShards numShards = getNumShards("test-idx");
+
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("foo").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> snapshot it");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete the index and recreate it with bar type");
+ cluster().wipeIndices("test-idx");
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries).put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)).put("refresh_interval", 5, TimeUnit.SECONDS)));
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=string"));
+ ensureGreen();
+
+ logger.info("--> close index");
+ client.admin().indices().prepareClose("test-idx").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> assert that old mapping is restored");
+ ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState().getMetaData().getIndices().get("test-idx").getMappings();
+ assertThat(mappings.get("foo"), notNullValue());
+ assertThat(mappings.get("bar"), nullValue());
+
+ logger.info("--> assert that old settings are restored");
+ GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
+ assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10000ms"));
+ }
+
+ @Test
+ public void emptySnapshotTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ }
+
+ @Test
+ public void restoreAliasesTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+ logger.info("--> create test indices");
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ logger.info("--> create aliases");
+ assertAcked(client.admin().indices().prepareAliases()
+ .addAlias("test-idx-1", "alias-123")
+ .addAlias("test-idx-2", "alias-123")
+ .addAlias("test-idx-3", "alias-123")
+ .addAlias("test-idx-1", "alias-1")
+ .get());
+ assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-123").get());
+
+ logger.info("--> snapshot");
+ assertThat(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete all indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-2", "test-idx-3");
+ assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get());
+
+ logger.info("--> restore snapshot with aliases");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ // We don't restore any indices here
+ assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())));
+
+ logger.info("--> check that aliases are restored");
+ assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get());
+
+ logger.info("--> update aliases");
+ assertAcked(client.admin().indices().prepareAliases().removeAlias("test-idx-3", "alias-123"));
+ assertAcked(client.admin().indices().prepareAliases().addAlias("test-idx-3", "alias-3"));
+
+ logger.info("--> delete and close indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-2");
+ assertAcked(client.admin().indices().prepareClose("test-idx-3"));
+ assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get());
+
+ logger.info("--> restore snapshot without aliases");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).setIncludeAliases(false).execute().actionGet();
+ // We don't restore any indices here
+ assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())));
+
+ logger.info("--> check that aliases are not restored and existing aliases still exist");
+ assertAliasesMissing(client.admin().indices().prepareAliasesExist("alias-123", "alias-1").get());
+ assertAliasesExist(client.admin().indices().prepareAliasesExist("alias-3").get());
+
+ }
+
+ // Verifies that an index template is captured in a global-state-only snapshot
+ // (no index data) and re-created when that snapshot is restored with
+ // setRestoreGlobalState(true).
+ @Test
+ public void restoreTemplatesTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", randomRepoPath())));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot");
+ // setIndices() with no arguments: the snapshot contains no shards, only cluster state.
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ assertThat(client.admin().indices().prepareDeleteTemplate("test-template").get().isAcknowledged(), equalTo(true));
+ GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
+
+ logger.info("--> restore cluster state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ // We don't restore any indices here
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateExists(getIndexTemplatesResponse, "test-template");
+
+ }
+
+ // Verifies setIncludeGlobalState(false)/(true) at snapshot time: restoring a
+ // snapshot taken without global state must not bring back templates, even when
+ // the restore requests global state; a snapshot taken with global state must.
+ // Also checks that index data restores fine from a no-global-state snapshot.
+ @Test
+ public void includeGlobalStateTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ Path location = randomRepoPath();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", location)));
+
+ logger.info("--> creating test template");
+ assertThat(client.admin().indices().preparePutTemplate("test-template").setTemplate("te*").addMapping("test-mapping", "{}").get().isAcknowledged(), equalTo(true));
+
+ logger.info("--> snapshot without global state");
+ // Both snapshots below are metadata-only (setIndices() with no arguments => 0 shards).
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state").setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> snapshot with global state");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-with-global-state").setIndices().setIncludeGlobalState(true).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-with-global-state").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template");
+ cluster().wipeTemplates("test-template");
+ GetIndexTemplatesResponse getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
+
+ logger.info("--> try restoring cluster state from snapshot without global state");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template wasn't restored");
+ getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
+
+ logger.info("--> restore cluster state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0));
+
+ logger.info("--> check that template is restored");
+ getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateExists(getIndexTemplatesResponse, "test-template");
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot without global state but with indices");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index").setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-no-global-state-with-index").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete test template and index ");
+ cluster().wipeIndices("test-idx");
+ cluster().wipeTemplates("test-template");
+ getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
+
+ logger.info("--> try restoring index and cluster state from snapshot without global state");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index").setWaitForCompletion(true).setRestoreGlobalState(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+ logger.info("--> check that template wasn't restored but index was");
+ getIndexTemplatesResponse = client().admin().indices().prepareGetTemplates().get();
+ assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template");
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ }
+
+ // Injects random I/O failures on snapshot *control* files via the mock
+ // repository ("random_control_io_exception_rate" = 0.2) and checks that either
+ // the snapshot completes with no recorded failures, completes partially with
+ // shard failures mentioning "Random IOException", or throws — in which case
+ // the exception message must contain "IOException". Non-deterministic by design.
+ @Test
+ public void snapshotFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("random", randomAsciiOfLength(10))
+ .put("random_control_io_exception_rate", 0.2))
+ // verification itself could trip the injected failures, so skip it
+ .setVerify(false));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ try {
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ // If we are here, that means we didn't have any failures, let's check it
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.reason(), containsString("Random IOException"));
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ // NOTE(review): guard looks inverted — asserting failure counts only when
+ // state == SUCCESS; presumably PARTIAL was intended. Verify upstream.
+ if (snapshotInfo.state() == SnapshotState.SUCCESS) {
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+ }
+ }
+ } catch (Exception ex) {
+ // The injected control-file failure may surface as a thrown exception instead.
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(ExceptionsHelper.detailedMessage(ex), containsString("IOException"));
+ }
+ }
+
+ // Injects random I/O failures on snapshot *data* files
+ // ("random_data_file_io_exception_rate" = 0.3) and checks that when failures
+ // occur the snapshot ends up PARTIAL, the shard failures reported by the
+ // create-snapshot response agree with those reported by the snapshot-status
+ // API, and the per-shard FAILURE stages carry a failure message.
+ @Test
+ public void dataFileFailureDuringSnapshotTest() throws Exception {
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.3)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) {
+ logger.info("--> no failures");
+ // If we are here, that means we didn't have any failures, let's check it
+ assertThat(getFailureCount("test-repo"), equalTo(0L));
+ } else {
+ logger.info("--> some failures");
+ assertThat(getFailureCount("test-repo"), greaterThan(0L));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
+ for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+ assertThat(shardFailure.nodeId(), notNullValue());
+ assertThat(shardFailure.index(), equalTo("test-idx"));
+ }
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
+ assertThat(snapshotInfo.state(), equalTo(SnapshotState.PARTIAL));
+ assertThat(snapshotInfo.shardFailures().size(), greaterThan(0));
+ assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards()));
+
+ // Verify that snapshot status also contains the same failures
+ SnapshotsStatusResponse snapshotsStatusResponse = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get();
+ assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1));
+ SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0);
+ assertThat(snapshotStatus.getIndices().size(), equalTo(1));
+ SnapshotIndexStatus indexStatus = snapshotStatus.getIndices().get("test-idx");
+ assertThat(indexStatus, notNullValue());
+ assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(snapshotInfo.failedShards()));
+ assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards()));
+ assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards()));
+
+ // Cross-check: FAILURE-stage shards (and only those) must carry a failure message.
+ int numberOfFailures = 0;
+ for (SnapshotIndexShardStatus shardStatus : indexStatus.getShards().values()) {
+ if (shardStatus.getStage() == SnapshotIndexShardStage.FAILURE) {
+ assertThat(shardStatus.getFailure(), notNullValue());
+ numberOfFailures++;
+ } else {
+ assertThat(shardStatus.getFailure(), nullValue());
+ }
+ }
+ assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(numberOfFailures));
+ }
+
+ }
+
+ // Takes a clean snapshot with a plain fs repository, then swaps the repository
+ // definition for a mock one that randomly fails data-file reads (rate 0.3),
+ // and checks the restore still completes with all 100 documents recovered
+ // (restore retries are expected to mask the injected failures — the test only
+ // asserts the final document count).
+ @Test
+ public void dataFileFailureDuringRestoreTest() throws Exception {
+ Path repositoryLocation = randomRepoPath();
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
+ logger.info("--> update repository with mock version");
+ // Re-registering "test-repo" with the same location but the mock type makes
+ // the already-written snapshot readable through the failure-injecting repo.
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 0.3)));
+
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+ }
+
+
+ // Makes every data-file read fail (rate 1.0) so a restore can never finish,
+ // then deletes the perpetually-failing index mid-restore and verifies the
+ // restore operation terminates (reporting all shards failed) instead of
+ // hanging. Finally restores the repository to a working fs type and confirms
+ // a normal restore of the same snapshot succeeds with all 100 docs.
+ @Test
+ public void deletionOfFailingToRecoverIndexShouldStopRestore() throws Exception {
+ Path repositoryLocation = randomRepoPath();
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(createSnapshotResponse.getSnapshotInfo().successfulShards()));
+
+ logger.info("--> update repository with mock version");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("random_data_file_io_exception_rate", 1.0) // Fail completely
+ ));
+
+ // Test restore after index deletion
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> restore index after deletion");
+ // Async restore: it can never complete while reads fail, so don't block here.
+ ListenableActionFuture<RestoreSnapshotResponse> restoreSnapshotResponseFuture =
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
+
+ logger.info("--> wait for the index to appear");
+ // that would mean that recovery process started and failing
+ assertThat(waitForIndex("test-idx", TimeValue.timeValueSeconds(10)), equalTo(true));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+ logger.info("--> get restore results");
+ // Now read restore results and make sure it failed
+ RestoreSnapshotResponse restoreSnapshotResponse = restoreSnapshotResponseFuture.actionGet(TimeValue.timeValueSeconds(10));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().failedShards()));
+
+ logger.info("--> restoring working repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> trying to restore index again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+ CountResponse countResponse = client.prepareCount("test-idx").get();
+ assertThat(countResponse.getCount(), equalTo(100L));
+
+ }
+
+ // Creates a 3-shard index whose allocation filter ("tag" = "nowhere") prevents
+ // any shard from ever being assigned, then verifies that snapshotting it
+ // FAILs outright with 0 successful shards and a "primary shards" reason,
+ // rather than hanging or partially succeeding.
+ @Test
+ public void unallocatedShardsTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())));
+
+ logger.info("--> creating index that cannot be allocated");
+ prepareCreate("test-idx", 2, Settings.builder().put(FilterAllocationDecider.INDEX_ROUTING_INCLUDE_GROUP + ".tag", "nowhere").put("index.number_of_shards", 3)).get();
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(3));
+ assertThat(createSnapshotResponse.getSnapshotInfo().reason(), startsWith("Indices don't have primary shards"));
+ }
+
+ // Builds 5-15 incremental snapshots of a growing index, recording the
+ // repository's file count after each one; then deletes all but the first and
+ // last snapshot and asserts the repository shrank, restores the last snapshot
+ // intact, and finally deletes it too, asserting the file count drops back to
+ // exactly what it was after the first snapshot (i.e. deletes fully reclaim
+ // files that are no longer referenced).
+ @Test
+ public void deleteSnapshotTest() throws Exception {
+ final int numberOfSnapshots = between(5, 15);
+ Client client = client();
+
+ Path repo = randomRepoPath();
+ logger.info("--> creating repository at " + repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ int[] numberOfFiles = new int[numberOfSnapshots];
+ logger.info("--> creating {} snapshots ", numberOfSnapshots);
+ for (int i = 0; i < numberOfSnapshots; i++) {
+ for (int j = 0; j < 10; j++) {
+ // NOTE(review): "bar" + i * 10 + j concatenates as ("bar"+i*10)+j, not
+ // "bar"+(i*10+j); harmless here since the field value is never asserted.
+ index("test-idx", "doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
+ }
+ refresh();
+ logger.info("--> snapshot {}", i);
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-" + i).setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ // Store number of files after each snapshot
+ numberOfFiles[i] = numberOfFiles(repo);
+ }
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+ int numberOfFilesBeforeDeletion = numberOfFiles(repo);
+
+ logger.info("--> delete all snapshots except the first one and last one");
+ for (int i = 1; i < numberOfSnapshots - 1; i++) {
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-" + i).get();
+ }
+
+ int numberOfFilesAfterDeletion = numberOfFiles(repo);
+
+ assertThat(numberOfFilesAfterDeletion, lessThan(numberOfFilesBeforeDeletion));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1);
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", lastSnapshot).setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(10L * numberOfSnapshots));
+
+ logger.info("--> delete the last snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", lastSnapshot).get();
+ logger.info("--> make sure that number of files is back to what it was when the first snapshot was made");
+ assertThat(numberOfFiles(repo), equalTo(numberOfFiles[0]));
+ }
+
+ // Corrupts a repository by removing one index-level and one shard-level
+ // snapshot metadata file from disk, then verifies the snapshot can still be
+ // deleted and is subsequently reported missing (delete must tolerate partial
+ // on-disk metadata).
+ @Test
+ public void deleteSnapshotWithMissingIndexAndShardMetadataTest() throws Exception {
+ Client client = client();
+
+ Path repo = randomRepoPath();
+ logger.info("--> creating repository at " + repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx-1", "test-idx-2");
+ ensureYellow();
+ logger.info("--> indexing some data");
+ indexRandom(true,
+ client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
+ client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+
+ logger.info("--> creating snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete index metadata and shard metadata");
+ // Paths follow the fs repository layout: <repo>/indices/<index>/<shard>/snapshot-<name>.
+ Path indices = repo.resolve("indices");
+ Path testIndex1 = indices.resolve("test-idx-1");
+ Path testIndex2 = indices.resolve("test-idx-2");
+ Path testIndex2Shard0 = testIndex2.resolve("0");
+ IOUtils.deleteFilesIgnoringExceptions(testIndex1.resolve("snapshot-test-snap-1"));
+ IOUtils.deleteFilesIgnoringExceptions(testIndex2Shard0.resolve("snapshot-test-snap-1"));
+
+ logger.info("--> delete snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
+
+ logger.info("--> make sure snapshot doesn't exist");
+ assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
+ }
+
+ // Removes the top-level snapshot metadata file ("metadata-<snapshot>") from
+ // the repository and verifies the snapshot can still be deleted and is then
+ // reported missing.
+ @Test
+ public void deleteSnapshotWithMissingMetadataTest() throws Exception {
+ Client client = client();
+
+ Path repo = randomRepoPath();
+ logger.info("--> creating repository at " + repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx-1", "test-idx-2");
+ ensureYellow();
+ logger.info("--> indexing some data");
+ indexRandom(true,
+ client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
+ client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+
+ logger.info("--> creating snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete index metadata and shard metadata");
+ Path metadata = repo.resolve("metadata-test-snap-1");
+ Files.delete(metadata);
+
+ logger.info("--> delete snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
+
+ logger.info("--> make sure snapshot doesn't exist");
+ assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
+ }
+
+ // Truncates the top-level "snapshot-<name>" file to a random tiny size so it
+ // becomes unreadable, then verifies the snapshot can still be deleted, is
+ // reported missing afterwards, and that a new snapshot with the same name can
+ // be created (i.e. no stale blobs block re-use of the name).
+ @Test
+ public void deleteSnapshotWithCorruptedSnapshotFileTest() throws Exception {
+ Client client = client();
+
+ Path repo = randomRepoPath();
+ logger.info("--> creating repository at " + repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx-1", "test-idx-2");
+ ensureYellow();
+ logger.info("--> indexing some data");
+ indexRandom(true,
+ client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
+ client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+
+ logger.info("--> creating snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> truncate snapshot file to make it unreadable");
+ Path snapshotPath = repo.resolve("snapshot-test-snap-1");
+ try(SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
+ outChan.truncate(randomInt(10));
+ }
+ logger.info("--> delete snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
+
+ logger.info("--> make sure snapshot doesn't exist");
+ assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
+
+ logger.info("--> make sure that we can create the snapshot again");
+ createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ }
+
+
+ // Verifies snapshot behavior around closed indices: a wildcard pattern
+ // ("test-idx*") silently skips the closed index (only 1 index ends up in the
+ // snapshot), while naming a closed index explicitly is rejected with the
+ // INDEX_CLOSED_BLOCK cluster block.
+ @Test
+ public void snapshotClosedIndexTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())));
+
+ createIndex("test-idx", "test-idx-closed");
+ ensureGreen();
+ logger.info("--> closing index test-idx-closed");
+ assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
+ // Sanity-check the close: metadata says CLOSE and the index has no routing table.
+ ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
+ assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
+ assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(1));
+ assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), equalTo(0));
+
+ logger.info("--> deleting snapshot");
+ client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+
+ logger.info("--> snapshot with closed index");
+ assertBlocked(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx", "test-idx-closed"), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+ }
+
+ // Verifies that a snapshot request naming exactly one index, which is closed,
+ // is rejected with the INDEX_CLOSED_BLOCK cluster block.
+ @Test
+ public void snapshotSingleClosedIndexTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())));
+
+ createIndex("test-idx");
+ ensureGreen();
+ logger.info("--> closing index test-idx");
+ assertAcked(client.admin().indices().prepareClose("test-idx"));
+
+ logger.info("--> snapshot");
+ assertBlocked(client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
+ .setWaitForCompletion(true).setIndices("test-idx"), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
+ }
+
+ @Test
+ public void renameOnRestoreTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())));
+
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ assertAcked(client.admin().indices().prepareAliases()
+ .addAlias("test-idx-1", "alias-1")
+ .addAlias("test-idx-2", "alias-2")
+ .addAlias("test-idx-3", "alias-3")
+ );
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-1", "test-idx-2").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> restore indices with different names");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+ logger.info("--> close just restored indices");
+ client.admin().indices().prepareClose("test-idx-1-copy", "test-idx-2-copy").get();
+
+ logger.info("--> and try to restore these indices again");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx-1-copy").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2-copy").get().getCount(), equalTo(100L));
+
+
+ logger.info("--> close indices");
+ assertAcked(client.admin().indices().prepareClose("test-idx-1", "test-idx-2-copy"));
+
+ logger.info("--> restore indices with different names");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setRenamePattern("(.+-2)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> delete indices");
+ cluster().wipeIndices("test-idx-1", "test-idx-1-copy", "test-idx-2", "test-idx-2-copy");
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("(.+)").setRenameReplacement("same-name").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using the same name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRenamePattern("test-idx-2").setRenameReplacement("test-idx-1").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices using invalid index name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("__WRONG__").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (InvalidIndexNameException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices into existing alias name");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern(".+").setRenameReplacement("alias-3").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (InvalidIndexNameException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices into existing alias of itself");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices into existing alias of another restored index");
+ try {
+ client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setIndices("test-idx-1", "test-idx-2").setRenamePattern("test-idx-1").setRenameReplacement("alias-2").setWaitForCompletion(true).execute().actionGet();
+ fail("Shouldn't be here");
+ } catch (SnapshotRestoreException ex) {
+ // Expected
+ }
+
+ logger.info("--> try renaming indices into existing alias of itself, but don't restore aliases ");
+ restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setIndices("test-idx-1").setRenamePattern("test-idx").setRenameReplacement("alias")
+ .setWaitForCompletion(true).setIncludeAliases(false).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+
+ }
+
+ @Test
+ public void moveShardWhileSnapshottingTest() throws Exception {
+ // Blocks a node's snapshot mid-flight via the mock repository, relocates its shards
+ // away while the snapshot is blocked, then unblocks and verifies the snapshot still
+ // completes with zero shard failures and remains restorable.
+ Client client = client();
+ Path repositoryLocation = randomRepoPath();
+ logger.info("--> creating repository");
+ // "random" seeds the mock repository; "wait_after_unblock" presumably delays its
+ // resumption after unblockNode() -- TODO confirm against the mock repository impl.
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ // Fire-and-forget: completion is awaited further down via waitForCompletion().
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ // Exclude the blocked node so its shards relocate while the snapshot is stalled.
+ logger.info("--> execution was blocked on node [{}], moving shards away from this node", blockedNode);
+ Settings.Builder excludeSettings = Settings.builder().put("index.routing.allocation.exclude._name", blockedNode);
+ client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ // Despite the relocation during snapshotting, the snapshot must succeed cleanly.
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ // The on-disk layout written by the mock repository must be readable by a plain
+ // "fs" repository pointed at the same path.
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void deleteRepositoryWhileSnapshottingTest() throws Exception {
+ Client client = client();
+ Path repositoryLocation = randomRepoPath();
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)
+ ).get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode);
+
+ try {
+ client.admin().cluster().prepareDeleteRepository("test-repo").execute().get();
+ fail("shouldn't be able to delete in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository deletion failed");
+ }
+
+ logger.info("--> trying to move repository to another location");
+ try {
+ client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation.resolve("test"))
+ ).get();
+ fail("shouldn't be able to replace in-use repository");
+ } catch (Exception ex) {
+ logger.info("--> in-use repository replacement failed");
+ }
+
+ logger.info("--> trying to create a repository with different name");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation.resolve("test"))));
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+ logger.info("--> waiting for completion");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
+
+ assertThat(snapshotInfos.size(), equalTo(1));
+ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> replace mock repository with real one at the same location");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder().put("location", repositoryLocation)));
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+ }
+
+ @Test
+ public void urlRepositoryTest() throws Exception {
+ // Snapshots into an fs repository, then registers a read-only "url" repository over
+ // the same path and verifies restore and listing work through it — including seeing
+ // a deletion that was performed through the writable repository.
+ Client client = client();
+
+ logger.info("--> creating repository");
+ Path repositoryLocation = randomRepoPath();
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> create read-only URL repository");
+ assertAcked(client.admin().cluster().preparePutRepository("url-repo")
+ .setType("url").setSettings(Settings.settingsBuilder()
+ .put("url", repositoryLocation.toUri().toURL())
+ .put("list_directories", randomBoolean())));
+ logger.info("--> restore index after deletion");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("url-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Typo fix in log message: "shapshots" -> "snapshots".
+ logger.info("--> list available snapshots");
+ GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1));
+
+ // The deletion goes through the writable "test-repo"; the read-only "url-repo"
+ // must observe it on the next listing.
+ logger.info("--> delete snapshot");
+ DeleteSnapshotResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+ assertAcked(deleteSnapshotResponse);
+
+ logger.info("--> list available snapshots again, no snapshots should be returned");
+ getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get();
+ assertThat(getSnapshotsResponse.getSnapshots(), notNullValue());
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
+ }
+
+ @Test
+ public void throttlingTest() throws Exception {
+ // Randomly enables snapshot and/or restore throttling on an fs repository, runs a
+ // snapshot + restore cycle, and checks that throttle time was accumulated exactly
+ // when the corresponding rate limit was configured.
+ final Client c = client();
+
+ logger.info("--> creating repository");
+ final Path repoPath = randomRepoPath();
+ final boolean snapshotThrottled = randomBoolean();
+ final boolean restoreThrottled = randomBoolean();
+ // "0" disables the limit; "0.5k" is presumably low enough that copying 100 docs
+ // always incurs at least one pause -- matches the assertions below.
+ assertAcked(c.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs")
+ .setSettings(Settings.settingsBuilder()
+ .put("location", repoPath)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(1000, 10000), ByteSizeUnit.BYTES)
+ .put("max_restore_bytes_per_sec", restoreThrottled ? "0.5k" : "0")
+ .put("max_snapshot_bytes_per_sec", snapshotThrottled ? "0.5k" : "0")));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int doc = 0; doc < 100; doc++) {
+ index("test-idx", "doc", Integer.toString(doc), "foo", "bar" + doc);
+ }
+ refresh();
+ assertThat(c.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse snapshotResponse = c.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(true)
+ .setIndices("test-idx").get();
+ assertThat(snapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(snapshotResponse.getSnapshotInfo().successfulShards(), equalTo(snapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete index");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index");
+ RestoreSnapshotResponse restoreResponse = c.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ assertThat(c.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Sum the throttle counters across all data nodes, since any of them may have
+ // hosted the shard during snapshot or restore.
+ long snapshotPause = 0L;
+ long restorePause = 0L;
+ for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
+ snapshotPause += repositoriesService.repository("test-repo").snapshotThrottleTimeInNanos();
+ restorePause += repositoriesService.repository("test-repo").restoreThrottleTimeInNanos();
+ }
+
+ // Throttle time must be strictly positive iff the matching limit was set.
+ assertThat(snapshotPause, snapshotThrottled ? greaterThan(0L) : equalTo(0L));
+ assertThat(restorePause, restoreThrottled ? greaterThan(0L) : equalTo(0L));
+ }
+
+
+ @Test
+ public void snapshotStatusTest() throws Exception {
+ // Verifies the snapshot status APIs while a snapshot is in flight (stalled via the
+ // mock repository) and again after completion: _status with and without an explicit
+ // repository/snapshot, plus GetSnapshots with the "_current" pseudo-name.
+ Client client = client();
+ Path repositoryLocation = randomRepoPath();
+ logger.info("--> creating repository");
+ // Consistency fix: assertAcked(...) instead of a manual isAcknowledged() check.
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(
+ Settings.settingsBuilder()
+ .put("location", repositoryLocation)
+ .put("random", randomAsciiOfLength(10))
+ .put("wait_after_unblock", 200)));
+
+ // Create index on 2 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Pick one node and block it
+ String blockedNode = blockNodeWithIndex("test-idx");
+
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ logger.info("--> waiting for block to kick in");
+ waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
+
+ logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode);
+ SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet();
+ assertThat(response.getSnapshots().size(), equalTo(1));
+ SnapshotStatus snapshotStatus = response.getSnapshots().get(0);
+ assertThat(snapshotStatus.getState(), equalTo(SnapshotMetaData.State.STARTED));
+ // We blocked the node during data write operation, so at least one shard snapshot should be in STARTED stage
+ assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
+ for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
+ if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
+ assertThat(shardStatus.getNodeId(), notNullValue());
+ }
+ }
+
+ // Bug fix below: these log messages had no "{}" placeholder yet passed blockedNode,
+ // which the logger silently ignored; the stray arguments are removed.
+ logger.info("--> checking snapshot status for all currently running and snapshot with empty repository");
+ response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
+ assertThat(response.getSnapshots().size(), equalTo(1));
+ snapshotStatus = response.getSnapshots().get(0);
+ assertThat(snapshotStatus.getState(), equalTo(SnapshotMetaData.State.STARTED));
+ // We blocked the node during data write operation, so at least one shard snapshot should be in STARTED stage
+ assertThat(snapshotStatus.getShardsStats().getStartedShards(), greaterThan(0));
+ for (SnapshotIndexShardStatus shardStatus : snapshotStatus.getIndices().get("test-idx")) {
+ if (shardStatus.getStage() == SnapshotIndexShardStage.STARTED) {
+ assertThat(shardStatus.getNodeId(), notNullValue());
+ }
+ }
+
+ logger.info("--> checking that _current returns the currently running snapshot");
+ GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().execute().actionGet();
+ assertThat(getResponse.getSnapshots().size(), equalTo(1));
+ SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0);
+ assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS));
+
+ logger.info("--> unblocking blocked node");
+ unblockNode(blockedNode);
+
+ snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
+ logger.info("--> done");
+
+ // After completion the per-index shard stats must be consistent with SnapshotInfo.
+ logger.info("--> checking snapshot status again after snapshot is done");
+ response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").execute().actionGet();
+ snapshotStatus = response.getSnapshots().get(0);
+ assertThat(snapshotStatus.getIndices().size(), equalTo(1));
+ SnapshotIndexStatus indexStatus = snapshotStatus.getIndices().get("test-idx");
+ assertThat(indexStatus, notNullValue());
+ assertThat(indexStatus.getShardsStats().getInitializingShards(), equalTo(0));
+ assertThat(indexStatus.getShardsStats().getFailedShards(), equalTo(snapshotInfo.failedShards()));
+ assertThat(indexStatus.getShardsStats().getDoneShards(), equalTo(snapshotInfo.successfulShards()));
+ assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards()));
+
+ // Without an explicit snapshot, _status reports only in-flight snapshots -- none now.
+ logger.info("--> checking snapshot status after it is done with empty repository");
+ response = client.admin().cluster().prepareSnapshotStatus().execute().actionGet();
+ assertThat(response.getSnapshots().size(), equalTo(0));
+
+ logger.info("--> checking that _current no longer returns the snapshot");
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").execute().actionGet().getSnapshots().isEmpty(), equalTo(true));
+
+ // Asking for the status of a snapshot that never existed must fail explicitly.
+ try {
+ client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").execute().actionGet();
+ fail();
+ } catch (SnapshotMissingException ex) {
+ // Expected
+ }
+ }
+
+
+ @Test
+ public void snapshotRelocatingPrimary() throws Exception {
+ // Starts a snapshot while primary shards are actively relocating (relocation is
+ // slowed via store throttling) and verifies the snapshot still completes with
+ // SUCCESS and no shard failures.
+ Client client = client();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ // Create index on 1 nodes and make sure each node has a primary by setting no replicas
+ assertAcked(prepareCreate("test-idx", 1, Settings.builder().put("number_of_replicas", 0)));
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx").get().getCount(), equalTo(100L));
+
+ // Update settings to make sure that relocation is slow so we can start snapshot before relocation is finished
+ assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all")
+ .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100, ByteSizeUnit.BYTES)
+ ));
+
+ // Widening allocation to all data nodes triggers relocation off the single node.
+ logger.info("--> start relocations");
+ allowNodes("test-idx", internalCluster().numDataNodes());
+
+ logger.info("--> wait for relocations to start");
+
+ waitForRelocationsToStart("test-idx", TimeValue.timeValueMillis(300));
+
+ // The snapshot is deliberately started while shards are mid-relocation.
+ logger.info("--> snapshot");
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+
+ // Update settings to back to normal
+ assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(Settings.builder()
+ .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node")
+ ));
+
+ logger.info("--> wait for snapshot to complete");
+ SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
+ assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
+ assertThat(snapshotInfo.shardFailures().size(), equalTo(0));
+ logger.info("--> done");
+ }
+
+ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException {
+ // Verifies incremental snapshotting on a single-shard index: the first snapshot
+ // copies segment files, an unchanged second snapshot copies none, and a third
+ // snapshot after a delete copies only the files changed by that delete.
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ // only one shard
+ assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)));
+ ensureGreen();
+ logger.info("--> indexing");
+
+ final int numdocs = randomIntBetween(10, 100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
+ }
+ indexRandom(true, builders);
+ flushAndRefresh();
+ // Optimize down to one segment so the per-snapshot file counts below are stable.
+ assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setMaxNumSegments(1).get());
+
+ // First snapshot: must actually copy data files (> 1 processed file per shard).
+ CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+ assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
+ }
+ }
+
+ // Second snapshot with no changes: fully incremental, zero files processed.
+ CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+ assertThat(status.getStats().getProcessedFiles(), equalTo(0));
+ }
+ }
+
+ // Third snapshot after deleting one doc: only the files touched by the delete
+ // should be re-copied.
+ client().prepareDelete("test", "doc", "1").get();
+ CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+ assertThat(status.getStats().getProcessedFiles(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file
+ }
+ }
+ }
+
+ @Test
+ public void changeSettingsOnRestoreTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ logger.info("--> create test index with synonyms search analyzer");
+
+ Settings.Builder indexSettings = Settings.builder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
+ .put(INDEX_REFRESH_INTERVAL, "10s")
+ .put("index.analysis.analyzer.my_analyzer.type", "custom")
+ .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.my_analyzer.filter", "lowercase", "my_synonym")
+ .put("index.analysis.filter.my_synonym.type", "synonym")
+ .put("index.analysis.filter.my_synonym.synonyms", "foo => bar");
+
+ assertAcked(prepareCreate("test-idx", 2, indexSettings));
+
+ int numberOfShards = getNumShards("test-idx").numPrimaries;
+ assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("type1").setSource("field1", "type=string,analyzer=standard,search_analyzer=my_analyzer"));
+ final int numdocs = randomIntBetween(10, 100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
+ }
+ indexRandom(true, builders);
+ flushAndRefresh();
+
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), numdocs);
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs);
+
+ logger.info("--> snapshot it");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> delete the index and recreate it while changing refresh interval and analyzer");
+ cluster().wipeIndices("test-idx");
+
+ Settings newIndexSettings = Settings.builder()
+ .put("refresh_interval", "5s")
+ .put("index.analysis.analyzer.my_analyzer.type", "standard")
+ .build();
+
+ Settings newIncorrectIndexSettings = Settings.builder()
+ .put(newIndexSettings)
+ .put(SETTING_NUMBER_OF_SHARDS, numberOfShards + 100)
+ .build();
+
+ logger.info("--> try restoring while changing the number of shards - should fail");
+ assertThrows(client.admin().cluster()
+ .prepareRestoreSnapshot("test-repo", "test-snap")
+ .setIgnoreIndexSettings("index.analysis.*")
+ .setIndexSettings(newIncorrectIndexSettings)
+ .setWaitForCompletion(true), SnapshotRestoreException.class);
+
+ logger.info("--> try restoring while changing the number of replicas to a negative number - should fail");
+ Settings newIncorrectReplicasIndexSettings = Settings.builder()
+ .put(newIndexSettings)
+ .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1))
+ .build();
+ assertThrows(client.admin().cluster()
+ .prepareRestoreSnapshot("test-repo", "test-snap")
+ .setIgnoreIndexSettings("index.analysis.*")
+ .setIndexSettings(newIncorrectReplicasIndexSettings)
+ .setWaitForCompletion(true), IllegalArgumentException.class);
+
+ logger.info("--> restore index with correct settings from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster()
+ .prepareRestoreSnapshot("test-repo", "test-snap")
+ .setIgnoreIndexSettings("index.analysis.*")
+ .setIndexSettings(newIndexSettings)
+ .setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> assert that correct settings are restored");
+ GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
+ assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL), equalTo("5s"));
+ // Make sure that number of shards didn't change
+ assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards));
+ assertThat(getSettingsResponse.getSetting("test-idx", "index.analysis.analyzer.my_analyzer.type"), equalTo("standard"));
+ assertThat(getSettingsResponse.getSetting("test-idx", "index.analysis.filter.my_synonym.type"), nullValue());
+
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), 0);
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs);
+
+ logger.info("--> delete the index and recreate it while deleting all index settings");
+ cluster().wipeIndices("test-idx");
+
+ logger.info("--> restore index with correct settings from the snapshot");
+ restoreSnapshotResponse = client.admin().cluster()
+ .prepareRestoreSnapshot("test-repo", "test-snap")
+ .setIgnoreIndexSettings("*") // delete everything we can delete
+ .setIndexSettings(newIndexSettings)
+ .setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ logger.info("--> assert that correct settings are restored and index is still functional");
+ getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
+ assertThat(getSettingsResponse.getSetting("test-idx", INDEX_REFRESH_INTERVAL), equalTo("5s"));
+ // Make sure that number of shards didn't change
+ assertThat(getSettingsResponse.getSetting("test-idx", SETTING_NUMBER_OF_SHARDS), equalTo("" + numberOfShards));
+
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "foo")).get(), 0);
+ assertHitCount(client.prepareCount("test-idx").setQuery(matchQuery("field1", "bar")).get(), numdocs);
+
+ }
+
+ /**
+  * Deletes indices while a snapshot of them is still in flight and verifies the
+  * outcome: when partial snapshots are allowed the snapshot completes as PARTIAL
+  * with some failed shards; otherwise the whole snapshot ends in state FAILED.
+  */
+ @Test
+ public void deleteIndexDuringSnapshotTest() throws Exception {
+ Client client = client();
+
+ boolean allowPartial = randomBoolean();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+ // the mock repository blocks during snapshot initialization, so the
+ // snapshot stalls until unblockNode() is called below
+ .put("block_on_init", true)
+ ));
+
+ createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ for (int i = 0; i < 100; i++) {
+ index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+ index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+ index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+ }
+ refresh();
+ assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
+ assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
+
+ logger.info("--> snapshot allow partial {}", allowPartial);
+ // start the snapshot asynchronously; it will block in the repository until unblocked
+ ListenableActionFuture<CreateSnapshotResponse> future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+ .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute();
+ logger.info("--> wait for block to kick in");
+ waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1));
+ logger.info("--> delete some indices while snapshot is running");
+ client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get();
+ logger.info("--> unblock running master node");
+ unblockNode(internalCluster().getMasterName());
+ logger.info("--> waiting for snapshot to finish");
+ CreateSnapshotResponse createSnapshotResponse = future.get();
+
+ if (allowPartial) {
+ // two of three indices were deleted mid-snapshot, so some shards must have failed
+ logger.info("Deleted index during snapshot, but allow partial");
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.PARTIAL)));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(createSnapshotResponse.getSnapshotInfo().totalShards()));
+ } else {
+ logger.info("Deleted index during snapshot and doesn't allow partial");
+ assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.FAILED)));
+ }
+ }
+
+
+ /**
+  * Emulates an "orphan" snapshot — a snapshot entry left in the cluster state
+  * that references shards on an unknown node in state ABORTED — and verifies
+  * that such a snapshot can still be deleted successfully.
+  */
+ @Test
+ public void deleteOrphanSnapshotTest() throws Exception {
+ Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType(MockRepositoryModule.class.getCanonicalName()).setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+ ));
+
+ createIndex("test-idx");
+ ensureGreen();
+
+ ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName());
+
+ // released once the forged cluster state has been fully processed
+ final CountDownLatch countDownLatch = new CountDownLatch(1);
+
+ logger.info("--> snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> emulate an orphan snapshot");
+
+ clusterService.submitStateUpdateTask("orphan snapshot test", new ProcessedClusterStateUpdateTask() {
+
+ @Override
+ public ClusterState execute(ClusterState currentState) {
+ // Simulate orphan snapshot: shard statuses point at a node that does not exist
+ ImmutableMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableMap.builder();
+ shards.put(new ShardId("test-idx", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ shards.put(new ShardId("test-idx", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ shards.put(new ShardId("test-idx", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ ImmutableList.Builder<Entry> entries = ImmutableList.builder();
+ entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, ImmutableList.of("test-idx"), System.currentTimeMillis(), shards.build()));
+ MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
+ mdBuilder.putCustom(SnapshotMetaData.TYPE, new SnapshotMetaData(entries.build()));
+ return ClusterState.builder(currentState).metaData(mdBuilder).build();
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ fail();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) {
+ countDownLatch.countDown();
+ }
+ });
+
+ // wait until the orphan snapshot entry is actually in the cluster state
+ countDownLatch.await();
+ logger.info("--> try deleting the orphan snapshot");
+
+ assertAcked(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get("10s"));
+
+ }
+
+ /**
+  * Polls until the given index exists or the timeout elapses.
+  *
+  * @param index   name of the index to wait for
+  * @param timeout maximum time to keep polling
+  * @return true if the index existed within the timeout, false otherwise
+  */
+ private boolean waitForIndex(final String index, TimeValue timeout) throws InterruptedException {
+ return awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ return client().admin().indices().prepareExists(index).execute().actionGet().isExists();
+ }
+ }, timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ /**
+  * Polls until cluster health reports at least one relocating shard for the
+  * given index, or the timeout elapses.
+  *
+  * @param index   name of the index to watch
+  * @param timeout maximum time to keep polling
+  * @return true if relocations started within the timeout, false otherwise
+  */
+ private boolean waitForRelocationsToStart(final String index, TimeValue timeout) throws InterruptedException {
+ return awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ return client().admin().cluster().prepareHealth(index).execute().actionGet().getRelocatingShards() > 0;
+ }
+ }, timeout.millis(), TimeUnit.MILLISECONDS);
+ }
+
+ /**
+  * Verifies that per-shard snapshot (and restore) status updates are batched
+  * into a single cluster-state update task: a blocking listener holds back
+  * cluster-state processing until one pending "update snapshot state" task per
+  * shard has accumulated, then after unblocking asserts the listener observed
+  * exactly one state-update invocation.
+  */
+ @Test
+ @TestLogging("cluster:DEBUG")
+ public void batchingShardUpdateTaskTest() throws Exception {
+
+ final Client client = client();
+
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ assertAcked(prepareCreate("test-idx", 0, settingsBuilder().put("number_of_shards", between(1, 20))
+ .put("number_of_replicas", 0)));
+ ensureGreen();
+
+ logger.info("--> indexing some data");
+ final int numdocs = randomIntBetween(10, 100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
+ }
+ indexRandom(true, builders);
+ flushAndRefresh();
+
+ final int numberOfShards = getNumShards("test-idx").numPrimaries;
+ logger.info("number of shards: {}", numberOfShards);
+
+ final ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName());
+ // blocks cluster-state processing for "update_snapshot" tasks so that the
+ // per-shard status updates pile up in the pending queue
+ BlockingClusterStateListener snapshotListener = new BlockingClusterStateListener(clusterService, "update_snapshot [", "update snapshot state", Priority.HIGH);
+ try {
+ clusterService.addFirst(snapshotListener);
+ logger.info("--> snapshot");
+ ListenableActionFuture<CreateSnapshotResponse> snapshotFuture = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").execute();
+
+ // Await until shard updates are in pending state.
+ assertBusyPendingTasks("update snapshot state", numberOfShards);
+ snapshotListener.unblock();
+
+ // Check that the snapshot was successful
+ CreateSnapshotResponse createSnapshotResponse = snapshotFuture.actionGet();
+ assertEquals(SnapshotState.SUCCESS, createSnapshotResponse.getSnapshotInfo().state());
+ assertEquals(numberOfShards, createSnapshotResponse.getSnapshotInfo().totalShards());
+ assertEquals(numberOfShards, createSnapshotResponse.getSnapshotInfo().successfulShards());
+
+ } finally {
+ clusterService.remove(snapshotListener);
+ }
+
+ // Check that we didn't timeout
+ assertFalse(snapshotListener.timedOut());
+ // Check that cluster state update task was called only once
+ assertEquals(1, snapshotListener.count());
+
+ logger.info("--> close indices");
+ client.admin().indices().prepareClose("test-idx").get();
+
+ // same batching check for the restore path
+ BlockingClusterStateListener restoreListener = new BlockingClusterStateListener(clusterService, "restore_snapshot[", "update snapshot state", Priority.HIGH);
+
+ try {
+ clusterService.addFirst(restoreListener);
+ logger.info("--> restore snapshot");
+ ListenableActionFuture<RestoreSnapshotResponse> futureRestore = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute();
+
+ // Await until shard updates are in pending state.
+ assertBusyPendingTasks("update snapshot state", numberOfShards);
+ restoreListener.unblock();
+
+ RestoreSnapshotResponse restoreSnapshotResponse = futureRestore.actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(numberOfShards));
+
+ } finally {
+ clusterService.remove(restoreListener);
+ }
+
+ // Check that we didn't timeout
+ assertFalse(restoreListener.timedOut());
+ // Check that cluster state update task was called only once
+ assertEquals(1, restoreListener.count());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java
new file mode 100644
index 0000000000..69cf99923f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityTest.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus;
+import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThan;
+
+public class SnapshotBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ /**
+  * Creates indices on a mixed-version cluster, snapshots a subset before any
+  * node is upgraded, upgrades the cluster one node at a time, and verifies the
+  * pre-upgrade snapshot restores correctly afterwards — including restoring a
+  * second (post-upgrade) snapshot into an index that was deleted.
+  */
+ @Test
+ public void testSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException {
+ logger.info("--> creating repository");
+ assertAcked(client().admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath().toAbsolutePath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+ String[] indicesBefore = new String[randomIntBetween(2,5)];
+ String[] indicesAfter = new String[randomIntBetween(2,5)];
+ for (int i = 0; i < indicesBefore.length; i++) {
+ indicesBefore[i] = "index_before_" + i;
+ createIndex(indicesBefore[i]);
+ }
+ for (int i = 0; i < indicesAfter.length; i++) {
+ indicesAfter[i] = "index_after_" + i;
+ createIndex(indicesAfter[i]);
+ }
+ String[] indices = new String[indicesBefore.length + indicesAfter.length];
+ System.arraycopy(indicesBefore, 0, indices, 0, indicesBefore.length);
+ System.arraycopy(indicesAfter, 0, indices, indicesBefore.length, indicesAfter.length);
+ ensureYellow();
+ logger.info("--> indexing some data");
+ IndexRequestBuilder[] buildersBefore = new IndexRequestBuilder[randomIntBetween(10, 200)];
+ for (int i = 0; i < buildersBefore.length; i++) {
+ buildersBefore[i] = client().prepareIndex(RandomPicks.randomFrom(getRandom(), indicesBefore), "foo", Integer.toString(i)).setSource("{ \"foo\" : \"bar\" } ");
+ }
+ IndexRequestBuilder[] buildersAfter = new IndexRequestBuilder[randomIntBetween(10, 200)];
+ for (int i = 0; i < buildersAfter.length; i++) {
+ // NOTE(review): indexes into indicesBefore, not indicesAfter — the "after"
+ // indices stay empty; confirm this is intentional
+ buildersAfter[i] = client().prepareIndex(RandomPicks.randomFrom(getRandom(), indicesBefore), "bar", Integer.toString(i)).setSource("{ \"foo\" : \"bar\" } ");
+ }
+ indexRandom(true, buildersBefore);
+ indexRandom(true, buildersAfter);
+ assertThat(client().prepareCount(indices).get().getCount(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
+ // remember per-index doc counts so they can be re-checked after restore
+ long[] counts = new long[indices.length];
+ for (int i = 0; i < indices.length; i++) {
+ counts[i] = client().prepareCount(indices[i]).get().getCount();
+ }
+
+ logger.info("--> snapshot subset of indices before upgrade");
+ CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("index_before_*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+ logger.info("--> delete some data from indices that were already snapshotted");
+ int howMany = randomIntBetween(1, buildersBefore.length);
+
+ for (int i = 0; i < howMany; i++) {
+ IndexRequestBuilder indexRequestBuilder = RandomPicks.randomFrom(getRandom(), buildersBefore);
+ IndexRequest request = indexRequestBuilder.request();
+ client().prepareDelete(request.index(), request.type(), request.id()).get();
+ }
+ refresh();
+ final long numDocs = client().prepareCount(indices).get().getCount();
+ assertThat(client().prepareCount(indices).get().getCount(), lessThan((long) (buildersBefore.length + buildersAfter.length)));
+
+
+ // disable allocation during the rolling upgrade, then upgrade one node at a
+ // time, verifying the doc count stays stable after each step
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes(indices);
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ } while (upgraded);
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+
+ logger.info("--> close indices");
+ client().admin().indices().prepareClose("index_before_*").get();
+
+ logger.info("--> verify repository");
+ client().admin().cluster().prepareVerifyRepository("test-repo").get();
+
+ logger.info("--> restore all indices from the snapshot");
+ RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+ ensureYellow();
+ // restore undoes the deletions above, so counts are back to the original totals
+ assertThat(client().prepareCount(indices).get().getCount(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
+ for (int i = 0; i < indices.length; i++) {
+ assertThat(counts[i], equalTo(client().prepareCount(indices[i]).get().getCount()));
+ }
+
+ logger.info("--> snapshot subset of indices after upgrade");
+ createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setIndices("index_*").get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ // Test restore after index deletion
+ logger.info("--> delete indices");
+ String index = RandomPicks.randomFrom(getRandom(), indices);
+ cluster().wipeIndices(index);
+ logger.info("--> restore one index after deletion");
+ restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setIndices(index).execute().actionGet();
+ assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+ ensureYellow();
+ assertThat(client().prepareCount(indices).get().getCount(), equalTo((long) (buildersBefore.length + buildersAfter.length)));
+ for (int i = 0; i < indices.length; i++) {
+ assertThat(counts[i], equalTo(client().prepareCount(indices[i]).get().getCount()));
+ }
+ }
+
+ /**
+  * Verifies incremental snapshotting of a single-shard index across repeated
+  * snapshots (optionally with a rolling upgrade in between): the first snapshot
+  * processes more than one file per shard, the second — with no data changes —
+  * processes only one file (segments_N), and the third — after one document
+  * deletion — processes two (segments_N plus the deletes file).
+  */
+ public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException, IOException {
+ Client client = client();
+ final Path tempDir = randomRepoPath().toAbsolutePath();
+ logger.info("--> creating repository");
+ assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", tempDir)
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ // only one shard
+ assertAcked(prepareCreate("test").setSettings(Settings.builder()
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ ));
+ ensureYellow();
+ logger.info("--> indexing");
+
+ final int numDocs = randomIntBetween(10, 100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < builders.length; i++) {
+ builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
+ }
+ indexRandom(true, builders);
+ flushAndRefresh();
+ // force-merge to a single segment so the per-snapshot file counts below are deterministic
+ assertNoFailures(client().admin().indices().prepareOptimize("test").setFlush(true).setMaxNumSegments(1).get());
+
+ CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ // first snapshot copies the actual segment files — more than one file per shard
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+ assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
+ }
+ }
+ if (frequently()) {
+ logger.info("--> upgrade");
+ // disable allocation, then upgrade node by node while doc counts stay stable
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes("test");
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ } while (upgraded);
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ }
+ if (cluster().numDataNodes() > 1 && randomBoolean()) { // only bump the replicas if we have enough nodes
+ logger.info("--> move from 0 to 1 replica");
+ client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get();
+ }
+ logger.debug("---> repo exists: " + Files.exists(tempDir.resolve("indices/test/0")) + " files: " + Arrays.toString(FileSystemUtils.files(tempDir.resolve("indices/test/0")))); // it's only one shard!
+ CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-1").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+
+ assertThat(status.getStats().getProcessedFiles(), equalTo(1)); // we flush before the snapshot such that we have to process the segments_N files
+ }
+ }
+
+ client().prepareDelete("test", "doc", "1").get();
+ CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2").setWaitForCompletion(true).setIndices("test").get();
+ assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));
+ assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+ {
+ SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-2").get().getSnapshots().get(0);
+ List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
+ for (SnapshotIndexShardStatus status : shards) {
+ assertThat(status.getStats().getProcessedFiles(), equalTo(2)); // we flush before the snapshot such that we have to process the segments_N files plus the .del file
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
new file mode 100644
index 0000000000..71957c5cc2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.List;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+
+/**
+ * Unit tests for {@code SnapshotUtils.filterIndices}: resolving index-name
+ * filter expressions (wildcards, {@code +}/{@code -} inclusion and exclusion
+ * markers) against a fixed list of available indices.
+ */
+public class SnapshotUtilsTests extends ElasticsearchTestCase {
+ @Test
+ public void testIndexNameFiltering() {
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"*"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"ba*", "-bar", "-baz"}, new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-bar"}, new String[]{"foo", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-ba*"}, new String[]{"foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+ba*"}, new String[]{"bar", "baz"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+bar", "+foo"}, new String[]{"bar", "foo"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"zzz", "bar"}, IndicesOptions.lenientExpandOpen(), new String[]{"bar"});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{""}, IndicesOptions.lenientExpandOpen(), new String[]{});
+ assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "", "ba*"}, IndicesOptions.lenientExpandOpen(), new String[]{"foo", "bar", "baz"});
+ }
+
+ // Convenience overload defaulting to lenient indices options.
+ private void assertIndexNameFiltering(String[] indices, String[] filter, String[] expected) {
+ assertIndexNameFiltering(indices, filter, IndicesOptions.lenientExpandOpen(), expected);
+ }
+
+ // Runs the filter and asserts the result matches 'expected' ignoring order.
+ private void assertIndexNameFiltering(String[] indices, String[] filter, IndicesOptions indicesOptions, String[] expected) {
+ List<String> indicesList = ImmutableList.copyOf(indices);
+ List<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions);
+ assertThat(actual, containsInAnyOrder(expected));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java
new file mode 100644
index 0000000000..6ab2cdf782
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Map;
+
+/**
+ *
+ */
+public class BlobContainerWrapper implements BlobContainer {
+ private BlobContainer delegate;
+
+ public BlobContainerWrapper(BlobContainer delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public BlobPath path() {
+ return delegate.path();
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return delegate.blobExists(blobName);
+ }
+
+ @Override
+ public InputStream openInput(String name) throws IOException {
+ return delegate.openInput(name);
+ }
+
+ @Override
+ public OutputStream createOutput(String blobName) throws IOException {
+ return delegate.createOutput(blobName);
+ }
+
+ @Override
+ public void deleteBlob(String blobName) throws IOException {
+ delegate.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ delegate.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public Map<String, BlobMetaData> listBlobs() throws IOException {
+ return delegate.listBlobs();
+ }
+
+ @Override
+ public Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ return delegate.listBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void move(String sourceBlobName, String targetBlobName) throws IOException {
+ delegate.move(sourceBlobName, targetBlobName);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
new file mode 100644
index 0000000000..086aac209b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+
+import java.io.IOException;
+
+/**
+ *
+ */
+public class BlobStoreWrapper implements BlobStore {
+
+ private BlobStore delegate;
+
+ public BlobStoreWrapper(BlobStore delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public BlobContainer blobContainer(BlobPath path) {
+ return delegate.blobContainer(path);
+ }
+
+ @Override
+ public void delete(BlobPath path) throws IOException {
+ delegate.delete(path);
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ }
+
+ protected BlobStore delegate() {
+ return delegate;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
new file mode 100644
index 0000000000..12e51475c9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -0,0 +1,321 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.fs.FsRepository;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.nio.file.Path;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ */
+public class MockRepository extends FsRepository {
+
+ private final AtomicLong failureCounter = new AtomicLong();
+
+ public long getFailureCount() {
+ return failureCounter.get();
+ }
+
+ private final double randomControlIOExceptionRate;
+
+ private final double randomDataFileIOExceptionRate;
+
+ private final long waitAfterUnblock;
+
+ private final MockBlobStore mockBlobStore;
+
+ private final String randomPrefix;
+
+ private volatile boolean blockOnInitialization;
+
+ private volatile boolean blockOnControlFiles;
+
+ private volatile boolean blockOnDataFiles;
+
+ private volatile boolean blocked = false;
+
+ @Inject
+ public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, ClusterService clusterService, Environment environment) throws IOException {
+ super(name, overrideSettings(repositorySettings, clusterService), indexShardRepository, environment);
+ randomControlIOExceptionRate = repositorySettings.settings().getAsDouble("random_control_io_exception_rate", 0.0);
+ randomDataFileIOExceptionRate = repositorySettings.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
+ blockOnControlFiles = repositorySettings.settings().getAsBoolean("block_on_control", false);
+ blockOnDataFiles = repositorySettings.settings().getAsBoolean("block_on_data", false);
+ blockOnInitialization = repositorySettings.settings().getAsBoolean("block_on_init", false);
+ randomPrefix = repositorySettings.settings().get("random", "default");
+ waitAfterUnblock = repositorySettings.settings().getAsLong("wait_after_unblock", 0L);
+ logger.info("starting mock repository with random prefix " + randomPrefix);
+ mockBlobStore = new MockBlobStore(super.blobStore());
+ }
+
+ @Override
+ public void initializeSnapshot(SnapshotId snapshotId, List<String> indices, MetaData metaData) {
+ if (blockOnInitialization) {
+ blockExecution();
+ }
+ super.initializeSnapshot(snapshotId, indices, metaData);
+ }
+
+ private static RepositorySettings overrideSettings(RepositorySettings repositorySettings, ClusterService clusterService) {
+ if (repositorySettings.settings().getAsBoolean("localize_location", false)) {
+ return new RepositorySettings(
+ repositorySettings.globalSettings(),
+ localizeLocation(repositorySettings.settings(), clusterService));
+ } else {
+ return repositorySettings;
+ }
+ }
+
+ private static Settings localizeLocation(Settings settings, ClusterService clusterService) {
+ Path location = PathUtils.get(settings.get("location"));
+ location = location.resolve(clusterService.localNode().getId());
+ return settingsBuilder().put(settings).put("location", location.toAbsolutePath()).build();
+ }
+
+ private void addFailure() {
+ failureCounter.incrementAndGet();
+ }
+
+ @Override
+ protected void doStop() {
+ unblock();
+ super.doStop();
+ }
+
+ @Override
+ protected BlobStore blobStore() {
+ return mockBlobStore;
+ }
+
+ public void unblock() {
+ unblockExecution();
+ }
+
+ public void blockOnDataFiles(boolean blocked) {
+ blockOnDataFiles = blocked;
+ }
+
+ public void blockOnControlFiles(boolean blocked) {
+ blockOnControlFiles = blocked;
+ }
+
+ public synchronized void unblockExecution() {
+ if (blocked) {
+ blocked = false;
+ // Clear the blocking flags so we won't try to block again
+ blockOnDataFiles = false;
+ blockOnControlFiles = false;
+ blockOnInitialization = false;
+ this.notifyAll();
+ }
+ }
+
+ public boolean blocked() {
+ return blocked;
+ }
+
+ private synchronized boolean blockExecution() {
+ logger.debug("Blocking execution");
+ boolean wasBlocked = false;
+ try {
+ while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization) {
+ blocked = true;
+ this.wait();
+ wasBlocked = true;
+ }
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ }
+ logger.debug("Unblocking execution");
+ return wasBlocked;
+ }
+
+ public class MockBlobStore extends BlobStoreWrapper {
+ ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<>();
+
+ private long incrementAndGet(String path) {
+ AtomicLong value = accessCounts.get(path);
+ if (value == null) {
+ value = accessCounts.putIfAbsent(path, new AtomicLong(1));
+ }
+ if (value != null) {
+ return value.incrementAndGet();
+ }
+ return 1;
+ }
+
+ public MockBlobStore(BlobStore delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public BlobContainer blobContainer(BlobPath path) {
+ return new MockBlobContainer(super.blobContainer(path));
+ }
+
+ private class MockBlobContainer extends BlobContainerWrapper {
+ private MessageDigest digest;
+
+ private boolean shouldFail(String blobName, double probability) {
+ if (probability > 0.0) {
+ String path = path().add(blobName).buildAsString("/") + "/" + randomPrefix;
+ path += "/" + incrementAndGet(path);
+ logger.info("checking [{}] [{}]", path, Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability);
+ return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability;
+ } else {
+ return false;
+ }
+ }
+
+ private int hashCode(String path) {
+ try {
+ digest = MessageDigest.getInstance("MD5");
+ byte[] bytes = digest.digest(path.getBytes("UTF-8"));
+ int i = 0;
+ return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
+ | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
+ } catch (NoSuchAlgorithmException | UnsupportedEncodingException ex) {
+ throw new ElasticsearchException("cannot calculate hashcode", ex);
+ }
+ }
+
+ private void maybeIOExceptionOrBlock(String blobName) throws IOException {
+ if (blobName.startsWith("__")) {
+ if (shouldFail(blobName, randomDataFileIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnDataFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay operation after unblocking
+ // So, we can start node shutdown while this operation is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ // ignore interruption — proceed with the operation after the delay is cut short
+ }
+ }
+ }
+ } else {
+ if (shouldFail(blobName, randomControlIOExceptionRate)) {
+ logger.info("throwing random IOException for file [{}] at path [{}]", blobName, path());
+ addFailure();
+ throw new IOException("Random IOException");
+ } else if (blockOnControlFiles) {
+ logger.info("blocking I/O operation for file [{}] at path [{}]", blobName, path());
+ if (blockExecution() && waitAfterUnblock > 0) {
+ try {
+ // Delay operation after unblocking
+ // So, we can start node shutdown while this operation is still running.
+ Thread.sleep(waitAfterUnblock);
+ } catch (InterruptedException ex) {
+ // ignore interruption — proceed with the operation after the delay is cut short
+ }
+ }
+ }
+ }
+ }
+
+
+ public MockBlobContainer(BlobContainer delegate) {
+ super(delegate);
+ }
+
+ @Override
+ public boolean blobExists(String blobName) {
+ return super.blobExists(blobName);
+ }
+
+ @Override
+ public InputStream openInput(String name) throws IOException {
+ maybeIOExceptionOrBlock(name);
+ return super.openInput(name);
+ }
+
+ @Override
+ public void deleteBlob(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ super.deleteBlob(blobName);
+ }
+
+ @Override
+ public void deleteBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ super.deleteBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public Map<String, BlobMetaData> listBlobs() throws IOException {
+ maybeIOExceptionOrBlock("");
+ return super.listBlobs();
+ }
+
+ @Override
+ public Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+ maybeIOExceptionOrBlock(blobNamePrefix);
+ return super.listBlobsByPrefix(blobNamePrefix);
+ }
+
+ @Override
+ public void move(String sourceBlob, String targetBlob) throws IOException {
+ maybeIOExceptionOrBlock(targetBlob);
+ super.move(sourceBlob, targetBlob);
+ }
+
+ @Override
+ public OutputStream createOutput(String blobName) throws IOException {
+ maybeIOExceptionOrBlock(blobName);
+ return super.createOutput(blobName);
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
new file mode 100644
index 0000000000..0da50f15d6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryModule.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.repositories.Repository;
+
+/**
+ */
+public class MockRepositoryModule extends AbstractModule {
+
+ public MockRepositoryModule() {
+ super();
+ }
+
+ @Override
+ protected void configure() {
+ bind(Repository.class).to(MockRepository.class).asEagerSingleton();
+ bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
+ }
+
+}
+
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryPlugin.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryPlugin.java
new file mode 100644
index 0000000000..a09c8601f7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepositoryPlugin.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots.mockstore;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.SettingsFilter;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.repositories.RepositoriesModule;
+
+import java.util.Collection;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+public class MockRepositoryPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "mock-repository";
+ }
+
+ @Override
+ public String description() {
+ return "Mock Repository";
+ }
+
+ public void onModule(RepositoriesModule repositoriesModule) {
+ repositoriesModule.registerRepository("mock", MockRepositoryModule.class);
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ Collection<Class<? extends Module>> modules = newArrayList();
+ modules.add(SettingsFilteringModule.class);
+ return modules;
+ }
+
+ public static class SettingsFilteringModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(SettingsFilteringService.class).asEagerSingleton();
+ }
+ }
+
+ public static class SettingsFilteringService {
+ @Inject
+ public SettingsFilteringService(SettingsFilter settingsFilter) {
+ settingsFilter.addFilter("secret.mock.password");
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java b/core/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
new file mode 100644
index 0000000000..fc56f128ed
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/client/ClientFailover.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.client;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ */
+public class ClientFailover {
+
+ public static void main(String[] args) throws Exception {
+ Node[] nodes = new Node[3];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ // TODO: what is this? a public static void main test?!?!
+
+ final TransportClient client = TransportClient.builder().build()
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9300))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9301))
+ .addTransportAddress(new InetSocketTransportAddress("localhost", 9302));
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong indexed = new AtomicLong();
+ final CountDownLatch latch = new CountDownLatch(1);
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ try {
+ client.prepareIndex("test", "type").setSource("field", "value").execute().actionGet();
+ indexed.incrementAndGet();
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ latch.countDown();
+ }
+ });
+ indexer.start();
+
+ for (int i = 0; i < 100; i++) {
+ int index = i % nodes.length;
+ nodes[index].close();
+
+ ClusterHealthResponse health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+
+ nodes[index] = NodeBuilder.nodeBuilder().node();
+
+ health = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (health.isTimedOut()) {
+ System.err.println("timed out on health");
+ }
+ }
+
+ latch.await();
+
+ // TODO add verification to the number of indexed docs
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
new file mode 100644
index 0000000000..59fca1b672
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/fullrestart/FullRestartStressTest.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.fullrestart;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class FullRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfNodes = 4;
+
+ private int numberOfIndices = 5;
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private int bulkSize = 1000;
+ private int numberOfDocsPerRound = 50000;
+
+ private Settings settings = Settings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+
+ public FullRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public FullRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public FullRestartStressTest bulkSize(int bulkSize) {
+ this.bulkSize = bulkSize;
+ return this;
+ }
+
+ public FullRestartStressTest numberOfDocsPerRound(int numberOfDocsPerRound) {
+ this.numberOfDocsPerRound = numberOfDocsPerRound;
+ return this;
+ }
+
+ public FullRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public FullRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public void run() throws Exception {
+ long numberOfRounds = 0;
+ Random random = new Random(0);
+ long testStart = System.currentTimeMillis();
+ while (true) {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ Node client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ // verify that the indices are there
+ for (int i = 0; i < numberOfIndices; i++) {
+ try {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ } catch (Exception e) {
+ // might already exists, fine
+ }
+ }
+
+ logger.info("*** Waiting for GREEN status");
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("*** index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ // verify count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.debug("index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("!!! count does not match, index_count [{}], expected_count [{}]", count.getCount(), indexCounter.get());
+ throw new Exception("failed test, count does not match...");
+ }
+ }
+
+ // verify search
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ // do a search with norms field, so we don't rely on match all filtering cache
+ SearchResponse search = client.client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ logger.debug("index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ if (search.getHits().totalHits() != indexCounter.get()) {
+ logger.warn("!!! search does not match, index_count [{}], expected_count [{}]", search.getHits().totalHits(), indexCounter.get());
+ throw new Exception("failed test, count does not match...");
+ }
+ }
+
+ logger.info("*** ROUND {}", ++numberOfRounds);
+ // bulk index data
+ int numberOfBulks = numberOfDocsPerRound / bulkSize;
+ for (int b = 0; b < numberOfBulks; b++) {
+ BulkRequestBuilder bulk = client.client().prepareBulk();
+ for (int k = 0; k < bulkSize; k++) {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = ThreadLocalRandom.current().nextInt() % numberOfFields;
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = ThreadLocalRandom.current().nextInt() % textTokens;
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ bulk.add(Requests.indexRequest("test" + (Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfIndices)).type("type1").source(json));
+ indexCounter.incrementAndGet();
+ }
+ bulk.execute().actionGet();
+ }
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished, full_restart_rounds [{}]", numberOfRounds);
+ break;
+ }
+
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfNodes = 2;
+ Settings settings = Settings.settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("gateway.recover_after_nodes", numberOfNodes)
+ .put("index.number_of_shards", 1)
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ FullRestartStressTest test = new FullRestartStressTest()
+ .settings(settings)
+ .period(TimeValue.timeValueMinutes(20))
+ .numberOfNodes(numberOfNodes)
+ .numberOfIndices(1)
+ .textTokens(150)
+ .numberOfFields(10)
+ .bulkSize(1000)
+ .numberOfDocsPerRound(10000);
+
+ test.run();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java b/core/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
new file mode 100644
index 0000000000..315dab8703
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/gcbehavior/FilterCacheGcStress.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.gcbehavior;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
+import static org.elasticsearch.index.query.QueryBuilders.filteredQuery;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+public class FilterCacheGcStress {
+
+ public static void main(String[] args) {
+
+ Settings settings = Settings.EMPTY;
+
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+ final Client client = node.client();
+
+ client.admin().indices().prepareCreate("test").execute().actionGet();
+ client.admin().cluster().prepareHealth().setWaitForYellowStatus().execute().actionGet();
+
+ final AtomicBoolean stop = new AtomicBoolean();
+
+ Thread indexingThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareIndex("test", "type1").setSource("field", System.currentTimeMillis()).execute().actionGet();
+ }
+ }
+ };
+ indexingThread.start();
+
+ Thread searchThread = new Thread() {
+ @Override
+ public void run() {
+ while (!stop.get()) {
+ client.prepareSearch()
+ .setQuery(filteredQuery(matchAllQuery(), rangeQuery("field").from(System.currentTimeMillis() - 1000000)))
+ .execute().actionGet();
+ }
+ }
+ };
+
+ searchThread.start();
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
new file mode 100644
index 0000000000..33d4b6f866
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/get/GetStressTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class GetStressTest {
+
+ public static void main(String[] args) throws Exception {
+ Settings settings = Settings.settingsBuilder()
+ .put("index.number_of_shards", 2)
+ .put("index.number_of_replicas", 1)
+ .build();
+
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_THREADS = 50;
+ final TimeValue TEST_TIME = TimeValue.parseTimeValue("10m", null, "TEST_TIME");
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ final Node client = NodeBuilder.nodeBuilder()
+ .settings(settings)
+ .client(true)
+ .node();
+
+ client.client().admin().indices().prepareCreate("test").execute().actionGet();
+
+ final AtomicBoolean done = new AtomicBoolean();
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong counter = new AtomicLong();
+
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ ThreadLocalRandom random = ThreadLocalRandom.current();
+ while (!done.get()) {
+ String id = String.valueOf(idGenerator.incrementAndGet());
+ client.client().prepareIndex("test", "type1", id)
+ .setSource("field", random.nextInt(100))
+ .execute().actionGet();
+
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id)
+ //.setFields(Strings.EMPTY_ARRAY)
+ .execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("Failed to find " + id);
+ }
+
+ long count = counter.incrementAndGet();
+ if ((count % 10000) == 0) {
+ System.out.println("Executed " + count);
+ }
+ }
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ Thread.sleep(TEST_TIME.millis());
+
+ System.out.println("test done.");
+ done.set(true);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java b/core/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
new file mode 100644
index 0000000000..3118c22121
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/get/MGetStress1.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.get;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.get.MultiGetItemResponse;
+import org.elasticsearch.action.get.MultiGetResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ */
+public class MGetStress1 {
+
+ public static void main(String[] args) throws Exception {
+ final int NUMBER_OF_NODES = 2;
+ final int NUMBER_OF_DOCS = 50000;
+ final int MGET_BATCH = 1000;
+
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().node();
+ }
+
+ System.out.println("---> START Indexing initial data [" + NUMBER_OF_DOCS + "]");
+ final Client client = nodes[0].client();
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+ }
+ System.out.println("---> DONE Indexing initial data [" + NUMBER_OF_DOCS + "]");
+
+ final AtomicBoolean done = new AtomicBoolean();
+ // start indexer
+ Thread indexer = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ client.prepareIndex("test", "type", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
+ .setSource("field", "value").execute().actionGet();
+ }
+ }
+ });
+ indexer.start();
+ System.out.println("---> Starting indexer");
+
+ // start the mget one
+ Thread mget = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ while (!done.get()) {
+ Set<String> ids = Sets.newHashSet();
+ for (int i = 0; i < MGET_BATCH; i++) {
+ ids.add(Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)));
+ }
+ //System.out.println("---> mget for [" + ids.size() + "]");
+ MultiGetResponse response = client.prepareMultiGet().add("test", "type", ids).execute().actionGet();
+ int expected = ids.size();
+ int count = 0;
+ for (MultiGetItemResponse item : response) {
+ count++;
+ if (item.isFailed()) {
+ System.err.println("item failed... " + item.getFailure());
+ } else {
+ boolean removed = ids.remove(item.getId());
+ if (!removed) {
+ System.err.println("got id twice " + item.getId());
+ }
+ }
+ }
+ if (expected != count) {
+ System.err.println("Expected [" + expected + "], got back [" + count + "]");
+ }
+ }
+ }
+ });
+ mget.start();
+ System.out.println("---> Starting mget");
+
+ Thread.sleep(TimeValue.timeValueMinutes(10).millis());
+
+ done.set(true);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
new file mode 100644
index 0000000000..640a523ebd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/indexing/BulkIndexingStressTest.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ */
+public class BulkIndexingStressTest {
+
+ public static void main(String[] args) {
+ final int NUMBER_OF_NODES = 4;
+ final int NUMBER_OF_INDICES = 600;
+ final int BATCH = 300;
+
+ final Settings nodeSettings = Settings.settingsBuilder().put("index.number_of_shards", 2).build();
+
+// ESLogger logger = Loggers.getLogger("org.elasticsearch");
+// logger.setLevel("DEBUG");
+ Node[] nodes = new Node[NUMBER_OF_NODES];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(nodeSettings).node();
+ }
+
+ Client client = nodes.length == 1 ? nodes[0].client() : nodes[1].client();
+
+ while (true) {
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ for (int i = 0; i < BATCH; i++) {
+ bulkRequest.add(Requests.indexRequest("test" + ThreadLocalRandom.current().nextInt(NUMBER_OF_INDICES)).type("type").source("field", "value"));
+ }
+ BulkResponse bulkResponse = bulkRequest.execute().actionGet();
+ if (bulkResponse.hasFailures()) {
+ for (BulkItemResponse item : bulkResponse) {
+ if (item.isFailed()) {
+ System.out.println("failed response:" + item.getFailureMessage());
+ }
+ }
+
+ throw new RuntimeException("Failed responses");
+ }
+ ;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
new file mode 100644
index 0000000000..3fca8141b8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/indexing/ConcurrentIndexingVersioningStressTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.indexing;
+
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+
+/**
+ * Checks that index operation does not create duplicate documents.
+ */
+public class ConcurrentIndexingVersioningStressTest {
+
+ public static void main(String[] args) throws Exception {
+
+ Settings settings = Settings.EMPTY;
+
+ Node node1 = nodeBuilder().settings(settings).node();
+ Node node2 = nodeBuilder().settings(settings).node();
+ final Node client = nodeBuilder().settings(settings).client(true).node();
+
+ final int NUMBER_OF_DOCS = 10000;
+ final int NUMBER_OF_THREADS = 10;
+ final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
+ final long DELETE_EVERY = 10;
+
+ final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
+ Thread[] threads = new Thread[NUMBER_OF_THREADS];
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
+ if ((i % DELETE_EVERY) == 0) {
+ client.client().prepareDelete("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).execute().actionGet();
+ } else {
+ client.client().prepareIndex("test", "type1", Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS))).setSource("field1", "value1").execute().actionGet();
+ }
+ }
+ } finally {
+ latch.countDown();
+ }
+ }
+ };
+ }
+
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+ System.out.println("done indexing, verifying docs");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < NUMBER_OF_DOCS; i++) {
+ String id = Integer.toString(i);
+ for (int j = 0; j < 5; j++) {
+ SearchResponse response = client.client().prepareSearch().setQuery(QueryBuilders.termQuery("_id", id)).execute().actionGet();
+ if (response.getHits().totalHits() > 1) {
+ System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
+ }
+ }
+ GetResponse getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ long version = getResponse.getVersion();
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (!getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ if (version != getResponse.getVersion()) {
+ System.err.println("[" + i + "] FAIL, DIFFERENT VERSIONS: [" + version + "], [" + getResponse.getVersion() + "]");
+ break;
+ }
+ }
+ } else {
+ for (int j = 0; j < 5; j++) {
+ getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
+ if (getResponse.isExists()) {
+ System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
+ break;
+ }
+ }
+ }
+ }
+ System.out.println("done.");
+
+ client.close();
+ node1.close();
+ node2.close();
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java b/core/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
new file mode 100644
index 0000000000..3ea972dce4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/leaks/GenericStatsLeak.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.monitor.jvm.JvmService;
+import org.elasticsearch.monitor.network.NetworkService;
+import org.elasticsearch.monitor.os.OsService;
+import org.elasticsearch.monitor.process.ProcessService;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+public class GenericStatsLeak {
+
+ public static void main(String[] args) {
+ Node node = NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder()
+ .put("monitor.os.refresh_interval", 0)
+ .put("monitor.process.refresh_interval", 0)
+ .put("monitor.network.refresh_interval", 0)
+ ).node();
+
+ JvmService jvmService = node.injector().getInstance(JvmService.class);
+ OsService osService = node.injector().getInstance(OsService.class);
+ ProcessService processService = node.injector().getInstance(ProcessService.class);
+ NetworkService networkService = node.injector().getInstance(NetworkService.class);
+
+ while (true) {
+ jvmService.stats();
+ osService.stats();
+ processService.stats();
+ networkService.stats();
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java b/core/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
new file mode 100644
index 0000000000..e558b47bea
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/leaks/JvmStatsLeak.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.leaks;
+
+import org.elasticsearch.monitor.jvm.JvmStats;
+
+/**
+ * This test mainly comes to check the native memory leak with getLastGCInfo (which is now
+ * disabled by default).
+ */
+public class JvmStatsLeak {
+
+ public static void main(String[] args) {
+ while (true) {
+ JvmStats.jvmStats();
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
new file mode 100644
index 0000000000..591f5ce309
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesRemoteStressTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+/**
+ *
+ */
+public class ManyIndicesRemoteStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesRemoteStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfShards = 1;
+ int numberOfReplicas = 1;
+ int numberOfIndices = 1000;
+ int numberOfDocs = 1;
+
+ Client client;
+ Node node = null;
+ // TODO: what is this? a public static void main test?!?!?!
+ if (true) {
+ client = TransportClient.builder().settings(Settings.EMPTY).build().addTransportAddress(new InetSocketTransportAddress("localhost", 9300));
+ } else {
+ node = NodeBuilder.nodeBuilder().client(true).node();
+ client = node.client();
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ client.admin().indices().prepareCreate("index_" + i)
+ .setSettings(Settings.settingsBuilder().put("index.number_of_shards", numberOfShards).put("index.number_of_replicas", numberOfReplicas))
+ .execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ client.prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}]", i);
+ }
+
+ logger.info("closing node...");
+ if (node != null) {
+ node.close();
+ }
+ logger.info("node closed");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
new file mode 100644
index 0000000000..01476f177c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyIndicesStressTest.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class ManyIndicesStressTest {
+
+ private static final ESLogger logger = Loggers.getLogger(ManyIndicesStressTest.class);
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ int numberOfIndices = 100;
+ int numberOfDocs = 100;
+
+ Settings settings = Settings.settingsBuilder()
+ .put("index.shard.check_on_startup", false)
+ .put("index.number_of_shards", 1)
+ .build();
+ Node node = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ logger.info("START index [{}] ...", i);
+ node.client().admin().indices().prepareCreate("index_" + i).execute().actionGet();
+
+ for (int j = 0; j < numberOfDocs; j++) {
+ node.client().prepareIndex("index_" + i, "type").setSource("field1", "test", "field2", 2, "field3", new Date()).execute().actionGet();
+ }
+ logger.info("DONE index [{}] ...", i);
+ }
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+
+ logger.info("starting node...");
+ node = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ ClusterHealthResponse health = node.client().admin().cluster().prepareHealth().setTimeout("5m").setWaitForYellowStatus().execute().actionGet();
+ logger.info("health: " + health.getStatus());
+ logger.info("active shards: " + health.getActiveShards());
+ logger.info("active primary shards: " + health.getActivePrimaryShards());
+ if (health.isTimedOut()) {
+ logger.error("Timed out on health...");
+ }
+
+ ClusterState clusterState = node.client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (int i = 0; i < numberOfIndices; i++) {
+ if (clusterState.blocks().indices().containsKey("index_" + i)) {
+ logger.error("index [{}] has blocks: {}", i, clusterState.blocks().indices().get("index_" + i));
+ }
+ }
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ long count = node.client().prepareCount("index_" + i).setQuery(matchAllQuery()).execute().actionGet().getCount();
+ if (count == numberOfDocs) {
+ logger.info("VERIFIED [{}], count [{}]", i, count);
+ } else {
+ logger.error("FAILED [{}], expected [{}], got [{}]", i, numberOfDocs, count);
+ }
+ }
+
+ logger.info("closing node...");
+ node.close();
+ logger.info("node closed");
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
new file mode 100644
index 0000000000..ea4d20f914
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.manyindices;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.List;
+
+public class ManyNodesManyIndicesRecoveryStressTest {
+
+ public static void main(String[] args) throws Exception {
+ final int NUM_NODES = 40;
+ final int NUM_INDICES = 100;
+ final int NUM_DOCS = 2;
+ final int FLUSH_AFTER = 1;
+
+ final Settings nodeSettings = Settings.settingsBuilder()
+ .put("transport.netty.connections_per_node.low", 0)
+ .put("transport.netty.connections_per_node.med", 0)
+ .put("transport.netty.connections_per_node.high", 1)
+ .build();
+
+ final Settings indexSettings = Settings.settingsBuilder()
+ .put("index.number_of_shards", 1)
+ .build();
+
+ List<Node> nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ Client client = nodes.get(0).client();
+
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ System.out.println("--> Processing index [" + indexName + "]...");
+ client.admin().indices().prepareCreate(indexName).setSettings(indexSettings).execute().actionGet();
+
+ boolean flushed = false;
+ for (int doc = 0; doc < NUM_DOCS; doc++) {
+ if (!flushed && doc > FLUSH_AFTER) {
+ flushed = true;
+ client.admin().indices().prepareFlush(indexName).execute().actionGet();
+ }
+ client.prepareIndex(indexName, "type1", Integer.toString(doc)).setSource("field", "value" + doc).execute().actionGet();
+ }
+ System.out.println("--> DONE index [" + indexName + "]");
+ }
+
+ System.out.println("--> Initiating shutdown");
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("--> Waiting for all nodes to be closed...");
+ while (true) {
+ boolean allAreClosed = true;
+ for (Node node : nodes) {
+ if (!node.isClosed()) {
+ allAreClosed = false;
+ break;
+ }
+ }
+ if (allAreClosed) {
+ break;
+ }
+ Thread.sleep(100);
+ }
+ System.out.println("Waiting a bit for node lock to really be released?");
+ Thread.sleep(5000);
+ System.out.println("--> All nodes are closed, starting back...");
+
+ nodes = Lists.newArrayList();
+ for (int i = 0; i < NUM_NODES; i++) {
+ nodes.add(NodeBuilder.nodeBuilder().settings(Settings.settingsBuilder().put(nodeSettings).put("name", "node" + i)).node());
+ }
+ client = nodes.get(0).client();
+
+ System.out.println("--> Waiting for green status");
+ while (true) {
+ ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ System.err.println("--> cluster health timed out..., active shards [" + clusterHealth.getActiveShards() + "]");
+ } else {
+ break;
+ }
+ }
+
+ System.out.println("Verifying counts...");
+ for (int index = 0; index < NUM_INDICES; index++) {
+ String indexName = "index_" + index;
+ CountResponse count = client.prepareCount(indexName).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
+ if (count.getCount() != NUM_DOCS) {
+ System.err.println("Wrong count value, expected [" + NUM_DOCS + "], got [" + count.getCount() + "] for index [" + indexName + "]");
+ }
+ }
+
+ System.out.println("Test end");
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java b/core/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
new file mode 100644
index 0000000000..eec385241e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/refresh/RefreshStressTest1.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.refresh;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.io.IOException;
+import java.util.UUID;
+
+/**
+ */
+public class RefreshStressTest1 {
+
+ public static void main(String[] args) throws InterruptedException, IOException {
+ int numberOfShards = 5;
+ Node node = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ Settings.settingsBuilder()
+ .put("node.name", "node1")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Node node2 = NodeBuilder.nodeBuilder().local(true).loadConfigSettings(false).clusterName("testCluster").settings(
+ Settings.settingsBuilder()
+ .put("node.name", "node2")
+ .put("index.number_of_shards", numberOfShards)
+ //.put("path.data", new File("target/data").getAbsolutePath())
+ .build()).node();
+ Client client = node.client();
+
+ for (int loop = 1; loop < 1000; loop++) {
+ String indexName = "testindex" + loop;
+ String typeName = "testType" + loop;
+ String id = UUID.randomUUID().toString();
+ String mapping = "{ \"" + typeName + "\" : {\"dynamic_templates\" : [{\"no_analyze_strings\" : {\"match_mapping_type\" : \"string\",\"match\" : \"*\",\"mapping\" : {\"type\" : \"string\",\"index\" : \"not_analyzed\"}}}]}}";
+ client.admin().indices().prepareCreate(indexName).execute().actionGet();
+ client.admin().indices().preparePutMapping(indexName).setType(typeName).setSource(mapping).execute().actionGet();
+// sleep after put mapping
+// Thread.sleep(100);
+
+ System.out.println("indexing " + loop);
+ String name = "name" + id;
+ client.prepareIndex(indexName, typeName, id).setSource("{ \"id\": \"" + id + "\", \"name\": \"" + name + "\" }").execute().actionGet();
+
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+// sleep after refresh
+// Thread.sleep(100);
+
+ System.out.println("searching " + loop);
+ SearchResponse result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
+ if (result.getHits().hits().length != 1) {
+ for (int i = 1; i <= 100; i++) {
+ System.out.println("retry " + loop + ", " + i + ", previous total hits: " + result.getHits().getTotalHits());
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ Thread.sleep(100);
+ result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
+ if (result.getHits().hits().length == 1) {
+ client.admin().indices().prepareRefresh(indexName).execute().actionGet();
+ result = client.prepareSearch(indexName).setPostFilter(QueryBuilders.termQuery("name", name)).execute().actionGet();
+ throw new RuntimeException("Record found after " + (i * 100) + " ms, second go: " + result.getHits().hits().length);
+ } else if (i == 100) {
+ if (client.prepareGet(indexName, typeName, id).execute().actionGet().isExists())
+ throw new RuntimeException("Record wasn't found after 10s but can be get by id");
+ else throw new RuntimeException("Record wasn't found after 10s and can't be get by id");
+ }
+ }
+ }
+
+ //client.admin().indices().prepareDelete(indexName).execute().actionGet();
+ }
+ client.close();
+ node2.close();
+ node.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
new file mode 100644
index 0000000000..be9281ad3c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/QuickRollingRestartStressTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+
+import java.util.Date;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ */
+public class QuickRollingRestartStressTest {
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ Random random = new Random();
+
+ Settings settings = Settings.settingsBuilder().build();
+
+ Node[] nodes = new Node[5];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+
+ Node client = NodeBuilder.nodeBuilder().client(true).node();
+
+ long COUNT;
+ if (client.client().admin().indices().prepareExists("test").execute().actionGet().isExists()) {
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ throw new ElasticsearchException("failed to wait for green state on startup...");
+ }
+ COUNT = client.client().prepareCount().execute().actionGet().getCount();
+ System.out.println("--> existing index, count [" + COUNT + "]");
+ } else {
+ COUNT = SizeValue.parseSizeValue("100k").singles();
+ System.out.println("--> indexing data...");
+ for (long i = 0; i < COUNT; i++) {
+ client.client().prepareIndex("test", "type", Long.toString(i))
+ .setSource("date", new Date(), "data", RandomStrings.randomAsciiOfLength(random, 10000))
+ .execute().actionGet();
+ }
+ System.out.println("--> done indexing data [" + COUNT + "]");
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ System.err.println("--> the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ }
+
+ final int ROLLING_RESTARTS = 100;
+ System.out.println("--> starting rolling restarts [" + ROLLING_RESTARTS + "]");
+ for (int rollingRestart = 0; rollingRestart < ROLLING_RESTARTS; rollingRestart++) {
+ System.out.println("--> doing rolling restart [" + rollingRestart + "]...");
+ int nodeId = ThreadLocalRandom.current().nextInt();
+ for (int i = 0; i < nodes.length; i++) {
+ int nodeIdx = Math.abs(nodeId++) % nodes.length;
+ nodes[nodeIdx].close();
+ nodes[nodeIdx] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ System.out.println("--> done rolling restart [" + rollingRestart + "]");
+
+ System.out.println("--> waiting for green state now...");
+ ClusterHealthResponse clusterHealthResponse = client.client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForRelocatingShards(0).setTimeout("10m").execute().actionGet();
+ if (clusterHealthResponse.isTimedOut()) {
+ System.err.println("--> timed out waiting for green state...");
+ ClusterState state = client.client().admin().cluster().prepareState().execute().actionGet().getState();
+ System.out.println(state.nodes().prettyPrint());
+ System.out.println(state.routingTable().prettyPrint());
+ System.out.println(state.routingNodes().prettyPrint());
+ throw new ElasticsearchException("timed out waiting for green state");
+ } else {
+ System.out.println("--> got green status");
+ }
+
+ System.out.println("--> checking data [" + rollingRestart + "]....");
+ boolean failed = false;
+ for (int i = 0; i < 10; i++) {
+ long count = client.client().prepareCount().execute().actionGet().getCount();
+ if (COUNT != count) {
+ failed = true;
+ System.err.println("--> ERROR the indexed docs do not match the count..., got [" + count + "], expected [" + COUNT + "]");
+ }
+ }
+ if (!failed) {
+ System.out.println("--> count verified");
+ }
+ }
+
+ System.out.println("--> shutting down...");
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
new file mode 100644
index 0000000000..76e27a5554
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/rollingrestart/RollingRestartStressTest.java
@@ -0,0 +1,354 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.rollingrestart;
+
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+
+/**
+ *
+ */
+public class RollingRestartStressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private int numberOfShards = 5;
+ private int numberOfReplicas = 1;
+ private int numberOfNodes = 4;
+
+ private int textTokens = 150;
+ private int numberOfFields = 10;
+ private long initialNumberOfDocs = 100000;
+
+ private int indexers = 0;
+
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+
+ private Settings settings = Settings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private boolean clearNodeData = true;
+
+ private Node client;
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong idCounter = new AtomicLong();
+
+
+ public RollingRestartStressTest numberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfShards(int numberOfShards) {
+ this.numberOfShards = numberOfShards;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfReplicas(int numberOfReplicas) {
+ this.numberOfReplicas = numberOfReplicas;
+ return this;
+ }
+
+ public RollingRestartStressTest initialNumberOfDocs(long initialNumberOfDocs) {
+ this.initialNumberOfDocs = initialNumberOfDocs;
+ return this;
+ }
+
+ public RollingRestartStressTest textTokens(int textTokens) {
+ this.textTokens = textTokens;
+ return this;
+ }
+
+ public RollingRestartStressTest numberOfFields(int numberOfFields) {
+ this.numberOfFields = numberOfFields;
+ return this;
+ }
+
+ public RollingRestartStressTest indexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public RollingRestartStressTest indexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public RollingRestartStressTest period(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ public RollingRestartStressTest cleanNodeData(boolean clearNodeData) {
+ this.clearNodeData = clearNodeData;
+ return this;
+ }
+
+ public RollingRestartStressTest settings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public void run() throws Exception {
+ Random random = new Random(0);
+
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ client.client().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+ .put("index.number_of_shards", numberOfShards)
+ .put("index.number_of_replicas", numberOfReplicas)
+ ).execute().actionGet();
+
+ logger.info("********** [START] INDEXING INITIAL DOCS");
+ for (long i = 0; i < initialNumberOfDocs; i++) {
+ indexDoc(random);
+ }
+ logger.info("********** [DONE ] INDEXING INITIAL DOCS");
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].start();
+ }
+
+ long testStart = System.currentTimeMillis();
+
+ // start doing the rolling restart
+ int nodeIndex = 0;
+ while (true) {
+ Path[] nodeData = nodes[nodeIndex].injector().getInstance(NodeEnvironment.class).nodeDataPaths();
+ nodes[nodeIndex].close();
+ if (clearNodeData) {
+ try {
+ IOUtils.rm(nodeData);
+ } catch (Exception ex) {
+ logger.debug("Failed to delete node data directories", ex);
+
+ }
+ }
+
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes + 0 /* client node*/))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();
+
+ Thread.sleep(1000);
+
+ try {
+ ClusterHealthResponse clusterHealth = client.client().admin().cluster().prepareHealth()
+ .setWaitForGreenStatus()
+ .setWaitForNodes(Integer.toString(numberOfNodes + 1 /* client node*/))
+ .setWaitForRelocatingShards(0)
+ .setTimeout("10m").execute().actionGet();
+ if (clusterHealth.isTimedOut()) {
+ logger.warn("timed out waiting for green status....");
+ }
+ } catch (Exception e) {
+ logger.warn("failed to execute cluster health....");
+ }
+
+ if (++nodeIndex == nodes.length) {
+ nodeIndex = 0;
+ }
+
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ logger.info("test finished");
+ break;
+ }
+ }
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i].close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ for (int i = 0; i < indexerThreads.length; i++) {
+ if (!indexerThreads[i].closed) {
+ logger.warn("thread not closed!");
+ }
+ }
+
+ client.client().admin().indices().prepareRefresh().execute().actionGet();
+
+ // check the count
+ for (int i = 0; i < (nodes.length * 5); i++) {
+ CountResponse count = client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
+ logger.info("indexed [{}], count [{}], [{}]", count.getCount(), indexCounter.get(), count.getCount() == indexCounter.get() ? "OK" : "FAIL");
+ if (count.getCount() != indexCounter.get()) {
+ logger.warn("count does not match!");
+ }
+ }
+
+ // scan all the docs, verify all have the same version based on the number of replicas
+ SearchResponse searchResponse = client.client().prepareSearch()
+ .setSearchType(SearchType.SCAN)
+ .setQuery(matchAllQuery())
+ .setSize(50)
+ .setScroll(TimeValue.timeValueMinutes(2))
+ .execute().actionGet();
+ logger.info("Verifying versions for {} hits...", searchResponse.getHits().totalHits());
+
+ while (true) {
+ searchResponse = client.client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(2)).execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("Search Failures " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ for (SearchHit hit : searchResponse.getHits()) {
+ long version = -1;
+ for (int i = 0; i < (numberOfReplicas + 1); i++) {
+ GetResponse getResponse = client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
+ if (version == -1) {
+ version = getResponse.getVersion();
+ } else {
+ if (version != getResponse.getVersion()) {
+ logger.warn("Doc {} has different version numbers {} and {}", hit.id(), version, getResponse.getVersion());
+ }
+ }
+ }
+ }
+ if (searchResponse.getHits().hits().length == 0) {
+ break;
+ }
+ }
+ logger.info("Done verifying versions");
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ Random random = new Random(0);
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc(random);
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
+ private void indexDoc(Random random) throws Exception {
+ StringBuilder sb = new StringBuilder();
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("field", "value" + ThreadLocalRandom.current().nextInt());
+
+ int fields = Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfFields;
+ for (int i = 0; i < fields; i++) {
+ json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
+ int tokens = ThreadLocalRandom.current().nextInt() % textTokens;
+ sb.setLength(0);
+ for (int j = 0; j < tokens; j++) {
+ sb.append(Strings.randomBase64UUID(random)).append(' ');
+ }
+ json.field("text_" + i, sb.toString());
+ }
+
+ json.endObject();
+
+ String id = Long.toString(idCounter.incrementAndGet());
+ client.client().prepareIndex("test", "type1", id)
+ .setCreate(true)
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.setProperty("es.logger.prefix", "");
+
+ Settings settings = settingsBuilder()
+ .put("index.shard.check_on_startup", true)
+ .put("path.data", "data/data1,data/data2")
+ .build();
+
+ RollingRestartStressTest test = new RollingRestartStressTest()
+ .settings(settings)
+ .numberOfNodes(4)
+ .numberOfShards(5)
+ .numberOfReplicas(1)
+ .initialNumberOfDocs(1000)
+ .textTokens(150)
+ .numberOfFields(10)
+ .cleanNodeData(false)
+ .indexers(5)
+ .indexerThrottle(TimeValue.timeValueMillis(50))
+ .period(TimeValue.timeValueMinutes(3));
+
+ test.run();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java b/core/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
new file mode 100644
index 0000000000..75109210ed
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/search1/ConcurrentSearchSerializationTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.junit.Ignore;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Tests that data don't get corrupted while reading it over the streams.
+ * <p/>
+ * See: https://github.com/elasticsearch/elasticsearch/issues/1686.
+ */
+@Ignore("Stress Test")
+public class ConcurrentSearchSerializationTests {
+
+ public static void main(String[] args) throws Exception {
+
+ Node node1 = NodeBuilder.nodeBuilder().node();
+ Node node2 = NodeBuilder.nodeBuilder().node();
+ Node node3 = NodeBuilder.nodeBuilder().node();
+
+ final Client client = node1.client();
+
+ System.out.println("Indexing...");
+ final String data = RandomStrings.randomAsciiOfLength(ThreadLocalRandom.current(), 100);
+ final CountDownLatch latch1 = new CountDownLatch(100);
+ for (int i = 0; i < 100; i++) {
+ client.prepareIndex("test", "type", Integer.toString(i))
+ .setSource("field", data)
+ .execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse indexResponse) {
+ latch1.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ latch1.countDown();
+ }
+ });
+ }
+ latch1.await();
+ System.out.println("Indexed");
+
+ System.out.println("searching...");
+ Thread[] threads = new Thread[10];
+ final CountDownLatch latch = new CountDownLatch(threads.length);
+ for (int i = 0; i < threads.length; i++) {
+ threads[i] = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ for (int i = 0; i < 1000; i++) {
+ SearchResponse searchResponse = client.prepareSearch("test")
+ .setQuery(QueryBuilders.matchAllQuery())
+ .setSize(i % 100)
+ .execute().actionGet();
+ for (SearchHit hit : searchResponse.getHits()) {
+ try {
+ if (!hit.sourceAsMap().get("field").equals(data)) {
+ System.err.println("Field not equal!");
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ latch.countDown();
+ }
+ });
+ }
+ for (Thread thread : threads) {
+ thread.start();
+ }
+
+ latch.await();
+
+ System.out.println("done searching");
+ client.close();
+ node1.close();
+ node2.close();
+ node3.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java b/core/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
new file mode 100644
index 0000000000..23943f9707
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/search1/ParentChildStressTest.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.transport.RemoteTransportException;
+
+import java.io.IOException;
+import java.util.*;
+
+
+public class ParentChildStressTest {
+
+ private Node elasticNode;
+ private Client client;
+
+ private static final String PARENT_TYPE_NAME = "content";
+ private static final String CHILD_TYPE_NAME = "contentFiles";
+ private static final String INDEX_NAME = "acme";
+
+ /**
+ * Constructor. Initialize elastic and create the index/mapping
+ */
+ public ParentChildStressTest() {
+ NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder();
+ Settings settings = nodeBuilder.settings()
+ .build();
+ this.elasticNode = nodeBuilder.settings(settings).client(true).node();
+ this.client = this.elasticNode.client();
+
+ String mapping =
+ "{\"contentFiles\": {" +
+ "\"_parent\": {" +
+ "\"type\" : \"content\"" +
+ "}}}";
+
+ try {
+ client.admin().indices().create(new CreateIndexRequest(INDEX_NAME).mapping(CHILD_TYPE_NAME, mapping)).actionGet();
+ } catch (RemoteTransportException e) {
+ // usually means the index is already created.
+ }
+ }
+
+ public void shutdown() throws IOException {
+ client.close();
+ elasticNode.close();
+ }
+
+ /**
+ * Deletes the item from both the parent and child type locations.
+ */
+ public void deleteById(String id) {
+ client.prepareDelete(INDEX_NAME, PARENT_TYPE_NAME, id).execute().actionGet();
+ client.prepareDelete(INDEX_NAME, CHILD_TYPE_NAME, id).execute().actionGet();
+ }
+
+ /**
+ * Index a parent doc
+ */
+ public void indexParent(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
+ // index content
+ client.prepareIndex(INDEX_NAME, PARENT_TYPE_NAME, id).setSource(builder.map(objectMap)).execute().actionGet();
+ }
+
+ /**
+ * Index the file as a child doc
+ */
+ public void indexChild(String id, Map<String, Object> objectMap) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+
+ IndexRequestBuilder indexRequestbuilder = client.prepareIndex(INDEX_NAME, CHILD_TYPE_NAME, id);
+ indexRequestbuilder = indexRequestbuilder.setParent(id);
+ indexRequestbuilder = indexRequestbuilder.setSource(builder.map(objectMap));
+ indexRequestbuilder.execute().actionGet();
+ }
+
+ /**
+ * Execute a search based on a JSON String in QueryDSL format.
+ * <p/>
+ * Throws a RuntimeException if there are any shard failures to
+ * elevate the visibility of the problem.
+ */
+ public List<String> executeSearch(String source) {
+ SearchRequest request = Requests.searchRequest(INDEX_NAME).source(source);
+
+ List<ShardSearchFailure> failures;
+ SearchResponse response;
+
+ response = client.search(request).actionGet();
+ failures = Arrays.asList(response.getShardFailures());
+
+ // throw an exception so that we see the shard failures
+ if (failures.size() != 0) {
+ String failuresStr = failures.toString();
+ if (!failuresStr.contains("reason [No active shards]")) {
+ throw new RuntimeException(failures.toString());
+ }
+ }
+
+ ArrayList<String> results = new ArrayList<>();
+ if (response != null) {
+ for (SearchHit hit : response.getHits()) {
+ String sourceStr = hit.sourceAsString();
+ results.add(sourceStr);
+ }
+ }
+ return results;
+ }
+
+ /**
+ * Create a document as a parent and index it.
+ * Load a file and index it as a child.
+ */
+ public String indexDoc() throws IOException {
+ String id = UUID.randomUUID().toString();
+
+ Map<String, Object> objectMap = new HashMap<>();
+ objectMap.put("title", "this is a document");
+
+ Map<String, Object> objectMap2 = new HashMap<>();
+ objectMap2.put("description", "child test");
+
+ this.indexParent(id, objectMap);
+ this.indexChild(id, objectMap2);
+ return id;
+ }
+
+ /**
+ * Perform the has_child query for the doc.
+ * <p/>
+ * Since it might take time to get indexed, it
+ * loops until it finds the doc.
+ */
+ public void searchDocByChild() throws InterruptedException {
+ String dslString =
+ "{\"query\":{" +
+ "\"has_child\":{" +
+ "\"query\":{" +
+ "\"field\":{" +
+ "\"description\":\"child test\"}}," +
+ "\"type\":\"contentFiles\"}}}";
+
+ int numTries = 0;
+ List<String> items = new ArrayList<>();
+
+ while (items.size() != 1 && numTries < 20) {
+ items = executeSearch(dslString);
+
+ numTries++;
+ if (items.size() != 1) {
+ Thread.sleep(250);
+ }
+ }
+ if (items.size() != 1) {
+ System.out.println("Exceeded number of retries");
+ System.exit(1);
+ }
+ }
+
+ /**
+ * Program to loop on:
+ * create parent/child doc
+ * search for the doc
+ * delete the doc
+ * repeat the above until shard failure.
+ * <p/>
+ * Eventually fails with:
+ * <p/>
+ * [shard [[74wz0lrXRSmSOsJOqgPvlw][acme][1]], reason [RemoteTransportException
+ * [[Kismet][inet[/10.10.30.52:9300]][search/phase/query]]; nested:
+ * QueryPhaseExecutionException[[acme][1]:
+ * query[ConstantScore(child_filter[contentFiles
+ * /content](filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(
+ * _type:contentFiles)))],from[0],size[10]: Query Failed [Failed to execute
+ * child query [filtered(file:mission
+ * file:statement)->FilterCacheFilterWrapper(_type:contentFiles)]]]; nested:
+ * ]]
+ *
+ * @param args
+ */
+ public static void main(String[] args) throws IOException {
+ ParentChildStressTest elasticTest = new ParentChildStressTest();
+ try {
+ // loop a bunch of times - usually fails before the count is done.
+ int NUM_LOOPS = 1000;
+ System.out.println();
+ System.out.println("Looping [" + NUM_LOOPS + "] times:");
+ System.out.println();
+ for (int i = 0; i < NUM_LOOPS; i++) {
+ String id = elasticTest.indexDoc();
+
+ elasticTest.searchDocByChild();
+
+ elasticTest.deleteById(id);
+
+ System.out.println(" Success: " + i);
+ }
+ elasticTest.shutdown();
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ elasticTest.shutdown();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java b/core/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
new file mode 100644
index 0000000000..0963717e99
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.stresstest.search1;
+
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.sort.SortOrder;
+import org.junit.Ignore;
+
+import java.util.Arrays;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.index.query.QueryBuilders.termQuery;
+
+/**
+ *
+ */
+@Ignore("Stress Test")
+public class Search1StressTest {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+
+ private int numberOfNodes = 4;
+
+ private int indexers = 0;
+ private SizeValue preIndexDocs = new SizeValue(0);
+ private TimeValue indexerThrottle = TimeValue.timeValueMillis(100);
+ private int searchers = 0;
+ private TimeValue searcherThrottle = TimeValue.timeValueMillis(20);
+ private int numberOfIndices = 10;
+ private int numberOfTypes = 4;
+ private int numberOfValues = 20;
+ private int numberOfHits = 300;
+ private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000);
+
+ private Settings settings = Settings.Builder.EMPTY_SETTINGS;
+
+ private TimeValue period = TimeValue.timeValueMinutes(20);
+
+ private AtomicLong indexCounter = new AtomicLong();
+ private AtomicLong searchCounter = new AtomicLong();
+
+
+ private Node client;
+
+ public Search1StressTest setNumberOfNodes(int numberOfNodes) {
+ this.numberOfNodes = numberOfNodes;
+ return this;
+ }
+
+ public Search1StressTest setPreIndexDocs(SizeValue preIndexDocs) {
+ this.preIndexDocs = preIndexDocs;
+ return this;
+ }
+
+ public Search1StressTest setIndexers(int indexers) {
+ this.indexers = indexers;
+ return this;
+ }
+
+ public Search1StressTest setIndexerThrottle(TimeValue indexerThrottle) {
+ this.indexerThrottle = indexerThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSearchers(int searchers) {
+ this.searchers = searchers;
+ return this;
+ }
+
+ public Search1StressTest setSearcherThrottle(TimeValue searcherThrottle) {
+ this.searcherThrottle = searcherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfIndices(int numberOfIndices) {
+ this.numberOfIndices = numberOfIndices;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfTypes(int numberOfTypes) {
+ this.numberOfTypes = numberOfTypes;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfValues(int numberOfValues) {
+ this.numberOfValues = numberOfValues;
+ return this;
+ }
+
+ public Search1StressTest setNumberOfHits(int numberOfHits) {
+ this.numberOfHits = numberOfHits;
+ return this;
+ }
+
+ public Search1StressTest setFlusherThrottle(TimeValue flusherThrottle) {
+ this.flusherThrottle = flusherThrottle;
+ return this;
+ }
+
+ public Search1StressTest setSettings(Settings settings) {
+ this.settings = settings;
+ return this;
+ }
+
+ public Search1StressTest setPeriod(TimeValue period) {
+ this.period = period;
+ return this;
+ }
+
+ private String nextIndex() {
+ return "test" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfIndices;
+ }
+
+ private String nextType() {
+ return "type" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfTypes;
+ }
+
+ private int nextNumValue() {
+ return Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfValues;
+ }
+
+ private String nextFieldValue() {
+ return "value" + Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfValues;
+ }
+
+ private class Searcher extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ String indexName = nextIndex();
+ SearchRequestBuilder builder = client.client().prepareSearch(indexName);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.addSort("num", SortOrder.DESC);
+ } else if (ThreadLocalRandom.current().nextBoolean()) {
+ // add a _score based sorting, won't do any sorting, just to test...
+ builder.addSort("_score", SortOrder.DESC);
+ }
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ builder.setSearchType(SearchType.DFS_QUERY_THEN_FETCH);
+ }
+ int size = Math.abs(ThreadLocalRandom.current().nextInt()) % numberOfHits;
+ builder.setSize(size);
+ if (ThreadLocalRandom.current().nextBoolean()) {
+ // update from
+ builder.setFrom(size / 2);
+ }
+ String value = nextFieldValue();
+ builder.setQuery(termQuery("field", value));
+ searchCounter.incrementAndGet();
+ SearchResponse searchResponse = builder.execute().actionGet();
+ if (searchResponse.getFailedShards() > 0) {
+ logger.warn("failed search " + Arrays.toString(searchResponse.getShardFailures()));
+ }
+ // verify that all come from the requested index
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!hit.shard().index().equals(indexName)) {
+ logger.warn("got wrong index, asked for [{}], got [{}]", indexName, hit.shard().index());
+ }
+ }
+ // verify that all has the relevant value
+ for (SearchHit hit : searchResponse.getHits()) {
+ if (!value.equals(hit.sourceAsMap().get("field"))) {
+ logger.warn("got wrong field, asked for [{}], got [{}]", value, hit.sourceAsMap().get("field"));
+ }
+ }
+ Thread.sleep(searcherThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to search", e);
+ }
+ }
+ }
+ }
+
+ private class Indexer extends Thread {
+
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ indexDoc();
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to index / sleep", e);
+ }
+ }
+ }
+ }
+
+ private class Flusher extends Thread {
+ volatile boolean close = false;
+
+ volatile boolean closed = false;
+
+ @Override
+ public void run() {
+ while (true) {
+ if (close) {
+ closed = true;
+ return;
+ }
+ try {
+ client.client().admin().indices().prepareFlush().execute().actionGet();
+ Thread.sleep(indexerThrottle.millis());
+ } catch (Exception e) {
+ logger.warn("failed to flush / sleep", e);
+ }
+ }
+ }
+ }
+
+ private void indexDoc() throws Exception {
+ XContentBuilder json = XContentFactory.jsonBuilder().startObject()
+ .field("num", nextNumValue())
+ .field("field", nextFieldValue());
+
+ json.endObject();
+
+ client.client().prepareIndex(nextIndex(), nextType())
+ .setSource(json)
+ .execute().actionGet();
+ indexCounter.incrementAndGet();
+ }
+
+ public void run() throws Exception {
+ Node[] nodes = new Node[numberOfNodes];
+ for (int i = 0; i < nodes.length; i++) {
+ nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
+ }
+ client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();
+
+ for (int i = 0; i < numberOfIndices; i++) {
+ client.client().admin().indices().prepareCreate("test" + i).execute().actionGet();
+ }
+
+ logger.info("Pre indexing docs [{}]...", preIndexDocs);
+ for (long i = 0; i < preIndexDocs.singles(); i++) {
+ indexDoc();
+ }
+ logger.info("Done pre indexing docs [{}]", preIndexDocs);
+
+ Indexer[] indexerThreads = new Indexer[indexers];
+ for (int i = 0; i < indexerThreads.length; i++) {
+ indexerThreads[i] = new Indexer();
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.start();
+ }
+
+ Thread.sleep(10000);
+
+ Searcher[] searcherThreads = new Searcher[searchers];
+ for (int i = 0; i < searcherThreads.length; i++) {
+ searcherThreads[i] = new Searcher();
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.start();
+ }
+
+ Flusher flusher = null;
+ if (flusherThrottle.millis() > 0) {
+ flusher = new Flusher();
+ flusher.start();
+ }
+
+ long testStart = System.currentTimeMillis();
+
+ while (true) {
+ Thread.sleep(5000);
+ if ((System.currentTimeMillis() - testStart) > period.millis()) {
+ break;
+ }
+ }
+
+ System.out.println("DONE, closing .....");
+
+ if (flusher != null) {
+ flusher.close = true;
+ }
+
+ for (Searcher searcherThread : searcherThreads) {
+ searcherThread.close = true;
+ }
+
+ for (Indexer indexerThread : indexerThreads) {
+ indexerThread.close = true;
+ }
+
+ Thread.sleep(indexerThrottle.millis() + 10000);
+
+ if (flusher != null && !flusher.closed) {
+ logger.warn("flusher not closed!");
+ }
+ for (Searcher searcherThread : searcherThreads) {
+ if (!searcherThread.closed) {
+ logger.warn("search thread not closed!");
+ }
+ }
+ for (Indexer indexerThread : indexerThreads) {
+ if (!indexerThread.closed) {
+ logger.warn("index thread not closed!");
+ }
+ }
+
+ client.close();
+ for (Node node : nodes) {
+ node.close();
+ }
+
+ System.out.println("********** DONE, indexed [" + indexCounter.get() + "], searched [" + searchCounter.get() + "]");
+ }
+
+ public static void main(String[] args) throws Exception {
+ Search1StressTest test = new Search1StressTest()
+ .setPeriod(TimeValue.timeValueMinutes(10))
+ .setNumberOfNodes(2)
+ .setPreIndexDocs(SizeValue.parseSizeValue("100"))
+ .setIndexers(2)
+ .setIndexerThrottle(TimeValue.timeValueMillis(100))
+ .setSearchers(10)
+ .setSearcherThrottle(TimeValue.timeValueMillis(10))
+ .setFlusherThrottle(TimeValue.timeValueMillis(1000))
+ .setNumberOfIndices(10)
+ .setNumberOfTypes(5)
+ .setNumberOfValues(50)
+ .setNumberOfHits(300);
+
+ test.run();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/BackgroundIndexer.java b/core/src/test/java/org/elasticsearch/test/BackgroundIndexer.java
new file mode 100644
index 0000000000..764c85657d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/BackgroundIndexer.java
@@ -0,0 +1,286 @@
+package org.elasticsearch.test;/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+public class BackgroundIndexer implements AutoCloseable {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ final Thread[] writers;
+ final CountDownLatch stopLatch;
+ final CopyOnWriteArrayList<Throwable> failures;
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final CountDownLatch startLatch = new CountDownLatch(1);
+ final AtomicBoolean hasBudget = new AtomicBoolean(false); // when set to true, writers will acquire writes from a semaphore
+ final Semaphore availableBudget = new Semaphore(0);
+
+ volatile int minFieldSize = 10;
+ volatile int maxFieldSize = 140;
+
+ /**
+ * Start indexing in the background using a random number of threads.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+ */
+ public BackgroundIndexer(String index, String type, Client client) {
+ this(index, type, client, -1);
+ }
+
+ /**
+     * Start indexing in the background using a random number of threads. Indexing will be paused after numOfDocs docs have
+     * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+     * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public BackgroundIndexer(String index, String type, Client client, int numOfDocs) {
+ this(index, type, client, numOfDocs, RandomizedTest.scaledRandomIntBetween(2, 5));
+ }
+
+ /**
+     * Start indexing in the background using a given number of threads. Indexing will be paused after numOfDocs docs have
+     * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+     * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ * @param writerCount number of indexing threads to use
+ */
+ public BackgroundIndexer(String index, String type, Client client, int numOfDocs, final int writerCount) {
+ this(index, type, client, numOfDocs, writerCount, true, null);
+ }
+
+ /**
+     * Start indexing in the background using a given number of threads. Indexing will be paused after numOfDocs docs have
+     * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+     * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ * @param writerCount number of indexing threads to use
+ * @param autoStart set to true to start indexing as soon as all threads have been created.
+ * @param random random instance to use
+ */
+ public BackgroundIndexer(final String index, final String type, final Client client, final int numOfDocs, final int writerCount,
+ boolean autoStart, Random random) {
+
+ if (random == null) {
+ random = RandomizedTest.getRandom();
+ }
+ failures = new CopyOnWriteArrayList<>();
+ writers = new Thread[writerCount];
+ stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> creating {} indexing threads (auto start: [{}], numOfDocs: [{}])", writerCount, autoStart, numOfDocs);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final boolean batch = random.nextBoolean();
+ final Random threadRandom = new Random(random.nextLong());
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ long id = -1;
+ try {
+ startLatch.await();
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ int batchSize = threadRandom.nextInt(20) + 1;
+ if (hasBudget.get()) {
+ batchSize = Math.max(Math.min(batchSize, availableBudget.availablePermits()), 1);// always try to get at least one
+ if (!availableBudget.tryAcquire(batchSize, 250, TimeUnit.MILLISECONDS)) {
+ // time out -> check if we have to stop.
+ continue;
+ }
+
+ }
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ for (int i = 0; i < batchSize; i++) {
+ id = idGenerator.incrementAndGet();
+ bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)));
+ }
+ BulkResponse bulkResponse = bulkRequest.get();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ throw new ElasticsearchException("bulk request failure, id: ["
+ + bulkItemResponse.getFailure().getId() + "] message: " + bulkItemResponse.getFailure().getMessage());
+ }
+ }
+
+ } else {
+
+ if (hasBudget.get() && !availableBudget.tryAcquire(250, TimeUnit.MILLISECONDS)) {
+ // time out -> check if we have to stop.
+ continue;
+ }
+ id = idGenerator.incrementAndGet();
+ client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), indexCounter.get());
+ } catch (Throwable e) {
+ failures.add(e);
+ logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ if (autoStart) {
+ start(numOfDocs);
+ }
+ }
+
+ private XContentBuilder generateSource(long id, Random random) throws IOException {
+ int contentLength = RandomInts.randomIntBetween(random, minFieldSize, maxFieldSize);
+ StringBuilder text = new StringBuilder(contentLength);
+ while (text.length() < contentLength) {
+ int tokenLength = RandomInts.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10));
+ text.append(" ").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength));
+ }
+ XContentBuilder builder = XContentFactory.smileBuilder();
+ builder.startObject().field("test", "value" + id)
+ .field("text", text.toString())
+ .endObject();
+ return builder;
+
+ }
+
+ private void setBudget(int numOfDocs) {
+ logger.debug("updating budget to [{}]", numOfDocs);
+ if (numOfDocs >= 0) {
+ hasBudget.set(true);
+ availableBudget.release(numOfDocs);
+ } else {
+ hasBudget.set(false);
+ }
+
+ }
+
+ /** Start indexing with no limit to the number of documents */
+ public void start() {
+ start(-1);
+ }
+
+ /**
+ * Start indexing
+ *
+     * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public void start(int numOfDocs) {
+ assert !stop.get() : "background indexer can not be started after it has stopped";
+ setBudget(numOfDocs);
+ startLatch.countDown();
+ }
+
+ /** Pausing indexing by setting current document limit to 0 */
+ public void pauseIndexing() {
+ availableBudget.drainPermits();
+ setBudget(0);
+ }
+
+ /** Continue indexing after it has paused. No new document limit will be set */
+ public void continueIndexing() {
+ continueIndexing(-1);
+ }
+
+ /**
+ * Continue indexing after it has paused.
+ *
+     * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public void continueIndexing(int numOfDocs) {
+ setBudget(numOfDocs);
+ }
+
+ /** Stop all background threads * */
+ public void stop() throws InterruptedException {
+ if (stop.get()) {
+ return;
+ }
+ stop.set(true);
+ Assert.assertThat("timeout while waiting for indexing threads to stop", stopLatch.await(6, TimeUnit.MINUTES), equalTo(true));
+ assertNoFailures();
+ }
+
+ public long totalIndexedDocs() {
+ return indexCounter.get();
+ }
+
+ public Throwable[] getFailures() {
+ return failures.toArray(new Throwable[failures.size()]);
+ }
+
+ public void assertNoFailures() {
+ Assert.assertThat(failures, emptyIterable());
+ }
+
+ /** the minimum size in code points of a payload field in the indexed documents */
+ public void setMinFieldSize(int fieldSize) {
+ minFieldSize = fieldSize;
+ }
+
+    /** the maximum size in code points of a payload field in the indexed documents */
+ public void setMaxFieldSize(int fieldSize) {
+ maxFieldSize = fieldSize;
+ }
+
+ @Override
+ public void close() throws Exception {
+ stop();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java b/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java
new file mode 100644
index 0000000000..618900c426
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/CompositeTestCluster.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Iterators;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * A test cluster implementation that holds a fixed set of external nodes as well as a InternalTestCluster
+ * which is used to run mixed version clusters in tests like backwards compatibility tests.
+ * Note: this is an experimental API
+ */
+public class CompositeTestCluster extends TestCluster {
+ private final InternalTestCluster cluster;
+ private final ExternalNode[] externalNodes;
+ private final ExternalClient client = new ExternalClient();
+ private static final String NODE_PREFIX = "external_";
+
+ public CompositeTestCluster(InternalTestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
+ super(cluster.seed());
+ this.cluster = cluster;
+ this.externalNodes = new ExternalNode[numExternalNodes];
+ for (int i = 0; i < externalNodes.length; i++) {
+ externalNodes[i] = externalNode;
+ }
+ }
+
+ @Override
+ public synchronized void afterTest() throws IOException {
+ cluster.afterTest();
+ }
+
+ @Override
+ public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException {
+ super.beforeTest(random, transportClientRatio);
+ cluster.beforeTest(random, transportClientRatio);
+ Settings defaultSettings = cluster.getDefaultSettings();
+ final Client client = cluster.size() > 0 ? cluster.client() : cluster.clientNodeClient();
+ for (int i = 0; i < externalNodes.length; i++) {
+ if (!externalNodes[i].running()) {
+ try {
+ externalNodes[i] = externalNodes[i].start(client, defaultSettings, NODE_PREFIX + i, cluster.getClusterName(), i);
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ return;
+ }
+ }
+ externalNodes[i].reset(random.nextLong());
+ }
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(">=" + Integer.toString(this.size())).get();
+ }
+ }
+
+ private Collection<ExternalNode> runningNodes() {
+ return Collections2.filter(Arrays.asList(externalNodes), new Predicate<ExternalNode>() {
+ @Override
+ public boolean apply(ExternalNode input) {
+ return input.running();
+ }
+ });
+ }
+
+ /**
+ * Upgrades one external running node to a node from the version running the tests. Commonly this is used
+ * to move from a node with version N-1 to a node running version N. This works seamlessly since they will
+ * share the same data directory. This method will return <tt>true</tt> iff a node got upgraded; otherwise, if no
+ * external node is running, it returns <tt>false</tt>
+ */
+ public synchronized boolean upgradeOneNode() throws InterruptedException, IOException {
+ return upgradeOneNode(Settings.EMPTY);
+ }
+
+ /**
+ * Upgrades all external running nodes to a node from the version running the tests.
+ * All nodes are shut down before the first upgrade happens.
+     * @return <code>true</code> iff at least one node was upgraded.
+ */
+ public synchronized boolean upgradeAllNodes() throws InterruptedException, IOException {
+ return upgradeAllNodes(Settings.EMPTY);
+ }
+
+
+ /**
+ * Upgrades all external running nodes to a node from the version running the tests.
+ * All nodes are shut down before the first upgrade happens.
+     * @return <code>true</code> iff at least one node was upgraded.
+ * @param nodeSettings settings for the upgrade nodes
+ */
+ public synchronized boolean upgradeAllNodes(Settings nodeSettings) throws InterruptedException, IOException {
+ boolean upgradedOneNode = false;
+ while(upgradeOneNode(nodeSettings)) {
+ upgradedOneNode = true;
+ }
+ return upgradedOneNode;
+ }
+
+ /**
+ * Upgrades one external running node to a node from the version running the tests. Commonly this is used
+ * to move from a node with version N-1 to a node running version N. This works seamlessly since they will
+ * share the same data directory. This method will return <tt>true</tt> iff a node got upgraded; otherwise, if no
+ * external node is running, it returns <tt>false</tt>
+ */
+ public synchronized boolean upgradeOneNode(Settings nodeSettings) throws InterruptedException, IOException {
+ Collection<ExternalNode> runningNodes = runningNodes();
+ if (!runningNodes.isEmpty()) {
+ final Client existingClient = cluster.client();
+ ExternalNode externalNode = RandomPicks.randomFrom(random, runningNodes);
+ externalNode.stop();
+ String s = cluster.startNode(nodeSettings);
+ ExternalNode.waitForNode(existingClient, s);
+ assertNoTimeout(existingClient.admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(size())).get());
+ return true;
+ }
+ return false;
+ }
+
+
+ /**
+     * Returns a simple pattern that matches all "new" nodes in the cluster.
+ */
+ public String newNodePattern() {
+ return cluster.nodePrefix() + "*";
+ }
+
+ /**
+     * Returns a simple pattern that matches all "old" / "backwards" nodes in the cluster.
+ */
+ public String backwardsNodePattern() {
+ return NODE_PREFIX + "*";
+ }
+
+ /**
+ * Allows allocation of shards of the given indices on all nodes in the cluster.
+ */
+ public void allowOnAllNodes(String... index) {
+ Settings build = Settings.builder().put("index.routing.allocation.exclude._name", "").build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Allows allocation of shards of the given indices only on "new" nodes in the cluster.
+     * Note: if a shard is allocated on an "old" node and can't be allocated on a "new" node it will only be removed if it can
+     * be allocated on some other "new" node.
+ */
+ public void allowOnlyNewNodes(String... index) {
+ Settings build = Settings.builder().put("index.routing.allocation.exclude._name", backwardsNodePattern()).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Starts a current version data node
+ */
+ public void startNewNode() {
+ cluster.startNode();
+ }
+
+
+ @Override
+ public synchronized Client client() {
+ return client;
+ }
+
+ @Override
+ public synchronized int size() {
+ return runningNodes().size() + cluster.size();
+ }
+
+ @Override
+ public int numDataNodes() {
+ return runningNodes().size() + cluster.numDataNodes();
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return runningNodes().size() + cluster.numDataAndMasterNodes();
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ return cluster.httpAddresses();
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ IOUtils.close(externalNodes);
+ } finally {
+ IOUtils.close(cluster);
+ }
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
+ stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ }
+ // CompositeTestCluster does not check the request breaker,
+ // because checking it requires a network request, which in
+ // turn increments the breaker, making it non-0
+ }
+ }
+
+ @Override
+ public String getClusterName() {
+ return cluster.getClusterName();
+ }
+
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ return Iterators.singletonIterator(client());
+ }
+
+ /**
+ * Delegates to {@link org.elasticsearch.test.InternalTestCluster#fullRestart()}
+ */
+ public void fullRestartInternalCluster() throws Exception {
+ cluster.fullRestart();
+ }
+
+ /**
+ * Returns the number of current version data nodes in the cluster
+ */
+ public int numNewDataNodes() {
+ return cluster.numDataNodes();
+ }
+
+ /**
+ * Returns the number of former version data nodes in the cluster
+ */
+ public int numBackwardsDataNodes() {
+ return runningNodes().size();
+ }
+
+ public TransportAddress externalTransportAddress() {
+ return RandomPicks.randomFrom(random, externalNodes).getTransportAddress();
+ }
+
+ public InternalTestCluster internalCluster() {
+ return cluster;
+ }
+
+ private synchronized Client internalClient() {
+ Collection<ExternalNode> externalNodes = runningNodes();
+ return random.nextBoolean() && !externalNodes.isEmpty() ? RandomPicks.randomFrom(random, externalNodes).getClient() : cluster.client();
+ }
+
+ private final class ExternalClient extends FilterClient {
+
+ public ExternalClient() {
+ super(internalClient());
+ }
+
+ @Override
+ public void close() {
+ // never close this client
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/DummyShardLock.java b/core/src/test/java/org/elasticsearch/test/DummyShardLock.java
new file mode 100644
index 0000000000..078803a812
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/DummyShardLock.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.env.ShardLock;
+import org.elasticsearch.index.shard.ShardId;
+
+/*
+ * A ShardLock that does nothing... for tests only
+ */
+public class DummyShardLock extends ShardLock {
+
+ public DummyShardLock(ShardId id) {
+ super(id);
+ }
+
+ @Override
+ protected void closeInternal() {
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
new file mode 100644
index 0000000000..c932d8219f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchAllocationTestCase.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.MutableShardRouting;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecidersModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+import org.elasticsearch.node.settings.NodeSettingsService;
+
+import java.lang.reflect.Constructor;
+import java.util.*;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/**
+ */
+public abstract class ElasticsearchAllocationTestCase extends ElasticsearchTestCase {
+
+ public static AllocationService createAllocationService() {
+ return createAllocationService(Settings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static AllocationService createAllocationService(Settings settings) {
+ return createAllocationService(settings, getRandom());
+ }
+
+ public static AllocationService createAllocationService(Settings settings, Random random) {
+ return createAllocationService(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), random);
+ }
+
+ public static AllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ return new AllocationService(settings,
+ randomAllocationDeciders(settings, nodeSettingsService, random),
+ new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), ClusterInfoService.EMPTY);
+ }
+
+
+ public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ final ImmutableSet<Class<? extends AllocationDecider>> defaultAllocationDeciders = AllocationDecidersModule.DEFAULT_ALLOCATION_DECIDERS;
+ final List<AllocationDecider> list = new ArrayList<>();
+ for (Class<? extends AllocationDecider> deciderClass : defaultAllocationDeciders) {
+ try {
+ try {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class);
+ list.add(constructor.newInstance(settings, nodeSettingsService));
+ } catch (NoSuchMethodException e) {
+ Constructor<? extends AllocationDecider> constructor = null;
+ constructor = deciderClass.getConstructor(Settings.class);
+ list.add(constructor.newInstance(settings));
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ assertThat(list.size(), equalTo(defaultAllocationDeciders.size()));
+ for (AllocationDecider d : list) {
+ assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
+ }
+ Collections.shuffle(list, random);
+ return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+
+ }
+
+ public static DiscoveryNode newNode(String nodeId) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, TransportAddress address) {
+ return new DiscoveryNode(nodeId, address, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Version version) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, version);
+ }
+
+ public static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
+ List<MutableShardRouting> initializingShards = clusterState.routingNodes().shardsWithState(INITIALIZING);
+ if (initializingShards.isEmpty()) {
+ return clusterState;
+ }
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, newArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();
+ return ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java
new file mode 100644
index 0000000000..4d01d53d22
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.Ignore;
+
+import java.io.IOException;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Map;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Abstract base class for backwards compatibility tests. Subclasses of this class
+ * can run tests against a mixed version cluster. A subset of the nodes in the cluster
+ * are started in a dedicated process running off a full-fledged elasticsearch release.
+ * Nodes can be "upgraded" from the "backwards" node to a "new" node where the "new" node's
+ * version corresponds to current version.
+ * The purpose of this test class is to run tests in scenarios where clusters are in an
+ * intermediate state during a rolling upgrade as well as upgrade situations. The clients
+ * accessed via #client() are random clients to the nodes in the cluster which might
+ * execute requests on the "new" as well as the "old" nodes.
+ * <p>
+ * Note: this base class is still experimental and might have bugs or leave external processes running behind.
+ * </p>
+ * Backwards compatibility tests are disabled by default via {@link Backwards} annotation.
+ * The following system variables control the test execution:
+ * <ul>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY}</tt> enables / disables
+ * tests annotated with {@link Backwards} (defaults to
+ * <tt>false</tt>)
+ * </li>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}</tt>
+ * sets the version to run the external nodes from formatted as <i>X.Y.Z</i>.
+ * The tests class will try to locate a release folder <i>elasticsearch-X.Y.Z</i>
+ * within path passed via {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
+ * depending on this system variable.
+ * </li>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}</tt> the path to the
+ * elasticsearch releases to run backwards compatibility tests against.
+ * </li>
+ * </ul>
+ *
+ */
+// the transportClientRatio is tricky here since we don't fully control the cluster nodes
+@ElasticsearchBackwardsCompatIntegrationTest.Backwards
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 0, maxNumDataNodes = 2, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
+@Ignore
+public abstract class ElasticsearchBackwardsCompatIntegrationTest extends ElasticsearchIntegrationTest {
+
+ /**
+ * Key used to set the path for the elasticsearch executable used to run backwards compatibility tests from
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY = "tests.bwc";
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_VERSION = "tests.bwc.version";
+ /**
+ * Key used to set the path for the elasticsearch executable used to run backwards compatibility tests from
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_PATH = "tests.bwc.path";
+ /**
+ * Property that allows to adapt the tests behaviour to older features/bugs based on the input version
+ */
+ private static final String TESTS_COMPATIBILITY = "tests.compatibility";
+
+ private static final Version GLOABL_COMPATIBILITY_VERSION = Version.fromString(compatibilityVersionProperty());
+
+ private static Path backwardsCompatibilityPath() {
+ String path = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_PATH);
+ if (path == null || path.isEmpty()) {
+ throw new IllegalArgumentException("Must specify backwards test path with property " + TESTS_BACKWARDS_COMPATIBILITY_PATH);
+ }
+ String version = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ if (version == null || version.isEmpty()) {
+ throw new IllegalArgumentException("Must specify backwards test version with property " + TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ }
+ if (Version.fromString(version).before(Version.CURRENT.minimumCompatibilityVersion())) {
+ throw new IllegalArgumentException("Backcompat elasticsearch version must be same major version as current. " +
+ "backcompat: " + version + ", current: " + Version.CURRENT.toString());
+ }
+ Path file = PathUtils.get(path, "elasticsearch-" + version);
+ if (!Files.exists(file)) {
+ throw new IllegalArgumentException("Backwards tests location is missing: " + file.toAbsolutePath());
+ }
+ if (!Files.isDirectory(file)) {
+ throw new IllegalArgumentException("Backwards tests location is not a directory: " + file.toAbsolutePath());
+ }
+ return file;
+ }
+
+ @Override
+ protected Settings.Builder setRandomSettings(Random random, Settings.Builder builder) {
+ if (globalCompatibilityVersion().before(Version.V_1_3_2)) {
+ // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
+ // see #7210
+ builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
+ }
+ return builder;
+ }
+
+ /**
+ * Returns the tests compatibility version.
+ */
+ public Version compatibilityVersion() {
+ return compatibilityVersion(getClass());
+ }
+
+ private Version compatibilityVersion(Class<?> clazz) {
+ if (clazz == Object.class || clazz == ElasticsearchIntegrationTest.class) {
+ return globalCompatibilityVersion();
+ }
+ CompatibilityVersion annotation = clazz.getAnnotation(CompatibilityVersion.class);
+ if (annotation != null) {
+ return Version.smallest(Version.fromId(annotation.version()), compatibilityVersion(clazz.getSuperclass()));
+ }
+ return compatibilityVersion(clazz.getSuperclass());
+ }
+
+ /**
+ * Returns a global compatibility version that is set via the
+ * {@value #TESTS_COMPATIBILITY} or {@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION} system property.
+ * If both are unset the current version is used as the global compatibility version. This
+ * compatibility version is used for static randomization. For per-suite compatibility version see
+ * {@link #compatibilityVersion()}
+ */
+ public static Version globalCompatibilityVersion() {
+ return GLOABL_COMPATIBILITY_VERSION;
+ }
+
+ private static String compatibilityVersionProperty() {
+ final String version = System.getProperty(TESTS_COMPATIBILITY);
+ if (Strings.hasLength(version)) {
+ return version;
+ }
+ return System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ }
+
+ public CompositeTestCluster backwardsCluster() {
+ return (CompositeTestCluster) cluster();
+ }
+
+ @Override
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ TestCluster cluster = super.buildTestCluster(scope, seed);
+ ExternalNode externalNode = new ExternalNode(backwardsCompatibilityPath(), randomLong(), new SettingsSource() {
+ @Override
+ public Settings node(int nodeOrdinal) {
+ return externalNodeSettings(nodeOrdinal);
+ }
+
+ @Override
+ public Settings transportClient() {
+ return transportClientSettings();
+ }
+ });
+ return new CompositeTestCluster((InternalTestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), externalNode);
+ }
+
+ private Settings addLoggerSettings(Settings externalNodesSettings) {
+ TestLogging logging = getClass().getAnnotation(TestLogging.class);
+ Map<String, String> loggingLevels = LoggingListener.getLoggersAndLevelsFromAnnotation(logging);
+ Settings.Builder finalSettings = Settings.settingsBuilder();
+ if (loggingLevels != null) {
+ for (Map.Entry<String, String> level : loggingLevels.entrySet()) {
+ finalSettings.put("logger." + level.getKey(), level.getValue());
+ }
+ }
+ finalSettings.put(externalNodesSettings);
+ return finalSettings.build();
+ }
+
+ protected int minExternalNodes() { return 1; }
+
+ protected int maxExternalNodes() {
+ return 2;
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ protected Settings requiredSettings() {
+ return ExternalNode.REQUIRED_SETTINGS;
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return commonNodeSettings(nodeOrdinal);
+ }
+
+ public void assertAllShardsOnNodes(String index, String pattern) {
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ }
+
+ protected Settings commonNodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder().put(requiredSettings())
+ .put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransport.class.getName()) // run same transport / disco as external
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, TransportService.class.getName());
+ if (compatibilityVersion().before(Version.V_1_3_2)) {
+ // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
+ // see #7210
+ builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, false)
+ .put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
+ }
+ return builder.build();
+ }
+
+ protected Settings externalNodeSettings(int nodeOrdinal) {
+ return addLoggerSettings(commonNodeSettings(nodeOrdinal));
+ }
+
+ /**
+ * Annotation for backwards compat tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = false, sysProperty = ElasticsearchBackwardsCompatIntegrationTest.TESTS_BACKWARDS_COMPATIBILITY)
+ public @interface Backwards {
+ }
+
+ /**
+ * If a test is annotated with {@link CompatibilityVersion}
+ * all randomized settings will only contain settings or mappings which are compatible with the specified version ID.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ @Ignore
+ public @interface CompatibilityVersion {
+ int version();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
new file mode 100644
index 0000000000..621d4fc4d0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
@@ -0,0 +1,2079 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.Randomness;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Joiner;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.lucene.store.StoreRateLimiting;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MappedFieldType.Loading;
+import org.elasticsearch.index.mapper.internal.SizeFieldMapper;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.merge.policy.*;
+import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider;
+import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.index.translog.TranslogService;
+import org.elasticsearch.index.translog.TranslogWriter;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.flush.IndicesSyncedFlushResult;
+import org.elasticsearch.indices.flush.SyncedFlushService;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.test.client.RandomizingClient;
+import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTimeZone;
+import org.junit.*;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.*;
+import java.net.InetSocketAddress;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.XContentTestUtils.convertToMap;
+import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+/**
+ * {@link ElasticsearchIntegrationTest} is an abstract base class to run integration
+ * tests against a JVM private Elasticsearch Cluster. The test class supports 2 different
+ * cluster scopes.
+ * <ul>
+ * <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li>
+ * <li>{@link Scope#SUITE} - uses a cluster shared across all test methods in the same suite</li>
+ * </ul>
+ * <p/>
+ * The most common test scope is {@link Scope#SUITE} which shares a cluster per test suite.
+ * <p/>
+ * If the test methods need specific node settings or change persistent and/or transient cluster settings {@link Scope#TEST}
+ * should be used. To configure a scope for the test cluster the {@link ClusterScope} annotation
+ * should be used, here is an example:
+ * <pre>
+ *
+ * @ClusterScope(scope=Scope.TEST) public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * If no {@link ClusterScope} annotation is present on an integration test the default scope is {@link Scope#SUITE}
+ * <p/>
+ * A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is
+ * determined at random and can change across tests. The {@link ClusterScope} allows configuring the initial number of nodes
+ * that are created before the tests start.
+ * <p/>
+ * <pre>
+ * @ClusterScope(scope=Scope.SUITE, numDataNodes=3)
+ * public class SomeIntegrationTest extends ElasticsearchIntegrationTest {
+ * @Test public void testMethod() {}
+ * }
+ * </pre>
+ * <p/>
+ * Note, the {@link ElasticsearchIntegrationTest} uses randomized settings on a cluster and index level. For instance
+ * each test might use different directory implementation for each test or will return a random client to one of the
+ * nodes in the cluster for each call to {@link #client()}. Test failures might only be reproducible if the correct
+ * system properties are passed to the test execution environment.
+ * <p/>
+ * <p>
+ * This class supports the following system properties (passed with -Dkey=value to the application)
+ * <ul>
+ * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
+ * <li>-D{@value InternalTestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
+ * useful to test the system without mock modules, to make sure they don't hide any bugs in production.</li>
+ * <li>-D{@value #SETTING_INDEX_SEED} - a random seed used to initialize the index random context.
+ * </ul>
+ * </p>
+ */
+@Ignore
+@ElasticsearchIntegrationTest.Integration
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
+public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase {
+
+ /**
+ * Property that allows to control whether the Integration tests are run (default) or not
+ */
+ public static final String SYSPROP_INTEGRATION = "tests.integration";
+
+ /**
+ * Annotation for integration tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = true, sysProperty = ElasticsearchIntegrationTest.SYSPROP_INTEGRATION)
+ public @interface Integration {
+ }
+
+ /**
+ * Property that controls whether ThirdParty Integration tests are run (not the default).
+ */
+ public static final String SYSPROP_THIRDPARTY = "tests.thirdparty";
+
+ /**
+ * Annotation for third-party integration tests.
+ * <p/>
+ * These are tests the require a third-party service in order to run. They
+ * may require the user to manually configure an external process (such as rabbitmq),
+ * or may additionally require some external configuration (e.g. AWS credentials)
+ * via the {@code tests.config} system property.
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = false, sysProperty = ElasticsearchIntegrationTest.SYSPROP_THIRDPARTY)
+ public @interface ThirdParty {
+ }
+
+ /** node names of the corresponding clusters will start with these prefixes */
+ public static final String SUITE_CLUSTER_NODE_PREFIX = "node_s";
+ public static final String TEST_CLUSTER_NODE_PREFIX = "node_t";
+
+ /**
+ * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
+ */
+ public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
+
+ /**
+ * Key used to eventually switch to using an external cluster and provide its transport addresses
+ */
+ public static final String TESTS_CLUSTER = "tests.cluster";
+
+ /**
+ * Key used to retrieve the index random seed from the index settings on a running node.
+ * The value of this seed can be used to initialize a random context for a specific index.
+ * It's set once per test via a generic index template.
+ */
+ public static final String SETTING_INDEX_SEED = "index.tests.seed";
+
+ /**
+ * Threshold at which indexing switches from frequently async to frequently bulk.
+ */
+ private static final int FREQUENT_BULK_THRESHOLD = 300;
+
+ /**
+ * Threshold at which bulk indexing will always be used.
+ */
+ private static final int ALWAYS_BULK_THRESHOLD = 3000;
+
+ /**
+ * Maximum number of async operations that indexRandom will kick off at one time.
+ */
+ private static final int MAX_IN_FLIGHT_ASYNC_INDEXES = 150;
+
+ /**
+ * Maximum number of documents in a single bulk index request.
+ */
+ private static final int MAX_BULK_INDEX_REQUEST_SIZE = 1000;
+
+ /**
+ * Default minimum number of shards for an index
+ */
+ protected static final int DEFAULT_MIN_NUM_SHARDS = 1;
+
+ /**
+ * Default maximum number of shards for an index
+ */
+ protected static final int DEFAULT_MAX_NUM_SHARDS = 10;
+
+ /**
+ * The current cluster depending on the configured {@link Scope}.
+ * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster.
+ */
+ private static TestCluster currentCluster;
+
+ private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
+
+ private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>();
+
+ private static ElasticsearchIntegrationTest INSTANCE = null; // see @SuiteScope
+ private static Long SUITE_SEED = null;
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ // pick the suite-wide seed before any suite-scoped cluster or fixture is created
+ SUITE_SEED = randomLong();
+ initializeSuiteScope();
+ }
+
+ /**
+ * Builds (or reuses) the cluster for the current {@link Scope}, wipes leftover state and
+ * installs the randomized index template before each test.
+ */
+ protected final void beforeInternal() throws Exception {
+ assert Thread.getDefaultUncaughtExceptionHandler() instanceof ElasticsearchUncaughtExceptionHandler;
+ try {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ switch (currentClusterScope) {
+ case SUITE:
+ assert SUITE_SEED != null : "Suite seed was not initialized";
+ currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED);
+ break;
+ case TEST:
+ // test-scoped clusters get a fresh seed for every test
+ currentCluster = buildAndPutCluster(currentClusterScope, randomLong());
+ break;
+ default:
+ fail("Unknown Scope: [" + currentClusterScope + "]");
+ }
+ cluster().beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster().wipe();
+ randomIndexTemplate();
+ printTestMessage("before");
+ } catch (OutOfMemoryError e) {
+ // a thread-creation OOM usually indicates thread leakage - dump all stacks to aid debugging
+ if (e.getMessage().contains("unable to create new native thread")) {
+ ElasticsearchTestCase.printStackDump(logger);
+ }
+ throw e;
+ }
+ }
+
+ /** Logs a before/after marker, tagged with the class (suite scope) or class#test name. */
+ private void printTestMessage(String message) {
+ if (isSuiteScopedTest(getClass())) {
+ logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message);
+ } else {
+ logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message);
+ }
+ }
+
+ /** Picks a random fielddata {@link Loading} value. */
+ private Loading randomLoadingValues() {
+ return randomFrom(Loading.values());
+ }
+
+ /**
+ * Creates a randomized index template. This template is used to pass in randomized settings on a
+ * per index basis. Allows to enable/disable the randomization for number of shards and replicas
+ */
+ public void randomIndexTemplate() throws IOException {
+
+ // TODO move settings for random directory etc here into the index based randomized settings.
+ if (cluster().size() > 0) {
+ Settings.Builder randomSettingsBuilder =
+ setRandomSettings(getRandom(), Settings.builder())
+ .put(SETTING_INDEX_SEED, getRandom().nextLong());
+
+ randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards())
+ .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas());
+
+ // if the test class is annotated with SuppressCodecs("*"), it means don't use lucene's codec randomization
+ // otherwise, use it, it has assertions and so on that can find bugs.
+ SuppressCodecs annotation = getClass().getAnnotation(SuppressCodecs.class);
+ if (annotation != null && annotation.value().length == 1 && "*".equals(annotation.value()[0])) {
+ randomSettingsBuilder.put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC));
+ } else {
+ randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC);
+ }
+ XContentBuilder mappings = null;
+ if (frequently() && randomDynamicTemplates()) {
+ mappings = XContentFactory.jsonBuilder().startObject().startObject("_default_");
+ if (randomBoolean()) {
+ boolean timestampEnabled = randomBoolean();
+ mappings.startObject(TimestampFieldMapper.NAME)
+ .field("enabled", timestampEnabled);
+ if (timestampEnabled) {
+ mappings.field("doc_values", randomBoolean());
+ }
+ mappings.endObject();
+ }
+ if (randomBoolean()) {
+ mappings.startObject(SizeFieldMapper.NAME)
+ .field("enabled", randomBoolean())
+ .endObject();
+ }
+ mappings.startArray("dynamic_templates")
+ .startObject()
+ .startObject("template-strings")
+ .field("match_mapping_type", "string")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, randomFrom("paged_bytes", "fst"))
+ .field(Loading.KEY, randomLoadingValues())
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-longs")
+ .field("match_mapping_type", "long")
+ .startObject("mapping")
+ .field("doc_values", randomBoolean())
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-doubles")
+ .field("match_mapping_type", "double")
+ .startObject("mapping")
+ .field("doc_values", randomBoolean())
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-geo_points")
+ .field("match_mapping_type", "geo_point")
+ .startObject("mapping")
+ .field("doc_values", randomBoolean())
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-booleans")
+ .field("match_mapping_type", "boolean")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(FieldDataType.FORMAT_KEY, randomFrom("array", "doc_values"))
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray();
+ mappings.endObject().endObject();
+ }
+
+ PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
+ .preparePutTemplate("random_index_template")
+ .setTemplate("*")
+ .setOrder(0)
+ .setSettings(randomSettingsBuilder);
+ if (mappings != null) {
+ logger.info("test using _default_ mappings: [{}]", mappings.bytesStream().bytes().toUtf8());
+ putTemplate.addMapping("_default_", mappings);
+ }
+ assertAcked(putTemplate.execute().actionGet());
+ }
+ }
+
+ /**
+ * Populates {@code builder} with randomized index/node settings (merge, translog, norms,
+ * scripting, throttling, recovery, query cache, ...) to widen test coverage.
+ * Returns the same builder for chaining.
+ */
+ protected Settings.Builder setRandomSettings(Random random, Settings.Builder builder) {
+ setRandomMerge(random, builder);
+ setRandomTranslogSettings(random, builder);
+ setRandomNormsLoading(random, builder);
+ setRandomScriptingSettings(random, builder);
+ if (random.nextBoolean()) {
+ if (random.nextInt(10) == 0) { // do something crazy slow here
+ builder.put(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+ } else {
+ builder.put(IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+ }
+ }
+ if (random.nextBoolean()) {
+ builder.put(IndicesStore.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, false);
+ }
+
+ if (random.nextBoolean()) {
+ if (random.nextInt(10) == 0) { // do something crazy slow here
+ builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+ } else {
+ builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+ }
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, random.nextBoolean());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogWriter.Type.values()).name());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, random.nextBoolean());
+ }
+
+ if (random.nextBoolean()) {
+ // randomize shard corruption checking at startup (off / checksum only / full)
+ builder.put("index.shard.check_on_startup", randomFrom(random, "false", "checksum", "true"));
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32));
+ builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32));
+ }
+ if (random.nextBoolean()) {
+ builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
+ }
+ return builder;
+ }
+
+ private static Settings.Builder setRandomScriptingSettings(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, RandomInts.randomIntBetween(random, -100, 2000));
+ }
+ if (random.nextBoolean()) {
+ builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)));
+ }
+ return builder;
+ }
+
+    /**
+     * Randomly picks compound-file format, merge policy and merge scheduler settings.
+     * A {@code null} merge policy pick leaves the node default in place.
+     * Returns the same builder for chaining.
+     */
+    private static Settings.Builder setRandomMerge(Random random, Settings.Builder builder) {
+        if (random.nextBoolean()) {
+            // the compound format setting accepts either a boolean or a ratio
+            builder.put(AbstractMergePolicyProvider.INDEX_COMPOUND_FORMAT,
+                    random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
+        }
+        Class<? extends MergePolicyProvider<?>> mergePolicy = TieredMergePolicyProvider.class;
+        switch (random.nextInt(5)) {
+            case 4:
+                mergePolicy = LogByteSizeMergePolicyProvider.class;
+                break;
+            case 3:
+                mergePolicy = LogDocMergePolicyProvider.class;
+                break;
+            case 0:
+                mergePolicy = null;
+        }
+        if (mergePolicy != null) {
+            builder.put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, mergePolicy.getName());
+        }
+
+        switch (random.nextInt(4)) {
+            case 3:
+                // pass the class *name*, consistent with the MERGE_POLICY_TYPE_KEY line above;
+                // a raw Class value would presumably be stringified via toString() ("class <name>") - fixed
+                builder.put(MergeSchedulerModule.MERGE_SCHEDULER_TYPE_KEY, ConcurrentMergeSchedulerProvider.class.getName());
+                final int maxThreadCount = RandomInts.randomIntBetween(random, 1, 4);
+                // maxMergeCount must be >= maxThreadCount
+                final int maxMergeCount = RandomInts.randomIntBetween(random, maxThreadCount, maxThreadCount + 4);
+                builder.put(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT, maxMergeCount);
+                builder.put(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT, maxThreadCount);
+                break;
+        }
+
+        return builder;
+    }
+
+ private static Settings.Builder setRandomNormsLoading(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(SearchService.NORMS_LOADING_KEY, RandomPicks.randomFrom(random, Arrays.asList(MappedFieldType.Loading.EAGER, MappedFieldType.Loading.LAZY)));
+ }
+ return builder;
+ }
+
+ /**
+ * Randomizes translog flush thresholds (ops/size/period/interval), flush disabling and
+ * durability. Returns the same builder for chaining.
+ */
+ private static Settings.Builder setRandomTranslogSettings(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, RandomInts.randomIntBetween(random, 1, 10000));
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB));
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogService.INDEX_TRANSLOG_FLUSH_THRESHOLD_PERIOD, TimeValue.timeValueMinutes(RandomInts.randomIntBetween(random, 1, 60)));
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogService.INDEX_TRANSLOG_FLUSH_INTERVAL, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 1, 10000)));
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, random.nextBoolean());
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, RandomPicks.randomFrom(random, Translog.Durabilty.values()));
+ }
+ return builder;
+ }
+
+ /**
+ * Builds a test cluster inside a private randomness context seeded with {@code seed},
+ * so cluster construction does not consume the test's own random sequence.
+ */
+ private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception {
+ return RandomizedContext.current().runWithPrivateRandomness(new Randomness(seed), new Callable<TestCluster>() {
+ @Override
+ public TestCluster call() throws Exception {
+ return buildTestCluster(scope, seed);
+ }
+ });
+ }
+
+ /**
+ * Returns the cluster for this test class, reusing a cached SUITE-scoped cluster when present
+ * and rebuilding a fresh one for TEST scope. Any other cached clusters are closed first.
+ */
+ private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception {
+ final Class<?> clazz = this.getClass();
+ TestCluster testCluster = clusters.remove(clazz); // remove this cluster first
+ clearClusters(); // all leftovers are gone by now... this is really just a double safety if we miss something somewhere
+ switch (currentClusterScope) {
+ case SUITE:
+ if (testCluster == null) { // only build if it's not there yet
+ testCluster = buildWithPrivateContext(currentClusterScope, seed);
+ }
+ break;
+ case TEST:
+ // close the previous one and create a new one
+ IOUtils.closeWhileHandlingException(testCluster);
+ testCluster = buildTestCluster(currentClusterScope, seed);
+ break;
+ }
+ clusters.put(clazz, testCluster);
+ return testCluster;
+ }
+
+    /** Closes and forgets all cached per-class clusters. */
+    private static void clearClusters() throws IOException {
+        if (clusters.isEmpty()) {
+            return; // nothing cached
+        }
+        IOUtils.close(clusters.values());
+        clusters.clear();
+    }
+
+ /**
+ * Per-test / per-class teardown: verifies no cluster metadata leaked, checks cluster
+ * consistency, wipes indices and closes/clears clusters according to scope.
+ */
+ protected final void afterInternal(boolean afterClass) throws Exception {
+ boolean success = false;
+ try {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ printTestMessage("cleaning up after");
+ clearDisruptionScheme();
+ try {
+ if (cluster() != null) {
+ if (currentClusterScope != Scope.TEST) {
+ // suite-scoped clusters are reused, so tests must not leave settings behind
+ MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData
+ .persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData
+ .transientSettings().getAsMap().size(), equalTo(0));
+ }
+ ensureClusterSizeConsistency();
+ ensureClusterStateConsistency();
+ beforeIndexDeletion();
+ cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete
+ if (afterClass || currentClusterScope == Scope.TEST) {
+ cluster().close();
+ }
+ cluster().assertAfterTest();
+ }
+ } finally {
+ if (currentClusterScope == Scope.TEST) {
+ clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
+ }
+ }
+ printTestMessage("cleaned up after");
+ success = true;
+ } finally {
+ if (!success) {
+ // if we failed here that means that something broke horribly so we should clear all clusters
+ // TODO: just let the exception happen, WTF is all this horseshit
+ // afterTestRule.forceFailure();
+ }
+ }
+ }
+
+ /** Hook invoked before test indices are wiped; subclasses may add extra checks. */
+ protected void beforeIndexDeletion() {
+ cluster().beforeIndexDeletion();
+ }
+
+ /** Returns the current cluster (suite- or test-scoped depending on configuration). */
+ public static TestCluster cluster() {
+ return currentCluster;
+ }
+
+ /** Returns true when the current cluster is an {@link InternalTestCluster} (mutable). */
+ public static boolean isInternalCluster() {
+ return (currentCluster instanceof InternalTestCluster);
+ }
+
+ /** Returns the current cluster as an {@link InternalTestCluster}; fails for external clusters. */
+ public static InternalTestCluster internalCluster() {
+ if (!isInternalCluster()) {
+ throw new UnsupportedOperationException("current test cluster is immutable");
+ }
+ return (InternalTestCluster) currentCluster;
+ }
+
+ public ClusterService clusterService() {
+ return internalCluster().clusterService();
+ }
+
+ /** Returns a client to a random node of the cluster. */
+ public static Client client() {
+ return client(null);
+ }
+
+ /** Returns a client to the given node, or a random node when {@code node} is null. */
+ public static Client client(@Nullable String node) {
+ if (node != null) {
+ return internalCluster().client(node);
+ }
+ Client client = cluster().client();
+ if (frequently()) {
+ // wrap most of the time to randomize request-level options
+ client = new RandomizingClient(client, getRandom());
+ }
+ return client;
+ }
+
+ /** Returns a client connected to a data node of the internal cluster. */
+ public static Client dataNodeClient() {
+ Client client = internalCluster().dataNodeClient();
+ if (frequently()) {
+ client = new RandomizingClient(client, getRandom());
+ }
+ return client;
+ }
+
+ public static Iterable<Client> clients() {
+ return cluster();
+ }
+
+ /** Minimum shard count used by {@link #numberOfShards()}; override to change the default. */
+ protected int minimumNumberOfShards() {
+ return DEFAULT_MIN_NUM_SHARDS;
+ }
+
+ /** Maximum shard count used by {@link #numberOfShards()}; override to change the default. */
+ protected int maximumNumberOfShards() {
+ return DEFAULT_MAX_NUM_SHARDS;
+ }
+
+ /** Picks a random shard count between the configured minimum and maximum (inclusive). */
+ protected int numberOfShards() {
+ return between(minimumNumberOfShards(), maximumNumberOfShards());
+ }
+
+ protected int minimumNumberOfReplicas() {
+ return 0;
+ }
+
+ protected int maximumNumberOfReplicas() {
+ //use either 0 or 1 replica, yet a higher amount when possible, but only rarely
+ int maxNumReplicas = Math.max(0, cluster().numDataNodes() - 1);
+ return frequently() ? Math.min(1, maxNumReplicas) : maxNumReplicas;
+ }
+
+ /** Picks a random replica count between the configured minimum and maximum (inclusive). */
+ protected int numberOfReplicas() {
+ return between(minimumNumberOfReplicas(), maximumNumberOfReplicas());
+ }
+
+
+ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
+ internalCluster().setDisruptionScheme(scheme);
+ }
+
+ /** Removes any active disruption scheme; no-op for external (non-internal) clusters. */
+ public void clearDisruptionScheme() {
+ if (isInternalCluster()) {
+ internalCluster().clearDisruptionScheme();
+ }
+ }
+
+ /**
+ * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
+ * This method can be overwritten by subclasses to set defaults for the indices that are created by the test.
+ * By default it returns a settings object that sets a random number of shards. Number of shards and replicas
+ * can be controlled through specific methods.
+ */
+ public Settings indexSettings() {
+ Settings.Builder builder = Settings.builder();
+ int numberOfShards = numberOfShards();
+ if (numberOfShards > 0) {
+ builder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build();
+ }
+ int numberOfReplicas = numberOfReplicas();
+ if (numberOfReplicas >= 0) {
+ builder.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build();
+ }
+ // 30% of the time
+ if (randomInt(9) < 3) {
+ final Path dataPath = createTempDir();
+ logger.info("using custom data_path for index: [{}]", dataPath);
+ builder.put(IndexMetaData.SETTING_DATA_PATH, dataPath);
+ }
+ return builder.build();
+ }
+
+ /**
+ * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
+ * already exists this method will fail and wipe all the indices created so far.
+ */
+ public final void createIndex(String... names) {
+
+ List<String> created = new ArrayList<>();
+ for (String name : names) {
+ boolean success = false;
+ try {
+ assertAcked(prepareCreate(name));
+ created.add(name);
+ success = true;
+ } finally {
+ if (!success && !created.isEmpty()) {
+ cluster().wipeIndices(created.toArray(new String[created.size()]));
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index) {
+ return client().admin().indices().prepareCreate(index).setSettings(indexSettings());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
+ return prepareCreate(index, numNodes, Settings.builder());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, Settings.Builder settingsBuilder) {
+ internalCluster().ensureAtLeastNumDataNodes(numNodes);
+
+ Settings.Builder builder = Settings.builder().put(indexSettings()).put(settingsBuilder.build());
+
+ if (numNodes > 0) {
+ getExcludeSettings(index, numNodes, builder);
+ }
+ return client().admin().indices().prepareCreate(index).setSettings(builder.build());
+ }
+
+ private Settings.Builder getExcludeSettings(String index, int num, Settings.Builder builder) {
+ String exclude = Joiner.on(',').join(internalCluster().allDataNodesButN(num));
+ builder.put("index.routing.allocation.exclude._name", exclude);
+ return builder;
+ }
+
+ /**
+ * Waits until all nodes have no pending tasks.
+ */
+ public void waitNoPendingTasksOnAll() throws Exception {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get());
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ for (Client client : clients()) {
+ ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get();
+ assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0));
+ PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get();
+ assertThat("client " + client + " still has pending tasks " + pendingTasks.prettyPrint(), pendingTasks, Matchers.emptyIterable());
+ clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get();
+ assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0));
+ }
+ }
+ });
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get());
+ }
+
+ /**
+ * Waits till a (pattern) field name mappings concretely exists on all nodes. Note, this waits for the current
+ * started shards and checks for concrete mappings.
+ */
+ public void waitForConcreteMappingsOnAll(final String index, final String type, final String... fieldNames) throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ Set<String> nodes = internalCluster().nodesInclude(index);
+ assertThat(nodes, Matchers.not(Matchers.emptyIterable()));
+ for (String node : nodes) {
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
+ IndexService indexService = indicesService.indexService(index);
+ assertThat("index service doesn't exists on " + node, indexService, notNullValue());
+ DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
+ assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue());
+ for (String fieldName : fieldNames) {
+ Collection<String> matches = documentMapper.mappers().simpleMatchToFullName(fieldName);
+ assertThat("field " + fieldName + " doesn't exists on " + node, matches, Matchers.not(emptyIterable()));
+ }
+ }
+ }
+ });
+ waitForMappingOnMaster(index, type, fieldNames);
+ }
+
+ /**
+ * Waits for the given mapping type to exists on the master node.
+ */
+ public void waitForMappingOnMaster(final String index, final String type, final String... fieldNames) throws Exception {
+ assertBusy(new Callable() {
+ @Override
+ public Object call() throws Exception {
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).setTypes(type).get();
+ ImmutableOpenMap<String, MappingMetaData> mappings = response.getMappings().get(index);
+ assertThat(mappings, notNullValue());
+ MappingMetaData mappingMetaData = mappings.get(type);
+ assertThat(mappingMetaData, notNullValue());
+
+ Map<String, Object> mappingSource = mappingMetaData.getSourceAsMap();
+ assertFalse(mappingSource.isEmpty());
+ assertTrue(mappingSource.containsKey("properties"));
+
+ for (String fieldName : fieldNames) {
+ Map<String, Object> mappingProperties = (Map<String, Object>) mappingSource.get("properties");
+ if (fieldName.indexOf('.') != -1) {
+ fieldName = fieldName.replace(".", ".properties.");
+ }
+ assertThat("field " + fieldName + " doesn't exists in mapping " + mappingMetaData.source().string(), XContentMapValues.extractValue(fieldName, mappingProperties), notNullValue());
+ }
+
+ return null;
+ }
+ });
+ }
+
+ /**
+ * Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders.
+ * Yet if the shards can't be allocated on any other node shards for this index will remain allocated on
+ * more than <code>n</code> nodes.
+ */
+ public void allowNodes(String index, int n) {
+ assert index != null;
+ internalCluster().ensureAtLeastNumDataNodes(n);
+ Settings.Builder builder = Settings.builder();
+ if (n > 0) {
+ getExcludeSettings(index, n, builder);
+ }
+ Settings build = builder.build();
+ if (!build.getAsMap().isEmpty()) {
+ logger.debug("allowNodes: updating [{}]'s setting to [{}]", index, build.toDelimitedString(';'));
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ return ensureGreen(TimeValue.timeValueSeconds(30), indices);
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ *
+ * @param timeout time out value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest}
+ */
+ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ fail("timed out waiting for green state");
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ logger.debug("indices {} are green", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits for all relocating shards to become active using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation() {
+ return waitForRelocation(null);
+ }
+
+ /**
+ * Waits for all relocating shards to become active and the cluster has reached the given health status
+ * using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
+ ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
+ if (status != null) {
+ request.waitForStatus(status);
+ }
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ if (status != null) {
+ assertThat(actionGet.getStatus(), equalTo(status));
+ }
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits until at least a give number of document is visible for searchers
+ *
+ * @param numDocs number of documents to wait for.
+ * @return the actual number of docs seen.
+ * @throws InterruptedException
+ */
+ public long waitForDocs(final long numDocs) throws InterruptedException {
+ return waitForDocs(numDocs, null);
+ }
+
+ /**
+ * Waits until at least a give number of document is visible for searchers
+ *
+ * @param numDocs number of documents to wait for
+ * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied it will be first checked for documents indexed.
+ * This saves on unneeded searches.
+ * @return the actual number of docs seen.
+ * @throws InterruptedException
+ */
+ public long waitForDocs(final long numDocs, final @Nullable BackgroundIndexer indexer) throws InterruptedException {
+ // indexing threads can wait for up to ~1m before retrying when they first try to index into a shard which is not STARTED.
+ return waitForDocs(numDocs, 90, TimeUnit.SECONDS, indexer);
+ }
+
+ /**
+ * Waits until at least a give number of document is visible for searchers
+ *
+ * @param numDocs number of documents to wait for
+ * @param maxWaitTime if not progress have been made during this time, fail the test
+ * @param maxWaitTimeUnit the unit in which maxWaitTime is specified
+ * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied it will be first checked for documents indexed.
+ * This saves on unneeded searches.
+ * @return the actual number of docs seen.
+ * @throws InterruptedException
+ */
+ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit, final @Nullable BackgroundIndexer indexer)
+ throws InterruptedException {
+ final AtomicLong lastKnownCount = new AtomicLong(-1);
+ long lastStartCount = -1;
+ Predicate<Object> testDocs = new Predicate<Object>() {
+ @Override
+ public boolean apply(Object o) {
+ if (indexer != null) {
+ lastKnownCount.set(indexer.totalIndexedDocs());
+ }
+ if (lastKnownCount.get() >= numDocs) {
+ try {
+ long count = client().prepareCount().setQuery(matchAllQuery()).execute().actionGet().getCount();
+ if (count == lastKnownCount.get()) {
+ // no progress - try to refresh for the next time
+ client().admin().indices().prepareRefresh().get();
+ }
+ lastKnownCount.set(count);
+ } catch (Throwable e) { // count now acts like search and barfs if all shards failed...
+ logger.debug("failed to executed count", e);
+ return false;
+ }
+ logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs);
+ } else {
+ logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount.get(), numDocs);
+ }
+ return lastKnownCount.get() >= numDocs;
+ }
+ };
+
+ while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) {
+ if (lastStartCount == lastKnownCount.get()) {
+ // we didn't make any progress
+ fail("failed to reach " + numDocs + "docs");
+ }
+ lastStartCount = lastKnownCount.get();
+ }
+ return lastKnownCount.get();
+ }
+
+
+ /**
+ * Sets the cluster's minimum master node and make sure the response is acknowledge.
+ * Note: this doesn't guaranty the new settings is in effect, just that it has been received bu all nodes.
+ */
+ public void setMinimumMasterNodes(int n) {
+ assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n))
+ .get().isAcknowledged());
+ }
+
+ /**
+ * Ensures the cluster has a yellow state via the cluster health API.
+ * On timeout the full cluster state and pending tasks are logged before failing,
+ * to ease debugging of the stuck cluster.
+ */
+ public ClusterHealthStatus ensureYellow(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ // assertThat with equalTo(false) yields a descriptive failure message rather than a bare fail()
+ assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false));
+ }
+ logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Prints the current cluster state as debug logging.
+ */
+ public void logClusterState() {
+ final String state = client().admin().cluster().prepareState().get().getState().prettyPrint();
+ final String pendingTasks = client().admin().cluster().preparePendingClusterTasks().get().prettyPrint();
+ logger.debug("cluster state:\n{}\n{}", state, pendingTasks);
+ }
+
+ /**
+ * Prints the segments info for the given indices as debug logging.
+ * Declared to throw Exception because rendering the response to JSON may throw IOException.
+ */
+ public void logSegmentsState(String... indices) throws Exception {
+ IndicesSegmentResponse segsRsp = client().admin().indices().prepareSegments(indices).get();
+ logger.debug("segments {} state: \n{}", indices.length == 0 ? "[_all]" : indices,
+ segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string());
+ }
+
+ /**
+ * Prints current memory stats as info logging. Only the JVM section of the
+ * node stats is requested (stats cleared, then jvm re-enabled).
+ */
+ public void logMemoryStats() {
+ logger.info("memory: {}", XContentHelper.toString(client().admin().cluster().prepareNodesStats().clear().setJvm(true).get()));
+ }
+
+ // Asserts that the cluster health API reports exactly as many nodes as the test cluster thinks it has.
+ void ensureClusterSizeConsistency() {
+ if (cluster() != null) { // if static init fails the cluster can be null
+ logger.trace("Check consistency for [{}] nodes", cluster().size());
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get());
+ }
+ }
+
+ /**
+ * Verifies that all nodes that have the same version of the cluster state as master have same cluster state.
+ * The comparison is done on three levels: state UUID, serialized-string length, and a JSON map comparison
+ * that ignores array order (serialization bytes cannot be compared directly, see inline comment).
+ */
+ protected void ensureClusterStateConsistency() throws IOException {
+ if (cluster() != null) {
+ // Address resolution is disabled for the duration of the check so that serialized node
+ // addresses compare consistently; the previous value is restored in the finally block.
+ boolean getResolvedAddress = InetSocketTransportAddress.getResolveAddress();
+ try {
+ InetSocketTransportAddress.setResolveAddress(false);
+ ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState();
+ byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState);
+ // remove local node reference by round-tripping through the wire format with a null local node
+ masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null);
+ Map<String, Object> masterStateMap = convertToMap(masterClusterState);
+ int masterClusterStateSize = masterClusterState.toString().length();
+ String masterId = masterClusterState.nodes().masterNodeId();
+ for (Client client : cluster()) {
+ // setLocal(true) makes each node answer with its own view of the cluster state
+ ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState();
+ byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState);
+ // remove local node reference
+ localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null);
+ final Map<String, Object> localStateMap = convertToMap(localClusterState);
+ final int localClusterStateSize = localClusterState.toString().length();
+ // Check that the non-master node has the same version of the cluster state as the master and that this node didn't disconnect from the master
+ if (masterClusterState.version() == localClusterState.version() && localClusterState.nodes().nodes().containsKey(masterId)) {
+ try {
+ assertEquals("clusterstate UUID does not match", masterClusterState.uuid(), localClusterState.uuid());
+ // We cannot compare serialization bytes since serialization order of maps is not guaranteed
+ // but we can compare serialization sizes - they should be the same
+ assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize);
+ // Compare JSON serialization
+ assertTrue("clusterstate JSON serialization does not match", mapsEqualIgnoringArrayOrder(masterStateMap, localStateMap));
+ } catch (AssertionError error) {
+ // dump both states before rethrowing so the mismatch can be diagnosed from the log
+ logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString());
+ throw error;
+ }
+ }
+ }
+ } finally {
+ InetSocketTransportAddress.setResolveAddress(getResolvedAddress);
+ }
+ }
+
+ }
+
+ /**
+ * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each
+ * shard is available on the cluster.
+ */
+ protected ClusterHealthStatus ensureSearchable(String... indices) {
+ // this is just a temporary thing but it's easier to change if it is encapsulated.
+ return ensureGreen(indices);
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, XContentBuilder source) {
+ return client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareGet(index, type, id).execute().actionGet();
+ * </pre>
+ */
+ protected final GetResponse get(String index, String type, String id) {
+ return client()
+ .prepareGet(index, type, id)
+ .execute()
+ .actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, XContentBuilder source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Object... source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <p/>
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ * <p/>
+ * where source is a String.
+ */
+ protected final IndexResponse index(String index, String type, String id, String source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Waits for relocations and refreshes all indices in the cluster.
+ *
+ * @see #waitForRelocation()
+ */
+ protected final RefreshResponse refresh() {
+ waitForRelocation();
+ // TODO RANDOMIZE with flush?
+ final RefreshResponse response = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(response);
+ return response;
+ }
+
+ /**
+ * Flushes the given indices and then refreshes all indices in the cluster.
+ * Note: the refresh is always cluster-wide, regardless of the indices argument.
+ */
+ protected final void flushAndRefresh(String... indices) {
+ flush(indices);
+ refresh();
+ }
+
+ /**
+ * Flush some or all indices in the cluster.
+ * Shard failures with status {@code SERVICE_UNAVAILABLE} are tolerated
+ * (presumably shards that are not available yet, e.g. still initializing — confirm);
+ * any other failure status fails the test.
+ */
+ protected final FlushResponse flush(String... indices) {
+ waitForRelocation();
+ FlushResponse actionGet = client().admin().indices().prepareFlush(indices).setWaitIfOngoing(true).execute().actionGet();
+ for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
+ assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ return actionGet;
+ }
+
+ /**
+ * Waits for all relocations and optimized all indices in the cluster to 1 segment.
+ */
+ protected OptimizeResponse optimize() {
+ waitForRelocation();
+ OptimizeResponse actionGet = client().admin().indices().prepareOptimize().setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Returns <code>true</code> iff the given index exists otherwise <code>false</code>
+ */
+ protected boolean indexExists(String index) {
+ IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
+ return actionGet.isExists();
+ }
+
+ /**
+ * Returns a random admin client. This client can either be a node or a transport client pointing to any of
+ * the nodes in the cluster (the randomness comes from {@code client()}).
+ */
+ protected AdminClient admin() {
+ return client().admin();
+ }
+
+ /**
+ * Convenience method that forwards to {@link #indexRandom(boolean, List)}.
+ */
+ public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, Arrays.asList(builders));
+ }
+
+ /**
+ * Convenience method that forwards to {@link #indexRandom(boolean, boolean, java.util.List)}.
+ */
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders));
+ }
+
+
+ // Type used for the bogus dummy documents injected by indexRandom; the trailing underscores make a
+ // collision with a real test type unlikely.
+ private static final String RANDOM_BOGUS_TYPE = "RANDOM_BOGUS_TYPE______";
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creations. Some features might have bug when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed. Additionally if <tt>true</tt>
+ * some empty dummy documents may be randomly inserted into the document list and deleted once all documents are indexed.
+ * This is useful to produce deleted documents on the server side.
+ * @param builders the documents to index.
+ * @see #indexRandom(boolean, boolean, java.util.List)
+ */
+ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, forceRefresh, builders);
+ }
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creations. Some features might have bug when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
+ * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
+ * all documents are indexed. This is useful to produce deleted documents on the server side.
+ * @param builders the documents to index.
+ */
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, dummyDocuments, true, builders);
+ }
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creations. Some features might have bug when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
+ * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
+ * all documents are indexed. This is useful to produce deleted documents on the server side.
+ * @param maybeFlush if <tt>true</tt> this method may randomly execute full flushes after index operations.
+ * @param builders the documents to index.
+ */
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+
+ Random random = getRandom();
+ // collect the set of indices touched by the builders so bogus docs / refreshes hit the same indices
+ Set<String> indicesSet = new HashSet<>();
+ for (IndexRequestBuilder builder : builders) {
+ indicesSet.add(builder.request().index());
+ }
+ Set<Tuple<String, String>> bogusIds = new HashSet<>();
+ if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
+ // copy the caller's list before mutating it with injected bogus docs
+ builders = new ArrayList<>(builders);
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ // inject some bogus docs
+ final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
+ final int unicodeLen = between(1, 10);
+ for (int i = 0; i < numBogusDocs; i++) {
+ // the counter suffix guarantees id uniqueness even if the random unicode prefix repeats
+ String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet());
+ String index = RandomPicks.randomFrom(random, indices);
+ bogusIds.add(new Tuple<>(index, id));
+ builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}"));
+ }
+ }
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ Collections.shuffle(builders, random);
+ final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
+ List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
+ // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
+ if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
+ if (frequently()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
+ postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
+ }
+ } else {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute().actionGet();
+ postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
+ }
+ }
+ } else {
+ // bulk path: split the builders into randomly sized partitions, capped at MAX_BULK_INDEX_REQUEST_SIZE
+ List<List<IndexRequestBuilder>> partition = Lists.partition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE,
+ Math.max(1, (int) (builders.size() * randomDouble()))));
+ logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
+ for (List<IndexRequestBuilder> segmented : partition) {
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ for (IndexRequestBuilder indexRequestBuilder : segmented) {
+ bulkBuilder.add(indexRequestBuilder);
+ }
+ BulkResponse actionGet = bulkBuilder.execute().actionGet();
+ assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
+ }
+ }
+ // barrier: wait for all async index/refresh/flush/optimize operations started above
+ for (CountDownLatch operation : inFlightAsyncOperations) {
+ operation.await();
+ }
+ final List<Throwable> actualErrors = new ArrayList<>();
+ for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
+ if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
+ tuple.v1().execute().actionGet(); // re-index if rejected
+ } else {
+ actualErrors.add(tuple.v2());
+ }
+ }
+ assertThat(actualErrors, emptyIterable());
+ if (!bogusIds.isEmpty()) {
+ // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
+ for (Tuple<String, String> doc : bogusIds) {
+ // see https://github.com/elasticsearch/elasticsearch/issues/8706
+ final DeleteResponse deleteResponse = client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
+ if (deleteResponse.isFound() == false) {
+ logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
+ }
+ }
+ }
+ if (forceRefresh) {
+ assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+ }
+ }
+
+ private AtomicInteger dummmyDocIdGenerator = new AtomicInteger();
+
+ /** Disables translog flushing for the specified index. The update is not asserted acknowledged. */
+ public static void disableTranslogFlush(String index) {
+ Settings settings = Settings.builder().put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, true).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Enables translog flushing for the specified index. The update is not asserted acknowledged. */
+ public static void enableTranslogFlush(String index) {
+ Settings settings = Settings.builder().put(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH, false).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Disables an index block for the specified index; {@code block} is the block setting key. */
+ public static void disableIndexBlock(String index, String block) {
+ Settings settings = Settings.builder().put(block, false).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Enables an index block for the specified index; {@code block} is the block setting key. */
+ public static void enableIndexBlock(String index, String block) {
+ Settings settings = Settings.builder().put(block, true).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Sets or unsets the cluster read_only mode (as a transient setting) and asserts the update is acknowledged. **/
+ public static void setClusterReadOnly(boolean value) {
+ Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+ }
+
+ private static CountDownLatch newLatch(List<CountDownLatch> latches) {
+ CountDownLatch l = new CountDownLatch(1);
+ latches.add(l);
+ return l;
+ }
+
+ /**
+ * Maybe refresh, optimize, or flush then always make sure there aren't too many in flight async operations.
+ * All async operations register a latch via {@link #newLatch} so callers can await them later;
+ * when more than MAX_IN_FLIGHT_ASYNC_INDEXES are pending, random ones are awaited here to cap concurrency.
+ */
+ private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlightAsyncOperations, boolean maybeFlush) throws InterruptedException {
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
+ new LatchedActionListener<RefreshResponse>(newLatch(inFlightAsyncOperations)));
+ } else if (maybeFlush && rarely()) {
+ // randomly pick between a regular flush and a synced flush
+ if (randomBoolean()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
+ new LatchedActionListener<FlushResponse>(newLatch(inFlightAsyncOperations)));
+ } else {
+ internalCluster().getInstance(SyncedFlushService.class).attemptSyncedFlush(indices, IndicesOptions.lenientExpandOpen(),
+ new LatchedActionListener<IndicesSyncedFlushResult>(newLatch(inFlightAsyncOperations)));
+ }
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
+ new LatchedActionListener<OptimizeResponse>(newLatch(inFlightAsyncOperations)));
+ }
+ }
+ // back-pressure: await randomly chosen in-flight operations until under the cap
+ while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
+ int waitFor = between(0, inFlightAsyncOperations.size() - 1);
+ inFlightAsyncOperations.remove(waitFor).await();
+ }
+ }
+
+ /**
+ * The scope of a test cluster used together with
+ * {@link org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope} annotations on {@link org.elasticsearch.test.ElasticsearchIntegrationTest} subclasses.
+ */
+ public enum Scope {
+ /**
+ * A cluster shared across all methods in a single test suite
+ */
+ SUITE,
+ /**
+ * A cluster exclusive to a single test
+ */
+ TEST
+ }
+
+ /**
+ * Defines a cluster scope for a {@link org.elasticsearch.test.ElasticsearchIntegrationTest} subclass.
+ * By default if no {@link ClusterScope} annotation is present {@link org.elasticsearch.test.ElasticsearchIntegrationTest.Scope#SUITE} is used
+ * together with randomly chosen settings like number of nodes etc.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ public @interface ClusterScope {
+ /**
+ * Returns the scope. {@link org.elasticsearch.test.ElasticsearchIntegrationTest.Scope#SUITE} is default.
+ */
+ Scope scope() default Scope.SUITE;
+
+ /**
+ * Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means
+ * a random number of nodes is used, where the minimum and maximum number of nodes
+ * are either the specified ones or the default ones if not specified.
+ */
+ int numDataNodes() default -1;
+
+ /**
+ * Returns the minimum number of nodes in the cluster. Default is <tt>-1</tt>.
+ * Ignored when {@link ClusterScope#numDataNodes()} is set.
+ */
+ int minNumDataNodes() default -1;
+
+ /**
+ * Returns the maximum number of nodes in the cluster. Default is <tt>-1</tt>.
+ * Ignored when {@link ClusterScope#numDataNodes()} is set.
+ */
+ int maxNumDataNodes() default -1;
+
+ /**
+ * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a
+ * negative value means that the number of client nodes will be randomized.
+ */
+ int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES;
+
+ /**
+ * Returns the transport client ratio. By default this returns <code>-1</code> which means a random
+ * ratio in the interval <code>[0..1]</code> is used.
+ */
+ double transportClientRatio() default -1;
+
+ /**
+ * Returns whether or not to enable dynamic templates for the mappings. Default is <tt>true</tt>.
+ */
+ boolean randomDynamicTemplates() default true;
+ }
+
+ /**
+ * ActionListener that counts down the given latch when the action completes, whether it
+ * succeeded or failed. Failures are logged and forwarded to {@link #addError(Throwable)},
+ * which is a no-op here and can be overridden by subclasses to record the error.
+ */
+ private class LatchedActionListener<Response> implements ActionListener<Response> {
+ private final CountDownLatch latch;
+
+ public LatchedActionListener(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public final void onResponse(Response response) {
+ latch.countDown();
+ }
+
+ @Override
+ public final void onFailure(Throwable t) {
+ try {
+ logger.info("Action Failed", t);
+ addError(t);
+ } finally {
+ // always release waiters, even if addError throws
+ latch.countDown();
+ }
+ }
+
+ // Hook for subclasses to record the failure; intentionally empty here.
+ protected void addError(Throwable t) {
+ }
+
+ }
+
+ /**
+ * A {@link LatchedActionListener} that additionally records each failure together with the
+ * builder (payload) that produced it, so the caller can inspect or retry failed requests.
+ */
+ private class PayloadLatchedActionListener<Response, T> extends LatchedActionListener<Response> {
+ private final CopyOnWriteArrayList<Tuple<T, Throwable>> errors;
+ private final T builder;
+
+ public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList<Tuple<T, Throwable>> errors) {
+ super(latch);
+ this.errors = errors;
+ this.builder = builder;
+ }
+
+ @Override
+ protected void addError(Throwable t) {
+ errors.add(new Tuple<>(builder, t));
+ }
+
+ }
+
+ /**
+ * Clears the given scroll Ids and asserts the request succeeded.
+ */
+ public void clearScroll(String... scrollIds) {
+ final ClearScrollResponse response = client().prepareClearScroll()
+ .setScrollIds(Arrays.asList(scrollIds))
+ .get();
+ assertThat(response.isSucceeded(), equalTo(true));
+ }
+
+ private static ClusterScope getAnnotation(Class<?> clazz) {
+ if (clazz == Object.class || clazz == ElasticsearchIntegrationTest.class) {
+ return null;
+ }
+ ClusterScope annotation = clazz.getAnnotation(ClusterScope.class);
+ if (annotation != null) {
+ return annotation;
+ }
+ return getAnnotation(clazz.getSuperclass());
+ }
+
+ // Resolves the cluster scope for this test instance's concrete class.
+ private Scope getCurrentClusterScope() {
+ return getCurrentClusterScope(this.getClass());
+ }
+
+ private static Scope getCurrentClusterScope(Class<?> clazz) {
+ ClusterScope annotation = getAnnotation(clazz);
+ // if we are not annotated assume suite!
+ return annotation == null ? Scope.SUITE : annotation.scope();
+ }
+
+ private int getNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? -1 : annotation.numDataNodes();
+ }
+
+ private int getMinNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null || annotation.minNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes();
+ }
+
+ private int getMaxNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null || annotation.maxNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes();
+ }
+
+ private int getNumClientNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null ? InternalTestCluster.DEFAULT_NUM_CLIENT_NODES : annotation.numClientNodes();
+ }
+
+ private boolean randomDynamicTemplates() {
+ ClusterScope annotation = getAnnotation(this.getClass());
+ return annotation == null || annotation.randomDynamicTemplates();
+ }
+
+ /**
+ * This method is used to obtain settings for the <tt>Nth</tt> node in the cluster.
+ * Nodes in this cluster are associated with an ordinal number such that nodes can
+ * be started with specific configurations. This method might be called multiple
+ * times with the same ordinal and is expected to return the same value for each invocation.
+ * In other words subclasses must ensure this method is idempotent.
+ */
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ // Default the watermarks to absurdly low to prevent the tests
+ // from failing on nodes without enough disk space
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b")
+ // enable both indexed and inline scripts for tests
+ .put("script.indexed", "on")
+ .put("script.inline", "on")
+ // wait short time for other active shards before actually deleting, default 30s not needed in tests
+ .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(1, TimeUnit.SECONDS))
+ .build();
+ }
+
+ /**
+ * This method is used to obtain additional settings for clients created by the internal cluster.
+ * These settings will be applied on the client in addition to some randomized settings defined in
+ * the cluster. These settings will also override any other settings the internal cluster might
+ * add by default.
+ */
+ protected Settings transportClientSettings() {
+ return Settings.EMPTY;
+ }
+
+ private ExternalTestCluster buildExternalCluster(String clusterAddresses) {
+ String[] stringAddresses = clusterAddresses.split(",");
+ TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
+ int i = 0;
+ for (String stringAddress : stringAddresses) {
+ String[] split = stringAddress.split(":");
+ if (split.length < 2) {
+ throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
+ }
+ try {
+ transportAddresses[i++] = new InetSocketTransportAddress(split[0], Integer.valueOf(split[1]));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
+ }
+ }
+ return new ExternalTestCluster(transportAddresses);
+ }
+
+ /**
+ * Builds the test cluster for the given scope and random seed. If the {@code TESTS_CLUSTER}
+ * system property is set, an external cluster is used instead of spinning up an internal one
+ * (only allowed for SUITE scope).
+ */
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ String clusterAddresses = System.getProperty(TESTS_CLUSTER);
+ if (Strings.hasLength(clusterAddresses)) {
+ if (scope == Scope.TEST) {
+ throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER);
+ }
+ return buildExternalCluster(clusterAddresses);
+ }
+
+ // pick a node-name prefix matching the scope so logs identify the cluster kind
+ final String nodePrefix;
+ switch (scope) {
+ case TEST:
+ nodePrefix = TEST_CLUSTER_NODE_PREFIX;
+ break;
+ case SUITE:
+ nodePrefix = SUITE_CLUSTER_NODE_PREFIX;
+ break;
+ default:
+ throw new ElasticsearchException("Scope not supported: " + scope);
+ }
+ SettingsSource settingsSource = new SettingsSource() {
+ @Override
+ public Settings node(int nodeOrdinal) {
+ // HTTP is disabled by default for internal test nodes; nodeSettings may override
+ return Settings.builder().put(Node.HTTP_ENABLED, false).
+ put(nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Override
+ public Settings transportClient() {
+ return transportClientSettings();
+ }
+ };
+
+ // a non-negative numDataNodes pins the node count exactly; otherwise use the min/max range
+ int numDataNodes = getNumDataNodes();
+ int minNumDataNodes;
+ int maxNumDataNodes;
+ if (numDataNodes >= 0) {
+ minNumDataNodes = maxNumDataNodes = numDataNodes;
+ } else {
+ minNumDataNodes = getMinNumDataNodes();
+ maxNumDataNodes = getMaxNumDataNodes();
+ }
+ return new InternalTestCluster(seed, createTempDir(), minNumDataNodes, maxNumDataNodes,
+ InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", settingsSource, getNumClientNodes(),
+ InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix);
+ }
+
+ /**
+ * Returns the client ratio configured via the {@code TESTS_CLIENT_RATIO} system property,
+ * or {@link Double#NaN} if the property is unset or empty.
+ */
+ private static double transportClientRatio() {
+ String property = System.getProperty(TESTS_CLIENT_RATIO);
+ if (property == null || property.isEmpty()) {
+ return Double.NaN;
+ }
+ return Double.parseDouble(property);
+ }
+
+ /**
+ * Returns the transport client ratio from the class level annotation or via
+ * {@link System#getProperty(String)} if available. If both are not available this will
+ * return a random ratio in the interval <tt>[0..1]</tt>.
+ * Precedence: annotation value (if not -1) wins over the system property.
+ */
+ protected double getPerTestTransportClientRatio() {
+ final ClusterScope annotation = getAnnotation(this.getClass());
+ double perTestRatio = -1;
+ if (annotation != null) {
+ perTestRatio = annotation.transportClientRatio();
+ }
+ if (perTestRatio == -1) {
+ // TRANSPORT_CLIENT_RATIO is NaN when the system property is unset — randomize in that case
+ return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? randomDouble() : TRANSPORT_CLIENT_RATIO;
+ }
+ assert perTestRatio >= 0.0 && perTestRatio <= 1.0;
+ return perTestRatio;
+ }
+
+ /**
+ * Returns a random numeric field data format from the choices of "array" or "doc_values".
+ */
+ public static String randomNumericFieldDataFormat() {
+ List<String> formats = Arrays.asList("array", "doc_values");
+ return randomFrom(formats);
+ }
+
+ /**
+ * Returns a random bytes field data format from the choices of
+ * "paged_bytes" or "fst".
+ */
+ public static String randomBytesFieldDataFormat() {
+ return randomFrom(Arrays.asList("paged_bytes", "fst"));
+ }
+
+ /**
+ * Returns a random JODA Time Zone based on Java Time Zones.
+ * Falls back to a random fixed-offset zone when the Java zone is unknown to JODA.
+ */
+ public static DateTimeZone randomDateTimeZone() {
+ DateTimeZone timeZone;
+
+ // It sounds like some Java Time Zones are unknown by JODA. For example: Asia/Riyadh88
+ // We need to fallback in that case to a known time zone
+ try {
+ timeZone = DateTimeZone.forTimeZone(RandomizedTest.randomTimeZone());
+ } catch (IllegalArgumentException e) {
+ timeZone = DateTimeZone.forOffsetHours(randomIntBetween(-12, 12));
+ }
+
+ return timeZone;
+ }
+
+ /**
+ * Returns path to a random directory that can be used to create a temporary file system repo.
+ * Only internal (or composite-wrapped internal) clusters are supported, since the repo path is
+ * derived from the cluster's default settings.
+ */
+ public Path randomRepoPath() {
+ if (currentCluster instanceof InternalTestCluster) {
+ return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings());
+ } else if (currentCluster instanceof CompositeTestCluster) {
+ return randomRepoPath(((CompositeTestCluster) currentCluster).internalCluster().getDefaultSettings());
+ }
+ throw new UnsupportedOperationException("unsupported cluster type");
+ }
+
+ /**
+ * Returns path to a random directory that can be used to create a temporary file system repo.
+ * Draws random names under the first configured repo root until an unused path is found.
+ */
+ public static Path randomRepoPath(Settings settings) {
+ Environment environment = new Environment(settings);
+ Path[] repoFiles = environment.repoFiles();
+ assert repoFiles.length > 0;
+ Path path;
+ do {
+ // retry on (unlikely) name collisions with existing paths
+ path = repoFiles[0].resolve(randomAsciiOfLength(10));
+ } while (Files.exists(path));
+ return path;
+ }
+
+ /**
+ * Reads the shard and replica counts for the given index from the cluster state metadata.
+ * Fails the test if the index does not exist.
+ */
+ protected NumShards getNumShards(String index) {
+ MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
+ assertThat(metaData.hasIndex(index), equalTo(true));
+ int numShards = Integer.valueOf(metaData.index(index).settings().get(SETTING_NUMBER_OF_SHARDS));
+ int numReplicas = Integer.valueOf(metaData.index(index).settings().get(SETTING_NUMBER_OF_REPLICAS));
+ return new NumShards(numShards, numReplicas);
+ }
+
+ /**
+ * Asserts that all shards are allocated on nodes matching the given node pattern.
+ */
+ public Set<String> assertAllShardsOnNodes(String index, String... pattern) {
+ Set<String> nodes = new HashSet<>();
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ nodes.add(name);
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ return nodes;
+ }
+
+ /**
+ * Asserts that there are no files in the specified path
+ */
+ public void assertPathHasBeenCleared(String path) throws Exception {
+ assertPathHasBeenCleared(PathUtils.get(path));
+ }
+
+ /**
+ * Asserts that there are no files in the specified path
+ */
+ public void assertPathHasBeenCleared(Path path) throws Exception {
+ logger.info("--> checking that [{}] has been cleared", path);
+ int count = 0;
+ StringBuilder sb = new StringBuilder();
+ sb.append("[");
+ if (Files.exists(path)) {
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
+ for (Path file : stream) {
+ logger.info("--> found file: [{}]", file.toAbsolutePath().toString());
+ if (Files.isDirectory(file)) {
+ assertPathHasBeenCleared(file);
+ } else if (Files.isRegularFile(file)) {
+ count++;
+ sb.append(file.toAbsolutePath().toString());
+ sb.append("\n");
+ }
+ }
+ }
+ }
+ sb.append("]");
+ assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0));
+ }
+
+ protected static class NumShards {
+ public final int numPrimaries;
+ public final int numReplicas;
+ public final int totalNumShards;
+ public final int dataCopies;
+
+ private NumShards(int numPrimaries, int numReplicas) {
+ this.numPrimaries = numPrimaries;
+ this.numReplicas = numReplicas;
+ this.dataCopies = numReplicas + 1;
+ this.totalNumShards = numPrimaries * dataCopies;
+ }
+ }
+
+ private static boolean runTestScopeLifecycle() {
+ return INSTANCE == null;
+ }
+
+
+ @Before
+ public final void before() throws Exception {
+ if (runTestScopeLifecycle()) {
+ beforeInternal();
+ }
+ }
+
+
+ @After
+ public final void after() throws Exception {
+ // Deleting indices implicitly clears search contexts, so we need to
+ // check that there are no more in-flight search contexts before
+ // we remove the indices
+ super.ensureAllSearchContextsReleased();
+ if (runTestScopeLifecycle()) {
+ afterInternal(false);
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws Exception {
+ if (!runTestScopeLifecycle()) {
+ try {
+ INSTANCE.afterInternal(true);
+ } finally {
+ INSTANCE = null;
+ }
+ } else {
+ clearClusters();
+ }
+ SUITE_SEED = null;
+ currentCluster = null;
+ }
+
+ private static void initializeSuiteScope() throws Exception {
+ Class<?> targetClass = getTestClass();
+ /*
+ * Note we create the test class instance via reflection
+ * since JUnit creates a new instance per test, and that is also
+ * the reason why INSTANCE is static: this entire method
+ * must be executed in a static context.
+ */
+ assert INSTANCE == null;
+ if (isSuiteScopedTest(targetClass)) {
+ // note: we need to do it this way to make sure this is reproducible
+ INSTANCE = (ElasticsearchIntegrationTest) targetClass.newInstance();
+ boolean success = false;
+ try {
+ INSTANCE.beforeInternal();
+ INSTANCE.setupSuiteScopeCluster();
+ success = true;
+ } finally {
+ if (!success) {
+ afterClass();
+ }
+ }
+ } else {
+ INSTANCE = null;
+ }
+ }
+
+ /**
+ * Compute a routing key that will route documents to the <code>shard</code>-th shard
+ * of the provided index.
+ */
+ protected String routingKeyForShard(String index, String type, int shard) {
+ return internalCluster().routingKeyForShard(index, type, shard, getRandom());
+ }
+
+ /**
+ * Return settings that could be used to start a node that has the given zipped home directory.
+ */
+ protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settings) throws IOException {
+ Path indexDir = createTempDir();
+ Path dataDir = indexDir.resolve("data");
+ try (InputStream stream = Files.newInputStream(backwardsIndex)) {
+ TestUtil.unzip(stream, indexDir);
+ }
+ assertTrue(Files.exists(dataDir));
+
+ // list clusters in the datapath, ignoring anything from extrasfs
+ final Path[] list;
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dataDir)) {
+ List<Path> dirs = new ArrayList<>();
+ for (Path p : stream) {
+ if (!p.getFileName().toString().startsWith("extra")) {
+ dirs.add(p);
+ }
+ }
+ list = dirs.toArray(new Path[0]);
+ }
+
+ if (list.length != 1) {
+ throw new IllegalStateException("Backwards index must contain exactly one cluster\n" + StringUtils.join(list, "\n"));
+ }
+ Path src = list[0];
+ Path dest = dataDir.resolve(internalCluster().getClusterName());
+ assertTrue(Files.exists(src));
+ Files.move(src, dest);
+ assertFalse(Files.exists(src));
+ assertTrue(Files.exists(dest));
+ Settings.Builder builder = Settings.builder()
+ .put(settings)
+ .put("path.data", dataDir.toAbsolutePath());
+
+ Path configDir = indexDir.resolve("config");
+ if (Files.exists(configDir)) {
+ builder.put("path.conf", configDir.toAbsolutePath());
+ }
+ return builder.build();
+ }
+
+ protected HttpRequestBuilder httpClient() {
+ final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ assertTrue(nodes.length > 0);
+ TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress();
+ assertEquals(1, publishAddress.uniqueAddressTypeId());
+ InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address();
+ return new HttpRequestBuilder(HttpClients.createDefault()).host(address.getHostName()).port(address.getPort());
+ }
+
+ /**
+ * This method is executed iff the test is annotated with {@link SuiteScopeTest}
+ * before the first test of this class is executed.
+ *
+ * @see SuiteScopeTest
+ */
+ protected void setupSuiteScopeCluster() throws Exception {
+ }
+
+ private static boolean isSuiteScopedTest(Class<?> clazz) {
+ return clazz.getAnnotation(SuiteScopeTest.class) != null;
+ }
+
+ /**
+ * If a test is annotated with {@link org.elasticsearch.test.ElasticsearchIntegrationTest.SuiteScopeTest}
+ * the checks and modifications that are applied to the used test cluster are only done after all tests
+ * of this class are executed. This also has the side-effect of a suite level setup method {@link #setupSuiteScopeCluster()}
+ * that is executed in a separate test instance. Variables that need to be accessible across test instances must be static.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Inherited
+ @Ignore
+ public @interface SuiteScopeTest {
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java
new file mode 100644
index 0000000000..bc868417c7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * A base test class that keeps a singleton node started for all tests; the node can
+ * be used to get references to Guice injectors in unit tests.
+ */
+@Ignore
+public abstract class ElasticsearchSingleNodeTest extends ElasticsearchTestCase {
+
+ private static Node NODE = null;
+
+ private static void reset() {
+ assert NODE != null;
+ stopNode();
+ startNode();
+ }
+
+ private static void startNode() {
+ assert NODE == null;
+ NODE = newNode();
+ }
+
+ private static void stopNode() {
+ Node node = NODE;
+ NODE = null;
+ Releasables.close(node);
+ }
+
+ static void cleanup(boolean resetNode) {
+ assertAcked(client().admin().indices().prepareDelete("*").get());
+ if (resetNode) {
+ reset();
+ }
+ MetaData metaData = client().admin().cluster().prepareState().get().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(),
+ metaData.persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(),
+ metaData.transientSettings().getAsMap().size(), equalTo(0));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ logger.info("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName());
+ super.tearDown();
+ cleanup(resetNodeAfterTest());
+ }
+
+ @BeforeClass
+ public static void setUpClass() throws Exception {
+ stopNode();
+ startNode();
+ }
+
+ @AfterClass
+ public static void tearDownClass() {
+ stopNode();
+ }
+
+ /**
+ * This method returns <code>true</code> if the node that is used in the background should be reset
+ * after each test. This is useful if the test changes the cluster state metadata etc. The default is
+ * <code>false</code>.
+ */
+ protected boolean resetNodeAfterTest() {
+ return false;
+ }
+
+ private static Node newNode() {
+ Node build = NodeBuilder.nodeBuilder().local(true).data(true).settings(Settings.builder()
+ .put(ClusterName.SETTING, InternalTestCluster.clusterName("single-node-cluster", randomLong()))
+ .put("path.home", createTempDir())
+ .put("node.name", nodeName())
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("script.inline", "on")
+ .put("script.indexed", "on")
+ .put(EsExecutors.PROCESSORS, 1) // limit the number of threads created
+ .put("http.enabled", false)
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // make sure we get what we set :)
+ ).build();
+ build.start();
+ assertThat(DiscoveryNode.localNode(build.settings()), is(true));
+ return build;
+ }
+
+ /**
+ * Returns a client to the single-node cluster.
+ */
+ public static Client client() {
+ return NODE.client();
+ }
+
+ /**
+ * Returns the single test nodes name.
+ */
+ public static String nodeName() {
+ return "node_s_0";
+ }
+
+ /**
+ * Return a reference to the singleton node.
+ */
+ protected static Node node() {
+ return NODE;
+ }
+
+ /**
+ * Get an instance for a particular class using the injector of the singleton node.
+ */
+ protected static <T> T getInstanceFromNode(Class<T> clazz) {
+ return NODE.injector().getInstance(clazz);
+ }
+
+ /**
+ * Create a new index on the singleton node with empty index settings.
+ */
+ protected static IndexService createIndex(String index) {
+ return createIndex(index, Settings.EMPTY);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings) {
+ return createIndex(index, settings, null, (XContentBuilder) null);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) {
+ CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
+ if (type != null && mappings != null) {
+ createIndexRequestBuilder.addMapping(type, mappings);
+ }
+ return createIndex(index, createIndexRequestBuilder);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings, String type, Object... mappings) {
+ CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
+ if (type != null && mappings != null) {
+ createIndexRequestBuilder.addMapping(type, mappings);
+ }
+ return createIndex(index, createIndexRequestBuilder);
+ }
+
+ protected static IndexService createIndex(String index, CreateIndexRequestBuilder createIndexRequestBuilder) {
+ assertAcked(createIndexRequestBuilder.get());
+ // Wait for the index to be allocated so that cluster state updates don't override
+ // changes that would have been done locally
+ ClusterHealthResponse health = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(index).waitForYellowStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW));
+ assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1));
+ IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class);
+ return instanceFromNode.indexServiceSafe(index);
+ }
+
+ protected static org.elasticsearch.index.engine.Engine engine(IndexService service) {
+ return service.shard(0).engine();
+ }
+
+ /**
+ * Create a new search context.
+ */
+ protected static SearchContext createSearchContext(IndexService indexService) {
+ BigArrays bigArrays = indexService.injector().getInstance(BigArrays.class);
+ ThreadPool threadPool = indexService.injector().getInstance(ThreadPool.class);
+ PageCacheRecycler pageCacheRecycler = indexService.injector().getInstance(PageCacheRecycler.class);
+ return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService, indexService.cache().filter(), indexService.fieldData());
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ return ensureGreen(TimeValue.timeValueSeconds(30), indices);
+ }
+
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ *
+ * @param timeout time out value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest}
+ */
+ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ logger.debug("indices {} are green", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
new file mode 100644
index 0000000000..f8a580f9bf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
+import com.google.common.base.Predicate;
+
+import org.apache.lucene.uninverting.UninvertingReader;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.bootstrap.BootstrapForTesting;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.DjbHashFunction;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsAbortPolicy;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.test.cache.recycler.MockBigArrays;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.elasticsearch.test.search.MockSearchService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.*;
+import org.junit.rules.RuleChain;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.nio.file.FileSystem;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import static com.google.common.collect.Lists.newArrayList;
+
+/**
+ * Base testcase for randomized unit testing with Elasticsearch
+ */
+@Listeners({
+ ReproduceInfoPrinter.class,
+ LoggingListener.class
+})
+@ThreadLeakScope(Scope.SUITE)
+@ThreadLeakLingering(linger = 5000) // 5 sec lingering
+@TimeoutSuite(millis = 20 * TimeUnits.MINUTE)
+@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
+// we suppress pretty much all the lucene codecs for now, except asserting
+// assertingcodec is the winner for a codec here: it finds bugs and gives clear exceptions.
+@SuppressCodecs({
+ "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50",
+ "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap",
+ "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50"
+})
+@LuceneTestCase.SuppressReproduceLine
+public abstract class ElasticsearchTestCase extends LuceneTestCase {
+
+ static {
+ BootstrapForTesting.ensureInitialized();
+ }
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ // -----------------------------------------------------------------
+ // Suite and test case setup/cleanup.
+ // -----------------------------------------------------------------
+
+ @Rule
+ public RuleChain failureAndSuccessEvents = RuleChain.outerRule(new TestRuleAdapter() {
+ @Override
+ protected void afterIfSuccessful() throws Throwable {
+ ElasticsearchTestCase.this.afterIfSuccessful();
+ }
+
+ @Override
+ protected void afterAlways(List<Throwable> errors) throws Throwable {
+ if (errors != null && errors.isEmpty() == false) {
+ ElasticsearchTestCase.this.afterIfFailed(errors);
+ }
+ super.afterAlways(errors);
+ }
+ });
+
+ /** called when a test fails, supplying the errors it generated */
+ protected void afterIfFailed(List<Throwable> errors) {
+ }
+
+ /** called after a test is finished, but only if successful */
+ protected void afterIfSuccessful() throws Exception {
+ }
+
+ // setup mock filesystems for this test run. we change PathUtils
+ // so that all accesses are plumbed thru any mock wrappers
+
+ @BeforeClass
+ public static void setFileSystem() throws Exception {
+ Field field = PathUtils.class.getDeclaredField("DEFAULT");
+ field.setAccessible(true);
+ FileSystem mock = LuceneTestCase.getBaseTempDirForTestClass().getFileSystem();
+ field.set(null, mock);
+ assertEquals(mock, PathUtils.getDefaultFileSystem());
+ }
+
+ @AfterClass
+ public static void restoreFileSystem() throws Exception {
+ Field field1 = PathUtils.class.getDeclaredField("ACTUAL_DEFAULT");
+ field1.setAccessible(true);
+ Field field2 = PathUtils.class.getDeclaredField("DEFAULT");
+ field2.setAccessible(true);
+ field2.set(null, field1.get(null));
+ }
+
+ // setup a default exception handler which knows when and how to print a stacktrace
+ private static Thread.UncaughtExceptionHandler defaultHandler;
+
+ @BeforeClass
+ public static void setDefaultExceptionHandler() throws Exception {
+ defaultHandler = Thread.getDefaultUncaughtExceptionHandler();
+ Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(defaultHandler));
+ }
+
+ @AfterClass
+ public static void restoreDefaultExceptionHandler() throws Exception {
+ Thread.setDefaultUncaughtExceptionHandler(defaultHandler);
+ }
+
+ // randomize content type for request builders
+
+ @BeforeClass
+ public static void setContentType() throws Exception {
+ Requests.CONTENT_TYPE = randomFrom(XContentType.values());
+ Requests.INDEX_CONTENT_TYPE = randomFrom(XContentType.values());
+ }
+
+ @AfterClass
+ public static void restoreContentType() {
+ Requests.CONTENT_TYPE = XContentType.SMILE;
+ Requests.INDEX_CONTENT_TYPE = XContentType.JSON;
+ }
+
+ // randomize and override the number of cpus so tests reproduce regardless of real number of cpus
+
+ @BeforeClass
+ public static void setProcessors() {
+ int numCpu = TestUtil.nextInt(random(), 1, 4);
+ System.setProperty(EsExecutors.DEFAULT_SYSPROP, Integer.toString(numCpu));
+ assertEquals(numCpu, EsExecutors.boundedNumberOfProcessors(Settings.EMPTY));
+ }
+
+ @AfterClass
+ public static void restoreProcessors() {
+ System.clearProperty(EsExecutors.DEFAULT_SYSPROP);
+ }
+
+ @After
+ public final void ensureCleanedUp() throws Exception {
+ MockPageCacheRecycler.ensureAllPagesAreReleased();
+ MockBigArrays.ensureAllArraysAreReleased();
+ // field cache should NEVER get loaded.
+ String[] entries = UninvertingReader.getUninvertedStats();
+ assertEquals("fieldcache must never be used, got=" + Arrays.toString(entries), 0, entries.length);
+ }
+
+ // this must be a separate method from other ensure checks above so suite scoped integ tests can call...TODO: fix that
+ @After
+ public final void ensureAllSearchContextsReleased() throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ MockSearchService.assertNoInFLightContext();
+ }
+ });
+ }
+
+ // mockdirectorywrappers currently set this boolean if checkindex fails
+ // TODO: can we do this cleaner???
+
+ /** MockFSDirectoryService sets this: */
+ public static boolean checkIndexFailed;
+
+ @Before
+ public final void resetCheckIndexStatus() throws Exception {
+ checkIndexFailed = false;
+ }
+
+ @After
+ public final void ensureCheckIndexPassed() throws Exception {
+ assertFalse("at least one shard failed CheckIndex", checkIndexFailed);
+ }
+
+ // -----------------------------------------------------------------
+ // Test facilities and facades for subclasses.
+ // -----------------------------------------------------------------
+
+ // TODO: replaces uses of getRandom() with random()
+ // TODO: decide on one set of naming for between/scaledBetween and remove others
+ // TODO: replace frequently() with usually()
+
+ /** Shortcut for {@link RandomizedContext#getRandom()}. Use {@link #random()} instead. */
+ public static Random getRandom() {
+ // TODO: replace uses of this function with random()
+ return random();
+ }
+
+ /**
+ * Returns a "scaled" random number between min and max (inclusive).
+ *
+ * @see RandomizedTest#scaledRandomIntBetween(int, int);
+ */
+ public static int scaledRandomIntBetween(int min, int max) {
+ return RandomizedTest.scaledRandomIntBetween(min, max);
+ }
+
+ /**
+ * A random integer from <code>min</code> to <code>max</code> (inclusive).
+ *
+ * @see #scaledRandomIntBetween(int, int)
+ */
+ public static int randomIntBetween(int min, int max) {
+ return RandomInts.randomIntBetween(random(), min, max);
+ }
+
+ /**
+ * Returns a "scaled" number of iterations for loops which can have a variable
+ * iteration count. This method is effectively
+ * an alias to {@link #scaledRandomIntBetween(int, int)}.
+ */
+ public static int iterations(int min, int max) {
+ return scaledRandomIntBetween(min, max);
+ }
+
+ /**
+ * An alias for {@link #randomIntBetween(int, int)}.
+ *
+ * @see #scaledRandomIntBetween(int, int)
+ */
+ public static int between(int min, int max) {
+ return randomIntBetween(min, max);
+ }
+
+ /**
+ * The exact opposite of {@link #rarely()}.
+ */
+ public static boolean frequently() {
+ return !rarely();
+ }
+
+ public static boolean randomBoolean() {
+ return random().nextBoolean();
+ }
+
+ public static byte randomByte() {
+ return (byte) random().nextInt();
+ }
+
+ public static short randomShort() {
+ return (short) random().nextInt();
+ }
+
+ public static int randomInt() {
+ return random().nextInt();
+ }
+
+ public static float randomFloat() {
+ return random().nextFloat();
+ }
+
+ public static double randomDouble() {
+ return random().nextDouble();
+ }
+
+ public static long randomLong() {
+ return random().nextLong();
+ }
+
+ /** A random integer from 0..max (inclusive). */
+ public static int randomInt(int max) {
+ return RandomizedTest.randomInt(max);
+ }
+
+ /** Pick a random object from the given array. The array must not be empty. */
+ public static <T> T randomFrom(T... array) {
+ return RandomPicks.randomFrom(random(), array);
+ }
+
+ /** Pick a random object from the given list. */
+ public static <T> T randomFrom(List<T> list) {
+ return RandomPicks.randomFrom(random(), list);
+ }
+
+ public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomAsciiOfLength(int codeUnits) {
+ return RandomizedTest.randomAsciiOfLength(codeUnits);
+ }
+
+ public static String randomUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomUnicodeOfLength(int codeUnits) {
+ return RandomizedTest.randomUnicodeOfLength(codeUnits);
+ }
+
+ public static String randomUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) {
+ return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints);
+ }
+
+ public static String randomUnicodeOfCodepointLength(int codePoints) {
+ return RandomizedTest.randomUnicodeOfCodepointLength(codePoints);
+ }
+
+ public static String randomRealisticUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomRealisticUnicodeOfLength(int codeUnits) {
+ return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits);
+ }
+
+ public static String randomRealisticUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) {
+ return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints);
+ }
+
+ public static String randomRealisticUnicodeOfCodepointLength(int codePoints) {
+ return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints);
+ }
+
+ public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) {
+ if (allowNull && random().nextBoolean()) {
+ return null;
+ }
+ String[] array = new String[random().nextInt(maxArraySize)]; // allow empty arrays
+ for (int i = 0; i < array.length; i++) {
+ array[i] = RandomStrings.randomAsciiOfLength(random(), maxStringSize);
+ }
+ return array;
+ }
+
+ /**
+ * Runs the code block for 10 seconds waiting for no assertion to trip.
+ */
+ public static void assertBusy(Runnable codeBlock) throws Exception {
+ assertBusy(Executors.callable(codeBlock), 10, TimeUnit.SECONDS);
+ }
+
+ public static void assertBusy(Runnable codeBlock, long maxWaitTime, TimeUnit unit) throws Exception {
+ assertBusy(Executors.callable(codeBlock), maxWaitTime, unit);
+ }
+
+ /**
+ * Runs the code block for 10 seconds waiting for no assertion to trip.
+ */
+ public static <V> V assertBusy(Callable<V> codeBlock) throws Exception {
+ return assertBusy(codeBlock, 10, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Runs the code block for the provided interval, waiting for no assertions to trip.
+ */
+ public static <V> V assertBusy(Callable<V> codeBlock, long maxWaitTime, TimeUnit unit) throws Exception {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
+ long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
+ long timeInMillis = 1;
+ long sum = 0;
+ List<AssertionError> failures = new ArrayList<>();
+ for (int i = 0; i < iterations; i++) {
+ try {
+ return codeBlock.call();
+ } catch (AssertionError e) {
+ failures.add(e);
+ }
+ sum += timeInMillis;
+ Thread.sleep(timeInMillis);
+ timeInMillis *= 2;
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ try {
+ return codeBlock.call();
+ } catch (AssertionError e) {
+ for (AssertionError failure : failures) {
+ e.addSuppressed(failure);
+ }
+ throw e;
+ }
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate) throws InterruptedException {
+ return awaitBusy(breakPredicate, 10, TimeUnit.SECONDS);
+ }
+
+ public static boolean awaitBusy(Predicate<?> breakPredicate, long maxWaitTime, TimeUnit unit) throws InterruptedException {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
+ long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
+ long timeInMillis = 1;
+ long sum = 0;
+ for (int i = 0; i < iterations; i++) {
+ if (breakPredicate.apply(null)) {
+ return true;
+ }
+ sum += timeInMillis;
+ Thread.sleep(timeInMillis);
+ timeInMillis *= 2;
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ return breakPredicate.apply(null);
+ }
+
+ public static boolean terminate(ExecutorService... services) throws InterruptedException {
+ boolean terminated = true;
+ for (ExecutorService service : services) {
+ if (service != null) {
+ terminated &= ThreadPool.terminate(service, 10, TimeUnit.SECONDS);
+ }
+ }
+ return terminated;
+ }
+
+ public static boolean terminate(ThreadPool service) throws InterruptedException {
+ return ThreadPool.terminate(service, 10, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Returns a {@link java.nio.file.Path} pointing to the class path relative resource given
+ * as the first argument. In contrast to
+ * <code>getClass().getResource(...).getFile()</code> this method will not
+ * return URL encoded paths if the parent path contains spaces or other
+ * non-standard characters.
+ */
+ @Override
+ public Path getDataPath(String relativePath) {
+ // we override LTC behavior here: wrap even resources with mockfilesystems,
+ // because some code is buggy when it comes to multiple nio.2 filesystems
+ // (e.g. FileSystemUtils, and likely some tests)
+ try {
+ return PathUtils.get(getClass().getResource(relativePath).toURI());
+ } catch (Exception e) {
+ throw new RuntimeException("resource not found: " + relativePath, e);
+ }
+ }
+
+ /** Returns a random number of temporary paths. */
+ public String[] tmpPaths() {
+ final int numPaths = TestUtil.nextInt(random(), 1, 3);
+ final String[] absPaths = new String[numPaths];
+ for (int i = 0; i < numPaths; i++) {
+ absPaths[i] = createTempDir().toAbsolutePath().toString();
+ }
+ return absPaths;
+ }
+
+ public NodeEnvironment newNodeEnvironment() throws IOException {
+ return newNodeEnvironment(Settings.EMPTY);
+ }
+
+ public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
+ Settings build = Settings.builder()
+ .put(settings)
+ .put("path.home", createTempDir().toAbsolutePath())
+ .putArray("path.data", tmpPaths()).build();
+ return new NodeEnvironment(build, new Environment(build));
+ }
+
+ /** Return consistent index settings for the provided index version. */
+ public static Settings.Builder settings(Version version) {
+ Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version);
+ if (version.before(Version.V_2_0_0)) {
+ builder.put(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION, DjbHashFunction.class);
+ }
+ return builder;
+ }
+
+ // -----------------------------------------------------------------
+ // Failure utilities
+ // -----------------------------------------------------------------
+
+ static final class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
+
+ private final Thread.UncaughtExceptionHandler parent;
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ private ElasticsearchUncaughtExceptionHandler(Thread.UncaughtExceptionHandler parent) {
+ this.parent = parent;
+ }
+
+ @Override
+ public void uncaughtException(Thread t, Throwable e) {
+ if (e instanceof EsRejectedExecutionException) {
+ if (e.getMessage() != null && e.getMessage().contains(EsAbortPolicy.SHUTTING_DOWN_KEY)) {
+ return; // ignore the EsRejectedExecutionException when a node shuts down
+ }
+ } else if (e instanceof OutOfMemoryError) {
+ if (e.getMessage() != null && e.getMessage().contains("unable to create new native thread")) {
+ printStackDump(logger);
+ }
+ }
+ parent.uncaughtException(t, e);
+ }
+ }
+
+ protected static final void printStackDump(ESLogger logger) {
+ // print stack traces if we can't create any native thread anymore
+ Map<Thread, StackTraceElement[]> allStackTraces = Thread.getAllStackTraces();
+ logger.error(formatThreadStacks(allStackTraces));
+ }
+
+ /** Dump threads and their current stack trace. */
+ private static String formatThreadStacks(Map<Thread, StackTraceElement[]> threads) {
+ StringBuilder message = new StringBuilder();
+ int cnt = 1;
+ final Formatter f = new Formatter(message, Locale.ENGLISH);
+ for (Map.Entry<Thread, StackTraceElement[]> e : threads.entrySet()) {
+ if (e.getKey().isAlive()) {
+ f.format(Locale.ENGLISH, "\n %2d) %s", cnt++, threadName(e.getKey())).flush();
+ }
+ if (e.getValue().length == 0) {
+ message.append("\n at (empty stack)");
+ } else {
+ for (StackTraceElement ste : e.getValue()) {
+ message.append("\n at ").append(ste);
+ }
+ }
+ }
+ return message.toString();
+ }
+
+ private static String threadName(Thread t) {
+ return "Thread[" +
+ "id=" + t.getId() +
+ ", name=" + t.getName() +
+ ", state=" + t.getState() +
+ ", group=" + groupName(t.getThreadGroup()) +
+ "]";
+ }
+
+ private static String groupName(ThreadGroup threadGroup) {
+ if (threadGroup == null) {
+ return "{null group}";
+ } else {
+ return threadGroup.getName();
+ }
+ }
+
+ /**
+ * Returns size random values
+ */
+ public static <T> List<T> randomSubsetOf(int size, T... values) {
+ if (size > values.length) {
+ throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects");
+ }
+ List<T> list = newArrayList(values);
+ Collections.shuffle(list);
+ return list.subList(0, size);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
new file mode 100644
index 0000000000..f3a8e5a290
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchTokenStreamTestCase.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.bootstrap.BootstrapForTesting;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@TimeoutSuite(millis = TimeUnits.HOUR)
+@LuceneTestCase.SuppressReproduceLine
+@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
+/**
+ * Basic test case for token streams. the assertion methods in this class will
+ * run basic checks to enforce correct behavior of the token streams.
+ */
+public abstract class ElasticsearchTokenStreamTestCase extends BaseTokenStreamTestCase {
+
+ static {
+ BootstrapForTesting.ensureInitialized();
+ }
+
+ public static Version randomVersion() {
+ return VersionUtils.randomVersion(random());
+ }
+
+ public Settings.Builder newAnalysisSettingsBuilder() {
+ return Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ExternalNode.java b/core/src/test/java/org/elasticsearch/test/ExternalNode.java
new file mode 100644
index 0000000000..4116632fd2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ExternalNode.java
@@ -0,0 +1,250 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.base.Predicate;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.transport.TransportModule;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * Simple helper class to start external nodes to be used within a test cluster
+ */
+final class ExternalNode implements Closeable {
+
+ public static final Settings REQUIRED_SETTINGS = Settings.builder()
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen")
+ .put("node.mode", "network").build(); // we need network mode for this
+
+ private final Path path;
+ private final Random random;
+ private final SettingsSource settingsSource;
+ private Process process;
+ private NodeInfo nodeInfo;
+ private final String clusterName;
+ private TransportClient client;
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+ private Settings externalNodeSettings;
+
+
+ ExternalNode(Path path, long seed, SettingsSource settingsSource) {
+ this(path, null, seed, settingsSource);
+ }
+
+ ExternalNode(Path path, String clusterName, long seed, SettingsSource settingsSource) {
+ if (!Files.isDirectory(path)) {
+ throw new IllegalArgumentException("path must be a directory");
+ }
+ this.path = path;
+ this.clusterName = clusterName;
+ this.random = new Random(seed);
+ this.settingsSource = settingsSource;
+ }
+
+ synchronized ExternalNode start(Client localNode, Settings defaultSettings, String nodeName, String clusterName, int nodeOrdinal) throws IOException, InterruptedException {
+ ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong(), settingsSource);
+ Settings settings = Settings.builder().put(defaultSettings).put(settingsSource.node(nodeOrdinal)).build();
+ externalNode.startInternal(localNode, settings, nodeName, clusterName);
+ return externalNode;
+ }
+
+ @SuppressForbidden(reason = "needs java.io.File api to start a process")
+ synchronized void startInternal(Client client, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
+ if (process != null) {
+ throw new IllegalStateException("Already started");
+ }
+ List<String> params = new ArrayList<>();
+
+ if (!Constants.WINDOWS) {
+ params.add("bin/elasticsearch");
+ } else {
+ params.add("bin/elasticsearch.bat");
+ }
+ params.add("-Des.cluster.name=" + clusterName);
+ params.add("-Des.node.name=" + nodeName);
+ Settings.Builder externaNodeSettingsBuilder = Settings.builder();
+ for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ switch (entry.getKey()) {
+ case "cluster.name":
+ case "node.name":
+ case "path.home":
+ case "node.mode":
+ case "node.local":
+ case TransportModule.TRANSPORT_TYPE_KEY:
+ case DiscoveryModule.DISCOVERY_TYPE_KEY:
+ case TransportModule.TRANSPORT_SERVICE_TYPE_KEY:
+ case InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING:
+ continue;
+ default:
+ externaNodeSettingsBuilder.put(entry.getKey(), entry.getValue());
+
+ }
+ }
+ this.externalNodeSettings = externaNodeSettingsBuilder.put(REQUIRED_SETTINGS).build();
+ for (Map.Entry<String, String> entry : externalNodeSettings.getAsMap().entrySet()) {
+ params.add("-Des." + entry.getKey() + "=" + entry.getValue());
+ }
+
+ params.add("-Des.path.home=" + PathUtils.get(".").toAbsolutePath());
+ params.add("-Des.path.conf=" + path + "/config");
+
+ ProcessBuilder builder = new ProcessBuilder(params);
+ builder.directory(path.toFile());
+ builder.inheritIO();
+ boolean success = false;
+ try {
+ logger.info("starting external node [{}] with: {}", nodeName, builder.command());
+ process = builder.start();
+ this.nodeInfo = null;
+ if (waitForNode(client, nodeName)) {
+ nodeInfo = nodeInfo(client, nodeName);
+ assert nodeInfo != null;
+ logger.info("external node {} found, version [{}], build {}", nodeInfo.getNode(), nodeInfo.getVersion(), nodeInfo.getBuild());
+ } else {
+ throw new IllegalStateException("Node [" + nodeName + "] didn't join the cluster");
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ stop();
+ }
+ }
+ }
+
+ static boolean waitForNode(final Client client, final String name) throws InterruptedException {
+ return ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(java.lang.Object input) {
+ final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ if (name.equals(info.getNode().getName())) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }, 30, TimeUnit.SECONDS);
+ }
+
+ static NodeInfo nodeInfo(final Client client, final String nodeName) {
+ final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ if (nodeName.equals(info.getNode().getName())) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ synchronized TransportAddress getTransportAddress() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ return nodeInfo.getTransport().getAddress().publishAddress();
+ }
+
+ synchronized Client getClient() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ if (client == null) {
+ TransportAddress addr = nodeInfo.getTransport().getAddress().publishAddress();
+ // verify that the end node setting will have network enabled.
+
+ Settings clientSettings = settingsBuilder().put(externalNodeSettings)
+ .put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_" + nodeInfo.getNode().name())
+ .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", false).build();
+ TransportClient client = TransportClient.builder().settings(clientSettings).build();
+ client.addTransportAddress(addr);
+ this.client = client;
+ }
+ return client;
+ }
+
+ synchronized void reset(long seed) {
+ this.random.setSeed(seed);
+ }
+
+ synchronized void stop() {
+ if (running()) {
+ try {
+ if (this.client != null) {
+ client.close();
+ }
+ } finally {
+ process.destroy();
+ try {
+ process.waitFor();
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ }
+ process = null;
+ nodeInfo = null;
+
+ }
+ }
+ }
+
+
+ synchronized boolean running() {
+ return process != null;
+ }
+
+ @Override
+ public void close() {
+ stop();
+ }
+
+ synchronized String getName() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ return nodeInfo.getNode().getName();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java
new file mode 100644
index 0000000000..228bf05ff4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * External cluster to run the tests against.
+ * It is a pure immutable test cluster that allows to send requests to a pre-existing cluster
+ * and supports by nature all the needed test operations like wipeIndices etc.
+ */
+public final class ExternalTestCluster extends TestCluster {
+
+ private static final ESLogger logger = Loggers.getLogger(ExternalTestCluster.class);
+
+ private static final AtomicInteger counter = new AtomicInteger();
+ public static final String EXTERNAL_CLUSTER_PREFIX = "external_";
+
+ private final Client client;
+
+ private final InetSocketAddress[] httpAddresses;
+
+ private final String clusterName;
+
+ private final int numDataNodes;
+ private final int numMasterAndDataNodes;
+
+ public ExternalTestCluster(TransportAddress... transportAddresses) {
+ super(0);
+ Settings clientSettings = Settings.settingsBuilder()
+ .put("name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement())
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // prevents any settings to be replaced by system properties.
+ .put("client.transport.ignore_cluster_name", true)
+ .put("node.mode", "network").build(); // we require network here!
+
+ this.client = TransportClient.builder().settings(clientSettings).build().addTransportAddresses(transportAddresses);
+
+ NodesInfoResponse nodeInfos = this.client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get();
+ httpAddresses = new InetSocketAddress[nodeInfos.getNodes().length];
+ this.clusterName = nodeInfos.getClusterName().value();
+ int dataNodes = 0;
+ int masterAndDataNodes = 0;
+ for (int i = 0; i < nodeInfos.getNodes().length; i++) {
+ NodeInfo nodeInfo = nodeInfos.getNodes()[i];
+ httpAddresses[i] = ((InetSocketTransportAddress) nodeInfo.getHttp().address().publishAddress()).address();
+ if (DiscoveryNode.dataNode(nodeInfo.getSettings())) {
+ dataNodes++;
+ masterAndDataNodes++;
+ } else if (DiscoveryNode.masterNode(nodeInfo.getSettings())) {
+ masterAndDataNodes++;
+ }
+ }
+ this.numDataNodes = dataNodes;
+ this.numMasterAndDataNodes = masterAndDataNodes;
+ logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size());
+ }
+
+ @Override
+ public void afterTest() {
+
+ }
+
+ @Override
+ public Client client() {
+ return client;
+ }
+
+ @Override
+ public int size() {
+ return httpAddresses.length;
+ }
+
+ @Override
+ public int numDataNodes() {
+ return numDataNodes;
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return numMasterAndDataNodes;
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ return httpAddresses;
+ }
+
+ @Override
+ public void close() throws IOException {
+ client.close();
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).setIndices(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
+ stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ // ExternalTestCluster does not check the request breaker,
+ // because checking it requires a network request, which in
+ // turn increments the breaker, making it non-0
+
+ assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+ }
+ }
+ }
+
+ @Override
+ public Iterator<Client> iterator() {
+ return Lists.newArrayList(client).iterator();
+ }
+
+ @Override
+ public String getClusterName() {
+ return clusterName;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
new file mode 100644
index 0000000000..31255804ac
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
@@ -0,0 +1,1800 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.SysGlobals;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.*;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cache.recycler.PageCacheRecyclerModule;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArraysModule;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.cache.filter.FilterCacheModule;
+import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
+import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
+import org.elasticsearch.index.engine.CommitStats;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineClosedException;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardModule;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStoreModule;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.index.translog.TranslogWriter;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.plugins.PluginsService;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.SearchServiceModule;
+import org.elasticsearch.test.cache.recycler.MockBigArraysModule;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecyclerModule;
+import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
+import org.elasticsearch.test.engine.MockEngineFactory;
+import org.elasticsearch.test.search.MockSearchServiceModule;
+import org.elasticsearch.test.store.MockFSIndexStoreModule;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static junit.framework.Assert.fail;
+import static org.apache.lucene.util.LuceneTestCase.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.node.NodeBuilder.nodeBuilder;
+import static org.elasticsearch.test.ElasticsearchTestCase.assertBusy;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+
+/**
+ * InternalTestCluster manages a set of JVM private nodes and allows convenient access to them.
+ * The cluster supports randomized configuration such that nodes started in the cluster will
+ * automatically load asserting services tracking resources like file handles or open searchers.
+ * <p>
+ * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and
+ * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates
+ * to the async nature of Elasticsearch in combination with randomized testing. Once Threads and asynchronous calls
+ * are involved reproducibility is very limited. This class should only be used through {@link ElasticsearchIntegrationTest}.
+ * </p>
+ */
+public final class InternalTestCluster extends TestCluster {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ static SettingsSource DEFAULT_SETTINGS_SOURCE = SettingsSource.EMPTY;
+
+ /**
+ * A boolean value to enable or disable mock modules. This is useful to test the
+ * system without asserting modules that to make sure they don't hide any bugs in
+ * production.
+ *
+ * @see ElasticsearchIntegrationTest
+ */
+ public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
+ /**
+ * A node level setting that holds a per node random seed that is consistent across node restarts
+ */
+ public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
+
+ private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0"));
+ public static final int BASE_PORT = 9300 + 100 * (JVM_ORDINAL + 1);
+
+ private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
+
+ static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
+ static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
+
+ static final int DEFAULT_NUM_CLIENT_NODES = -1;
+ static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0;
+ static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1;
+
+ static final boolean DEFAULT_ENABLE_HTTP_PIPELINING = true;
+
+ public static final String NODE_MODE = nodeMode();
+
+ /* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */
+ private final NavigableMap<String, NodeAndClient> nodes = new TreeMap<>();
+
+ private final Set<Path> dataDirToClean = new HashSet<>();
+
+ private final String clusterName;
+
+ private final AtomicBoolean open = new AtomicBoolean(true);
+
+ private final Settings defaultSettings;
+
+ private AtomicInteger nextNodeId = new AtomicInteger(0);
+
+ /* Each shared node has a node seed that is used to start up the node and get default settings
+ * this is important if a node is randomly shut down in a test since the next test relies on a
+ * fully shared cluster to be more reproducible */
+ private final long[] sharedNodesSeeds;
+
+ private final int numSharedDataNodes;
+
+ private final int numSharedClientNodes;
+
+ private final SettingsSource settingsSource;
+
+ private final ExecutorService executor;
+
+ /**
+ * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
+ */
+ private final String nodePrefix;
+ private final Path baseDir;
+
+ private ServiceDisruptionScheme activeDisruptionScheme;
+
+ public InternalTestCluster(long clusterSeed, Path baseDir, int minNumDataNodes, int maxNumDataNodes, String clusterName, int numClientNodes,
+ boolean enableHttpPipelining, String nodePrefix) {
+ this(clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, DEFAULT_SETTINGS_SOURCE, numClientNodes, enableHttpPipelining, nodePrefix);
+ }
+
+ public InternalTestCluster(long clusterSeed, Path baseDir,
+ int minNumDataNodes, int maxNumDataNodes, String clusterName, SettingsSource settingsSource, int numClientNodes,
+ boolean enableHttpPipelining, String nodePrefix) {
+ super(clusterSeed);
+ this.baseDir = baseDir;
+ this.clusterName = clusterName;
+ if (minNumDataNodes < 0 || maxNumDataNodes < 0) {
+ throw new IllegalArgumentException("minimum and maximum number of data nodes must be >= 0");
+ }
+
+ if (maxNumDataNodes < minNumDataNodes) {
+ throw new IllegalArgumentException("maximum number of data nodes must be >= minimum number of data nodes");
+ }
+
+ Random random = new Random(clusterSeed);
+
+ this.numSharedDataNodes = RandomInts.randomIntBetween(random, minNumDataNodes, maxNumDataNodes);
+ assert this.numSharedDataNodes >= 0;
+
+ //for now all shared data nodes are also master eligible
+ if (numSharedDataNodes == 0) {
+ this.numSharedClientNodes = 0;
+ } else {
+ if (numClientNodes < 0) {
+ this.numSharedClientNodes = RandomInts.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES);
+ } else {
+ this.numSharedClientNodes = numClientNodes;
+ }
+ }
+ assert this.numSharedClientNodes >= 0;
+
+ this.nodePrefix = nodePrefix;
+
+ assert nodePrefix != null;
+
+ /*
+ * TODO
+ * - we might want start some master only nodes?
+ * - we could add a flag that returns a client to the master all the time?
+ * - we could add a flag that never returns a client to the master
+ * - along those lines use a dedicated node that is master eligible and let all other nodes be only data nodes
+ */
+ sharedNodesSeeds = new long[numSharedDataNodes + numSharedClientNodes];
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ sharedNodesSeeds[i] = random.nextLong();
+ }
+
+ logger.info("Setup InternalTestCluster [{}] with seed [{}] using [{}] data nodes and [{}] client nodes", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedDataNodes, numSharedClientNodes);
+ this.settingsSource = settingsSource;
+ Builder builder = Settings.settingsBuilder();
+ if (random.nextInt(5) == 0) { // sometimes set this
+ // randomize (multi/single) data path, special case for 0, don't set it at all...
+ final int numOfDataPaths = random.nextInt(5);
+ if (numOfDataPaths > 0) {
+ StringBuilder dataPath = new StringBuilder();
+ for (int i = 0; i < numOfDataPaths; i++) {
+ dataPath.append(baseDir.resolve("d" + i).toAbsolutePath()).append(',');
+ }
+ builder.put("path.data", dataPath.toString());
+ }
+ }
+ builder.put("bootstrap.sigar", rarely(random));
+ builder.put("path.home", baseDir);
+ builder.put("path.repo", baseDir.resolve("repos"));
+ builder.put("transport.tcp.port", BASE_PORT + "-" + (BASE_PORT+100));
+ builder.put("http.port", BASE_PORT+101 + "-" + (BASE_PORT+200));
+ builder.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true);
+ builder.put("node.mode", NODE_MODE);
+ builder.put("http.pipelining", enableHttpPipelining);
+ builder.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false);
+ builder.put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true);
+ if (Strings.hasLength(System.getProperty("es.logger.level"))) {
+ builder.put("logger.level", System.getProperty("es.logger.level"));
+ }
+ if (Strings.hasLength(System.getProperty("es.logger.prefix"))) {
+ builder.put("logger.prefix", System.getProperty("es.logger.prefix"));
+ }
+ // Default the watermarks to absurdly low to prevent the tests
+ // from failing on nodes without enough disk space
+ builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b");
+ builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b");
+ if (TEST_NIGHTLY) {
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
+ builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 5, 10));
+ } else if (random.nextInt(100) <= 90) {
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
+ builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 2, 5));
+ }
+ // always reduce this - it can make tests really slow
+ builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
+ defaultSettings = builder.build();
+ executor = EsExecutors.newCached(0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName));
+ }
+
+ public static String nodeMode() {
+ Builder builder = Settings.builder();
+ if (Strings.isEmpty(System.getProperty("es.node.mode")) && Strings.isEmpty(System.getProperty("es.node.local"))) {
+ return "local"; // default if nothing is specified
+ }
+ if (Strings.hasLength(System.getProperty("es.node.mode"))) {
+ builder.put("node.mode", System.getProperty("es.node.mode"));
+ }
+ if (Strings.hasLength(System.getProperty("es.node.local"))) {
+ builder.put("node.local", System.getProperty("es.node.local"));
+ }
+ if (DiscoveryNode.localNode(builder.build())) {
+ return "local";
+ } else {
+ return "network";
+ }
+ }
+
+ @Override
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ public String[] getNodeNames() {
+ return nodes.keySet().toArray(Strings.EMPTY_ARRAY);
+ }
+
+ private static boolean isLocalTransportConfigured() {
+ if ("local".equals(System.getProperty("es.node.mode", "network"))) {
+ return true;
+ }
+ return Boolean.parseBoolean(System.getProperty("es.node.local", "false"));
+ }
+
+ private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) {
+ Builder builder = Settings.settingsBuilder().put(defaultSettings)
+ .put(getRandomNodeSettings(nodeSeed));
+ Settings settings = settingsSource.node(nodeOrdinal);
+ if (settings != null) {
+ if (settings.get(ClusterName.SETTING) != null) {
+ throw new IllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]");
+ }
+ builder.put(settings);
+ }
+ if (others != null) {
+ builder.put(others);
+ }
+ builder.put(ClusterName.SETTING, clusterName);
+ return builder.build();
+ }
+
+ private static Settings getRandomNodeSettings(long seed) {
+ Random random = new Random(seed);
+ Builder builder = Settings.settingsBuilder()
+ // decrease the routing schedule so new nodes will be added quickly - some random value between 30 and 80 ms
+ .put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms")
+ .put(SETTING_CLUSTER_NODE_SEED, seed);
+ if (ENABLE_MOCK_MODULES && usually(random)) {
+ builder.put(IndexStoreModule.STORE_TYPE, MockFSIndexStoreModule.class.getName());
+ builder.put(IndexShardModule.ENGINE_FACTORY, MockEngineFactory.class);
+ builder.put(PageCacheRecyclerModule.CACHE_IMPL, MockPageCacheRecyclerModule.class.getName());
+ builder.put(BigArraysModule.IMPL, MockBigArraysModule.class.getName());
+ builder.put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName());
+ builder.put(SearchServiceModule.IMPL, MockSearchServiceModule.class.getName());
+ }
+ if (isLocalTransportConfigured()) {
+ builder.put(TransportModule.TRANSPORT_TYPE_KEY, AssertingLocalTransport.class.getName());
+ } else {
+ builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random));
+ }
+ if (random.nextBoolean()) {
+ builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values()));
+ }
+ if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(10 + random.nextInt(2000)));
+ } else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
+ }
+ if (random.nextBoolean()) { // sometimes set a
+ builder.put(SearchService.DEFAULT_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
+ }
+ if (random.nextBoolean()) {
+ // change threadpool types to make sure we don't have components that rely on the type of thread pools
+ for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
+ ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.OPTIMIZE,
+ ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
+ ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
+ if (random.nextBoolean()) {
+ final String type = RandomPicks.randomFrom(random, Arrays.asList("fixed", "cached", "scaling"));
+ builder.put(ThreadPool.THREADPOOL_GROUP + name + ".type", type);
+ }
+ }
+ }
+
+ if (random.nextInt(10) == 0) {
+ // node gets an extra cpu this time
+ builder.put(EsExecutors.PROCESSORS, 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY));
+ }
+
+ if (random.nextBoolean()) {
+ if (random.nextBoolean()) {
+ builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB);
+ }
+ if (random.nextBoolean()) {
+ builder.put("indices.fielddata.cache.expire", TimeValue.timeValueMillis(1 + random.nextInt(10000)));
+ }
+ }
+
+ // randomize netty settings
+ if (random.nextBoolean()) {
+ builder.put(NettyTransport.WORKER_COUNT, random.nextInt(3) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, random.nextInt(2) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_BULK, random.nextInt(3) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_REG, random.nextInt(6) + 1);
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS));
+ }
+
+ if (random.nextInt(10) == 0) {
+ builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
+ builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class);
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, random.nextBoolean());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogWriter.Type.values()));
+ if (rarely(random)) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_SYNC_INTERVAL, 0); // 0 has special meaning to sync each op
+ } else {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_SYNC_INTERVAL, RandomInts.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS);
+ }
+ }
+
+ return builder.build();
+ }
+
+ public static String clusterName(String prefix, long clusterSeed) {
+ StringBuilder builder = new StringBuilder(prefix);
+ final int childVM = RandomizedTest.systemPropertyAsInt(SysGlobals.CHILDVM_SYSPROP_JVM_ID, 0);
+ builder.append('-').append(NetworkUtils.getLocalHostName("__default_host__"));
+ builder.append("-CHILD_VM=[").append(childVM).append(']');
+ builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']');
+ // if multiple maven task run on a single host we better have an identifier that doesn't rely on input params
+ builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']');
+ return builder.toString();
+ }
+
+ private void ensureOpen() {
+ if (!open.get()) {
+ throw new RuntimeException("Cluster is already closed");
+ }
+ }
+
+ private synchronized NodeAndClient getOrBuildRandomNode() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient;
+ }
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode;
+ }
+
+ private synchronized NodeAndClient getRandomNodeAndClient() {
+ Predicate<NodeAndClient> all = Predicates.alwaysTrue();
+ return getRandomNodeAndClient(all);
+ }
+
+
+ private synchronized NodeAndClient getRandomNodeAndClient(Predicate<NodeAndClient> predicate) {
+ ensureOpen();
+ Collection<NodeAndClient> values = Collections2.filter(nodes.values(), predicate);
+ if (!values.isEmpty()) {
+ int whichOne = random.nextInt(values.size());
+ for (NodeAndClient nodeAndClient : values) {
+ if (whichOne-- == 0) {
+ return nodeAndClient;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Ensures that at least <code>n</code> data nodes are present in the cluster.
+ * if more nodes than <code>n</code> are present this method will not
+ * stop any of the running nodes.
+ */
+ public void ensureAtLeastNumDataNodes(int n) {
+ List<ListenableFuture<String>> futures = Lists.newArrayList();
+ synchronized (this) {
+ int size = numDataNodes();
+ for (int i = size; i < n; i++) {
+ logger.info("increasing cluster size from {} to {}", size, n);
+ futures.add(startNodeAsync());
+ }
+ }
+ try {
+ Futures.allAsList(futures).get();
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to start nodes", e);
+ }
+ if (!futures.isEmpty()) {
+ synchronized (this) {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get());
+ }
+ }
+ }
+
+ /**
+ * Ensures that at most <code>n</code> are up and running.
+ * If less nodes that <code>n</code> are running this method
+ * will not start any additional nodes.
+ */
+ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException {
+ int size = numDataNodes();
+ if (size <= n) {
+ return;
+ }
+ // prevent killing the master if possible and client nodes
+ final Iterator<NodeAndClient> values = n == 0 ? nodes.values().iterator() : Iterators.filter(nodes.values().iterator(),
+ Predicates.and(new DataNodePredicate(), Predicates.not(new MasterNodePredicate(getMasterName()))));
+
+ final Iterator<NodeAndClient> limit = Iterators.limit(values, size - n);
+ logger.info("changing cluster size from {} to {}, {} data nodes", size(), n + numSharedClientNodes, n);
+ Set<NodeAndClient> nodesToRemove = new HashSet<>();
+ while (limit.hasNext()) {
+ NodeAndClient next = limit.next();
+ nodesToRemove.add(next);
+ removeDisruptionSchemeFromNode(next);
+ next.close();
+ }
+ for (NodeAndClient toRemove : nodesToRemove) {
+ nodes.remove(toRemove.name);
+ }
+ if (!nodesToRemove.isEmpty() && size() > 0) {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get());
+ }
+ }
+
+ private NodeAndClient buildNode(Settings settings, Version version) {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), settings, version);
+ }
+
+ private NodeAndClient buildNode() {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), null, Version.CURRENT);
+ }
+
+ private NodeAndClient buildNode(int nodeId, long seed, Settings settings, Version version) {
+ assert Thread.holdsLock(this);
+ ensureOpen();
+ settings = getSettings(nodeId, seed, settings);
+ String name = buildNodeName(nodeId);
+ assert !nodes.containsKey(name);
+ Settings finalSettings = settingsBuilder()
+ .put("path.home", baseDir) // allow overriding path.home
+ .put(settings)
+ .put("name", name)
+ .put("discovery.id.seed", seed)
+ .put("tests.mock.version", version)
+ .build();
+ Node node = nodeBuilder().settings(finalSettings).build();
+ return new NodeAndClient(name, node);
+ }
+
+ private String buildNodeName(int id) {
+ return nodePrefix + id;
+ }
+
+ /**
+ * Returns the common node name prefix for this test cluster.
+ */
+ public String nodePrefix() {
+ return nodePrefix;
+ }
+
+ @Override
+ public synchronized Client client() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getOrBuildRandomNode().client(random);
+ }
+
+ /**
+ * Returns a node client to a data node in the cluster.
+ * Note: use this with care tests should not rely on a certain nodes client.
+ */
+ public synchronized Client dataNodeClient() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getRandomNodeAndClient(new DataNodePredicate()).client(random);
+ }
+
+ /**
+ * Returns a node client to the current master node.
+ * Note: use this with care tests should not rely on a certain nodes client.
+ */
+ public synchronized Client masterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client master is requested
+ }
+ Assert.fail("No master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a node client to random node but not the master. This method will fail if no non-master client is available.
+ */
+ public synchronized Client nonMasterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested
+ }
+ Assert.fail("No non-master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a node started with "node.client: true"
+ */
+ public synchronized Client clientNodeClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ int nodeId = nextNodeId.getAndIncrement();
+ Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY);
+ startNodeClient(settings);
+ return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
+ }
+
+ public synchronized Client startNodeClient(Settings settings) {
+ ensureOpen(); // currently unused
+ Builder builder = settingsBuilder().put(settings).put("node.client", true);
+ if (size() == 0) {
+ // if we are the first node - don't wait for a state
+ builder.put("discovery.initial_state_timeout", 0);
+ }
+ String name = startNode(builder);
+ return nodes.get(name).nodeClient();
+ }
+
+ /**
+ * Returns a transport client
+ */
+ public synchronized Client transportClient() {
+ ensureOpen();
+ // randomly return a transport client going to one of the nodes in the cluster
+ return getOrBuildRandomNode().transportClient();
+ }
+
+ /**
+ * Returns a node client to a given node.
+ */
+ public synchronized Client client(String nodeName) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = nodes.get(nodeName);
+ if (nodeAndClient != null) {
+ return nodeAndClient.client(random);
+ }
+ Assert.fail("No node found with name: [" + nodeName + "]");
+ return null; // can't happen
+ }
+
+
+ /**
+ * Returns a "smart" node client to a random node in the cluster
+ */
+ public synchronized Client smartClient() {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient();
+ }
+ Assert.fail("No smart client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a random node that applies to the given predicate.
+ * The predicate can filter nodes based on the nodes settings.
+ * If all nodes are filtered out this method will return <code>null</code>
+ */
+ public synchronized Client client(final Predicate<Settings> filterPredicate) {
+ ensureOpen();
+ final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new Predicate<NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filterPredicate.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ return null;
+ }
+
+ @Override
+ public void close() {
+ if (this.open.compareAndSet(true, false)) {
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.testClusterClosed();
+ activeDisruptionScheme = null;
+ }
+ IOUtils.closeWhileHandlingException(nodes.values());
+ nodes.clear();
+ executor.shutdownNow();
+ }
+ }
+
+ /**
+ * Pairs a {@link Node} with lazily-built node and transport clients so callers can
+ * randomly exercise either client flavor against the same node.
+ */
+ private final class NodeAndClient implements Closeable {
+ private Node node;
+ private Client nodeClient;
+ private Client transportClient;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+ private final String name;
+
+ NodeAndClient(String name, Node node) {
+ this.node = node;
+ this.name = name;
+ }
+
+ Node node() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return node;
+ }
+
+ // Picks a transport client with probability transportClientRatio, otherwise a node client.
+ Client client(Random random) {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ double nextDouble = random.nextDouble();
+ if (nextDouble < transportClientRatio) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using transport client for node [{}] sniff: [{}]", node.settings().get("name"), false);
+ }
+ return getOrBuildTransportClient();
+ } else {
+ return getOrBuildNodeClient();
+ }
+ }
+
+ Client nodeClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return getOrBuildNodeClient();
+ }
+
+ Client transportClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return getOrBuildTransportClient();
+ }
+
+ private Client getOrBuildNodeClient() {
+ if (nodeClient != null) {
+ return nodeClient;
+ }
+ return nodeClient = node.client();
+ }
+
+ private Client getOrBuildTransportClient() {
+ if (transportClient != null) {
+ return transportClient;
+ }
+ /* no sniff client for now - doesn't work with all tests since it might throw NoNodeAvailableException if nodes are shut down.
+ * we first need support of transportClientRatio as annotations or so
+ */
+ return transportClient = new TransportClientFactory(false, settingsSource.transportClient(), baseDir).client(node, clusterName);
+ }
+
+ // Closes and discards cached clients; fresh ones are rebuilt lazily on next use.
+ void resetClient() throws IOException {
+ if (closed.get() == false) {
+ Releasables.close(nodeClient, transportClient);
+ nodeClient = null;
+ transportClient = null;
+ }
+ }
+
+ // Records the node's data paths for later cleanup, then stops the node.
+ void closeNode() {
+ registerDataPath();
+ node.close();
+ }
+
+ // Stops the node, lets the callback mutate settings / clear data, then starts a new node
+ // instance with the merged settings. Statement order matters: clients must be reset first.
+ void restart(RestartCallback callback) throws Exception {
+ assert callback != null;
+ resetClient();
+ if (!node.isClosed()) {
+ closeNode();
+ }
+ Settings newSettings = callback.onNodeStopped(name);
+ if (newSettings == null) {
+ newSettings = Settings.EMPTY;
+ }
+ if (callback.clearData(name)) {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ IOUtils.rm(nodeEnv.nodeDataPaths());
+ }
+ }
+ node = nodeBuilder().settings(node.settings()).settings(newSettings).node();
+ }
+
+ void registerDataPath() {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths()));
+ }
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ resetClient();
+ closed.set(true);
+ closeNode();
+ }
+ }
+
+ // Name prefix that distinguishes transport clients from the nodes they connect to.
+ public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_";
+
+ /**
+ * Builds {@link TransportClient} instances bound to a given node's publish address,
+ * inheriting the relevant node settings (mode, local, logger config).
+ */
+ static class TransportClientFactory {
+ private final boolean sniff;
+ private final Settings settings;
+ private final Path baseDir;
+
+ TransportClientFactory(boolean sniff, Settings settings, Path baseDir) {
+ this.sniff = sniff;
+ this.settings = settings != null ? settings : Settings.EMPTY;
+ this.baseDir = baseDir;
+ }
+
+ public Client client(Node node, String clusterName) {
+ TransportAddress addr = node.injector().getInstance(TransportService.class).boundAddress().publishAddress();
+ Settings nodeSettings = node.settings();
+ Builder builder = settingsBuilder()
+ .put("client.transport.nodes_sampler_interval", "1s")
+ .put("path.home", baseDir)
+ .put("name", TRANSPORT_CLIENT_PREFIX + node.settings().get("name"))
+ .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false)
+ .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", sniff)
+ .put("node.mode", nodeSettings.get("node.mode", NODE_MODE))
+ .put("node.local", nodeSettings.get("node.local", ""))
+ .put("logger.prefix", nodeSettings.get("logger.prefix", ""))
+ .put("logger.level", nodeSettings.get("logger.level", "INFO"))
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put(settings); // caller-supplied settings win over the derived defaults
+
+ TransportClient client = TransportClient.builder().settings(builder.build()).build();
+ client.addTransportAddress(addr);
+ return client;
+ }
+ }
+
+ @Override
+ public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException {
+ super.beforeTest(random, transportClientRatio);
+ // Re-establish the configured shared-node layout and wipe stale data before each test.
+ reset(true);
+ }
+
+ /**
+ * Restores the cluster to its configured shared-node layout: clears mock transport rules,
+ * optionally wipes data directories, starts any missing shared nodes and shuts down all
+ * non-shared nodes. Short-circuits when the cluster already matches the expected layout.
+ */
+ private synchronized void reset(boolean wipeData) throws IOException {
+ // clear all rules for mock transport services
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ TransportService transportService = nodeAndClient.node.injector().getInstance(TransportService.class);
+ if (transportService instanceof MockTransportService) {
+ final MockTransportService mockTransportService = (MockTransportService) transportService;
+ mockTransportService.clearAllRules();
+ mockTransportService.clearTracers();
+ }
+ }
+ randomlyResetClients();
+ if (wipeData) {
+ wipeDataDirectories();
+ }
+ // Fast path: node count and id counter match the shared seeds, nothing to reconcile.
+ if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
+ logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ return;
+ }
+ logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+
+
+ // Rebuild (or reuse) every shared node: data nodes first, then client nodes.
+ Set<NodeAndClient> sharedNodes = new HashSet<>();
+ assert sharedNodesSeeds.length == numSharedDataNodes + numSharedClientNodes;
+ boolean changed = false;
+ for (int i = 0; i < numSharedDataNodes; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], null, Version.CURRENT);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ for (int i = numSharedDataNodes; i < numSharedDataNodes + numSharedClientNodes; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ Builder clientSettingsBuilder = Settings.builder().put("node.client", true);
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], clientSettingsBuilder.build(), Version.CURRENT);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ if (!changed && sharedNodes.size() == nodes.size()) {
+ logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ return; // we are consistent - return
+ }
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ nodes.remove(nodeAndClient.name);
+ }
+
+ // trash the remaining nodes
+ final Collection<NodeAndClient> toShutDown = nodes.values();
+ for (NodeAndClient nodeAndClient : toShutDown) {
+ logger.debug("Close Node [{}] not shared", nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ nodes.clear();
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ publishNode(nodeAndClient);
+ }
+ nextNodeId.set(sharedNodesSeeds.length);
+ assert size() == sharedNodesSeeds.length;
+ // Block until the cluster has formed with the expected number of nodes.
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ }
+
+ @Override
+ public synchronized void afterTest() throws IOException {
+ wipeDataDirectories();
+ randomlyResetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ }
+
+ @Override
+ public void beforeIndexDeletion() {
+ // Check that the operations counter on index shard has reached 0.
+ // The assumption here is that after a test there are no ongoing write operations.
+ // test that have ongoing write operations after the test (for example because ttl is used
+ // and not all docs have been purged after the test) and inherit from
+ // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures.
+ assertShardIndexCounter();
+ //check that shards that have same sync id also contain same number of documents
+ assertSameSyncIdSameDocs();
+
+ }
+
+ /**
+ * Asserts that any two shard copies carrying the same sync commit id report the same
+ * number of live documents.
+ */
+ private void assertSameSyncIdSameDocs() {
+ Map<String, Long> docsOnShards = new HashMap<>();
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+ for (IndexService indexService : indexServices) {
+ for (IndexShard indexShard : indexService) {
+ try {
+ CommitStats commitStats = indexShard.engine().commitStats();
+ String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
+ if (syncId != null) {
+ long liveDocsOnShard = commitStats.getNumDocs();
+ if (docsOnShards.get(syncId) != null) {
+ assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard));
+ } else {
+ docsOnShards.put(syncId, liveDocsOnShard);
+ }
+ }
+ } catch (EngineClosedException e) {
+ // nothing to do, shard is closed
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Asserts that no shard on any node has in-flight (outstanding) operations.
+ */
+ private void assertShardIndexCounter() {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+ for (IndexService indexService : indexServices) {
+ for (IndexShard indexShard : indexService) {
+ assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getOperationsCount(), equalTo(0));
+ }
+ }
+ }
+ }
+
+ // Occasionally (nightly builds only) closes cached clients so client re-creation
+ // paths get exercised without slowing every run.
+ private void randomlyResetClients() throws IOException {
+ // only reset the clients on nightly tests, it causes heavy load...
+ if (RandomizedTest.isNightly() && rarely(random)) {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ nodeAndClient.resetClient();
+ }
+ }
+ }
+
+ /**
+ * Deletes the contents of all registered node data directories; the registration list
+ * is always cleared afterwards, even if individual deletions fail.
+ */
+ private void wipeDataDirectories() {
+ if (!dataDirToClean.isEmpty()) {
+ try {
+ for (Path path : dataDirToClean) {
+ try {
+ FileSystemUtils.deleteSubDirectories(path);
+ logger.info("Successfully wiped data directory for node location: {}", path);
+ } catch (IOException e) {
+ // best effort: log and continue wiping the remaining directories
+ logger.info("Failed to wipe data directory for node location: {}", path);
+ }
+ }
+ } finally {
+ dataDirToClean.clear();
+ }
+ }
+ }
+
+ /**
+ * Returns a reference to a random node's {@link ClusterService}
+ */
+ public ClusterService clusterService() {
+ return clusterService(null);
+ }
+
+ /**
+ * Returns a reference to a node's {@link ClusterService}. If the given node is null, a random node will be selected.
+ */
+ public synchronized ClusterService clusterService(@Nullable String node) {
+ return getInstance(ClusterService.class, node);
+ }
+
+ /**
+ * Returns an Iterable to all instances for the given class &lt;T&gt; across all nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
+ List<T> instances = new ArrayList<>(nodes.size());
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+ * Returns an Iterable to all instances for the given class &lt;T&gt; across all data nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getDataNodeInstances(Class<T> clazz) {
+ return getInstances(clazz, new DataNodePredicate());
+ }
+
+ // Resolves one instance of clazz from each node accepted by the predicate.
+ private synchronized <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) {
+ Iterable<NodeAndClient> filteredNodes = Iterables.filter(nodes.values(), predicate);
+ List<T> instances = new ArrayList<>();
+ for (NodeAndClient nodeAndClient : filteredNodes) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+ * Returns a reference to the given nodes instances of the given class &lt;T&gt;
+ */
+ public synchronized <T> T getInstance(Class<T> clazz, final String node) {
+ final Predicate<InternalTestCluster.NodeAndClient> predicate;
+ if (node != null) {
+ predicate = new Predicate<InternalTestCluster.NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return node.equals(nodeAndClient.name);
+ }
+ };
+ } else {
+ // null node name means "any node"
+ predicate = Predicates.alwaysTrue();
+ }
+ return getInstance(clazz, predicate);
+ }
+
+ public synchronized <T> T getDataNodeInstance(Class<T> clazz) {
+ return getInstance(clazz, new DataNodePredicate());
+ }
+
+ private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate);
+ assert randomNodeAndClient != null;
+ return getInstanceFromNode(clazz, randomNodeAndClient.node);
+ }
+
+ /**
+ * Returns a reference to a random nodes instances of the given class &lt;T&gt;
+ */
+ public synchronized <T> T getInstance(Class<T> clazz) {
+ return getInstance(clazz, Predicates.<NodeAndClient>alwaysTrue());
+ }
+
+ // Resolves the instance through the node's Guice injector.
+ private synchronized <T> T getInstanceFromNode(Class<T> clazz, Node node) {
+ return node.injector().getInstance(clazz);
+ }
+
+ @Override
+ public synchronized int size() {
+ return this.nodes.size();
+ }
+
+ // Collects the published HTTP addresses of every node in the cluster.
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ List<InetSocketAddress> addresses = Lists.newArrayList();
+ for (HttpServerTransport httpServerTransport : getInstances(HttpServerTransport.class)) {
+ addresses.add(((InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress()).address());
+ }
+ return addresses.toArray(new InetSocketAddress[addresses.size()]);
+ }
+
+ /**
+ * Stops a random data node in the cluster. Returns true if a node was found to stop, false otherwise.
+ */
+ public synchronized boolean stopRandomDataNode() throws IOException {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new DataNodePredicate());
+ if (nodeAndClient != null) {
+ logger.info("Closing random node [{}] ", nodeAndClient.name);
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Stops a random node in the cluster that matches the given filter, or does nothing if
+ * none of the nodes matches the filter.
+ */
+ public synchronized void stopRandomNode(final Predicate<Settings> filter) throws IOException {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new Predicate<InternalTestCluster.NodeAndClient>() {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return filter.apply(nodeAndClient.node.settings());
+ }
+ });
+ if (nodeAndClient != null) {
+ logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Stops the current master node forcefully
+ */
+ public synchronized void stopCurrentMasterNode() throws IOException {
+ ensureOpen();
+ assert size() > 0;
+ String masterNodeName = getMasterName();
+ assert nodes.containsKey(masterNodeName);
+ logger.info("Closing master node [{}] ", masterNodeName);
+ removeDisruptionSchemeFromNode(nodes.get(masterNodeName));
+ NodeAndClient remove = nodes.remove(masterNodeName);
+ remove.close();
+ }
+
+ /**
+ * Stops any of the current nodes but not the master node.
+ */
+ public void stopRandomNonMasterNode() throws IOException {
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(Predicates.not(new MasterNodePredicate(getMasterName())));
+ if (nodeAndClient != null) {
+ logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Restarts a random node in the cluster
+ */
+ public void restartRandomNode() throws Exception {
+ restartRandomNode(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts a random node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomNode(RestartCallback callback) throws Exception {
+ restartRandomNode(Predicates.<NodeAndClient>alwaysTrue(), callback);
+ }
+
+ /**
+ * Restarts a random data node in the cluster
+ */
+ public void restartRandomDataNode() throws Exception {
+ // Delegate to the data-node-filtered overload: the previous delegation to
+ // restartRandomNode(EMPTY_CALLBACK) restarted ANY node, contradicting this javadoc.
+ restartRandomDataNode(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts a random data node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomDataNode(RestartCallback callback) throws Exception {
+ restartRandomNode(new DataNodePredicate(), callback);
+ }
+
+ /**
+ * Restarts a random node in the cluster that matches the predicate and calls the callback during restart.
+ */
+ private void restartRandomNode(Predicate<NodeAndClient> predicate, RestartCallback callback) throws Exception {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(predicate);
+ if (nodeAndClient != null) {
+ logger.info("Restarting random node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+
+ /**
+ * Restarts all nodes the callback does not veto. With rollingRestart each node is restarted
+ * in turn; otherwise all nodes are stopped first and then started again.
+ */
+ private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
+ ensureOpen();
+ List<NodeAndClient> toRemove = new ArrayList<>();
+ try {
+ // Close (and later unregister) nodes the callback declines to restart.
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ if (!callback.doRestart(nodeAndClient.name)) {
+ logger.info("Closing node [{}] during restart", nodeAndClient.name);
+ toRemove.add(nodeAndClient);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.close();
+ }
+ }
+ } finally {
+ for (NodeAndClient nodeAndClient : toRemove) {
+ nodes.remove(nodeAndClient.name);
+ }
+ }
+ logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
+ if (rollingRestart) {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Restarting node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.restart(callback);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+ } else {
+ // Full restart: first stop every node, then start them all again.
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Stopping node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.closeNode();
+ }
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ logger.info("Starting node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.restart(callback);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+ }
+ }
+
+
+ // Default restart callback: keeps the node's existing settings (null means "no changes").
+ private static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String node) {
+ return null;
+ }
+ };
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
+ */
+ public void fullRestart() throws Exception {
+ fullRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling restart fashion ie. only restarts one node at a time.
+ */
+ public void rollingRestart() throws Exception {
+ rollingRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling restart fashion ie. only restarts one node at a time.
+ */
+ public void rollingRestart(RestartCallback function) throws Exception {
+ restartAllNodes(true, function);
+ }
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
+ */
+ public void fullRestart(RestartCallback function) throws Exception {
+ restartAllNodes(false, function);
+ }
+
+
+ /**
+ * Returns the name of the current master node in the cluster.
+ */
+ public String getMasterName() {
+ return getMasterName(null);
+ }
+
+ /**
+ * Returns the name of the current master node in the cluster and executes the request via the node specified
+ * in the viaNode parameter. If viaNode isn't specified a random node will be picked to the send the request to.
+ */
+ public String getMasterName(@Nullable String viaNode) {
+ try {
+ Client client = viaNode != null ? client(viaNode) : client();
+ ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState();
+ return state.nodes().masterNode().name();
+ } catch (Throwable e) {
+ logger.warn("Can't fetch cluster state", e);
+ throw new RuntimeException("Can't get master node " + e.getMessage(), e);
+ }
+ }
+
+ // Returns the names of all data nodes except numNodes of them.
+ synchronized Set<String> allDataNodesButN(int numNodes) {
+ return nRandomDataNodes(numDataNodes() - numNodes);
+ }
+
+ private synchronized Set<String> nRandomDataNodes(int numNodes) {
+ assert size() >= numNodes;
+ NavigableMap<String, NodeAndClient> dataNodes = Maps.filterEntries(nodes, new EntryNodePredicate(new DataNodePredicate()));
+ return Sets.newHashSet(Iterators.limit(dataNodes.keySet().iterator(), numNodes));
+ }
+
+ /**
+ * Returns the names of all nodes that currently hold at least one shard of the given index.
+ * Unknown indices yield an empty set.
+ */
+ public synchronized Set<String> nodesInclude(String index) {
+ if (!clusterService().state().routingTable().hasIndex(index)) {
+ return Collections.emptySet();
+ }
+ DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
+ Set<String> nodeNames = new HashSet<>();
+ for (ShardRouting shard : clusterService().state().routingTable().allShards(index)) {
+ if (shard.assignedToNode()) {
+ nodeNames.add(discoveryNodes.get(shard.currentNodeId()).getName());
+ }
+ }
+ return nodeNames;
+ }
+
+ /**
+ * Starts a node with default settings and returns its name.
+ */
+ public synchronized String startNode() {
+ return startNode(Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with default settings and the specified version and returns its name.
+ */
+ public synchronized String startNode(Version version) {
+ return startNode(Settings.EMPTY, version);
+ }
+
+ /**
+ * Starts a node with the given settings builder and returns its name.
+ */
+ public synchronized String startNode(Settings.Builder settings) {
+ return startNode(settings.build(), Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with the given settings and returns its name.
+ */
+ public synchronized String startNode(Settings settings) {
+ return startNode(settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with the given settings and version and returns its name.
+ */
+ public synchronized String startNode(Settings settings, Version version) {
+ NodeAndClient buildNode = buildNode(settings, version);
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode.name;
+ }
+
+ /**
+ * Starts a node in an async manner with the given settings and returns future with its name.
+ */
+ public synchronized ListenableFuture<String> startNodeAsync() {
+ return startNodeAsync(Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node in an async manner with the given settings and returns future with its name.
+ */
+ public synchronized ListenableFuture<String> startNodeAsync(final Settings settings) {
+ return startNodeAsync(settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node in an async manner with the given settings and version and returns future with its name.
+ */
+ public synchronized ListenableFuture<String> startNodeAsync(final Settings settings, final Version version) {
+ final SettableFuture<String> future = SettableFuture.create();
+ // Node is built synchronously; only start + publish happen on the executor thread.
+ final NodeAndClient buildNode = buildNode(settings, version);
+ Runnable startNode = new Runnable() {
+ @Override
+ public void run() {
+ try {
+ buildNode.node().start();
+ publishNode(buildNode);
+ future.set(buildNode.name);
+ } catch (Throwable t) {
+ // Propagate any startup failure through the returned future.
+ future.setException(t);
+ }
+ }
+ };
+ executor.execute(startNode);
+ return future;
+ }
+
+ /**
+ * Starts multiple nodes in an async manner and returns future with its name.
+ */
+ public synchronized ListenableFuture<List<String>> startNodesAsync(final int numNodes) {
+ return startNodesAsync(numNodes, Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts multiple nodes in an async manner with the given settings and returns future with its name.
+ */
+ public synchronized ListenableFuture<List<String>> startNodesAsync(final int numNodes, final Settings settings) {
+ return startNodesAsync(numNodes, settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts multiple nodes in an async manner with the given settings and version and returns future with its name.
+ */
+ public synchronized ListenableFuture<List<String>> startNodesAsync(final int numNodes, final Settings settings, final Version version) {
+ List<ListenableFuture<String>> futures = Lists.newArrayList();
+ for (int i = 0; i < numNodes; i++) {
+ futures.add(startNodeAsync(settings, version));
+ }
+ return Futures.allAsList(futures);
+ }
+
+ /**
+ * Starts multiple nodes (based on the number of settings provided) in an async manner, with explicit settings for each node.
+ * The order of the node names returned matches the order of the settings provided.
+ */
+ public synchronized ListenableFuture<List<String>> startNodesAsync(final Settings... settings) {
+ List<ListenableFuture<String>> futures = Lists.newArrayList();
+ for (Settings setting : settings) {
+ futures.add(startNodeAsync(setting, Version.CURRENT));
+ }
+ return Futures.allAsList(futures);
+ }
+
+ // Registers a started node with the cluster: records its data paths for cleanup,
+ // adds it to the node map and applies any active disruption scheme to it.
+ private synchronized void publishNode(NodeAndClient nodeAndClient) {
+ assert !nodeAndClient.node().isClosed();
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node);
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths()));
+ }
+ nodes.put(nodeAndClient.name, nodeAndClient);
+ applyDisruptionSchemeToNode(nodeAndClient);
+ }
+
+ public void closeNonSharedNodes(boolean wipeData) throws IOException {
+ reset(wipeData);
+ }
+
+ @Override
+ public int numDataNodes() {
+ return dataNodeAndClients().size();
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return dataAndMasterNodes().size();
+ }
+
+ // Replaces any currently active disruption scheme with the given one.
+ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
+ clearDisruptionScheme();
+ scheme.applyToCluster(this);
+ activeDisruptionScheme = scheme;
+ }
+
+ /**
+ * Removes the active disruption scheme (if any), waits for its expected healing time
+ * and asserts the cluster has re-formed before clearing the reference.
+ */
+ public void clearDisruptionScheme() {
+ if (activeDisruptionScheme != null) {
+ TimeValue expectedHealingTime = activeDisruptionScheme.expectedTimeToHeal();
+ logger.info("Clearing active scheme {}, expected healing time {}", activeDisruptionScheme, expectedHealingTime);
+ activeDisruptionScheme.removeFromCluster(this);
+ // We don't know what scheme is picked, certain schemes don't partition the cluster, but process slow, so we need
+ // to sleep, cluster health alone doesn't verify if these schemes have been cleared.
+ if (expectedHealingTime != null && expectedHealingTime.millis() > 0) {
+ try {
+ Thread.sleep(expectedHealingTime.millis());
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ assertFalse("cluster failed to form after disruption was healed", client().admin().cluster().prepareHealth()
+ .setWaitForNodes("" + nodes.size())
+ .setWaitForRelocatingShards(0)
+ .get().isTimedOut());
+ }
+ activeDisruptionScheme = null;
+ }
+
+ private void applyDisruptionSchemeToNode(NodeAndClient nodeAndClient) {
+ if (activeDisruptionScheme != null) {
+ assert nodes.containsKey(nodeAndClient.name);
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+
+ private void removeDisruptionSchemeFromNode(NodeAndClient nodeAndClient) {
+ if (activeDisruptionScheme != null) {
+ assert nodes.containsKey(nodeAndClient.name);
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ }
+
+ private synchronized Collection<NodeAndClient> dataNodeAndClients() {
+ return Collections2.filter(nodes.values(), new DataNodePredicate());
+ }
+
+ private synchronized Collection<NodeAndClient> dataAndMasterNodes() {
+ return Collections2.filter(nodes.values(), new DataOrMasterNodePredicate());
+ }
+
+ /** Accepts nodes whose settings mark them as data nodes. */
+ private static final class DataNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.dataNode(nodeAndClient.node.settings());
+ }
+ }
+
+ /** Accepts nodes that are data nodes, master-eligible nodes, or both. */
+ private static final class DataOrMasterNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.dataNode(nodeAndClient.node.settings()) ||
+ DiscoveryNode.masterNode(nodeAndClient.node.settings());
+ }
+ }
+
+ /** Accepts only the node whose name matches the captured master node name. */
+ private static final class MasterNodePredicate implements Predicate<NodeAndClient> {
+ private final String masterNodeName;
+
+ public MasterNodePredicate(String masterNodeName) {
+ this.masterNodeName = masterNodeName;
+ }
+
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return masterNodeName.equals(nodeAndClient.name);
+ }
+ }
+
+ /** Accepts nodes whose settings mark them as client nodes. */
+ private static final class ClientNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean apply(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.clientNode(nodeAndClient.node.settings());
+ }
+ }
+
+ /** Adapts a NodeAndClient predicate to map entries keyed by node name. */
+ private static final class EntryNodePredicate implements Predicate<Map.Entry<String, NodeAndClient>> {
+ private final Predicate<NodeAndClient> delegateNodePredicate;
+
+ EntryNodePredicate(Predicate<NodeAndClient> delegateNodePredicate) {
+ this.delegateNodePredicate = delegateNodePredicate;
+ }
+
+ @Override
+ public boolean apply(Map.Entry<String, NodeAndClient> entry) {
+ return delegateNodePredicate.apply(entry.getValue());
+ }
+ }
+
+ /**
+ * Finds a routing key that maps documents of the given index/type onto the given shard id,
+ * by generating random keys until one hashes to the requested shard.
+ * Fails the test if no node holds the index.
+ */
+ synchronized String routingKeyForShard(String index, String type, int shard, Random random) {
+ // Fixed duplicated assertion: the same non-negative check appeared twice in a row.
+ assertThat(shard, greaterThanOrEqualTo(0));
+ for (NodeAndClient n : nodes.values()) {
+ Node node = n.node;
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class, node);
+ ClusterService clusterService = getInstanceFromNode(ClusterService.class, node);
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService != null) {
+ assertThat(indexService.settingsService().getSettings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), greaterThan(shard));
+ OperationRouting operationRouting = indexService.injector().getInstance(OperationRouting.class);
+ // Rejection sampling: keep drawing random keys until one routes to the target shard.
+ while (true) {
+ String routing = RandomStrings.randomAsciiOfLength(random, 10);
+ final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId();
+ if (shard == targetShard) {
+ return routing;
+ }
+ }
+ }
+ }
+ fail("Could not find a node that holds " + index);
+ return null;
+ }
+
+ // Iterates over one client per node; each next() yields a randomly flavored
+ // (node or transport) client for the corresponding node.
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ ensureOpen();
+ final Iterator<NodeAndClient> iterator = nodes.values().iterator();
+ return new Iterator<Client>() {
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Client next() {
+ return iterator.next().client(random);
+ }
+
+ @Override
+ public void remove() {
+ // read-only view over the node map
+ throw new UnsupportedOperationException("");
+ }
+
+ };
+ }
+
+ /**
+ * Returns a predicate that only accepts settings of nodes with one of the given names.
+ */
+ public static Predicate<Settings> nameFilter(String... nodeName) {
+ return new NodeNamePredicate(new HashSet<>(Arrays.asList(nodeName)));
+ }
+
+ /** Accepts settings whose "name" entry is one of the captured node names. */
+ private static final class NodeNamePredicate implements Predicate<Settings> {
+ private final HashSet<String> nodeNames;
+
+
+ public NodeNamePredicate(HashSet<String> nodeNames) {
+ this.nodeNames = nodeNames;
+ }
+
+ @Override
+ public boolean apply(Settings settings) {
+ return nodeNames.contains(settings.get("name"));
+
+ }
+ }
+
+
+ /**
+ * An abstract class that is called during {@link #rollingRestart(InternalTestCluster.RestartCallback)}
+ * and / or {@link #fullRestart(InternalTestCluster.RestartCallback)} to execute actions at certain
+ * stages of the restart.
+ */
+ public static abstract class RestartCallback {
+
+ /**
+ * Executed once the given node name has been stopped.
+ */
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return Settings.EMPTY;
+ }
+
+ /**
+ * Executed for each node before the <tt>n+1</tt> node is restarted. The given client is
+ * an active client to the node that will be restarted next.
+ */
+ public void doAfterNodes(int n, Client client) throws Exception {
+ }
+
+ /**
+ * If this returns <code>true</code> all data for the node with the given node name will be cleared including
+ * gateways and all index data. Returns <code>false</code> by default.
+ */
+ public boolean clearData(String nodeName) {
+ return false;
+ }
+
+
+ /**
+ * If this returns <code>false</code> the node with the given node name will not be restarted. It will be
+ * closed and removed from the cluster. Returns <code>true</code> by default.
+ */
+ public boolean doRestart(String nodeName) {
+ return true;
+ }
+ }
+
+ public Settings getDefaultSettings() {
+ return defaultSettings;
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ // Checks that the breakers have been reset without incurring a
+ // network request, because a network request can increment one
+ // of the breakers
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ final IndicesFieldDataCache fdCache = getInstanceFromNode(IndicesFieldDataCache.class, nodeAndClient.node);
+ // Clean up the cache, ensuring that entries' listeners have been called
+ fdCache.getCache().cleanUp();
+
+ final String name = nodeAndClient.name;
+ final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node);
+ CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA);
+ assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L));
+ // Anything that uses transport or HTTP can increase the
+ // request breaker (because they use bigarrays), because of
+ // that the breaker can sometimes be incremented from ping
+ // requests from other clusters because Jenkins is running
+ // multiple ES testing jobs in parallel on the same machine.
+ // To combat this we check whether the breaker has reached 0
+ // in an assertBusy loop, so it will try for 10 seconds and
+ // fail if it never reached 0
+ try {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
+ assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L));
+ }
+ });
+ } catch (Exception e) {
+ fail("Exception during check for request breaker reset to 0: " + e);
+ }
+
+ NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
+ NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false);
+ assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+ }
+ }
+ }
+
+ @Override
+ public void assertAfterTest() throws IOException {
+ super.assertAfterTest();
+ for (NodeEnvironment env : this.getInstances(NodeEnvironment.class)) {
+ Set<ShardId> shardIds = env.lockedShards();
+ for (ShardId id : shardIds) {
+ try {
+ env.shardLock(id, TimeUnit.SECONDS.toMillis(5)).close();
+ } catch (IOException ex) {
+ fail("Shard " + id + " is still locked after 5 sec waiting");
+ }
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/MockLogAppender.java b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java
new file mode 100644
index 0000000000..2e0c293c1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/MockLogAppender.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.spi.LoggingEvent;
+import org.elasticsearch.common.regex.Regex;
+
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Test appender that can be used to verify that certain events were logged correctly
+ */
+public class MockLogAppender extends AppenderSkeleton {
+
+ private final static String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch.");
+
+ private List<LoggingExpectation> expectations;
+
+ public MockLogAppender() {
+ expectations = newArrayList();
+ }
+
+ public void addExpectation(LoggingExpectation expectation) {
+ expectations.add(expectation);
+ }
+
+ @Override
+ protected void append(LoggingEvent loggingEvent) {
+ for (LoggingExpectation expectation : expectations) {
+ expectation.match(loggingEvent);
+ }
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ public void assertAllExpectationsMatched() {
+ for (LoggingExpectation expectation : expectations) {
+ expectation.assertMatched();
+ }
+ }
+
+ public interface LoggingExpectation {
+ void match(LoggingEvent loggingEvent);
+
+ void assertMatched();
+ }
+
+ public static abstract class AbstractEventExpectation implements LoggingExpectation {
+ protected final String name;
+ protected final String logger;
+ protected final Level level;
+ protected final String message;
+ protected boolean saw;
+
+ public AbstractEventExpectation(String name, String logger, Level level, String message) {
+ this.name = name;
+ this.logger = getLoggerName(logger);
+ this.level = level;
+ this.message = message;
+ this.saw = false;
+ }
+
+ @Override
+ public void match(LoggingEvent event) {
+ if (event.getLevel() == level && event.getLoggerName().equals(logger)) {
+ if (Regex.isSimpleMatchPattern(message)) {
+ if (Regex.simpleMatch(message, event.getMessage().toString())) {
+ saw = true;
+ }
+ } else {
+ if (event.getMessage().toString().contains(message)) {
+ saw = true;
+ }
+ }
+ }
+ }
+ }
+
+ public static class UnseenEventExpectation extends AbstractEventExpectation {
+
+ public UnseenEventExpectation(String name, String logger, Level level, String message) {
+ super(name, logger, level, message);
+ }
+
+ @Override
+ public void assertMatched() {
+ assertThat(name, saw, equalTo(false));
+ }
+ }
+
+ public static class SeenEventExpectation extends AbstractEventExpectation {
+
+ public SeenEventExpectation(String name, String logger, Level level, String message) {
+ super(name, logger, level, message);
+ }
+
+ @Override
+ public void assertMatched() {
+ assertThat(name, saw, equalTo(true));
+ }
+ }
+
+ private static String getLoggerName(String name) {
+ if (name.startsWith("org.elasticsearch.")) {
+ name = name.substring("org.elasticsearch.".length());
+ }
+ return COMMON_PREFIX + name;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/SettingsSource.java b/core/src/test/java/org/elasticsearch/test/SettingsSource.java
new file mode 100644
index 0000000000..6341d842d6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/SettingsSource.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.settings.Settings;
+
+public abstract class SettingsSource {
+
+ public static final SettingsSource EMPTY = new SettingsSource() {
+ @Override
+ public Settings node(int nodeOrdinal) {
+ return null;
+ }
+
+ @Override
+ public Settings transportClient() {
+ return null;
+ }
+ };
+
+ /**
+ * @return the settings for the node represented by the given ordinal, or {@code null} if there are no settings defined
+ */
+ public abstract Settings node(int nodeOrdinal);
+
+ public abstract Settings transportClient();
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/TestCluster.java b/core/src/test/java/org/elasticsearch/test/TestCluster.java
new file mode 100644
index 0000000000..a1f5f016a8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/TestCluster.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.indices.IndexTemplateMissingException;
+import org.elasticsearch.repositories.RepositoryMissingException;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Random;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ * Base test cluster that exposes the basis to run tests against any elasticsearch cluster, whose layout
+ * (e.g. number of nodes) is predefined and cannot be changed during test execution
+ */
+public abstract class TestCluster implements Iterable<Client>, Closeable {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+ private final long seed;
+
+ protected Random random;
+
+ protected double transportClientRatio = 0.0;
+
+ public TestCluster(long seed) {
+ this.seed = seed;
+ }
+
+ public long seed() {
+ return seed;
+ }
+
+ /**
+ * This method should be executed before each test to reset the cluster to its initial state.
+ */
+ public void beforeTest(Random random, double transportClientRatio) throws IOException {
+ assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
+ logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
+ this.transportClientRatio = transportClientRatio;
+ this.random = new Random(random.nextLong());
+ }
+
+ /**
+ * Wipes any data that a test can leave behind: indices, templates and repositories
+ */
+ public void wipe() {
+ wipeIndices("_all");
+ wipeTemplates();
+ wipeRepositories();
+ }
+
+ /**
+ * Assertions that should run before the cluster is wiped should be called in this method
+ */
+ public void beforeIndexDeletion() {
+ }
+
+ /**
+ * This method checks all the things that need to be checked after each test
+ */
+ public void assertAfterTest() throws IOException {
+ ensureEstimatedStats();
+ }
+
+ /**
+ * This method should be executed during tear down, after each test (but after assertAfterTest)
+ */
+ public abstract void afterTest() throws IOException;
+
+ /**
+ * Returns a client connected to any node in the cluster
+ */
+ public abstract Client client();
+
+ /**
+ * Returns the number of nodes in the cluster.
+ */
+ public abstract int size();
+
+ /**
+ * Returns the number of data nodes in the cluster.
+ */
+ public abstract int numDataNodes();
+
+ /**
+ * Returns the number of data and master eligible nodes in the cluster.
+ */
+ public abstract int numDataAndMasterNodes();
+
+ /**
+ * Returns the http addresses of the nodes within the cluster.
+ * Can be used to run REST tests against the test cluster.
+ */
+ public abstract InetSocketAddress[] httpAddresses();
+
+ /**
+ * Closes the current cluster
+ */
+ @Override
+ public abstract void close() throws IOException;
+
+ /**
+ * Deletes the given indices from the test cluster. If no index name is passed to this method
+ * all indices are removed.
+ */
+ public void wipeIndices(String... indices) {
+ assert indices != null && indices.length > 0;
+ if (size() > 0) {
+ try {
+ assertAcked(client().admin().indices().prepareDelete(indices));
+ } catch (IndexMissingException e) {
+ // ignore
+ } catch (IllegalArgumentException e) {
+ // Happens if `action.destructive_requires_name` is set to true
+ // which is the case in the CloseIndexDisableCloseAllTests
+ if ("_all".equals(indices[0])) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ ObjectArrayList<String> concreteIndices = new ObjectArrayList<>();
+ for (IndexMetaData indexMetaData : clusterStateResponse.getState().metaData()) {
+ concreteIndices.add(indexMetaData.getIndex());
+ }
+ if (!concreteIndices.isEmpty()) {
+ assertAcked(client().admin().indices().prepareDelete(concreteIndices.toArray(String.class)));
+ }
+ }
+ } catch (AssertionError ae) {
+ // Try to see what threads are doing when we hit the "Delete index failed - not acked":
+ logger.info("dump all threads on AssertionError");
+ ElasticsearchTestCase.printStackDump(logger);
+ logger.info("done dump all threads on AssertionError");
+ throw ae;
+ }
+ }
+ }
+
+ /**
+ * Deletes index templates, supports wildcard notation.
+ * If no template name is passed to this method all templates are removed.
+ */
+ public void wipeTemplates(String... templates) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (templates.length == 0) {
+ templates = new String[]{"*"};
+ }
+ for (String template : templates) {
+ try {
+ client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
+ } catch (IndexTemplateMissingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes repositories, supports wildcard notation.
+ */
+ public void wipeRepositories(String... repositories) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (repositories.length == 0) {
+ repositories = new String[]{"*"};
+ }
+ for (String repository : repositories) {
+ try {
+ client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
+ } catch (RepositoryMissingException ex) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Ensures that any breaker statistics are reset to 0.
+ *
+ * The implementation is specific to the test cluster, because the act of
+ * checking some breaker stats can increase them.
+ */
+ public abstract void ensureEstimatedStats();
+
+ /**
+ * Returns the cluster name
+ */
+ public abstract String getClusterName();
+}
diff --git a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
new file mode 100644
index 0000000000..7e3bdda26a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
@@ -0,0 +1,663 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.util.Counter;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.HasContext;
+import org.elasticsearch.common.HasContextAndHeaders;
+import org.elasticsearch.common.HasHeaders;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.FieldMappers;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.query.IndexQueryParserService;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.Scroll;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
+import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.scan.ScanContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+public class TestSearchContext extends SearchContext {
+
+ final PageCacheRecycler pageCacheRecycler;
+ final BigArrays bigArrays;
+ final IndexService indexService;
+ final IndexFieldDataService indexFieldDataService;
+ final BitsetFilterCache fixedBitSetFilterCache;
+ final ThreadPool threadPool;
+
+ ContextIndexSearcher searcher;
+ int size;
+ private int terminateAfter = DEFAULT_TERMINATE_AFTER;
+ private String[] types;
+ private SearchContextAggregations aggregations;
+
+ public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService, FilterCache filterCache, IndexFieldDataService indexFieldDataService) {
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.bigArrays = bigArrays.withCircuitBreaking();
+ this.indexService = indexService;
+ this.indexFieldDataService = indexService.fieldData();
+ this.fixedBitSetFilterCache = indexService.bitsetFilterCache();
+ this.threadPool = threadPool;
+ }
+
+ public TestSearchContext() {
+ this.pageCacheRecycler = null;
+ this.bigArrays = null;
+ this.indexService = null;
+ this.indexFieldDataService = null;
+ this.threadPool = null;
+ this.fixedBitSetFilterCache = null;
+ }
+
+ public void setTypes(String... types) {
+ this.types = types;
+ }
+
+ @Override
+ public void preProcess() {
+ }
+
+ @Override
+ public Filter searchFilter(String[] types) {
+ return null;
+ }
+
+ @Override
+ public long id() {
+ return 0;
+ }
+
+ @Override
+ public String source() {
+ return null;
+ }
+
+ @Override
+ public ShardSearchRequest request() {
+ return null;
+ }
+
+ @Override
+ public SearchType searchType() {
+ return null;
+ }
+
+ @Override
+ public SearchContext searchType(SearchType searchType) {
+ return null;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return null;
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ public boolean hasTypes() {
+ return false;
+ }
+
+ @Override
+ public String[] types() {
+ return new String[0];
+ }
+
+ @Override
+ public float queryBoost() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext queryBoost(float queryBoost) {
+ return null;
+ }
+
+ @Override
+ protected long nowInMillisImpl() {
+ return 0;
+ }
+
+ @Override
+ public Scroll scroll() {
+ return null;
+ }
+
+ @Override
+ public SearchContext scroll(Scroll scroll) {
+ return null;
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ this.aggregations = aggregations;
+ return this;
+ }
+
+ @Override
+ public SearchContextHighlight highlight() {
+ return null;
+ }
+
+ @Override
+ public void highlight(SearchContextHighlight highlight) {
+ }
+
+ @Override
+ public SuggestionSearchContext suggest() {
+ return null;
+ }
+
+ @Override
+ public void suggest(SuggestionSearchContext suggest) {
+ }
+
+ @Override
+ public List<RescoreSearchContext> rescore() {
+ return null;
+ }
+
+ @Override
+ public void addRescore(RescoreSearchContext rescore) {
+ }
+
+ @Override
+ public boolean hasFieldDataFields() {
+ return false;
+ }
+
+ @Override
+ public FieldDataFieldsContext fieldDataFields() {
+ return null;
+ }
+
+ @Override
+ public boolean hasScriptFields() {
+ return false;
+ }
+
+ @Override
+ public ScriptFieldsContext scriptFields() {
+ return null;
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ return false;
+ }
+
+ @Override
+ public boolean hasFetchSourceContext() {
+ return false;
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ return null;
+ }
+
+ @Override
+ public ContextIndexSearcher searcher() {
+ return searcher;
+ }
+
+ public void setSearcher(ContextIndexSearcher searcher) {
+ this.searcher = searcher;
+ }
+
+ @Override
+ public IndexShard indexShard() {
+ return null;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ if (indexService != null) {
+ return indexService.mapperService();
+ }
+ return null;
+ }
+
+ @Override
+ public AnalysisService analysisService() {
+ return indexService.analysisService();
+ }
+
+ @Override
+ public IndexQueryParserService queryParserService() {
+ return indexService.queryParserService();
+ }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public ScriptService scriptService() {
+ return indexService.injector().getInstance(ScriptService.class);
+ }
+
+ @Override
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ @Override
+ public BigArrays bigArrays() {
+ return bigArrays;
+ }
+
+ @Override
+ public BitsetFilterCache bitsetFilterCache() {
+ return fixedBitSetFilterCache;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return indexFieldDataService;
+ }
+
+ @Override
+ public long timeoutInMillis() {
+ return 0;
+ }
+
+ @Override
+ public void timeoutInMillis(long timeoutInMillis) {
+ }
+
+ @Override
+ public int terminateAfter() {
+ return terminateAfter;
+ }
+
+ @Override
+ public void terminateAfter(int terminateAfter) {
+ this.terminateAfter = terminateAfter;
+ }
+
+ @Override
+ public SearchContext minimumScore(float minimumScore) {
+ return null;
+ }
+
+ @Override
+ public Float minimumScore() {
+ return null;
+ }
+
+ @Override
+ public SearchContext sort(Sort sort) {
+ return null;
+ }
+
+ @Override
+ public Sort sort() {
+ return null;
+ }
+
+ @Override
+ public SearchContext trackScores(boolean trackScores) {
+ return null;
+ }
+
+ @Override
+ public boolean trackScores() {
+ return false;
+ }
+
+ @Override
+ public SearchContext parsedPostFilter(ParsedQuery postFilter) {
+ return null;
+ }
+
+ @Override
+ public ParsedQuery parsedPostFilter() {
+ return null;
+ }
+
+ @Override
+ public Filter aliasFilter() {
+ return null;
+ }
+
+ @Override
+ public SearchContext parsedQuery(ParsedQuery query) {
+ return null;
+ }
+
+ @Override
+ public ParsedQuery parsedQuery() {
+ return null;
+ }
+
+ @Override
+ public Query query() {
+ return null;
+ }
+
+ @Override
+ public boolean queryRewritten() {
+ return false;
+ }
+
+ @Override
+ public SearchContext updateRewriteQuery(Query rewriteQuery) {
+ return null;
+ }
+
+ @Override
+ public int from() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext from(int from) {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+
+
+ @Override
+ public SearchContext size(int size) {
+ return null;
+ }
+
+ @Override
+ public boolean hasFieldNames() {
+ return false;
+ }
+
+ @Override
+ public List<String> fieldNames() {
+ return null;
+ }
+
+ @Override
+ public void emptyFieldNames() {
+ }
+
+ @Override
+ public boolean explain() {
+ return false;
+ }
+
+ @Override
+ public void explain(boolean explain) {
+ }
+
+ @Override
+ public List<String> groupStats() {
+ return null;
+ }
+
+ @Override
+ public void groupStats(List<String> groupStats) {
+ }
+
+ @Override
+ public boolean version() {
+ return false;
+ }
+
+ @Override
+ public void version(boolean version) {
+ }
+
+ @Override
+ public int[] docIdsToLoad() {
+ return new int[0];
+ }
+
+ @Override
+ public int docIdsToLoadFrom() {
+ return 0;
+ }
+
+ @Override
+ public int docIdsToLoadSize() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize) {
+ return null;
+ }
+
+ @Override
+ public void accessed(long accessTime) {
+ }
+
+ @Override
+ public long lastAccessTime() {
+ return 0;
+ }
+
+ @Override
+ public long keepAlive() {
+ return 0;
+ }
+
+ @Override
+ public void keepAlive(long keepAlive) {
+ }
+
+ @Override
+ public void lastEmittedDoc(ScoreDoc doc) {
+ }
+
+ @Override
+ public ScoreDoc lastEmittedDoc() {
+ return null;
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ return new SearchLookup(mapperService(), fieldData(), null);
+ }
+
+ @Override
+ public DfsSearchResult dfsResult() {
+ return null;
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return null;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ return null;
+ }
+
+ @Override
+ public ScanContext scanContext() {
+ return null;
+ }
+
+ @Override
+ public FieldMapper smartNameFieldMapper(String name) {
+ if (mapperService() != null) {
+ return mapperService().smartNameFieldMapper(name, types());
+ }
+ return null;
+ }
+
+ @Override
+ public FieldMapper smartNameFieldMapperFromAnyType(String name) {
+ if (mapperService() != null) {
+ return mapperService().smartNameFieldMapper(name);
+ }
+ return null;
+ }
+
+ @Override
+ public MapperService.SmartNameObjectMapper smartNameObjectMapper(String name) {
+ if (mapperService() != null) {
+ return mapperService().smartNameObjectMapper(name, types);
+ }
+ return null;
+ }
+
+ @Override
+ public void doClose() {
+ }
+
+ @Override
+ public Counter timeEstimateCounter() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void innerHits(InnerHitsContext innerHitsContext) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public InnerHitsContext innerHits() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <V> V putInContext(Object key, Object value) {
+ return null;
+ }
+
+ @Override
+ public void putAllInContext(ObjectObjectAssociativeContainer<Object, Object> map) {
+ }
+
+ @Override
+ public <V> V getFromContext(Object key) {
+ return null;
+ }
+
+ @Override
+ public <V> V getFromContext(Object key, V defaultValue) {
+ return defaultValue;
+ }
+
+ @Override
+ public boolean hasInContext(Object key) {
+ return false;
+ }
+
+ @Override
+ public int contextSize() {
+ return 0;
+ }
+
+ @Override
+ public boolean isContextEmpty() {
+ return true;
+ }
+
+ @Override
+ public ImmutableOpenMap<Object, Object> getContext() {
+ return ImmutableOpenMap.of();
+ }
+
+ @Override
+ public void copyContextFrom(HasContext other) {
+ }
+
+ @Override
+ public <V> void putHeader(String key, V value) {}
+
+ @Override
+ public <V> V getHeader(String key) {
+ return null;
+ }
+
+ @Override
+ public boolean hasHeader(String key) {
+ return false;
+ }
+
+ @Override
+ public Set<String> getHeaders() {
+ return Collections.EMPTY_SET;
+ }
+
+ @Override
+ public void copyHeadersFrom(HasHeaders from) {}
+
+ @Override
+ public void copyContextAndHeadersFrom(HasContextAndHeaders other) {}
+}
diff --git a/core/src/test/java/org/elasticsearch/test/VersionUtils.java b/core/src/test/java/org/elasticsearch/test/VersionUtils.java
new file mode 100644
index 0000000000..316a3926d5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/VersionUtils.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.Version;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+/** Utilities for selecting versions in tests */
+public class VersionUtils {
+
+ private static final List<Version> SORTED_VERSIONS;
+ static {
+ Field[] declaredFields = Version.class.getDeclaredFields();
+ Set<Integer> ids = new HashSet<>();
+ for (Field field : declaredFields) {
+ final int mod = field.getModifiers();
+ if (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
+ if (field.getType() == Version.class) {
+ try {
+ Version object = (Version) field.get(null);
+ ids.add(object.id);
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+ List<Integer> idList = new ArrayList<>(ids);
+ Collections.sort(idList);
+ ImmutableList.Builder<Version> version = ImmutableList.builder();
+ for (Integer integer : idList) {
+ version.add(Version.fromId(integer));
+ }
+ SORTED_VERSIONS = version.build();
+ }
+
+ /** Returns an immutable list of all known versions, sorted ascending (oldest first). */
+ public static List<Version> allVersions() {
+ return Collections.unmodifiableList(SORTED_VERSIONS);
+ }
+
+ /** Returns the {@link Version} immediately before {@link Version#CURRENT} in the sorted list of known versions. */
+ public static Version getPreviousVersion() {
+ Version version = SORTED_VERSIONS.get(SORTED_VERSIONS.size() - 2);
+ assert version.before(Version.CURRENT);
+ return version;
+ }
+
+ /** Returns the oldest known {@link Version}. */
+ public static Version getFirstVersion() {
+ return SORTED_VERSIONS.get(0);
+ }
+
+ /** Returns a random {@link Version} from all available versions. */
+ public static Version randomVersion(Random random) {
+ return SORTED_VERSIONS.get(random.nextInt(SORTED_VERSIONS.size()));
+ }
+
+ /** Returns a random {@link Version} between <code>minVersion</code> and <code>maxVersion</code> (inclusive); a <code>null</code> bound is treated as unbounded. */
+ public static Version randomVersionBetween(Random random, Version minVersion, Version maxVersion) {
+ int minVersionIndex = 0;
+ if (minVersion != null) {
+ minVersionIndex = SORTED_VERSIONS.indexOf(minVersion);
+ }
+ int maxVersionIndex = SORTED_VERSIONS.size() - 1;
+ if (maxVersion != null) {
+ maxVersionIndex = SORTED_VERSIONS.indexOf(maxVersion);
+ }
+ if (minVersionIndex == -1) {
+ throw new IllegalArgumentException("minVersion [" + minVersion + "] does not exist.");
+ } else if (maxVersionIndex == -1) {
+ throw new IllegalArgumentException("maxVersion [" + maxVersion + "] does not exist.");
+ } else if (minVersionIndex > maxVersionIndex) {
+ throw new IllegalArgumentException("maxVersion [" + maxVersion + "] cannot be less than minVersion [" + minVersion + "]");
+ } else {
+ // both bounds are inclusive, so the number of candidates is maxVersionIndex + 1 - minVersionIndex
+ int range = maxVersionIndex + 1 - minVersionIndex;
+ return SORTED_VERSIONS.get(minVersionIndex + random.nextInt(range));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/XContentTestUtils.java b/core/src/test/java/org/elasticsearch/test/XContentTestUtils.java
new file mode 100644
index 0000000000..1f1b8eff71
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/XContentTestUtils.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+
+public final class XContentTestUtils {
+ private XContentTestUtils() {
+
+ }
+
+ public static Map<String, Object> convertToMap(ToXContent part) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ part.toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return XContentHelper.convertToMap(builder.bytes(), false).v2();
+ }
+
+
+ /**
+ * Compares two maps generated from XContent objects. The order of elements in arrays is ignored.
+ */
+ public static boolean mapsEqualIgnoringArrayOrder(Map<String, Object> first, Map<String, Object> second) {
+ if (first.size() != second.size()) {
+ return false;
+ }
+
+ for (String key : first.keySet()) {
+ if (objectsEqualIgnoringArrayOrder(first.get(key), second.get(key)) == false) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @SuppressWarnings("unchecked")
+ private static boolean objectsEqualIgnoringArrayOrder(Object first, Object second) {
+ if (first == null ) {
+ return second == null;
+ } else if (first instanceof List) {
+ if (second instanceof List) {
+ List<Object> secondList = Lists.newArrayList((List<Object>) second);
+ List<Object> firstList = (List<Object>) first;
+ if (firstList.size() == secondList.size()) {
+ for (Object firstObj : firstList) {
+ boolean found = false;
+ for (Object secondObj : secondList) {
+ if (objectsEqualIgnoringArrayOrder(firstObj, secondObj)) {
+ secondList.remove(secondObj);
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ return false;
+ }
+ }
+ return secondList.isEmpty();
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ } else if (first instanceof Map) {
+ if (second instanceof Map) {
+ return mapsEqualIgnoringArrayOrder((Map<String, Object>) first, (Map<String, Object>) second);
+ } else {
+ return false;
+ }
+ } else {
+ return first.equals(second);
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java
new file mode 100644
index 0000000000..08f86a7004
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java
@@ -0,0 +1,567 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.Accountables;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.BigArray;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ByteArray;
+import org.elasticsearch.common.util.DoubleArray;
+import org.elasticsearch.common.util.FloatArray;
+import org.elasticsearch.common.util.IntArray;
+import org.elasticsearch.common.util.LongArray;
+import org.elasticsearch.common.util.ObjectArray;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class MockBigArrays extends BigArrays {
+
+ /**
+ * Tracking allocations is useful when debugging a leak but shouldn't be enabled by default as this would also be very costly
+ * since it creates a new Exception every time a new array is created.
+ */
+ private static final boolean TRACK_ALLOCATIONS = false;
+
+ private static final Set<BigArrays> INSTANCES = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<BigArrays, Boolean>()));
+ private static final ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>();
+
+ public static void ensureAllArraysAreReleased() throws Exception {
+ final Map<Object, Object> masterCopy = Maps.newHashMap(ACQUIRED_ARRAYS);
+ if (!masterCopy.isEmpty()) {
+ // not empty, we might be executing on a shared cluster that keeps on obtaining
+ // and releasing arrays; let's make sure that, after a reasonable timeout, all
+ // arrays in the master copy (snapshot) have been released
+ boolean success = ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return Sets.intersection(masterCopy.keySet(), ACQUIRED_ARRAYS.keySet()).isEmpty();
+ }
+ });
+ if (!success) {
+ masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
+ ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // keep only the leaked entries we are about to report on
+ if (!masterCopy.isEmpty()) {
+ final Object cause = masterCopy.entrySet().iterator().next().getValue();
+ throw new RuntimeException(masterCopy.size() + " arrays have not been released", cause instanceof Throwable ? (Throwable) cause : null);
+ }
+ }
+ }
+ }
+
+ private final Random random;
+ private final PageCacheRecycler recycler;
+ private final CircuitBreakerService breakerService;
+
+ @Inject
+ public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService) {
+ this(recycler, breakerService, false);
+ }
+
+ public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) {
+ super(recycler, breakerService, checkBreaker);
+ this.recycler = recycler;
+ this.breakerService = breakerService;
+ long seed;
+ try {
+ seed = SeedUtils.parseSeed(RandomizedContext.current().getRunnerSeedAsString());
+ } catch (IllegalStateException e) { // rest tests don't run randomized and have no context
+ seed = 0;
+ }
+ random = new Random(seed);
+ INSTANCES.add(this);
+ }
+
+
+ @Override
+ public BigArrays withCircuitBreaking() {
+ return new MockBigArrays(this.recycler, this.breakerService, true);
+ }
+
+ @Override
+ public ByteArray newByteArray(long size, boolean clearOnResize) {
+ final ByteArrayWrapper array = new ByteArrayWrapper(super.newByteArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public ByteArray resize(ByteArray array, long size) {
+ ByteArrayWrapper arr = (ByteArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof ByteArrayWrapper) {
+ arr = (ByteArrayWrapper) array;
+ } else {
+ arr = new ByteArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public IntArray newIntArray(long size, boolean clearOnResize) {
+ final IntArrayWrapper array = new IntArrayWrapper(super.newIntArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public IntArray resize(IntArray array, long size) {
+ IntArrayWrapper arr = (IntArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof IntArrayWrapper) {
+ arr = (IntArrayWrapper) array;
+ } else {
+ arr = new IntArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public LongArray newLongArray(long size, boolean clearOnResize) {
+ final LongArrayWrapper array = new LongArrayWrapper(super.newLongArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public LongArray resize(LongArray array, long size) {
+ LongArrayWrapper arr = (LongArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof LongArrayWrapper) {
+ arr = (LongArrayWrapper) array;
+ } else {
+ arr = new LongArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public FloatArray newFloatArray(long size, boolean clearOnResize) {
+ final FloatArrayWrapper array = new FloatArrayWrapper(super.newFloatArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public FloatArray resize(FloatArray array, long size) {
+ FloatArrayWrapper arr = (FloatArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof FloatArrayWrapper) {
+ arr = (FloatArrayWrapper) array;
+ } else {
+ arr = new FloatArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public DoubleArray newDoubleArray(long size, boolean clearOnResize) {
+ final DoubleArrayWrapper array = new DoubleArrayWrapper(super.newDoubleArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public DoubleArray resize(DoubleArray array, long size) {
+ DoubleArrayWrapper arr = (DoubleArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof DoubleArrayWrapper) {
+ arr = (DoubleArrayWrapper) array;
+ } else {
+ arr = new DoubleArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public <T> ObjectArray<T> newObjectArray(long size) {
+ return new ObjectArrayWrapper<>(super.<T>newObjectArray(size));
+ }
+
+ @Override
+ public <T> ObjectArray<T> resize(ObjectArray<T> array, long size) {
+ ObjectArrayWrapper<T> arr = (ObjectArrayWrapper<T>) array;
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof ObjectArrayWrapper) {
+ arr = (ObjectArrayWrapper<T>) array;
+ } else {
+ arr = new ObjectArrayWrapper<>(array);
+ }
+ return arr;
+ }
+
+ private static abstract class AbstractArrayWrapper {
+
+ final BigArray in;
+ boolean clearOnResize;
+ AtomicBoolean released;
+
+ AbstractArrayWrapper(BigArray in, boolean clearOnResize) {
+ ACQUIRED_ARRAYS.put(this, TRACK_ALLOCATIONS ? new RuntimeException() : Boolean.TRUE);
+ this.in = in;
+ this.clearOnResize = clearOnResize;
+ released = new AtomicBoolean(false);
+ }
+
+ protected abstract BigArray getDelegate();
+
+ protected abstract void randomizeContent(long from, long to);
+
+ public long size() {
+ return getDelegate().size();
+ }
+
+ public long ramBytesUsed() {
+ return in.ramBytesUsed();
+ }
+
+ public void close() {
+ if (!released.compareAndSet(false, true)) {
+ throw new IllegalStateException("Double release");
+ }
+ ACQUIRED_ARRAYS.remove(this);
+ randomizeContent(0, size());
+ getDelegate().close();
+ }
+
+ }
+
+ private class ByteArrayWrapper extends AbstractArrayWrapper implements ByteArray {
+
+ private final ByteArray in;
+
+ ByteArrayWrapper(ByteArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (byte) random.nextInt(1 << 8));
+ }
+
+ @Override
+ public byte get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public byte set(long index, byte value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public boolean get(long index, int len, BytesRef ref) {
+ return in.get(index, len, ref);
+ }
+
+ @Override
+ public void set(long index, byte[] buf, int offset, int len) {
+ in.set(index, buf, offset, len);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, byte value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class IntArrayWrapper extends AbstractArrayWrapper implements IntArray {
+
+ private final IntArray in;
+
+ IntArrayWrapper(IntArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, random.nextInt());
+ }
+
+ @Override
+ public int get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public int set(long index, int value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public int increment(long index, int inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, int value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class LongArrayWrapper extends AbstractArrayWrapper implements LongArray {
+
+ private final LongArray in;
+
+ LongArrayWrapper(LongArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, random.nextLong());
+ }
+
+ @Override
+ public long get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public long set(long index, long value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public long increment(long index, long inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, long value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+
+ }
+
+ private class FloatArrayWrapper extends AbstractArrayWrapper implements FloatArray {
+
+ private final FloatArray in;
+
+ FloatArrayWrapper(FloatArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (random.nextFloat() - 0.5f) * 1000);
+ }
+
+ @Override
+ public float get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public float set(long index, float value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public float increment(long index, float inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, float value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class DoubleArrayWrapper extends AbstractArrayWrapper implements DoubleArray {
+
+ private final DoubleArray in;
+
+ DoubleArrayWrapper(DoubleArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (random.nextDouble() - 0.5) * 1000);
+ }
+
+ @Override
+ public double get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public double set(long index, double value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public double increment(long index, double inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, double value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class ObjectArrayWrapper<T> extends AbstractArrayWrapper implements ObjectArray<T> {
+
+ private final ObjectArray<T> in;
+
+ ObjectArrayWrapper(ObjectArray<T> in) {
+ super(in, false);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ public T get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public T set(long index, T value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ // will be cleared anyway
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArraysModule.java b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArraysModule.java
new file mode 100644
index 0000000000..e5b48a318d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArraysModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.util.BigArrays;
+
+public class MockBigArraysModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(BigArrays.class).to(MockBigArrays.class).asEagerSingleton();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java
new file mode 100644
index 0000000000..eea29f8230
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler.V;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentMap;
+
+public class MockPageCacheRecycler extends PageCacheRecycler {
+
+ private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = Maps.newConcurrentMap();
+
+ public static void ensureAllPagesAreReleased() throws Exception {
+ final Map<Object, Throwable> masterCopy = Maps.newHashMap(ACQUIRED_PAGES);
+ if (!masterCopy.isEmpty()) {
+ // not empty, we might be executing on a shared cluster that keeps on obtaining
+ // and releasing pages; let's make sure that, after a reasonable timeout, all
+ // pages in the master copy (snapshot) have been released
+ boolean success = ElasticsearchTestCase.awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ return Sets.intersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()).isEmpty();
+ }
+ });
+ if (!success) {
+ masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
+ ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // keep only the leaked entries we are about to report on
+ if (!masterCopy.isEmpty()) {
+ final Throwable t = masterCopy.entrySet().iterator().next().getValue();
+ throw new RuntimeException(masterCopy.size() + " pages have not been released", t);
+ }
+ }
+ }
+ }
+
+ private final Random random;
+
+ @Inject
+ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
+ super(settings, threadPool);
+ final long seed = settings.getAsLong(InternalTestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
+ random = new Random(seed);
+ }
+
+ private <T> V<T> wrap(final V<T> v) {
+ ACQUIRED_PAGES.put(v, new Throwable());
+ return new V<T>() {
+
+ @Override
+ public void close() {
+ final Throwable t = ACQUIRED_PAGES.remove(v);
+ if (t == null) {
+ throw new IllegalStateException("Releasing a page that has not been acquired");
+ }
+ final T ref = v();
+ if (ref instanceof Object[]) {
+ Arrays.fill((Object[])ref, 0, Array.getLength(ref), null);
+ } else if (ref instanceof byte[]) {
+ Arrays.fill((byte[])ref, 0, Array.getLength(ref), (byte) random.nextInt(256));
+ } else if (ref instanceof long[]) {
+ Arrays.fill((long[])ref, 0, Array.getLength(ref), random.nextLong());
+ } else if (ref instanceof int[]) {
+ Arrays.fill((int[])ref, 0, Array.getLength(ref), random.nextInt());
+ } else if (ref instanceof double[]) {
+ Arrays.fill((double[])ref, 0, Array.getLength(ref), random.nextDouble() - 0.5);
+ } else if (ref instanceof float[]) {
+ Arrays.fill((float[])ref, 0, Array.getLength(ref), random.nextFloat() - 0.5f);
+ } else {
+ for (int i = 0; i < Array.getLength(ref); ++i) {
+ Array.set(ref, i, (byte) random.nextInt(256));
+ }
+ }
+ v.close();
+ }
+
+ @Override
+ public T v() {
+ return v.v();
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return v.isRecycled();
+ }
+
+ };
+ }
+
+ @Override
+ public V<byte[]> bytePage(boolean clear) {
+ final V<byte[]> page = super.bytePage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, (byte)random.nextInt(1<<8));
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<int[]> intPage(boolean clear) {
+ final V<int[]> page = super.intPage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, random.nextInt());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<long[]> longPage(boolean clear) {
+ final V<long[]> page = super.longPage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, random.nextLong());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<Object[]> objectPage() {
+ return wrap(super.objectPage());
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
new file mode 100644
index 0000000000..339fb94969
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecyclerModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.cache.recycler;
+
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.AbstractModule;
+
+public class MockPageCacheRecyclerModule extends AbstractModule {
+
+ @Override
+ protected void configure() {
+ bind(PageCacheRecycler.class).to(MockPageCacheRecycler.class).asEagerSingleton();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/client/RandomizingClient.java b/core/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
new file mode 100644
index 0000000000..5814cac131
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/client/RandomizingClient.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
+import org.elasticsearch.cluster.routing.Preference;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.Random;
+
+/** A {@link Client} that randomizes request parameters. */
+public class RandomizingClient extends FilterClient {
+
+ private final SearchType defaultSearchType;
+ private final String defaultPreference;
+
+
+ public RandomizingClient(Client client, Random random) {
+ super(client);
+ // we don't use the QUERY_AND_FETCH types that break quite a lot of tests
+ // given that they return `size*num_shards` hits instead of `size`
+ defaultSearchType = RandomPicks.randomFrom(random, Arrays.asList(
+ SearchType.DFS_QUERY_THEN_FETCH,
+ SearchType.QUERY_THEN_FETCH));
+ if (random.nextInt(10) == 0) {
+ defaultPreference = RandomPicks.randomFrom(random, EnumSet.of(Preference.PRIMARY_FIRST, Preference.LOCAL)).type();
+ } else if (random.nextInt(10) == 0) {
+ String s = TestUtil.randomRealisticUnicodeString(random, 1, 10);
+ defaultPreference = s.startsWith("_") ? null : s; // '_' is a reserved character
+ } else {
+ defaultPreference = null;
+ }
+
+ }
+
+ @Override
+ public SearchRequestBuilder prepareSearch(String... indices) {
+ return in.prepareSearch(indices).setSearchType(defaultSearchType).setPreference(defaultPreference);
+ }
+
+ @Override
+ public String toString() {
+ return "randomized(" + super.toString() + ")";
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java b/core/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java
new file mode 100644
index 0000000000..18f712e725
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.List;
+
+public class NoopClusterService implements ClusterService {
+
+ final ClusterState state;
+
+ public NoopClusterService() {
+ this(ClusterState.builder(new ClusterName("noop")).build());
+ }
+
+ public NoopClusterService(ClusterState state) {
+ if (state.getNodes().size() == 0) {
+ state = ClusterState.builder(state).nodes(
+ DiscoveryNodes.builder()
+ .put(new DiscoveryNode("noop_id", DummyTransportAddress.INSTANCE, Version.CURRENT))
+ .localNodeId("noop_id")).build();
+ }
+
+ assert state.getNodes().localNode() != null;
+ this.state = state;
+
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return state.getNodes().localNode();
+ }
+
+ @Override
+ public ClusterState state() {
+ return state;
+ }
+
+ @Override
+ public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+
+ }
+
+ @Override
+ public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+
+ }
+
+ @Override
+ public OperationRouting operationRouting() {
+ return null;
+ }
+
+ @Override
+ public void addFirst(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void addLast(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void add(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void remove(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void add(LocalNodeMasterListener listener) {
+
+ }
+
+ @Override
+ public void remove(LocalNodeMasterListener listener) {
+
+ }
+
+ @Override
+ public void add(TimeValue timeout, TimeoutClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
+
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
+
+ }
+
+ @Override
+ public List<PendingClusterTask> pendingTasks() {
+ return null;
+ }
+
+ @Override
+ public int numberOfPendingTasks() {
+ return 0;
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return null;
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public ClusterService start() {
+ return null;
+ }
+
+ @Override
+ public ClusterService stop() {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
new file mode 100644
index 0000000000..6a61665e35
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ScheduledFuture;
+
+/** a class that simulates simple cluster service features, like state storage and listeners */
+public class TestClusterService implements ClusterService {
+
+ volatile ClusterState state;
+ private final Collection<ClusterStateListener> listeners = new CopyOnWriteArrayList<>();
+ private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
+ private final ThreadPool threadPool;
+
+ public TestClusterService() {
+ this(ClusterState.builder(new ClusterName("test")).build());
+ }
+
+ public TestClusterService(ThreadPool threadPool) {
+ this(ClusterState.builder(new ClusterName("test")).build(), threadPool);
+ }
+
+ public TestClusterService(ClusterState state) {
+ this(state, null);
+ }
+
+ public TestClusterService(ClusterState state, @Nullable ThreadPool threadPool) {
+ if (state.getNodes().size() == 0) {
+ state = ClusterState.builder(state).nodes(
+ DiscoveryNodes.builder()
+ .put(new DiscoveryNode("test_id", DummyTransportAddress.INSTANCE, Version.CURRENT))
+ .localNodeId("test_id")).build();
+ }
+
+ assert state.getNodes().localNode() != null;
+ this.state = state;
+ this.threadPool = threadPool;
+
+ }
+
+
+ /** set the current state and trigger any registered listeners about the change */
+ public void setState(ClusterState state) {
+ assert state.getNodes().localNode() != null;
+ // make sure we have a version increment
+ state = ClusterState.builder(state).version(this.state.version() + 1).build();
+ ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state);
+ this.state = state;
+ for (ClusterStateListener listener : listeners) {
+ listener.clusterChanged(event);
+ }
+ }
+
+ /** set the current state and trigger any registered listeners about the change */
+ public void setState(ClusterState.Builder state) {
+ setState(state.build());
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return state.getNodes().localNode();
+ }
+
+ @Override
+ public ClusterState state() {
+ return state;
+ }
+
+ @Override
+ public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public OperationRouting operationRouting() {
+ return null;
+ }
+
+ @Override
+ public void addFirst(ClusterStateListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addLast(ClusterStateListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void add(ClusterStateListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void remove(ClusterStateListener listener) {
+ listeners.remove(listener);
+ for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
+ NotifyTimeout timeout = it.next();
+ if (timeout.listener.equals(listener)) {
+ timeout.cancel();
+ it.remove();
+ }
+ }
+ }
+
+ @Override
+ public void add(LocalNodeMasterListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void remove(LocalNodeMasterListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) {
+ if (threadPool == null) {
+ throw new UnsupportedOperationException("TestClusterService wasn't initialized with a thread pool");
+ }
+ NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
+ notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
+ onGoingTimeouts.add(notifyTimeout);
+ listeners.add(listener);
+ listener.postAdded();
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<PendingClusterTask> pendingTasks() {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public int numberOfPendingTasks() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterService start() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterService stop() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void close() {
+ throw new UnsupportedOperationException();
+ }
+
+ class NotifyTimeout implements Runnable {
+ final TimeoutClusterStateListener listener;
+ final TimeValue timeout;
+ volatile ScheduledFuture future;
+
+ NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
+ this.listener = listener;
+ this.timeout = timeout;
+ }
+
+ public void cancel() {
+ FutureUtils.cancel(future);
+ }
+
+ @Override
+ public void run() {
+ if (future != null && future.isCancelled()) {
+ return;
+ }
+ listener.onTimeout(this.timeout);
+ // note, we rely on the listener to remove itself in case of timeout if needed
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
new file mode 100644
index 0000000000..6b63243b54
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
@@ -0,0 +1,177 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.discovery;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.google.common.primitives.Ints;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.SettingsSource;
+import org.elasticsearch.transport.local.LocalTransport;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.util.HashSet;
+import java.util.Set;
+
+public class ClusterDiscoveryConfiguration extends SettingsSource {
+
+ static Settings DEFAULT_NODE_SETTINGS = Settings.settingsBuilder().put("discovery.type", "zen").build();
+
+ final int numOfNodes;
+ final Settings nodeSettings;
+ final Settings transportClientSettings;
+
+ public ClusterDiscoveryConfiguration(int numOfNodes, Settings extraSettings) {
+ this.numOfNodes = numOfNodes;
+ this.nodeSettings = Settings.builder().put(DEFAULT_NODE_SETTINGS).put(extraSettings).build();
+ this.transportClientSettings = Settings.builder().put(extraSettings).build();
+ }
+
+ @Override
+ public Settings node(int nodeOrdinal) {
+ return nodeSettings;
+ }
+
+ @Override
+ public Settings transportClient() {
+ return transportClientSettings;
+ }
+
+ public static class UnicastZen extends ClusterDiscoveryConfiguration {
+
+ // this variable is incremented on each bind attempt and will maintain the next port that should be tried
+ private static int nextPort = calcBasePort();
+
+ private final int[] unicastHostOrdinals;
+ private final int[] unicastHostPorts;
+ private final boolean localMode;
+
+ public UnicastZen(int numOfNodes) {
+ this(numOfNodes, numOfNodes);
+ }
+
+ public UnicastZen(int numOfNodes, Settings extraSettings) {
+ this(numOfNodes, numOfNodes, extraSettings);
+ }
+
+ public UnicastZen(int numOfNodes, int numOfUnicastHosts) {
+ this(numOfNodes, numOfUnicastHosts, Settings.EMPTY);
+ }
+
+ public UnicastZen(int numOfNodes, int numOfUnicastHosts, Settings extraSettings) {
+ super(numOfNodes, extraSettings);
+ if (numOfUnicastHosts == numOfNodes) {
+ unicastHostOrdinals = new int[numOfNodes];
+ for (int i = 0; i < numOfNodes; i++) {
+ unicastHostOrdinals[i] = i;
+ }
+ } else {
+ Set<Integer> ordinals = new HashSet<>(numOfUnicastHosts);
+ while (ordinals.size() != numOfUnicastHosts) {
+ ordinals.add(RandomizedTest.randomInt(numOfNodes - 1));
+ }
+ unicastHostOrdinals = Ints.toArray(ordinals);
+ }
+ this.localMode = nodeSettings.get("node.mode", InternalTestCluster.NODE_MODE).equals("local");
+ this.unicastHostPorts = localMode ? new int[0] : unicastHostPorts(numOfNodes);
+ assert localMode || unicastHostOrdinals.length <= unicastHostPorts.length;
+ }
+
+ public UnicastZen(int numOfNodes, int[] unicastHostOrdinals) {
+ this(numOfNodes, Settings.EMPTY, unicastHostOrdinals);
+ }
+
+ public UnicastZen(int numOfNodes, Settings extraSettings, int[] unicastHostOrdinals) {
+ super(numOfNodes, extraSettings);
+ this.unicastHostOrdinals = unicastHostOrdinals;
+ this.localMode = nodeSettings.get("node.mode", InternalTestCluster.NODE_MODE).equals("local");
+ this.unicastHostPorts = localMode ? new int[0] : unicastHostPorts(numOfNodes);
+ assert localMode || unicastHostOrdinals.length <= unicastHostPorts.length;
+ }
+
+ private static int calcBasePort() {
+ return 30000 + InternalTestCluster.BASE_PORT;
+ }
+
+ @Override
+ public Settings node(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder()
+ .put("discovery.zen.ping.multicast.enabled", false);
+
+ String[] unicastHosts = new String[unicastHostOrdinals.length];
+ if (localMode) {
+ builder.put(LocalTransport.TRANSPORT_LOCAL_ADDRESS, "node_" + nodeOrdinal);
+ for (int i = 0; i < unicastHosts.length; i++) {
+ unicastHosts[i] = "node_" + unicastHostOrdinals[i];
+ }
+ } else if (nodeOrdinal >= unicastHostPorts.length) {
+ throw new ElasticsearchException("nodeOrdinal [" + nodeOrdinal + "] is greater than the number of unicast ports [" + unicastHostPorts.length + "]");
+ } else {
+ // we need to pin the node port & host so we'd know where to point things
+ builder.put("transport.tcp.port", unicastHostPorts[nodeOrdinal]);
+ builder.put("transport.host", "localhost");
+ for (int i = 0; i < unicastHostOrdinals.length; i++) {
+ unicastHosts[i] = "localhost:" + (unicastHostPorts[unicastHostOrdinals[i]]);
+ }
+ }
+ builder.putArray("discovery.zen.ping.unicast.hosts", unicastHosts);
+ return builder.put(super.node(nodeOrdinal)).build();
+ }
+
+ protected synchronized static int[] unicastHostPorts(int numHosts) {
+ int[] unicastHostPorts = new int[numHosts];
+
+ final int basePort = calcBasePort();
+ final int maxPort = basePort + 1000;
+ int tries = 0;
+ for (int i = 0; i < unicastHostPorts.length; i++) {
+ boolean foundPortInRange = false;
+ while (tries < 1000 && !foundPortInRange) {
+ try (ServerSocket serverSocket = new ServerSocket()) {
+ // Set SO_REUSEADDR as we may bind here and not be able to reuse the address immediately without it.
+ serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress());
+ serverSocket.bind(new InetSocketAddress(nextPort));
+
+ // bind was a success
+ foundPortInRange = true;
+ unicastHostPorts[i] = nextPort;
+ } catch (IOException e) {
+ // Do nothing
+ }
+
+ nextPort++;
+ if (nextPort >= maxPort) {
+ // Roll back to the beginning of the range and do not go into another JVM's port range
+ nextPort = basePort;
+ }
+ tries++;
+ }
+
+ if (!foundPortInRange) {
+ throw new ElasticsearchException("could not find enough open ports in range [" + basePort + "-" + maxPort + "]. required [" + unicastHostPorts.length + "] ports");
+ }
+ }
+ return unicastHostPorts;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/core/src/test/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
new file mode 100644
index 0000000000..d197268810
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class BlockClusterStateProcessing extends SingleNodeDisruption {
+
+ AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>();
+
+
+ public BlockClusterStateProcessing(Random random) {
+ this(null, random);
+ }
+
+ public BlockClusterStateProcessing(String disruptedNode, Random random) {
+ super(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+
+ @Override
+ public void startDisrupting() {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return;
+ }
+ ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+ if (clusterService == null) {
+ return;
+ }
+ logger.info("delaying cluster state updates on node [{}]", disruptionNodeCopy);
+ boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
+ assert success : "startDisrupting called without waiting on stopDisrupting to complete";
+ final CountDownLatch started = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("service_disruption_block", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() {
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ started.countDown();
+ CountDownLatch latch = disruptionLatch.get();
+ if (latch != null) {
+ latch.await();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected error during disruption", t);
+ }
+ });
+ try {
+ started.await();
+ } catch (InterruptedException e) {
+ }
+ }
+
+ @Override
+ public void stopDisrupting() {
+ CountDownLatch latch = disruptionLatch.get();
+ if (latch != null) {
+ latch.countDown();
+ }
+
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMinutes(0);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/core/src/test/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
new file mode 100644
index 0000000000..d957220c6d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Simulates irregular long gc intervals.
+ */
+public class IntermittentLongGCDisruption extends LongGCDisruption {
+
+ volatile boolean disrupting;
+ volatile Thread worker;
+
+ final long intervalBetweenDelaysMin;
+ final long intervalBetweenDelaysMax;
+ final long delayDurationMin;
+ final long delayDurationMax;
+
+
+ public IntermittentLongGCDisruption(Random random) {
+ this(null, random);
+ }
+
+ public IntermittentLongGCDisruption(String disruptedNode, Random random) {
+ this(disruptedNode, random, 100, 200, 300, 20000);
+ }
+
+ public IntermittentLongGCDisruption(String disruptedNode, Random random, long intervalBetweenDelaysMin,
+ long intervalBetweenDelaysMax, long delayDurationMin, long delayDurationMax) {
+ this(random, disruptedNode, intervalBetweenDelaysMin, intervalBetweenDelaysMax, delayDurationMin, delayDurationMax);
+ }
+
+ public IntermittentLongGCDisruption(Random random, String disruptedNode, long intervalBetweenDelaysMin, long intervalBetweenDelaysMax,
+ long delayDurationMin, long delayDurationMax) {
+ super(random, disruptedNode);
+ this.intervalBetweenDelaysMin = intervalBetweenDelaysMin;
+ this.intervalBetweenDelaysMax = intervalBetweenDelaysMax;
+ this.delayDurationMin = delayDurationMin;
+ this.delayDurationMax = delayDurationMax;
+ }
+
+ final static AtomicInteger thread_ids = new AtomicInteger();
+
+ @Override
+ public void startDisrupting() {
+ disrupting = true;
+ worker = new Thread(new BackgroundWorker(), "long_gc_simulation_" + thread_ids.incrementAndGet());
+ worker.setDaemon(true);
+ worker.start();
+ }
+
+ @Override
+ public void stopDisrupting() {
+ if (worker == null) {
+ return;
+ }
+ logger.info("stopping long GCs on [{}]", disruptedNode);
+ disrupting = false;
+ worker.interrupt();
+ try {
+ worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax));
+ } catch (InterruptedException e) {
+ logger.info("background thread failed to stop");
+ }
+ worker = null;
+ }
+
+ private void simulateLongGC(final TimeValue duration) throws InterruptedException {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return;
+ }
+ logger.info("node [{}] goes into GC for [{}]", disruptionNodeCopy, duration);
+ final Set<Thread> nodeThreads = new HashSet<>();
+ try {
+ while (stopNodeThreads(disruptionNodeCopy, nodeThreads)) ;
+ if (!nodeThreads.isEmpty()) {
+ Thread.sleep(duration.millis());
+ }
+ } finally {
+ logger.info("node [{}] resumes from GC", disruptionNodeCopy);
+ resumeThreads(nodeThreads);
+ }
+ }
+
+ class BackgroundWorker implements Runnable {
+
+ @Override
+ public void run() {
+ while (disrupting && disruptedNode != null) {
+ try {
+ TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin)));
+ simulateLongGC(duration);
+
+ duration = new TimeValue(intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)));
+ if (disrupting && disruptedNode != null) {
+ Thread.sleep(duration.millis());
+ }
+ } catch (InterruptedException e) {
+ } catch (Exception e) {
+ logger.error("error in background worker", e);
+ }
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/core/src/test/java/org/elasticsearch/test/disruption/LongGCDisruption.java
new file mode 100644
index 0000000000..de4532269e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/LongGCDisruption.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+/**
+ * Suspends all threads on the specified node in order to simulate a long gc.
+ */
+public class LongGCDisruption extends SingleNodeDisruption {
+
+ private final static Pattern[] unsafeClasses = new Pattern[]{
+ // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing
+ Pattern.compile("Logger")
+ };
+
+ protected final String disruptedNode;
+ private Set<Thread> suspendedThreads;
+
+ public LongGCDisruption(Random random, String disruptedNode) {
+ super(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+ @Override
+ public synchronized void startDisrupting() {
+ if (suspendedThreads == null) {
+ suspendedThreads = new HashSet<>();
+ stopNodeThreads(disruptedNode, suspendedThreads);
+ } else {
+ throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first");
+ }
+ }
+
+ @Override
+ public synchronized void stopDisrupting() {
+ if (suspendedThreads != null) {
+ resumeThreads(suspendedThreads);
+ suspendedThreads = null;
+ }
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(0);
+ }
+
+ @SuppressForbidden(reason = "stops/resumes threads intentionally")
+ protected boolean stopNodeThreads(String node, Set<Thread> nodeThreads) {
+ Set<Thread> allThreadsSet = Thread.getAllStackTraces().keySet();
+ boolean stopped = false;
+ final String nodeThreadNamePart = "[" + node + "]";
+ for (Thread thread : allThreadsSet) {
+ String name = thread.getName();
+ if (name.contains(nodeThreadNamePart)) {
+ if (thread.isAlive() && nodeThreads.add(thread)) {
+ stopped = true;
+ thread.suspend();
+ // double check the thread is not in a shared resource like logging. If so, let it go and come back..
+ boolean safe = true;
+ safe:
+ for (StackTraceElement stackElement : thread.getStackTrace()) {
+ String className = stackElement.getClassName();
+ for (Pattern unsafePattern : unsafeClasses) {
+ if (unsafePattern.matcher(className).find()) {
+ safe = false;
+ break safe;
+ }
+ }
+ }
+ if (!safe) {
+ thread.resume();
+ nodeThreads.remove(thread);
+ }
+ }
+ }
+ }
+ return stopped;
+ }
+
+ @SuppressForbidden(reason = "stops/resumes threads intentionally")
+ protected void resumeThreads(Set<Thread> threads) {
+ for (Thread thread : threads) {
+ thread.resume();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
new file mode 100644
index 0000000000..9eb99302e4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkDelaysPartition extends NetworkPartition {
+
+ static long DEFAULT_DELAY_MIN = 10000;
+ static long DEFAULT_DELAY_MAX = 90000;
+
+
+ final long delayMin;
+ final long delayMax;
+
+ TimeValue duration;
+
+ public NetworkDelaysPartition(Random random) {
+ this(random, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX);
+ }
+
+ public NetworkDelaysPartition(Random random, long delayMin, long delayMax) {
+ super(random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+ }
+
+ public NetworkDelaysPartition(String node1, String node2, Random random) {
+ this(node1, node2, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX, random);
+ }
+
+ public NetworkDelaysPartition(String node1, String node2, long delayMin, long delayMax, Random random) {
+ super(node1, node2, random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+ }
+
+ public NetworkDelaysPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ this(nodesSideOne, nodesSideTwo, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX, random);
+ }
+
+ public NetworkDelaysPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, long delayMin, long delayMax, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+
+ }
+
+ @Override
+ public synchronized void startDisrupting() {
+ duration = new TimeValue(delayMin + random.nextInt((int) (delayMax - delayMin)));
+ super.startDisrupting();
+ }
+
+ @Override
+ void applyDisruption(DiscoveryNode node1, MockTransportService transportService1,
+ DiscoveryNode node2, MockTransportService transportService2) {
+ transportService1.addUnresponsiveRule(node1, duration);
+ transportService1.addUnresponsiveRule(node2, duration);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "network delays for [" + duration + "]";
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(delayMax);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
new file mode 100644
index 0000000000..8653b50f74
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkDisconnectPartition extends NetworkPartition {
+
+
+ public NetworkDisconnectPartition(Random random) {
+ super(random);
+ }
+
+ public NetworkDisconnectPartition(String node1, String node2, Random random) {
+ super(node1, node2, random);
+ }
+
+ public NetworkDisconnectPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "disconnected";
+ }
+
+ @Override
+ void applyDisruption(DiscoveryNode node1, MockTransportService transportService1,
+ DiscoveryNode node2, MockTransportService transportService2) {
+ transportService1.addFailToSendNoConnectRule(node2);
+ transportService2.addFailToSendNoConnectRule(node1);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java
new file mode 100644
index 0000000000..8206fafef4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import com.google.common.collect.ImmutableList;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+public abstract class NetworkPartition implements ServiceDisruptionScheme {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ final Set<String> nodesSideOne;
+ final Set<String> nodesSideTwo;
+ volatile boolean autoExpand;
+ protected final Random random;
+ protected volatile InternalTestCluster cluster;
+ protected volatile boolean activeDisruption = false;
+
+
+ public NetworkPartition(Random random) {
+ this.random = new Random(random.nextLong());
+ nodesSideOne = new HashSet<>();
+ nodesSideTwo = new HashSet<>();
+ autoExpand = true;
+ }
+
+ public NetworkPartition(String node1, String node2, Random random) {
+ this(random);
+ nodesSideOne.add(node1);
+ nodesSideTwo.add(node2);
+ autoExpand = false;
+ }
+
+ public NetworkPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ this(random);
+ this.nodesSideOne.addAll(nodesSideOne);
+ this.nodesSideTwo.addAll(nodesSideTwo);
+ autoExpand = false;
+ }
+
+
+ public List<String> getNodesSideOne() {
+ return ImmutableList.copyOf(nodesSideOne);
+ }
+
+ public List<String> getNodesSideTwo() {
+ return ImmutableList.copyOf(nodesSideTwo);
+ }
+
+ public List<String> getMajoritySide() {
+ if (nodesSideOne.size() >= nodesSideTwo.size()) {
+ return getNodesSideOne();
+ } else {
+ return getNodesSideTwo();
+ }
+ }
+
+ public List<String> getMinoritySide() {
+ if (nodesSideOne.size() >= nodesSideTwo.size()) {
+ return getNodesSideTwo();
+ } else {
+ return getNodesSideOne();
+ }
+ }
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+ this.cluster = cluster;
+ if (autoExpand) {
+ for (String node : cluster.getNodeNames()) {
+ applyToNode(node, cluster);
+ }
+ }
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+ stopDisrupting();
+ }
+
+ @Override
+ public synchronized void applyToNode(String node, InternalTestCluster cluster) {
+ if (!autoExpand || nodesSideOne.contains(node) || nodesSideTwo.contains(node)) {
+ return;
+ }
+ if (nodesSideOne.isEmpty()) {
+ nodesSideOne.add(node);
+ } else if (nodesSideTwo.isEmpty()) {
+ nodesSideTwo.add(node);
+ } else if (random.nextBoolean()) {
+ nodesSideOne.add(node);
+ } else {
+ nodesSideTwo.add(node);
+ }
+ }
+
+ @Override
+ public synchronized void removeFromNode(String node, InternalTestCluster cluster) {
+ MockTransportService transportService = (MockTransportService) cluster.getInstance(TransportService.class, node);
+ DiscoveryNode discoveryNode = discoveryNode(node);
+ Set<String> otherSideNodes;
+ if (nodesSideOne.contains(node)) {
+ otherSideNodes = nodesSideTwo;
+ } else if (nodesSideTwo.contains(node)) {
+ otherSideNodes = nodesSideOne;
+ } else {
+ return;
+ }
+ for (String node2 : otherSideNodes) {
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ DiscoveryNode discoveryNode2 = discoveryNode(node2);
+ removeDisruption(discoveryNode, transportService, discoveryNode2, transportService2);
+ }
+ }
+
+ @Override
+ public synchronized void testClusterClosed() {
+
+ }
+
+ protected abstract String getPartitionDescription();
+
+
+ protected DiscoveryNode discoveryNode(String node) {
+ return cluster.getInstance(Discovery.class, node).localNode();
+ }
+
+ @Override
+ public synchronized void startDisrupting() {
+ if (nodesSideOne.size() == 0 || nodesSideTwo.size() == 0) {
+ return;
+ }
+ logger.info("nodes {} will be partitioned from {}. partition type [{}]", nodesSideOne, nodesSideTwo, getPartitionDescription());
+ activeDisruption = true;
+ for (String node1 : nodesSideOne) {
+ MockTransportService transportService1 = (MockTransportService) cluster.getInstance(TransportService.class, node1);
+ DiscoveryNode discoveryNode1 = discoveryNode(node1);
+ for (String node2 : nodesSideTwo) {
+ DiscoveryNode discoveryNode2 = discoveryNode(node2);
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ applyDisruption(discoveryNode1, transportService1, discoveryNode2, transportService2);
+ }
+ }
+ }
+
+
+ @Override
+ public synchronized void stopDisrupting() {
+ if (nodesSideOne.size() == 0 || nodesSideTwo.size() == 0 || !activeDisruption) {
+ return;
+ }
+ logger.info("restoring partition between nodes {} & nodes {}", nodesSideOne, nodesSideTwo);
+ for (String node1 : nodesSideOne) {
+ MockTransportService transportService1 = (MockTransportService) cluster.getInstance(TransportService.class, node1);
+ DiscoveryNode discoveryNode1 = discoveryNode(node1);
+ for (String node2 : nodesSideTwo) {
+ DiscoveryNode discoveryNode2 = discoveryNode(node2);
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ removeDisruption(discoveryNode1, transportService1, discoveryNode2, transportService2);
+ }
+ }
+ activeDisruption = false;
+ }
+
+ abstract void applyDisruption(DiscoveryNode node1, MockTransportService transportService1,
+ DiscoveryNode node2, MockTransportService transportService2);
+
+
+ protected void removeDisruption(DiscoveryNode node1, MockTransportService transportService1,
+ DiscoveryNode node2, MockTransportService transportService2) {
+ transportService1.clearRule(node2);
+ transportService2.clearRule(node1);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
new file mode 100644
index 0000000000..1feb56c46c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkUnresponsivePartition extends NetworkPartition {
+
+ public NetworkUnresponsivePartition(Random random) {
+ super(random);
+ }
+
+ public NetworkUnresponsivePartition(String node1, String node2, Random random) {
+ super(node1, node2, random);
+ }
+
+ public NetworkUnresponsivePartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "unresponsive";
+ }
+
+ @Override
+ void applyDisruption(DiscoveryNode node1, MockTransportService transportService1,
+ DiscoveryNode node2, MockTransportService transportService2) {
+ transportService1.addUnresponsiveRule(node2);
+ transportService2.addUnresponsiveRule(node1);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java b/core/src/test/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
new file mode 100644
index 0000000000..7b348b1afe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+public class NoOpDisruptionScheme implements ServiceDisruptionScheme {
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void applyToNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void removeFromNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void startDisrupting() {
+
+ }
+
+ @Override
+ public void stopDisrupting() {
+
+ }
+
+ @Override
+ public void testClusterClosed() {
+
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java b/core/src/test/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
new file mode 100644
index 0000000000..70774a8235
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+public interface ServiceDisruptionScheme {
+
+ public void applyToCluster(InternalTestCluster cluster);
+
+ public void removeFromCluster(InternalTestCluster cluster);
+
+ public void applyToNode(String node, InternalTestCluster cluster);
+
+ public void removeFromNode(String node, InternalTestCluster cluster);
+
+ public void startDisrupting();
+
+ public void stopDisrupting();
+
+ public void testClusterClosed();
+
+ public TimeValue expectedTimeToHeal();
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/core/src/test/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
new file mode 100644
index 0000000000..3148254011
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+
+public abstract class SingleNodeDisruption implements ServiceDisruptionScheme {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ protected volatile String disruptedNode;
+ protected volatile InternalTestCluster cluster;
+ protected final Random random;
+
+
+ public SingleNodeDisruption(String disruptedNode, Random random) {
+ this(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+ public SingleNodeDisruption(Random random) {
+ this.random = new Random(random.nextLong());
+ }
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+ this.cluster = cluster;
+ if (disruptedNode == null) {
+ String[] nodes = cluster.getNodeNames();
+ disruptedNode = nodes[random.nextInt(nodes.length)];
+ }
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+ if (disruptedNode != null) {
+ removeFromNode(disruptedNode, cluster);
+ }
+ }
+
+ @Override
+ public synchronized void applyToNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public synchronized void removeFromNode(String node, InternalTestCluster cluster) {
+ if (disruptedNode == null) {
+ return;
+ }
+ if (!node.equals(disruptedNode)) {
+ return;
+ }
+ stopDisrupting();
+ disruptedNode = null;
+ }
+
+ @Override
+ public synchronized void testClusterClosed() {
+ disruptedNode = null;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/core/src/test/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
new file mode 100644
index 0000000000..746d7f942b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateNonMasterUpdateTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class SlowClusterStateProcessing extends SingleNodeDisruption {
+
+ volatile boolean disrupting;
+ volatile Thread worker;
+
+ final long intervalBetweenDelaysMin;
+ final long intervalBetweenDelaysMax;
+ final long delayDurationMin;
+ final long delayDurationMax;
+
+
+ public SlowClusterStateProcessing(Random random) {
+ this(null, random);
+ }
+
+ public SlowClusterStateProcessing(String disruptedNode, Random random) {
+ this(disruptedNode, random, 100, 200, 300, 20000);
+ }
+
+ public SlowClusterStateProcessing(String disruptedNode, Random random, long intervalBetweenDelaysMin,
+ long intervalBetweenDelaysMax, long delayDurationMin, long delayDurationMax) {
+ this(random, intervalBetweenDelaysMin, intervalBetweenDelaysMax, delayDurationMin, delayDurationMax);
+ this.disruptedNode = disruptedNode;
+ }
+
+ public SlowClusterStateProcessing(Random random,
+ long intervalBetweenDelaysMin, long intervalBetweenDelaysMax, long delayDurationMin,
+ long delayDurationMax) {
+ super(random);
+ this.intervalBetweenDelaysMin = intervalBetweenDelaysMin;
+ this.intervalBetweenDelaysMax = intervalBetweenDelaysMax;
+ this.delayDurationMin = delayDurationMin;
+ this.delayDurationMax = delayDurationMax;
+ }
+
+
+ @Override
+ public void startDisrupting() {
+ disrupting = true;
+ worker = new Thread(new BackgroundWorker());
+ worker.setDaemon(true);
+ worker.start();
+ }
+
+ @Override
+ public void stopDisrupting() {
+ if (worker == null) {
+ return;
+ }
+ logger.info("stopping to slow down cluster state processing on [{}]", disruptedNode);
+ disrupting = false;
+ worker.interrupt();
+ try {
+ worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax));
+ } catch (InterruptedException e) {
+ logger.info("background thread failed to stop");
+ }
+ worker = null;
+ }
+
+
+ private boolean interruptClusterStateProcessing(final TimeValue duration) throws InterruptedException {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return false;
+ }
+ logger.info("delaying cluster state updates on node [{}] for [{}]", disruptionNodeCopy, duration);
+ final CountDownLatch countDownLatch = new CountDownLatch(1);
+ ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+ if (clusterService == null) {
+ return false;
+ }
+ final AtomicBoolean stopped = new AtomicBoolean(false);
+ clusterService.submitStateUpdateTask("service_disruption_delay", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() {
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ long count = duration.millis() / 200;
+ // wait while checking for a stopped
+ for (; count > 0 && !stopped.get(); count--) {
+ Thread.sleep(200);
+ }
+ if (!stopped.get()) {
+ Thread.sleep(duration.millis() % 200);
+ }
+ countDownLatch.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ countDownLatch.countDown();
+ }
+ });
+ try {
+ countDownLatch.await();
+ } catch (InterruptedException e) {
+ stopped.set(true);
+ // try to wait again, we really want the cluster state thread to be freed up when stopping disruption
+ countDownLatch.await();
+ }
+ return true;
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(0);
+ }
+
+ class BackgroundWorker implements Runnable {
+
+ @Override
+ public void run() {
+ while (disrupting && disruptedNode != null) {
+ try {
+ TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin)));
+ if (!interruptClusterStateProcessing(duration)) {
+ continue;
+ }
+ if (intervalBetweenDelaysMax > 0) {
+ duration = new TimeValue(intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)));
+ if (disrupting && disruptedNode != null) {
+ Thread.sleep(duration.millis());
+ }
+ }
+ } catch (InterruptedException e) {
+ } catch (Exception e) {
+ logger.error("error in background worker", e);
+ }
+ }
+ }
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java b/core/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java
new file mode 100644
index 0000000000..fec406a784
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * A searcher that asserts the IndexReader's refcount on close
+ */
+class AssertingSearcher extends Engine.Searcher {
+ private final Engine.Searcher wrappedSearcher;
+ private final ShardId shardId;
+ private final IndexSearcher indexSearcher;
+ private RuntimeException firstReleaseStack;
+ private final Object lock = new Object();
+ private final int initialRefCount;
+ private final ESLogger logger;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+
+ AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher,
+ ShardId shardId,
+ ESLogger logger) {
+ super(wrappedSearcher.source(), indexSearcher);
+ // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher
+ // with a wrapped reader.
+ this.wrappedSearcher = wrappedSearcher;
+ this.logger = logger;
+ this.shardId = shardId;
+ initialRefCount = wrappedSearcher.reader().getRefCount();
+ this.indexSearcher = indexSearcher;
+ assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed";
+ }
+
+ @Override
+ public String source() {
+ return wrappedSearcher.source();
+ }
+
+ @Override
+ public void close() {
+ synchronized (lock) {
+ if (closed.compareAndSet(false, true)) {
+ firstReleaseStack = new RuntimeException();
+ final int refCount = wrappedSearcher.reader().getRefCount();
+ // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential
+ // problems.
+ assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]";
+ try {
+ wrappedSearcher.close();
+ } catch (RuntimeException ex) {
+ logger.debug("Failed to release searcher", ex);
+ throw ex;
+ }
+ } else {
+ AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]");
+ error.initCause(firstReleaseStack);
+ throw error;
+ }
+ }
+ }
+
+ @Override
+ public IndexReader reader() {
+ return indexSearcher.getIndexReader();
+ }
+
+ @Override
+ public IndexSearcher searcher() {
+ return indexSearcher;
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public boolean isOpen() {
+ return closed.get() == false;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java b/core/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java
new file mode 100644
index 0000000000..160bf26ce1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/MockEngineFactory.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineFactory;
+
+/**
+ *
+ */
+public final class MockEngineFactory implements EngineFactory {
+ @Override
+ public Engine newReadWriteEngine(EngineConfig config, boolean skipTranslogRecovery) {
+ return new MockInternalEngine(config, skipTranslogRecovery);
+ }
+
+ @Override
+ public Engine newReadOnlyEngine(EngineConfig config) {
+ return new MockShadowEngine(config);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java
new file mode 100644
index 0000000000..b321a0dfbb
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -0,0 +1,227 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.AssertingDirectoryReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryCache;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.SearcherManager;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.util.IdentityHashMap;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} or {@link org.elasticsearch.test.engine.MockShadowEngine}
+ * since they need to subclass the actual engine
+ */
+public final class MockEngineSupport {
+
+ public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio";
+ public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper";
+ public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio";
+
+ private final AtomicBoolean closing = new AtomicBoolean(false);
+ private final ESLogger logger = Loggers.getLogger(Engine.class);
+ private final ShardId shardId;
+ private final QueryCache filterCache;
+ private final QueryCachingPolicy filterCachingPolicy;
+ private final SearcherCloseable searcherCloseable;
+ private final MockContext mockContext;
+
+ public static class MockContext {
+ private final Random random;
+ private final boolean wrapReader;
+ private final Class<? extends FilterDirectoryReader> wrapper;
+ private final Settings indexSettings;
+ private final double flushOnClose;
+
+ public MockContext(Random random, boolean wrapReader, Class<? extends FilterDirectoryReader> wrapper, Settings indexSettings) {
+ this.random = random;
+ this.wrapReader = wrapReader;
+ this.wrapper = wrapper;
+ this.indexSettings = indexSettings;
+ flushOnClose = indexSettings.getAsDouble(FLUSH_ON_CLOSE_RATIO, 0.5d);
+ }
+ }
+
+ public MockEngineSupport(EngineConfig config) {
+ Settings indexSettings = config.getIndexSettings();
+ shardId = config.getShardId();
+ filterCache = config.getFilterCache();
+ filterCachingPolicy = config.getFilterCachingPolicy();
+ final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l);
+ Random random = new Random(seed);
+ final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
+ Class<? extends AssertingDirectoryReader> wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class);
+ boolean wrapReader = random.nextDouble() < ratio;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader);
+ }
+ mockContext = new MockContext(random, wrapReader, wrapper, indexSettings);
+ this.searcherCloseable = new SearcherCloseable();
+ LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
+ }
+
+ enum CloseAction {
+ FLUSH_AND_CLOSE,
+ CLOSE;
+ }
+
+
+ /**
+ * Returns the CloseAction to execute on the actual engine. Note this method changes the state on
+ * the first call and treats subsequent calls as if the engine passed is already closed.
+ */
+ public CloseAction flushOrClose(Engine engine, CloseAction originalAction) throws IOException {
+ if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow.
+ if (mockContext.flushOnClose > mockContext.random.nextDouble()) {
+ return CloseAction.FLUSH_AND_CLOSE;
+ } else {
+ return CloseAction.CLOSE;
+ }
+ } else {
+ return originalAction;
+ }
+ }
+
+ public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ IndexReader reader = searcher.getIndexReader();
+ IndexReader wrappedReader = reader;
+ assert reader != null;
+ if (reader instanceof DirectoryReader && mockContext.wrapReader) {
+ wrappedReader = wrapReader((DirectoryReader) reader);
+ }
+ // this executes basic query checks and asserts that weights are normalized only once etc.
+ final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity());
+ assertingIndexSearcher.setQueryCache(filterCache);
+ assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy);
+ return assertingIndexSearcher;
+ }
+
+ private DirectoryReader wrapReader(DirectoryReader reader) {
+ try {
+ Constructor<?>[] constructors = mockContext.wrapper.getConstructors();
+ Constructor<?> nonRandom = null;
+ for (Constructor<?> constructor : constructors) {
+ Class<?>[] parameterTypes = constructor.getParameterTypes();
+ if (parameterTypes.length > 0 && parameterTypes[0] == DirectoryReader.class) {
+ if (parameterTypes.length == 1) {
+ nonRandom = constructor;
+ } else if (parameterTypes.length == 2 && parameterTypes[1] == Settings.class) {
+
+ return (DirectoryReader) constructor.newInstance(reader, mockContext.indexSettings);
+ }
+ }
+ }
+ if (nonRandom != null) {
+ return (DirectoryReader) nonRandom.newInstance(reader);
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchException("Can not wrap reader", e);
+ }
+ return reader;
+ }
+
+ public static abstract class DirectoryReaderWrapper extends FilterDirectoryReader {
+ protected final SubReaderWrapper subReaderWrapper;
+
+ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrapper) throws IOException {
+ super(in, subReaderWrapper);
+ this.subReaderWrapper = subReaderWrapper;
+ }
+
+ @Override
+ public Object getCoreCacheKey() {
+ return in.getCoreCacheKey();
+ }
+
+ @Override
+ public Object getCombinedCoreAndDeletesKey() {
+ return in.getCombinedCoreAndDeletesKey();
+ }
+
+ }
+
+ public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher, IndexSearcher searcher, SearcherManager manager) {
+ final AssertingIndexSearcher assertingIndexSearcher = newSearcher(source, searcher, manager);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity());
+ // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will
+ // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager
+ // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here
+ AssertingSearcher assertingSearcher = new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger) {
+ @Override
+ public void close() {
+ try {
+ searcherCloseable.remove(this);
+ } finally {
+ super.close();
+ }
+ }
+ };
+ searcherCloseable.add(assertingSearcher, engineSearcher.source());
+ return assertingSearcher;
+ }
+
+ private static final class SearcherCloseable implements Closeable {
+
+ private final IdentityHashMap<AssertingSearcher, RuntimeException> openSearchers = new IdentityHashMap<>();
+
+ @Override
+ public synchronized void close() throws IOException {
+ if (openSearchers.isEmpty() == false) {
+ AssertionError error = new AssertionError("Unreleased searchers found");
+ for (RuntimeException ex : openSearchers.values()) {
+ error.addSuppressed(ex);
+ }
+ throw error;
+ }
+ }
+
+ void add(AssertingSearcher searcher, String source) {
+ final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + source+ "]");
+ synchronized (this) {
+ openSearchers.put(searcher, ex);
+ }
+ }
+
+ synchronized void remove(AssertingSearcher searcher) {
+ openSearchers.remove(searcher);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/core/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
new file mode 100644
index 0000000000..ed4dc95795
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SearcherManager;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.InternalEngine;
+
+import java.io.IOException;
+
+final class MockInternalEngine extends InternalEngine {
+ private MockEngineSupport support;
+ private final boolean randomizeFlushOnClose;
+
+
+ MockInternalEngine(EngineConfig config, boolean skipInitialTranslogRecovery) throws EngineException {
+ super(config, skipInitialTranslogRecovery);
+ randomizeFlushOnClose = IndexMetaData.isOnSharedFilesystem(config.getIndexSettings()) == false;
+ }
+
+ private synchronized MockEngineSupport support() {
+ // lazy initialized since we need it already on super() ctor execution :(
+ if (support == null) {
+ support = new MockEngineSupport(config());
+ }
+ return support;
+ }
+
+ @Override
+ public void close() throws IOException {
+ switch (support().flushOrClose(this, MockEngineSupport.CloseAction.CLOSE)) {
+ case FLUSH_AND_CLOSE:
+ super.flushAndClose();
+ break;
+ case CLOSE:
+ super.close();
+ break;
+ }
+ }
+
+ @Override
+ public void flushAndClose() throws IOException {
+ if (randomizeFlushOnClose) {
+ switch (support().flushOrClose(this, MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) {
+ case FLUSH_AND_CLOSE:
+ super.flushAndClose();
+ break;
+ case CLOSE:
+ super.close();
+ break;
+ }
+ } else {
+ super.flushAndClose();
+ }
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ final Searcher engineSearcher = super.newSearcher(source, searcher, manager);
+ return support().wrapSearcher(source, engineSearcher, searcher, manager);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java b/core/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java
new file mode 100644
index 0000000000..1ed920b20f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.search.AssertingIndexSearcher;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SearcherManager;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.ShadowEngine;
+
+import java.io.IOException;
+import java.util.Map;
+
+final class MockShadowEngine extends ShadowEngine {
+ private final MockEngineSupport support;
+
+ MockShadowEngine(EngineConfig config) {
+ super(config);
+ this.support = new MockEngineSupport(config);
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ final Searcher engineSearcher = super.newSearcher(source, searcher, manager);
+ return support.wrapSearcher(source, engineSearcher, searcher, manager);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
new file mode 100644
index 0000000000..365bf7fb65
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+
+import java.io.IOException;
+
+/**
+ * A FilterLeafReader that allows throwing exceptions if certain methods
+ * are called on it. This allows testing parts of the system under certain
+ * error conditions that would otherwise not be possible.
+ */
+public class ThrowingLeafReaderWrapper extends FilterLeafReader {
+
+ private final Thrower thrower;
+
+ /**
+ * Flags passed to {@link Thrower#maybeThrow(org.elasticsearch.test.engine.ThrowingLeafReaderWrapper.Flags)}
+ * when the corresponding method is called.
+ */
+ public enum Flags {
+ TermVectors,
+ Terms,
+ TermsEnum,
+ Intersect,
+ DocsEnum,
+ DocsAndPositionsEnum,
+ Fields,
+ Norms, NumericDocValues, BinaryDocValues, SortedDocValues, SortedSetDocValues;
+ }
+
+ /**
+ * A callback interface that allows to throw certain exceptions for
+ * methods called on the IndexReader that is wrapped by {@link ThrowingLeafReaderWrapper}
+ */
+ public static interface Thrower {
+ /**
+ * Maybe throws an exception ;)
+ */
+ public void maybeThrow(Flags flag) throws IOException;
+
+ /**
+ * If this method returns true the {@link Terms} instance for the given field
+ * is wrapped with Thrower support otherwise no exception will be thrown for
+ * the current {@link Terms} instance or any other instance obtained from it.
+ */
+ public boolean wrapTerms(String field);
+ }
+
+ public ThrowingLeafReaderWrapper(LeafReader in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+
+ @Override
+ public Fields fields() throws IOException {
+ Fields fields = super.fields();
+ thrower.maybeThrow(Flags.Fields);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ Fields fields = super.getTermVectors(docID);
+ thrower.maybeThrow(Flags.TermVectors);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ /**
+ * Wraps a Fields but with additional asserts
+ */
+ public static class ThrowingFields extends FilterFields {
+ private final Thrower thrower;
+
+ public ThrowingFields(Fields in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (thrower.wrapTerms(field)) {
+ thrower.maybeThrow(Flags.Terms);
+ return terms == null ? null : new ThrowingTerms(terms, thrower);
+ }
+ return terms;
+ }
+ }
+
+ /**
+ * Wraps a Terms but with additional asserts
+ */
+ public static class ThrowingTerms extends FilterTerms {
+ private final Thrower thrower;
+
+ public ThrowingTerms(Terms in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws IOException {
+ TermsEnum termsEnum = in.intersect(automaton, bytes);
+ thrower.maybeThrow(Flags.Intersect);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+
+ @Override
+ public TermsEnum iterator() throws IOException {
+ TermsEnum termsEnum = super.iterator();
+ thrower.maybeThrow(Flags.TermsEnum);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+ }
+
+ static class ThrowingTermsEnum extends FilterTermsEnum {
+ private final Thrower thrower;
+
+ public ThrowingTermsEnum(TermsEnum in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+
+ }
+
+ @Override
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ if ((flags & PostingsEnum.POSITIONS) != 0) {
+ thrower.maybeThrow(Flags.DocsAndPositionsEnum);
+ } else {
+ thrower.maybeThrow(Flags.DocsEnum);
+ }
+ return super.postings(liveDocs, reuse, flags);
+ }
+ }
+
+
+ @Override
+ public NumericDocValues getNumericDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.NumericDocValues);
+ return super.getNumericDocValues(field);
+
+ }
+
+ @Override
+ public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.BinaryDocValues);
+ return super.getBinaryDocValues(field);
+ }
+
+ @Override
+ public SortedDocValues getSortedDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedDocValues);
+ return super.getSortedDocValues(field);
+ }
+
+ @Override
+ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedSetDocValues);
+ return super.getSortedSetDocValues(field);
+ }
+
+ @Override
+ public NumericDocValues getNormValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.Norms);
+ return super.getNormValues(field);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/core/src/test/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
new file mode 100644
index 0000000000..ac58251103
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.gateway;
+
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.GatewayAllocator;
+
+/**
+ * An allocator used for tests that doesn't do anything
+ */
+public class NoopGatewayAllocator extends GatewayAllocator {
+
+ public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();
+
+ private NoopGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ return false;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java
new file mode 100644
index 0000000000..9583c673f7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java
@@ -0,0 +1,303 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.geo;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.spatial4j.core.context.jts.JtsSpatialContext;
+import com.spatial4j.core.distance.DistanceUtils;
+import com.spatial4j.core.exception.InvalidShapeException;
+import com.spatial4j.core.shape.Point;
+import com.spatial4j.core.shape.Rectangle;
+import com.spatial4j.core.shape.impl.Range;
+import com.vividsolutions.jts.algorithm.ConvexHull;
+import com.vividsolutions.jts.geom.Coordinate;
+import com.vividsolutions.jts.geom.Geometry;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.geo.builders.BaseLineStringBuilder;
+import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder;
+import org.elasticsearch.common.geo.builders.LineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiLineStringBuilder;
+import org.elasticsearch.common.geo.builders.MultiPointBuilder;
+import org.elasticsearch.common.geo.builders.PointBuilder;
+import org.elasticsearch.common.geo.builders.PointCollection;
+import org.elasticsearch.common.geo.builders.PolygonBuilder;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+
+import java.util.Random;
+
+import static com.spatial4j.core.shape.SpatialRelation.CONTAINS;
+
+/**
+ * Random geoshape generation utilities for randomized geospatial testing
+ */
+public class RandomShapeGenerator {
+
+ protected static JtsSpatialContext ctx = ShapeBuilder.SPATIAL_CONTEXT;
+ protected static final double xDIVISIBLE = 2;
+ protected static boolean ST_VALIDATE = true;
+
+ public static enum ShapeType {
+ POINT, MULTIPOINT, LINESTRING, MULTILINESTRING, POLYGON;
+ private static final ShapeType[] types = values();
+
+ public static ShapeType randomType(Random r) {
+ return types[RandomInts.randomIntBetween(r, 0, types.length - 1)];
+ }
+ }
+
+ public static ShapeBuilder createShapeNear(Random r, Point nearPoint) throws InvalidShapeException {
+ return createShape(r, nearPoint, null, null);
+ }
+
+ public static ShapeBuilder createShapeNear(Random r, Point nearPoint, ShapeType st) throws InvalidShapeException {
+ return createShape(r, nearPoint, null, st);
+ }
+
+ public static ShapeBuilder createShapeWithin(Random r, Rectangle bbox) throws InvalidShapeException {
+ return createShape(r, null, bbox, null);
+ }
+
+ public static ShapeBuilder createShapeWithin(Random r, Rectangle bbox, ShapeType st) throws InvalidShapeException {
+ return createShape(r, null, bbox, st);
+ }
+
+ public static GeometryCollectionBuilder createGeometryCollection(Random r) throws InvalidShapeException {
+ return createGeometryCollection(r, null, null, 0);
+ }
+
+ public static GeometryCollectionBuilder createGeometryCollectionNear(Random r, Point nearPoint) throws InvalidShapeException {
+ return createGeometryCollection(r, nearPoint, null, 0);
+ }
+
+ public static GeometryCollectionBuilder createGeometryCollectionNear(Random r, Point nearPoint, int size) throws
+ InvalidShapeException {
+ return createGeometryCollection(r, nearPoint, null, size);
+ }
+
+ public static GeometryCollectionBuilder createGeometryCollectionWithin(Random r, Rectangle within) throws InvalidShapeException {
+ return createGeometryCollection(r, null, within, 0);
+ }
+
+ public static GeometryCollectionBuilder createGeometryCollectionWithin(Random r, Rectangle within, int size) throws
+ InvalidShapeException {
+ return createGeometryCollection(r, null, within, size);
+ }
+
+ protected static GeometryCollectionBuilder createGeometryCollection(Random r, Point nearPoint, Rectangle bounds, int numGeometries)
+ throws InvalidShapeException {
+ if (numGeometries <= 0) {
+ // cap geometry collection at 4 shapes (to save test time)
+ numGeometries = RandomInts.randomIntBetween(r, 2, 5);
+ }
+
+ if (nearPoint == null) {
+ nearPoint = xRandomPoint(r);
+ }
+
+ if (bounds == null) {
+ bounds = xRandomRectangle(r, nearPoint);
+ }
+
+ GeometryCollectionBuilder gcb = new GeometryCollectionBuilder();
+ for (int i=0; i<numGeometries;) {
+ ShapeBuilder builder = createShapeWithin(r, bounds);
+ // due to world wrapping, and the possibility for ambiguous polygons, the random shape generation could bail with
+ // a null shape. We catch that situation here, and only increment the counter when a valid shape is returned.
+ // Not the most efficient but its the lesser of the evil alternatives
+ if (builder != null) {
+ gcb.shape(builder);
+ ++i;
+ }
+ }
+ return gcb;
+ }
+
+ private static ShapeBuilder createShape(Random r, Point nearPoint, Rectangle within, ShapeType st) throws InvalidShapeException {
+ return createShape(r, nearPoint, within, st, ST_VALIDATE);
+ }
+
+ /**
+ * Creates a random shape useful for randomized testing, NOTE: exercise caution when using this to build random GeometryCollections
+ * as creating a large random number of random shapes can result in massive resource consumption
+ * see: {@link org.elasticsearch.search.geo.GeoShapeIntegrationTests#testShapeFilterWithRandomGeoCollection}
+ *
+ * The following options are included
+ * @param nearPoint Create a shape near a provided point
+ * @param within Create a shape within the provided rectangle (note: if not null this will override the provided point)
+ * @param st Create a random shape of the provided type
+ * @return the ShapeBuilder for a random shape
+ */
+ private static ShapeBuilder createShape(Random r, Point nearPoint, Rectangle within, ShapeType st, boolean validate) throws
+ InvalidShapeException {
+
+ if (st == null) {
+ st = ShapeType.randomType(r);
+ }
+
+ if (within == null) {
+ within = xRandomRectangle(r, nearPoint);
+ }
+
+ // NOTE: multipolygon not yet supported. Overlapping polygons are invalid so randomization
+ // requires an approach to avoid overlaps. This could be approached by creating polygons
+ // inside non overlapping bounding rectangles
+ switch (st) {
+ case POINT:
+ Point p = xRandomPointIn(r, within);
+ PointBuilder pb = new PointBuilder().coordinate(new Coordinate(p.getX(), p.getY(), Double.NaN));
+ return pb;
+ case MULTIPOINT:
+ case LINESTRING:
+ // for random testing having a maximum number of 10 points for a line string is more than sufficient
+ // if this number gets out of hand, the number of self intersections for a linestring can become
+ // (n^2-n)/2 and computing the relation intersection matrix will become NP-Hard
+ int numPoints = RandomInts.randomIntBetween(r, 3, 10);
+ PointCollection pcb = (st == ShapeType.MULTIPOINT) ? new MultiPointBuilder() : new LineStringBuilder();
+ for (int i=0; i<numPoints; ++i) {
+ p = xRandomPointIn(r, within);
+ pcb.point(p.getX(), p.getY());
+ }
+ return pcb;
+ case MULTILINESTRING:
+ MultiLineStringBuilder mlsb = new MultiLineStringBuilder();
+ for (int i=0; i<RandomInts.randomIntBetween(r, 1, 10); ++i) {
+ mlsb.linestring((BaseLineStringBuilder) createShape(r, nearPoint, within, ShapeType.LINESTRING, false));
+ }
+ return mlsb;
+ case POLYGON:
+ numPoints = RandomInts.randomIntBetween(r, 5, 25);
+ Coordinate[] coordinates = new Coordinate[numPoints];
+ for (int i=0; i<numPoints; ++i) {
+ p = (Point) createShape(r, nearPoint, within, ShapeType.POINT, false).build();
+ coordinates[i] = new Coordinate(p.getX(), p.getY());
+ }
+ // random point order or random linestrings can lead to invalid self-crossing polygons,
+ // compute the convex hull for a set of points to ensure polygon does not self cross
+ Geometry shell = new ConvexHull(coordinates, ctx.getGeometryFactory()).getConvexHull();
+ Coordinate[] shellCoords = shell.getCoordinates();
+ // if points are in a line the convex hull will be 2 points which will also lead to an invalid polygon
+ // when all else fails, use the bounding box as the polygon
+ if (shellCoords.length < 3) {
+ shellCoords = new Coordinate[4];
+ shellCoords[0] = new Coordinate(within.getMinX(), within.getMinY());
+ shellCoords[1] = new Coordinate(within.getMinX(), within.getMaxY());
+ shellCoords[2] = new Coordinate(within.getMaxX(), within.getMaxY());
+ shellCoords[3] = new Coordinate(within.getMaxX(), within.getMinY());
+ }
+ PolygonBuilder pgb = new PolygonBuilder().points(shellCoords).close();
+ if (validate) {
+ // This test framework builds semi-random geometry (in the sense that points are not truly random due to spatial
+ // auto-correlation) As a result of the semi-random nature of the geometry, one can not predict the orientation
+ // intent for ambiguous polygons. Therefore, an invalid oriented dateline crossing polygon could be built.
+ // The validate flag will check for these possibilities and bail if an incorrect geometry is created
+ try {
+ pgb.build();
+ } catch (InvalidShapeException e) {
+ return null;
+ }
+ }
+ return pgb;
+ default:
+ throw new ElasticsearchException("Unable to create shape of type [" + st + "]");
+ }
+ }
+
+ protected static Point xRandomPoint(Random r) {
+ return xRandomPointIn(r, ctx.getWorldBounds());
+ }
+
+ protected static Point xRandomPointIn(Random rand, Rectangle r) {
+ double x = r.getMinX() + rand.nextDouble()*r.getWidth();
+ double y = r.getMinY() + rand.nextDouble()*r.getHeight();
+ x = xNormX(x);
+ y = xNormY(y);
+ Point p = ctx.makePoint(x,y);
+ RandomizedTest.assertEquals(CONTAINS, r.relate(p));
+ return p;
+ }
+
+ protected static Rectangle xRandomRectangle(Random r, Point nearP) {
+ Rectangle bounds = ctx.getWorldBounds();
+ if (nearP == null)
+ nearP = xRandomPointIn(r, bounds);
+
+ Range xRange = xRandomRange(r, rarely(r) ? 0 : nearP.getX(), Range.xRange(bounds, ctx));
+ Range yRange = xRandomRange(r, rarely(r) ? 0 : nearP.getY(), Range.yRange(bounds, ctx));
+
+ return xMakeNormRect(
+ xDivisible(xRange.getMin()*10e3)/10e3,
+ xDivisible(xRange.getMax()*10e3)/10e3,
+ xDivisible(yRange.getMin()*10e3)/10e3,
+ xDivisible(yRange.getMax()*10e3)/10e3);
+ }
+
+ private static boolean rarely(Random r) {
+ return RandomInts.randomInt(r, 100) >= 90;
+ }
+
+ private static Range xRandomRange(Random r, double near, Range bounds) {
+ double mid = near + r.nextGaussian() * bounds.getWidth() / 6;
+ double width = Math.abs(r.nextGaussian()) * bounds.getWidth() / 6;//1/3rd
+ return new Range(mid - width / 2, mid + width / 2);
+ }
+
+ private static double xDivisible(double v, double divisible) {
+ return (int) (Math.round(v / divisible) * divisible);
+ }
+
+ private static double xDivisible(double v) {
+ return xDivisible(v, xDIVISIBLE);
+ }
+
+ protected static Rectangle xMakeNormRect(double minX, double maxX, double minY, double maxY) {
+ minX = DistanceUtils.normLonDEG(minX);
+ maxX = DistanceUtils.normLonDEG(maxX);
+
+ if (maxX < minX) {
+ double t = minX;
+ minX = maxX;
+ maxX = t;
+ }
+
+ double minWorldY = ctx.getWorldBounds().getMinY();
+ double maxWorldY = ctx.getWorldBounds().getMaxY();
+ if (minY < minWorldY || minY > maxWorldY) {
+ minY = DistanceUtils.normLatDEG(minY);
+ }
+ if (maxY < minWorldY || maxY > maxWorldY) {
+ maxY = DistanceUtils.normLatDEG(maxY);
+ }
+ if (maxY < minY) {
+ double t = minY;
+ minY = maxY;
+ maxY = t;
+ }
+ return ctx.makeRectangle(minX, maxX, minY, maxY);
+ }
+
+ protected static double xNormX(double x) {
+ return ctx.isGeo() ? DistanceUtils.normLonDEG(x) : x;
+ }
+
+ protected static double xNormY(double y) {
+ return ctx.isGeo() ? DistanceUtils.normLatDEG(y) : y;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
new file mode 100644
index 0000000000..b21e94d30a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Matcher;
+
+/**
+ * Assertions for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionAssertions {
+
+    /**
+     * Returns a matcher asserting that an {@link ImmutableOpenMap} contains the
+     * given key; delegates to {@link CollectionMatchers.ImmutableOpenMapHasKeyMatcher}.
+     */
+    public static Matcher<ImmutableOpenMap> hasKey(final String key) {
+        return new CollectionMatchers.ImmutableOpenMapHasKeyMatcher(key);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
new file mode 100644
index 0000000000..521ba58b0e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Matchers for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionMatchers {
+
+    /** Hamcrest matcher that passes when the map contains the configured key. */
+    public static class ImmutableOpenMapHasKeyMatcher extends TypeSafeMatcher<ImmutableOpenMap> {
+
+        // the key whose presence is asserted
+        private final String key;
+
+        public ImmutableOpenMapHasKeyMatcher(String key) {
+            this.key = key;
+        }
+
+        @Override
+        protected boolean matchesSafely(ImmutableOpenMap item) {
+            return item.containsKey(key);
+        }
+
+        @Override
+        public void describeMismatchSafely(final ImmutableOpenMap map, final Description mismatchDescription) {
+            // an empty map gets a short message; otherwise dump the whole map
+            if (map.size() == 0) {
+                mismatchDescription.appendText("was empty");
+            } else {
+                mismatchDescription.appendText(" was ").appendValue(map);
+            }
+        }
+
+        @Override
+        public void describeTo(Description description) {
+            description.appendText("ImmutableOpenMap should contain key " + key);
+        }
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
new file mode 100644
index 0000000000..819c1d5ab1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -0,0 +1,805 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import com.google.common.base.Function;
+import com.google.common.base.Predicate;
+import com.google.common.base.Predicates;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.Iterables;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.exists.ExistsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+
+import static com.google.common.base.Predicates.isNull;
+import static org.elasticsearch.test.ElasticsearchTestCase.*;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class ElasticsearchAssertions {
+
+    /** Executes the builder and asserts the response was acknowledged. */
+    public static void assertAcked(AcknowledgedRequestBuilder<?, ?, ?> builder) {
+        assertAcked(builder.get());
+    }
+
+    /** Executes the cluster-health request and asserts it did not time out. */
+    public static void assertNoTimeout(ClusterHealthRequestBuilder requestBuilder) {
+        assertNoTimeout(requestBuilder.get());
+    }
+
+    /** Asserts that the given cluster-health response did not time out. */
+    public static void assertNoTimeout(ClusterHealthResponse response) {
+        assertThat("ClusterHealthResponse has timed out - returned: [" + response + "]", response.isTimedOut(), is(false));
+    }
+
+    /** Asserts the response was acknowledged and round-trips across wire versions. */
+    public static void assertAcked(AcknowledgedResponse response) {
+        assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true));
+        assertVersionSerializable(response);
+    }
+
+    /** Executes the delete-index request and asserts it was acknowledged. */
+    public static void assertAcked(DeleteIndexRequestBuilder builder) {
+        assertAcked(builder.get());
+    }
+
+    /** Asserts the delete-index response was acknowledged and round-trips across wire versions. */
+    public static void assertAcked(DeleteIndexResponse response) {
+        assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true));
+        assertVersionSerializable(response);
+    }
+
+    /**
+     * Executes the request and fails if the request has not been blocked.
+     *
+     * @param builder the request builder
+     */
+    public static void assertBlocked(ActionRequestBuilder builder) {
+        assertBlocked(builder, null);
+    }
+
+    /**
+     * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}.
+     *
+     * @param builder the request builder
+     * @param expectedBlock the expected block, or null to accept any block
+     */
+    public static void assertBlocked(ActionRequestBuilder builder, ClusterBlock expectedBlock) {
+        try {
+            builder.get();
+            fail("Request executed with success but a ClusterBlockException was expected");
+        } catch (ClusterBlockException e) {
+            assertThat(e.blocks().size(), greaterThan(0));
+            assertThat(e.status(), equalTo(RestStatus.FORBIDDEN));
+
+            if (expectedBlock != null) {
+                // match on block id only; other block attributes may differ
+                boolean found = false;
+                for (ClusterBlock clusterBlock : e.blocks()) {
+                    if (clusterBlock.id() == expectedBlock.id()) {
+                        found = true;
+                        break;
+                    }
+                }
+                assertThat("Request should have been blocked by [" + expectedBlock + "] instead of " + e.blocks(), found, equalTo(true));
+            }
+        }
+    }
+
+    /** Builds a human-readable summary of shard totals plus one line per shard failure. */
+    public static String formatShardStatus(BroadcastResponse response) {
+        StringBuilder msg = new StringBuilder();
+        msg.append(" Total shards: ").append(response.getTotalShards())
+                .append(" Successful shards: ").append(response.getSuccessfulShards())
+                .append(" & ").append(response.getFailedShards()).append(" shard failures:");
+        for (ShardOperationFailedException failure : response.getShardFailures()) {
+            msg.append("\n ").append(failure.toString());
+        }
+        return msg.toString();
+    }
+
+    /** Builds a human-readable summary of shard totals plus one line per search shard failure. */
+    public static String formatShardStatus(SearchResponse response) {
+        StringBuilder msg = new StringBuilder();
+        msg.append(" Total shards: ").append(response.getTotalShards())
+                .append(" Successful shards: ").append(response.getSuccessfulShards())
+                .append(" & ").append(response.getFailedShards()).append(" shard failures:");
+        for (ShardSearchFailure failure : response.getShardFailures()) {
+            msg.append("\n ").append(failure.toString());
+        }
+        return msg.toString();
+    }
+
+    /*
+     * assertions
+     */
+    /** Asserts the search returned exactly {@code expectedHitCount} total hits. */
+    public static void assertHitCount(SearchResponse searchResponse, long expectedHitCount) {
+        if (searchResponse.getHits().totalHits() != expectedHitCount) {
+            fail("Hit count is " + searchResponse.getHits().totalHits() + " but " + expectedHitCount + " was expected. "
+                    + formatShardStatus(searchResponse));
+        }
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the response carries no hits at all. */
+    public static void assertNoSearchHits(SearchResponse searchResponse) {
+        assertEquals(0, searchResponse.getHits().getHits().length);
+    }
+
+    /** Asserts the response's hit ids are exactly {@code ids}, in any order. */
+    public static void assertSearchHits(SearchResponse searchResponse, String... ids) {
+        String shardStatus = formatShardStatus(searchResponse);
+
+        // remove each returned id from the expected set; leftovers are missing hits
+        Set<String> idsSet = new HashSet<>(Arrays.asList(ids));
+        for (SearchHit hit : searchResponse.getHits()) {
+            assertThat("id [" + hit.getId() + "] was found in search results but wasn't expected (type [" + hit.getType() + "], index [" + hit.index() + "])"
+                    + shardStatus, idsSet.remove(hit.getId()),
+                    equalTo(true));
+        }
+        assertThat("Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + "."
+                + shardStatus, idsSet.size(), equalTo(0));
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts each hit's sort values equal the corresponding row of {@code sortValues}. */
+    public static void assertSortValues(SearchResponse searchResponse, Object[]... sortValues) {
+        assertSearchResponse(searchResponse);
+        SearchHit[] hits = searchResponse.getHits().getHits();
+        assertEquals(sortValues.length, hits.length);
+        for (int i = 0; i < sortValues.length; ++i) {
+            final Object[] hitsSortValues = hits[i].getSortValues();
+            assertArrayEquals("Offset " + Integer.toString(i) + ", id " + hits[i].getId(), sortValues[i], hitsSortValues);
+        }
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the response's hit ids are exactly {@code ids}, in the given order. */
+    public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) {
+        String shardStatus = formatShardStatus(searchResponse);
+        assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+        for (int i = 0; i < ids.length; i++) {
+            SearchHit hit = searchResponse.getHits().hits()[i];
+            assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i]));
+        }
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the count response returned exactly {@code expectedHitCount}. */
+    public static void assertHitCount(CountResponse countResponse, long expectedHitCount) {
+        if (countResponse.getCount() != expectedHitCount) {
+            fail("Count is " + countResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse));
+        }
+        assertVersionSerializable(countResponse);
+    }
+
+    /** Asserts the exists response reports the {@code expected} existence flag. */
+    public static void assertExists(ExistsResponse existsResponse, boolean expected) {
+        if (existsResponse.exists() != expected) {
+            fail("Exist is " + existsResponse.exists() + " but " + expected + " was expected " + formatShardStatus(existsResponse));
+        }
+        assertVersionSerializable(existsResponse);
+    }
+
+    /** Asserts the percolate response matched exactly {@code expectedHitCount} queries. */
+    public static void assertMatchCount(PercolateResponse percolateResponse, long expectedHitCount) {
+        if (percolateResponse.getCount() != expectedHitCount) {
+            fail("Count is " + percolateResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(percolateResponse));
+        }
+        assertVersionSerializable(percolateResponse);
+    }
+
+    /** Asserts that the get response found its document. */
+    public static void assertExists(GetResponse response) {
+        String message = String.format(Locale.ROOT, "Expected %s/%s/%s to exist, but does not", response.getIndex(), response.getType(), response.getId());
+        assertThat(message, response.isExists(), is(true));
+    }
+
+    // positional conveniences: assert the matcher against the Nth hit (1-based)
+    public static void assertFirstHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+        assertSearchHit(searchResponse, 1, matcher);
+    }
+
+    public static void assertSecondHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+        assertSearchHit(searchResponse, 2, matcher);
+    }
+
+    public static void assertThirdHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+        assertSearchHit(searchResponse, 3, matcher);
+    }
+
+    public static void assertFourthHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+        assertSearchHit(searchResponse, 4, matcher);
+    }
+
+    public static void assertFifthHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+        assertSearchHit(searchResponse, 5, matcher);
+    }
+
+    /**
+     * Asserts the {@code number}-th hit (1-based) of the response matches {@code matcher},
+     * and that the response round-trips across wire versions.
+     * Fix: the original asserted {@code number > 0} twice in a row (once without a
+     * message); the redundant unlabeled assertion is removed.
+     */
+    public static void assertSearchHit(SearchResponse searchResponse, int number, Matcher<SearchHit> matcher) {
+        assertThat("SearchHit number must be greater than 0", number, greaterThan(0));
+        assertThat(searchResponse.getHits().totalHits(), greaterThanOrEqualTo((long) number));
+        assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher);
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the search completed with zero shard failures. */
+    public static void assertNoFailures(SearchResponse searchResponse) {
+        assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()),
+                searchResponse.getShardFailures().length, equalTo(0));
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the search produced at least one shard failure. */
+    public static void assertFailures(SearchResponse searchResponse) {
+        assertThat("Expected at least one shard failure, got none",
+                searchResponse.getShardFailures().length, greaterThan(0));
+        assertVersionSerializable(searchResponse);
+    }
+
+    /** Asserts the bulk request completed with no item failures. */
+    public static void assertNoFailures(BulkResponse response) {
+        assertThat("Unexpected ShardFailures: " + response.buildFailureMessage(),
+                response.hasFailures(), is(false));
+        assertVersionSerializable(response);
+    }
+
+    /**
+     * Executes the search and asserts it fails with the given status and reason,
+     * whether the failure is partial (failures in a successful response) or total
+     * (a thrown {@link SearchPhaseExecutionException}).
+     */
+    public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher<String> reasonMatcher) {
+        //when the number for shards is randomized and we expect failures
+        //we can either run into partial or total failures depending on the current number of shards
+        try {
+            SearchResponse searchResponse = searchRequestBuilder.get();
+            assertThat("Expected shard failures, got none", searchResponse.getShardFailures().length, greaterThan(0));
+            for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+                assertThat(shardSearchFailure.status(), equalTo(restStatus));
+                assertThat(shardSearchFailure.reason(), reasonMatcher);
+            }
+            assertVersionSerializable(searchResponse);
+        } catch (SearchPhaseExecutionException e) {
+            assertThat(e.status(), equalTo(restStatus));
+            assertThat(e.toString(), reasonMatcher);
+            for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+                assertThat(shardSearchFailure.status(), equalTo(restStatus));
+                assertThat(shardSearchFailure.reason(), reasonMatcher);
+            }
+        } catch (Exception e) {
+            // any other exception type is a test failure
+            fail("SearchPhaseExecutionException expected but got " + e.getClass());
+        }
+    }
+
+    /** Asserts the percolate request produced at least one shard failure. */
+    public static void assertFailures(PercolateResponse percolateResponse) {
+        assertThat("Expected at least one shard failure, got none",
+                percolateResponse.getShardFailures().length, greaterThan(0));
+        assertVersionSerializable(percolateResponse);
+    }
+
+    /** Asserts the broadcast operation completed with zero failed shards. */
+    public static void assertNoFailures(BroadcastResponse response) {
+        assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0));
+        assertVersionSerializable(response);
+    }
+
+    /** Asserts no failures AND that every shard reported success. */
+    public static void assertAllSuccessful(BroadcastResponse response) {
+        assertNoFailures(response);
+        assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+                response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+        // NOTE(review): assertNoFailures already version-serialized the response; harmless repeat
+        assertVersionSerializable(response);
+    }
+
+    /** Asserts no failures AND that every shard reported success. */
+    public static void assertAllSuccessful(SearchResponse response) {
+        assertNoFailures(response);
+        assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+                response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+        // NOTE(review): assertNoFailures already version-serialized the response; harmless repeat
+        assertVersionSerializable(response);
+    }
+
+    /** Asserts the matcher against a single hit and that the hit round-trips across wire versions. */
+    public static void assertSearchHit(SearchHit searchHit, Matcher<SearchHit> matcher) {
+        assertThat(searchHit, matcher);
+        assertVersionSerializable(searchHit);
+    }
+
+    /** Asserts fragment {@code fragment} of {@code field} in hit {@code hit} matches; total fragments must exceed the index. */
+    public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<String> matcher) {
+        assertHighlight(resp, hit, field, fragment, greaterThan(fragment), matcher);
+    }
+
+    /** Same as above but also pins the exact total fragment count. */
+    public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+        assertHighlight(resp, hit, field, fragment, equalTo(totalFragments), matcher);
+    }
+
+    /** Hit-level variant: total fragments must exceed the fragment index. */
+    public static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<String> matcher) {
+        assertHighlight(hit, field, fragment, greaterThan(fragment), matcher);
+    }
+
+    /** Hit-level variant pinning the exact total fragment count. */
+    public static void assertHighlight(SearchHit hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+        assertHighlight(hit, field, fragment, equalTo(totalFragments), matcher);
+    }
+
+    // response-level core: validate shards, hit index, then delegate to the hit-level core
+    private static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+        assertNoFailures(resp);
+        assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+        assertHighlight(resp.getHits().hits()[hit], field, fragment, fragmentsMatcher, matcher);
+        assertVersionSerializable(resp);
+    }
+
+    // hit-level core: field must be highlighted and the chosen fragment must match
+    private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+        assertThat(hit.getHighlightFields(), hasKey(field));
+        assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);
+        assertThat(hit.highlightFields().get(field).fragments()[fragment].string(), matcher);
+    }
+
+    /** Asserts the given field is NOT among the highlight fields of hit {@code hit}. */
+    public static void assertNotHighlighted(SearchResponse resp, int hit, String field) {
+        assertNoFailures(resp);
+        assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+        assertThat(resp.getHits().hits()[hit].getHighlightFields(), not(hasKey(field)));
+    }
+
+    /** Asserts the suggestion under {@code key} has {@code size} options in entry {@code entry}. */
+    public static void assertSuggestionSize(Suggest searchSuggest, int entry, int size, String key) {
+        assertThat(searchSuggest, notNullValue());
+        String msg = "Suggest result: " + searchSuggest.toString();
+        assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+        assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+        assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+        assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size));
+        assertVersionSerializable(searchSuggest);
+    }
+
+    /** Asserts exactly {@code numberOfPhraseExists} options of entry 0 report a collate match. */
+    public static void assertSuggestionPhraseCollateMatchExists(Suggest searchSuggest, String key, int numberOfPhraseExists) {
+        int counter = 0;
+        assertThat(searchSuggest, notNullValue());
+        String msg = "Suggest result: " + searchSuggest.toString();
+        assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+        assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+
+        for (Suggest.Suggestion.Entry.Option option : searchSuggest.getSuggestion(key).getEntries().get(0).getOptions()) {
+            if (option.collateMatch()) {
+                counter++;
+            }
+        }
+
+        assertThat(counter, equalTo(numberOfPhraseExists));
+    }
+
+    /** Asserts option {@code ord} of entry {@code entry} under {@code key} has exactly {@code text}. */
+    public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, String key, String text) {
+        assertThat(searchSuggest, notNullValue());
+        String msg = "Suggest result: " + searchSuggest.toString();
+        assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+        assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+        assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+        assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord));
+        assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text));
+        assertVersionSerializable(searchSuggest);
+    }
+
+    /**
+     * Assert suggestion returns exactly the provided text.
+     */
+    public static void assertSuggestion(Suggest searchSuggest, int entry, String key, String... text) {
+        assertSuggestion(searchSuggest, entry, key, text.length, text);
+    }
+
+    /**
+     * Assert suggestion returns size suggestions and the first are the provided
+     * text.
+     */
+    public static void assertSuggestion(Suggest searchSuggest, int entry, String key, int size, String... text) {
+        assertSuggestionSize(searchSuggest, entry, size, key);
+        for (int i = 0; i < text.length; i++) {
+            assertSuggestion(searchSuggest, entry, i, key, text[i]);
+        }
+    }
+
+    /**
+     * Assert that an index template is missing
+     */
+    public static void assertIndexTemplateMissing(GetIndexTemplatesResponse templatesResponse, String name) {
+        assertThat(templateNames(templatesResponse), not(hasItem(name)));
+    }
+
+    /**
+     * Assert that an index template exists
+     */
+    public static void assertIndexTemplateExists(GetIndexTemplatesResponse templatesResponse, String name) {
+        assertThat(templateNames(templatesResponse), hasItem(name));
+    }
+
+    /** Collects the names of all index templates in the response (dedupes the former copy-pasted loops). */
+    private static List<String> templateNames(GetIndexTemplatesResponse templatesResponse) {
+        List<String> templateNames = new ArrayList<>();
+        for (IndexTemplateMetaData indexTemplateMetaData : templatesResponse.getIndexTemplates()) {
+            templateNames.add(indexTemplateMetaData.name());
+        }
+        return templateNames;
+    }
+
+    /**
+     * Assert that aliases are missing
+     */
+    public static void assertAliasesMissing(AliasesExistResponse aliasesExistResponse) {
+        assertFalse("Aliases shouldn't exist", aliasesExistResponse.exists());
+    }
+
+    /**
+     * Assert that aliases exist
+     */
+    public static void assertAliasesExist(AliasesExistResponse aliasesExistResponse) {
+        assertTrue("Aliases should exist", aliasesExistResponse.exists());
+    }
+
+    /*
+     * matchers
+     */
+    /** Matcher for a hit with the given id. */
+    public static Matcher<SearchHit> hasId(final String id) {
+        return new ElasticsearchMatchers.SearchHitHasIdMatcher(id);
+    }
+
+    /** Matcher for a hit with the given type. */
+    public static Matcher<SearchHit> hasType(final String type) {
+        return new ElasticsearchMatchers.SearchHitHasTypeMatcher(type);
+    }
+
+    /** Matcher for a hit from the given index. */
+    public static Matcher<SearchHit> hasIndex(final String index) {
+        return new ElasticsearchMatchers.SearchHitHasIndexMatcher(index);
+    }
+
+    /** Matcher for a hit with the given score. */
+    public static Matcher<SearchHit> hasScore(final float score) {
+        return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score);
+    }
+
+    /** Matcher for an HTTP response with the given rest status. */
+    public static Matcher<HttpResponse> hasStatus(RestStatus restStatus) {
+        return new ElasticsearchMatchers.HttpResponseHasStatusMatcher(restStatus);
+    }
+
+    /**
+     * Asserts {@code query} is a BooleanQuery whose {@code i}-th clause is of
+     * {@code subqueryType} and returns that sub-query.
+     * NOTE(review): the cast to T is unchecked; safe because instanceOf was just asserted.
+     */
+    public static <T extends Query> T assertBooleanSubQuery(Query query, Class<T> subqueryType, int i) {
+        assertThat(query, instanceOf(BooleanQuery.class));
+        BooleanQuery q = (BooleanQuery) query;
+        assertThat(q.getClauses().length, greaterThan(i));
+        assertThat(q.getClauses()[i].getQuery(), instanceOf(subqueryType));
+        return (T) q.getClauses()[i].getQuery();
+    }
+
+    /**
+     * Run the request from a given builder and check that it throws an exception of the right type
+     */
+    public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass) {
+        assertThrows(builder.execute(), exceptionClass);
+    }
+
+    /**
+     * Run the request from a given builder and check that it throws an exception of the right type, with a given {@link org.elasticsearch.rest.RestStatus}
+     */
+    public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, RestStatus status) {
+        assertThrows(builder.execute(), exceptionClass, status);
+    }
+
+    /**
+     * Run the request from a given builder and check that it throws an exception of the right type
+     *
+     * @param extraInfo extra information to add to the failure message
+     */
+    public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, String extraInfo) {
+        assertThrows(builder.execute(), exceptionClass, extraInfo);
+    }
+
+    /**
+     * Run future.actionGet() and check that it throws an exception of the right type
+     */
+    public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass) {
+        assertThrows(future, exceptionClass, null, null);
+    }
+
+    /**
+     * Run future.actionGet() and check that it throws an exception of the right type, with a given {@link org.elasticsearch.rest.RestStatus}
+     */
+    public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, RestStatus status) {
+        assertThrows(future, exceptionClass, status, null);
+    }
+
+    /**
+     * Run future.actionGet() and check that it throws an exception of the right type
+     *
+     * @param extraInfo extra information to add to the failure message
+     */
+    public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, String extraInfo) {
+        assertThrows(future, exceptionClass, null, extraInfo);
+    }
+
+    /**
+     * Run future.actionGet() and check that it throws an exception of the right type, optionally checking the exception's rest status
+     *
+     * @param exceptionClass expected exception class
+     * @param status {@link org.elasticsearch.rest.RestStatus} to check for. Can be null to disable the check
+     * @param extraInfo extra information to add to the failure message. Can be null.
+     */
+    public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, @Nullable RestStatus status, @Nullable String extraInfo) {
+        // 'fail' is set when actionGet() returns normally; the AssertionError is
+        // thrown outside the try so it is not swallowed by the catch clauses below
+        boolean fail = false;
+        extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+        extraInfo += "expected a " + exceptionClass + " exception to be thrown";
+
+        if (status != null) {
+            extraInfo += " with status [" + status + "]";
+        }
+
+
+        try {
+            future.actionGet();
+            fail = true;
+
+        } catch (ElasticsearchException esException) {
+            // ES exceptions are unwrapped to their root cause before matching
+            assertThat(extraInfo, esException.unwrapCause(), instanceOf(exceptionClass));
+            if (status != null) {
+                assertThat(extraInfo, ExceptionsHelper.status(esException), equalTo(status));
+            }
+        } catch (Throwable e) {
+            assertThat(extraInfo, e, instanceOf(exceptionClass));
+            if (status != null) {
+                assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status));
+            }
+        }
+        // has to be outside catch clause to get a proper message
+        if (fail) {
+            throw new AssertionError(extraInfo);
+        }
+    }
+
+    // NOTE(review): the <E> type parameter on the next three overloads is unused;
+    // kept to avoid changing the visible signatures
+    public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, RestStatus status) {
+        assertThrows(builder.execute(), status);
+    }
+
+    public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, RestStatus status, String extraInfo) {
+        assertThrows(builder.execute(), status, extraInfo);
+    }
+
+    public static <E extends Throwable> void assertThrows(ActionFuture future, RestStatus status) {
+        assertThrows(future, status, null);
+    }
+
+    /** Runs future.actionGet() and asserts the thrown exception maps to {@code status}. */
+    public static void assertThrows(ActionFuture future, RestStatus status, String extraInfo) {
+        boolean fail = false;
+        extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+        extraInfo += "expected a " + status + " status exception to be thrown";
+
+        try {
+            future.actionGet();
+            fail = true;
+        } catch (Throwable e) {
+            assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status));
+        }
+        // has to be outside catch clause to get a proper message
+        if (fail) {
+            throw new AssertionError(extraInfo);
+        }
+    }
+
+    /** Serializes the streamable onto a fresh buffer at the given wire version. */
+    private static BytesReference serialize(Version version, Streamable streamable) throws IOException {
+        BytesStreamOutput output = new BytesStreamOutput();
+        output.setVersion(version);
+        streamable.writeTo(output);
+        output.flush();
+        return output.bytes();
+    }
+
+    /** Checks wire-format round-tripping at a randomly chosen released version. */
+    public static void assertVersionSerializable(Streamable streamable) {
+        // sanity: there must be at least one version before CURRENT to randomize over
+        assertTrue(Version.CURRENT.after(VersionUtils.getPreviousVersion()));
+        assertVersionSerializable(randomVersion(random()), streamable);
+    }
+
+    /**
+     * Checks that the streamable serializes, deserializes into a new instance,
+     * consumes the whole stream, and re-serializes to identical bytes at {@code version}.
+     * Quietly skips streamables with no no-arg constructor.
+     */
+    public static void assertVersionSerializable(Version version, Streamable streamable) {
+        try {
+            Streamable newInstance = tryCreateNewInstance(streamable);
+            if (newInstance == null) {
+                return; // can't create a new instance - we never modify a
+                        // streamable that comes in.
+            }
+            if (streamable instanceof ActionRequest) {
+                ((ActionRequest<?>) streamable).validate();
+            }
+            BytesReference orig = serialize(version, streamable);
+            StreamInput input = StreamInput.wrap(orig);
+            input.setVersion(version);
+            newInstance.readFrom(input);
+            assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), equalTo(0));
+            assertThat("Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable + "]", serialize(version, streamable), equalTo(orig));
+        } catch (Throwable ex) {
+            throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex);
+        }
+
+    }
+
+    /**
+     * Reflectively instantiates a fresh object of the streamable's class via its
+     * no-arg constructor; returns null when that is not possible (deliberate
+     * best-effort — callers treat null as "skip the serialization check").
+     */
+    private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException,
+            IllegalAccessException, InvocationTargetException {
+        try {
+            Class<? extends Streamable> clazz = streamable.getClass();
+            Constructor<? extends Streamable> constructor = clazz.getDeclaredConstructor();
+            assertThat(constructor, Matchers.notNullValue());
+            constructor.setAccessible(true);
+            Streamable newInstance = constructor.newInstance();
+            return newInstance;
+        } catch (Throwable e) {
+            return null;
+        }
+    }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchRequestBuilder request) {
+ return assertSearchResponse(request.get());
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if
+ * any of the shards threw an exception and if the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchResponse response) {
+ assertNoFailures(response);
+ assertThat("One or more shards were not successful but didn't trigger a failure", response.getSuccessfulShards(), equalTo(response.getTotalShards()));
+ return response;
+ }
+
+ /**
+ * Asserts that the node with the given id reports the expected jvm and site
+ * plugins: names, descriptions and versions of each category, plus URL
+ * invariants for jvm-only and site plugins.
+ */
+ public static void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId,
+ List<String> expectedJvmPluginNames,
+ List<String> expectedJvmPluginDescriptions,
+ List<String> expectedJvmVersions,
+ List<String> expectedSitePluginNames,
+ List<String> expectedSitePluginDescriptions,
+ List<String> expectedSiteVersions) {
+
+ Assert.assertThat(response.getNodesMap().get(nodeId), notNullValue());
+
+ PluginsInfo plugins = response.getNodesMap().get(nodeId).getPlugins();
+ Assert.assertThat(plugins, notNullValue());
+
+ List<String> pluginNames = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(nameFunction).toList();
+ for (String expectedJvmPluginName : expectedJvmPluginNames) {
+ Assert.assertThat(pluginNames, hasItem(expectedJvmPluginName));
+ }
+
+ List<String> pluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(descriptionFunction).toList();
+ for (String expectedJvmPluginDescription : expectedJvmPluginDescriptions) {
+ Assert.assertThat(pluginDescriptions, hasItem(expectedJvmPluginDescription));
+ }
+
+ List<String> jvmPluginVersions = FluentIterable.from(plugins.getInfos()).filter(jvmPluginPredicate).transform(versionFunction).toList();
+ for (String pluginVersion : expectedJvmVersions) {
+ Assert.assertThat(jvmPluginVersions, hasItem(pluginVersion));
+ }
+
+ // NOTE(review): the isNull() filter is applied to the PluginInfo itself, not
+ // to its URL — presumably this was meant to verify that jvm-only plugins have
+ // no URL; as written the iterable is always empty. Confirm intent.
+ FluentIterable<String> jvmUrls = FluentIterable.from(plugins.getInfos())
+ .filter(Predicates.and(jvmPluginPredicate, Predicates.not(sitePluginPredicate)))
+ .filter(isNull())
+ .transform(urlFunction);
+ Assert.assertThat(Iterables.size(jvmUrls), is(0));
+
+ List<String> sitePluginNames = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(nameFunction).toList();
+ Assert.assertThat(sitePluginNames.isEmpty(), is(expectedSitePluginNames.isEmpty()));
+ for (String expectedSitePluginName : expectedSitePluginNames) {
+ Assert.assertThat(sitePluginNames, hasItem(expectedSitePluginName));
+ }
+
+ List<String> sitePluginDescriptions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(descriptionFunction).toList();
+ Assert.assertThat(sitePluginDescriptions.isEmpty(), is(expectedSitePluginDescriptions.isEmpty()));
+ for (String sitePluginDescription : expectedSitePluginDescriptions) {
+ Assert.assertThat(sitePluginDescriptions, hasItem(sitePluginDescription));
+ }
+
+ // site plugins must always expose a URL
+ List<String> sitePluginUrls = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(urlFunction).toList();
+ Assert.assertThat(sitePluginUrls, not(contains(nullValue())));
+
+
+ List<String> sitePluginVersions = FluentIterable.from(plugins.getInfos()).filter(sitePluginPredicate).transform(versionFunction).toList();
+ Assert.assertThat(sitePluginVersions.isEmpty(), is(expectedSiteVersions.isEmpty()));
+ for (String pluginVersion : expectedSiteVersions) {
+ Assert.assertThat(sitePluginVersions, hasItem(pluginVersion));
+ }
+ }
+
+ // selects plugins that run inside the jvm
+ private static Predicate<PluginInfo> jvmPluginPredicate = new Predicate<PluginInfo>() {
+ @Override
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isJvm();
+ }
+ };
+
+ // selects site plugins
+ private static Predicate<PluginInfo> sitePluginPredicate = new Predicate<PluginInfo>() {
+ @Override
+ public boolean apply(PluginInfo pluginInfo) {
+ return pluginInfo.isSite();
+ }
+ };
+
+ // extracts the plugin name
+ private static Function<PluginInfo, String> nameFunction = new Function<PluginInfo, String>() {
+ @Override
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getName();
+ }
+ };
+
+ // extracts the plugin description
+ private static Function<PluginInfo, String> descriptionFunction = new Function<PluginInfo, String>() {
+ @Override
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getDescription();
+ }
+ };
+
+ // extracts the plugin URL (may be null for jvm-only plugins)
+ private static Function<PluginInfo, String> urlFunction = new Function<PluginInfo, String>() {
+ @Override
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getUrl();
+ }
+ };
+
+ // extracts the plugin version
+ private static Function<PluginInfo, String> versionFunction = new Function<PluginInfo, String>() {
+ @Override
+ public String apply(PluginInfo pluginInfo) {
+ return pluginInfo.getVersion();
+ }
+ };
+
+ /**
+ * Asserts that the given file or directory exists.
+ */
+ public static void assertFileExists(Path file) {
+ assertThat("file/dir [" + file + "] should exist.", Files.exists(file), is(true));
+ }
+
+ /**
+ * Asserts that the given file or directory does not exist.
+ */
+ public static void assertFileNotExists(Path file) {
+ assertThat("file/dir [" + file + "] should not exist.", Files.exists(file), is(false));
+ }
+
+ /**
+ * Asserts that the given path exists and is a directory.
+ */
+ public static void assertDirectoryExists(Path dir) {
+ assertFileExists(dir);
+ assertThat("file [" + dir + "] should be a directory.", Files.isDirectory(dir), is(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
new file mode 100644
index 0000000000..595e84f41b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java
@@ -0,0 +1,261 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import com.spatial4j.core.shape.Shape;
+import com.spatial4j.core.shape.ShapeCollection;
+import com.spatial4j.core.shape.impl.GeoCircle;
+import com.spatial4j.core.shape.impl.RectangleImpl;
+import com.spatial4j.core.shape.jts.JtsGeometry;
+import com.spatial4j.core.shape.jts.JtsPoint;
+import com.vividsolutions.jts.geom.*;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.geo.GeoDistance;
+import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
+import org.elasticsearch.common.unit.DistanceUnit;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.hamcrest.Matcher;
+import org.junit.Assert;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Assertions for comparing geo shapes and JTS geometries in tests. Closed rings
+ * are compared independently of their rotation and direction.
+ */
+public class ElasticsearchGeoAssertions {
+
+ // Returns the index of the coordinate with the smallest y value (ties broken
+ // towards the smaller x value); used as a canonical start point for rings.
+ private static int top(Coordinate...points) {
+ int top = 0;
+ for (int i = 1; i < points.length; i++) {
+ if(points[i].y < points[top].y) {
+ top = i;
+ } else if(points[i].y == points[top].y) {
+ if(points[i].x <= points[top].x) {
+ top = i;
+ }
+ }
+ }
+ return top;
+ }
+
+ // Index of the closest point before 'top' (wrapping around) that differs from
+ // points[top], or -1 if all points coincide.
+ private static int prev(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int p = (top + points.length - i) % points.length;
+ if((points[p].x != points[top].x) || (points[p].y != points[top].y)) {
+ return p;
+ }
+ }
+ return -1;
+ }
+
+ // Index of the closest point after 'top' (wrapping around) that differs from
+ // points[top], or -1 if all points coincide.
+ private static int next(int top, Coordinate...points) {
+ for (int i = 1; i < points.length; i++) {
+ int n = (top + i) % points.length;
+ if((points[n].x != points[top].x) || (points[n].y != points[top].y)) {
+ return n;
+ }
+ }
+ return -1;
+ }
+
+ private static Coordinate[] fixedOrderedRing(List<Coordinate> coordinates, boolean direction) {
+ return fixedOrderedRing(coordinates.toArray(new Coordinate[coordinates.size()]), direction);
+ }
+
+ // Normalizes a closed ring so that equivalent rings compare equal: the ring is
+ // reversed if its winding does not match 'direction' and rotated so that it
+ // starts at the canonical point chosen by top().
+ private static Coordinate[] fixedOrderedRing(Coordinate[] points, boolean direction) {
+
+ final int top = top(points);
+ final int next = next(top, points);
+ final int prev = prev(top, points);
+ final boolean orientation = points[next].x < points[prev].x;
+
+ if(orientation != direction) {
+ List<Coordinate> asList = Arrays.asList(points);
+ Collections.reverse(asList);
+ return fixedOrderedRing(asList, direction);
+ } else {
+ if(top>0) {
+ // rotate the ring to start at the canonical point; the original
+ // closing coordinate is dropped and the ring is re-closed by
+ // repeating the new first coordinate at the end
+ Coordinate[] aligned = new Coordinate[points.length];
+ System.arraycopy(points, top, aligned, 0, points.length-top-1);
+ System.arraycopy(points, 0, aligned, points.length-top-1, top);
+ aligned[aligned.length-1] = aligned[0];
+ return aligned;
+ } else {
+ return points;
+ }
+ }
+
+ }
+
+ public static void assertEquals(Coordinate c1, Coordinate c2) {
+ assertTrue("expected coordinate " + c1 + " but found " + c2, c1.x == c2.x && c1.y == c2.y);
+ }
+
+ // a coordinate sequence is a ring when its first and last point are equal
+ private static boolean isRing(Coordinate[] c) {
+ return (c[0].x == c[c.length-1].x) && (c[0].y == c[c.length-1].y);
+ }
+
+ public static void assertEquals(Coordinate[] c1, Coordinate[] c2) {
+ Assert.assertEquals(c1.length, c2.length);
+
+ // rings are compared independently of rotation and direction
+ if(isRing(c1) && isRing(c2)) {
+ c1 = fixedOrderedRing(c1, true);
+ c2 = fixedOrderedRing(c2, true);
+ }
+
+ for (int i = 0; i < c2.length; i++) {
+ assertEquals(c1[i], c2[i]);
+ }
+ }
+
+ public static void assertEquals(LineString l1, LineString l2) {
+ assertEquals(l1.getCoordinates(), l2.getCoordinates());
+ }
+
+ public static void assertEquals(MultiLineString l1, MultiLineString l2) {
+ assertEquals(l1.getCoordinates(), l2.getCoordinates());
+ }
+
+ public static void assertEquals(Polygon p1, Polygon p2) {
+ Assert.assertEquals(p1.getNumInteriorRing(), p2.getNumInteriorRing());
+
+ assertEquals(p1.getExteriorRing(), p2.getExteriorRing());
+
+ // TODO: This test does not check all permutations of linestrings, so it
+ // fails if the holes of the polygons are not ordered the same way
+ for (int i = 0; i < p1.getNumInteriorRing(); i++) {
+ assertEquals(p1.getInteriorRingN(i), p2.getInteriorRingN(i));
+ }
+ }
+
+ public static void assertEquals(MultiPolygon p1, MultiPolygon p2) {
+ Assert.assertEquals(p1.getNumGeometries(), p2.getNumGeometries());
+
+ // TODO: This test does not check all permutations, so it fails
+ // if the inner polygons are not ordered the same way in both Multipolygons
+ for (int i = 0; i < p1.getNumGeometries(); i++) {
+ Geometry a = p1.getGeometryN(i);
+ Geometry b = p2.getGeometryN(i);
+ assertEquals(a, b);
+ }
+ }
+
+ // dispatches to the type-specific overloads above
+ public static void assertEquals(Geometry s1, Geometry s2) {
+ if(s1 instanceof LineString && s2 instanceof LineString) {
+ assertEquals((LineString) s1, (LineString) s2);
+
+ } else if (s1 instanceof Polygon && s2 instanceof Polygon) {
+ assertEquals((Polygon) s1, (Polygon) s2);
+
+ } else if (s1 instanceof MultiPoint && s2 instanceof MultiPoint) {
+ Assert.assertEquals(s1, s2);
+
+ } else if (s1 instanceof MultiPolygon && s2 instanceof MultiPolygon) {
+ assertEquals((MultiPolygon) s1, (MultiPolygon) s2);
+
+ } else if (s1 instanceof MultiLineString && s2 instanceof MultiLineString) {
+ assertEquals((MultiLineString) s1, (MultiLineString) s2);
+
+ } else {
+ throw new RuntimeException("equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ public static void assertEquals(JtsGeometry g1, JtsGeometry g2) {
+ assertEquals(g1.getGeom(), g2.getGeom());
+ }
+
+ public static void assertEquals(ShapeCollection s1, ShapeCollection s2) {
+ Assert.assertEquals(s1.size(), s2.size());
+ for (int i = 0; i < s1.size(); i++) {
+ assertEquals(s1.get(i), s2.get(i));
+ }
+ }
+
+ // dispatches spatial4j shapes to the type-specific overloads
+ public static void assertEquals(Shape s1, Shape s2) {
+ if(s1 instanceof JtsGeometry && s2 instanceof JtsGeometry) {
+ assertEquals((JtsGeometry) s1, (JtsGeometry) s2);
+ } else if(s1 instanceof JtsPoint && s2 instanceof JtsPoint) {
+ JtsPoint p1 = (JtsPoint) s1;
+ JtsPoint p2 = (JtsPoint) s2;
+ Assert.assertEquals(p1, p2);
+ } else if (s1 instanceof ShapeCollection && s2 instanceof ShapeCollection) {
+ assertEquals((ShapeCollection)s1, (ShapeCollection)s2);
+ } else if (s1 instanceof GeoCircle && s2 instanceof GeoCircle) {
+ Assert.assertEquals((GeoCircle)s1, (GeoCircle)s2);
+ } else if (s1 instanceof RectangleImpl && s2 instanceof RectangleImpl) {
+ Assert.assertEquals((RectangleImpl)s1, (RectangleImpl)s2);
+ } else {
+ //We want to know the type of the shape because we test shape equality in a special way...
+ //... in particular we test that one ring is equivalent to another ring even if the points are rotated or reversed.
+ throw new RuntimeException(
+ "equality of shape types not supported [" + s1.getClass().getName() + " and " + s2.getClass().getName() + "]");
+ }
+ }
+
+ // asserts that the shape is JTS-backed and returns the underlying geometry
+ private static Geometry unwrap(Shape shape) {
+ assertThat(shape, instanceOf(JtsGeometry.class));
+ return ((JtsGeometry)shape).getGeom();
+ }
+
+ public static void assertMultiPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof MultiPolygon): "expected MultiPolygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertPolygon(Shape shape) {
+ assert(unwrap(shape) instanceof Polygon): "expected Polygon but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertLineString(Shape shape) {
+ assert(unwrap(shape) instanceof LineString): "expected LineString but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertMultiLineString(Shape shape) {
+ assert(unwrap(shape) instanceof MultiLineString): "expected MultiLineString but found " + unwrap(shape).getClass().getName();
+ }
+
+ public static void assertDistance(String geohash1, String geohash2, Matcher<Double> match) {
+ GeoPoint p1 = new GeoPoint(geohash1);
+ GeoPoint p2 = new GeoPoint(geohash2);
+ assertDistance(p1.lat(), p1.lon(), p2.lat(),p2.lon(), match);
+ }
+
+ public static void assertDistance(double lat1, double lon1, double lat2, double lon2, Matcher<Double> match) {
+ assertThat(distance(lat1, lon1, lat2, lon2), match);
+ }
+
+ // arc (great-circle) distance in the default distance unit
+ private static double distance(double lat1, double lon1, double lat2, double lon2) {
+ return GeoDistance.ARC.calculate(lat1, lon1, lat2, lon2, DistanceUnit.DEFAULT);
+ }
+
+ // parses and builds the shape, asserting that exactly expectedException is thrown
+ public static void assertValidException(XContentParser parser, Class expectedException) {
+ try {
+ ShapeBuilder.parse(parser).build();
+ Assert.fail("process completed successfully when " + expectedException.getName() + " expected");
+ } catch (Exception e) {
+ assert(e.getClass().equals(expectedException)):
+ "expected " + expectedException.getName() + " but found " + e.getClass().getName();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
new file mode 100644
index 0000000000..1853d291c6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Hamcrest {@link TypeSafeMatcher} implementations for {@link SearchHit}
+ * properties and HTTP response status codes.
+ */
+public class ElasticsearchMatchers {
+
+ // matches a SearchHit by its id
+ public static class SearchHitHasIdMatcher extends TypeSafeMatcher<SearchHit> {
+ private String id;
+
+ public SearchHitHasIdMatcher(String id) {
+ this.id = id;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getId().equals(id);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getId());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit id should be ").appendValue(id);
+ }
+ }
+
+ // matches a SearchHit by its type
+ public static class SearchHitHasTypeMatcher extends TypeSafeMatcher<SearchHit> {
+ private String type;
+
+ public SearchHitHasTypeMatcher(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getType().equals(type);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getType());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit type should be ").appendValue(type);
+ }
+ }
+
+ // matches a SearchHit by the index it belongs to
+ public static class SearchHitHasIndexMatcher extends TypeSafeMatcher<SearchHit> {
+ private String index;
+
+ public SearchHitHasIndexMatcher(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getIndex().equals(index);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getIndex());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit index should be ").appendValue(index);
+ }
+ }
+
+ // matches a SearchHit by exact score (float equality)
+ public static class SearchHitHasScoreMatcher extends TypeSafeMatcher<SearchHit> {
+ private float score;
+
+ public SearchHitHasScoreMatcher(float score) {
+ this.score = score;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getScore() == score;
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getScore());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit score should be ").appendValue(score);
+ }
+ }
+
+ // matches an HttpResponse by its status code against a RestStatus
+ public static class HttpResponseHasStatusMatcher extends TypeSafeMatcher<HttpResponse> {
+
+ private RestStatus restStatus;
+
+ public HttpResponseHasStatusMatcher(RestStatus restStatus) {
+ this.restStatus = restStatus;
+ }
+
+ @Override
+ protected boolean matchesSafely(HttpResponse response) {
+ return response.getStatusCode() == restStatus.getStatus();
+ }
+
+ @Override
+ public void describeMismatchSafely(final HttpResponse response, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(response.getStatusCode());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("HTTP response status code should be ").appendValue(restStatus.getStatus());
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/core/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
new file mode 100644
index 0000000000..62c35e551c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.util.regex.Pattern;
+
+/**
+ * Matcher that supports regular expressions and allows providing optional
+ * {@link java.util.regex.Pattern} flags. The matcher succeeds when the pattern
+ * is found anywhere in the examined string (partial match).
+ */
+public class RegexMatcher extends TypeSafeMatcher<String> {
+
+ private final String regex;
+ private final Pattern pattern;
+
+ public RegexMatcher(String regex) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex);
+ }
+
+ public RegexMatcher(String regex, int flag) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex, flag);
+ }
+
+ @Override
+ protected boolean matchesSafely(String item) {
+ // find() performs a substring match, not a full-string match
+ return pattern.matcher(item).find();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(regex);
+ }
+
+ // factory method for fluent use in assertThat(...)
+ public static RegexMatcher matches(String regex) {
+ return new RegexMatcher(regex);
+ }
+
+ // factory method accepting Pattern flags, e.g. Pattern.CASE_INSENSITIVE
+ public static RegexMatcher matches(String regex, int flag) {
+ return new RegexMatcher(regex, flag);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java b/core/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java
new file mode 100644
index 0000000000..4d74b04961
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.index.merge;
+
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.NoMergePolicy;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.index.merge.policy.AbstractMergePolicyProvider;
+import org.elasticsearch.index.store.Store;
+
+/**
+ * {@link org.elasticsearch.index.merge.policy.MergePolicyProvider} for lucenes {@link org.apache.lucene.index.NoMergePolicy}
+ */
+public class NoMergePolicyProvider extends AbstractMergePolicyProvider<MergePolicy> {
+
+ @Inject
+ public NoMergePolicyProvider(Store store) {
+ super(store);
+ }
+
+ @Override
+ public MergePolicy getMergePolicy() {
+ // always returns the lucene singleton that performs no merging
+ return NoMergePolicy.INSTANCE;
+ }
+
+ @Override
+ public void close() {} // nothing to release
+}
+
diff --git a/core/src/test/java/org/elasticsearch/test/junit/annotations/Network.java b/core/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
new file mode 100644
index 0000000000..d2615eabca
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/junit/annotations/Network.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+
+import java.lang.annotation.*;
+
+/**
+ * Annotation used to set if internet network connectivity is required to run the test.
+ * By default, tests annotated with @Network won't be executed.
+ * Set -Dtests.network=true when running test to launch network tests
+ */
+// Disabled by default; the test group is enabled via -Dtests.network=true
+@Retention(RetentionPolicy.RUNTIME)
+@Inherited
+@TestGroup(enabled = false, sysProperty = "tests.network")
+public @interface Network {
+}
diff --git a/core/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/core/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
new file mode 100644
index 0000000000..e09cc7534e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.ElementType.PACKAGE;
+import static java.lang.annotation.ElementType.TYPE;
+
+/**
+ * Annotation used to set a custom log level for a specific test method.
+ *
+ * It supports multiple logger:level comma separated key value pairs
+ * Use the _root keyword to set the root logger level
+ * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE")
+ * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the test with -Des.logger.prefix=
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({PACKAGE, TYPE, METHOD})
+public @interface TestLogging {
+ // comma separated "logger:level" pairs, e.g. "_root:DEBUG,cluster.metadata:TRACE"
+ String value();
+}
diff --git a/core/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/core/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
new file mode 100644
index 0000000000..8237095b49
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.runner.Description;
+import org.junit.runner.Result;
+import org.junit.runner.notification.RunListener;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link RunListener} that allows to change the log level for a specific test method.
+ * When a test method is annotated with the {@link org.elasticsearch.test.junit.annotations.TestLogging} annotation, the level for the specified loggers
+ * will be internally saved before the test method execution and overridden with the specified ones.
+ * At the end of the test method execution the original loggers levels will be restored.
+ *
+ * Note: This class is not thread-safe. Given the static nature of the logging api, it assumes that tests
+ * are never run concurrently in the same jvm. For the very same reason no synchronization has been implemented
+ * regarding the save/restore process of the original loggers levels.
+ */
+public class LoggingListener extends RunListener {
+
+ // previous levels saved per test method, restored in testFinished
+ private Map<String, String> previousLoggingMap;
+ // previous levels saved for the class-level annotation, restored in testRunFinished
+ private Map<String, String> previousClassLoggingMap;
+ // previous levels saved for the package-level annotation, restored in testRunFinished
+ private Map<String, String> previousPackageLoggingMap;
+
+ @Override
+ public void testRunStarted(Description description) throws Exception {
+ previousPackageLoggingMap = processTestLogging(description.getTestClass().getPackage().getAnnotation(TestLogging.class));
+ previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class));
+ }
+
+ @Override
+ public void testRunFinished(Result result) throws Exception {
+ previousClassLoggingMap = reset(previousClassLoggingMap);
+ previousPackageLoggingMap = reset(previousPackageLoggingMap);
+ }
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ final TestLogging testLogging = description.getAnnotation(TestLogging.class);
+ previousLoggingMap = processTestLogging(testLogging);
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ previousLoggingMap = reset(previousLoggingMap);
+ }
+
+ // "_root" (case-insensitive) addresses the root logger; anything else is a named logger
+ private static ESLogger resolveLogger(String loggerName) {
+ if (loggerName.equalsIgnoreCase("_root")) {
+ return ESLoggerFactory.getRootLogger();
+ }
+ return Loggers.getLogger(loggerName);
+ }
+
+ // Applies the levels from the annotation and returns a map of the previous
+ // levels so they can be restored later; null when the annotation is absent.
+ private Map<String, String> processTestLogging(TestLogging testLogging) {
+ Map<String, String> map = getLoggersAndLevelsFromAnnotation(testLogging);
+ if (map == null) {
+ return null;
+ }
+ Map<String, String> previousValues = new HashMap<>();
+ for (Map.Entry<String, String> entry : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(entry.getKey());
+ previousValues.put(entry.getKey(), esLogger.getLevel());
+ esLogger.setLevel(entry.getValue());
+ }
+ return previousValues;
+ }
+
+ // Parses the annotation value into a logger-name -> level map; entries without
+ // a ':' separator are silently ignored.
+ public static Map<String, String> getLoggersAndLevelsFromAnnotation(TestLogging testLogging) {
+ if (testLogging == null) {
+ return null;
+ }
+ Map<String, String> map = new HashMap<>();
+ final String[] loggersAndLevels = testLogging.value().split(",");
+ for (String loggerAndLevel : loggersAndLevels) {
+ String[] loggerAndLevelArray = loggerAndLevel.split(":");
+ if (loggerAndLevelArray.length >=2) {
+ String loggerName = loggerAndLevelArray[0];
+ String level = loggerAndLevelArray[1];
+ map.put(loggerName, level);
+ }
+ }
+ return map;
+ }
+
+ // Restores the saved levels; always returns null so callers can clear their field.
+ private Map<String, String> reset(Map<String, String> map) {
+ if (map != null) {
+ for (Map.Entry<String, String> previousLogger : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(previousLogger.getKey());
+ esLogger.setLevel(previousLogger.getValue());
+ }
+ }
+ return null;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
new file mode 100644
index 0000000000..e57732bf5f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.junit.internal.AssumptionViolatedException;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+import java.util.Locale;
+import java.util.TimeZone;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_PREFIX;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTMETHOD;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.TESTS_CLUSTER;
+import static org.elasticsearch.test.rest.ElasticsearchRestTestCase.REST_TESTS_BLACKLIST;
+import static org.elasticsearch.test.rest.ElasticsearchRestTestCase.REST_TESTS_SPEC;
+import static org.elasticsearch.test.rest.ElasticsearchRestTestCase.REST_TESTS_SUITE;
+import static org.elasticsearch.test.rest.ElasticsearchRestTestCase.Rest;
+
+/**
+ * A {@link RunListener} that emits to {@link System#err} a string with command
+ * line parameters allowing quick test re-run under MVN command line.
+ */
+public class ReproduceInfoPrinter extends RunListener {
+
+    protected final ESLogger logger = Loggers.getLogger(ElasticsearchTestCase.class);
+
+    @Override
+    public void testStarted(Description description) throws Exception {
+        logger.trace("Test {} started", description.getDisplayName());
+    }
+
+    @Override
+    public void testFinished(Description description) throws Exception {
+        logger.trace("Test {} finished", description.getDisplayName());
+    }
+
+    // Prints the "REPRODUCE WITH: mvn ..." line to stderr for every real failure.
+    @Override
+    public void testFailure(Failure failure) throws Exception {
+        // Ignore assumptions.
+        if (failure.getException() instanceof AssumptionViolatedException) {
+            return;
+        }
+
+        final StringBuilder b = new StringBuilder();
+        b.append("REPRODUCE WITH: mvn test -Pdev");
+        MavenMessageBuilder mavenMessageBuilder = new MavenMessageBuilder(b);
+        mavenMessageBuilder.appendAllOpts(failure.getDescription());
+
+        //Rest tests are a special case as they allow for additional parameters
+        if (failure.getDescription().getTestClass().isAnnotationPresent(Rest.class)) {
+            mavenMessageBuilder.appendRestTestsProperties();
+        }
+
+        System.err.println(b.toString());
+    }
+
+    // Trace formatting from the current randomized context, or the defaults when
+    // there is no context (RandomizedContext.current() throws IllegalStateException).
+    protected TraceFormatting traces() {
+        TraceFormatting traces = new TraceFormatting();
+        try {
+            traces = RandomizedContext.current().getRunner().getTraceFormatting();
+        } catch (IllegalStateException e) {
+            // Ignore if no context.
+        }
+        return traces;
+    }
+
+    protected static class MavenMessageBuilder extends ReproduceErrorMessageBuilder {
+
+        public MavenMessageBuilder(StringBuilder b) {
+            super(b);
+        }
+
+        @Override
+        public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+            super.appendAllOpts(description);
+
+            if (description.getMethodName() != null) {
+                //prints out the raw method description instead of methodName(description) which filters out the parameters
+                super.appendOpt(SYSPROP_TESTMETHOD(), "\"" + description.getMethodName() + "\"");
+            }
+
+            return appendESProperties();
+        }
+
+        @Override
+        public ReproduceErrorMessageBuilder appendEnvironmentSettings() {
+            // we handle our own environment settings
+            return this;
+        }
+
+        /**
+         * Append a single VM option.
+         */
+        @Override
+        public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) {
+            if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there!
+                return this;
+            }
+            if (sysPropName.equals(SYSPROP_TESTMETHOD())) {
+                //don't print out the test method, we print it ourselves in appendAllOpts
+                //without filtering out the parameters (needed for REST tests)
+                return this;
+            }
+            if (sysPropName.equals(SYSPROP_PREFIX())) {
+                // we always use the default prefix
+                return this;
+            }
+            if (Strings.hasLength(value)) {
+                return super.appendOpt(sysPropName, value);
+            }
+            return this;
+        }
+
+        // Appends the ES-specific system properties (cluster, logging, bwc, locale,
+        // timezone, ...) needed to reproduce a run faithfully.
+        public ReproduceErrorMessageBuilder appendESProperties() {
+            appendProperties("es.logger.level", "es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES,
+                    "tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size",
+                    "tests.bwc", "tests.bwc.version");
+            if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
+                appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
+            }
+            appendOpt("tests.locale", Locale.getDefault().toString());
+            appendOpt("tests.timezone", TimeZone.getDefault().getID());
+            return this;
+        }
+
+        public ReproduceErrorMessageBuilder appendRestTestsProperties() {
+            return appendProperties(REST_TESTS_SUITE, REST_TESTS_SPEC, REST_TESTS_BLACKLIST);
+        }
+
+        // Appends every listed system property that is currently set and non-empty.
+        protected ReproduceErrorMessageBuilder appendProperties(String... properties) {
+            for (String sysPropName : properties) {
+                if (Strings.hasLength(System.getProperty(sysPropName))) {
+                    appendOpt(sysPropName, System.getProperty(sysPropName));
+                }
+            }
+            return this;
+        }
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java b/core/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
new file mode 100644
index 0000000000..7ded36f380
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.rule;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+/**
+ * A helper rule to catch all BindTransportExceptions
+ * and rerun the test for a configured number of times
+ *
+ * Note: Be aware, that when a test is repeated, the @After and @Before
+ * annotated methods are not run a second time
+ *
+ */
+public class RepeatOnExceptionRule implements TestRule {
+
+    private final ESLogger logger;
+    private final int retryCount;
+    private final Class expectedException;
+
+    /**
+     *
+     * @param logger the es logger from the test class
+     * @param retryCount number of amounts to try a single test before failing
+     * @param expectedException The exception class you want to catch
+     *
+     */
+    public RepeatOnExceptionRule(ESLogger logger, int retryCount, Class expectedException) {
+        this.logger = logger;
+        this.retryCount = retryCount;
+        this.expectedException = expectedException;
+    }
+
+    @Override
+    public Statement apply(final Statement base, Description description) {
+
+        return new Statement() {
+            @Override
+            public void evaluate() throws Throwable {
+                Throwable caughtThrowable = null;
+
+                for (int i = 0; i < retryCount; i++) {
+                    try {
+                        base.evaluate();
+                        return;
+                    } catch (Throwable t) {
+                        if (t.getClass().equals(expectedException)) {
+                            caughtThrowable = t;
+                            // two placeholders, two arguments: the original passed a third,
+                            // spurious argument that could never be rendered
+                            logger.info("Exception [{}] occurred, rerunning the test after [{}] failures", t.getClass().getSimpleName(), i + 1);
+                        } else {
+                            // unexpected failure: do not retry, propagate immediately
+                            throw t;
+                        }
+                    }
+                }
+                logger.error("Giving up after [{}] failures... marking test as failed", retryCount);
+                throw caughtThrowable;
+            }
+        };
+
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java
new file mode 100644
index 0000000000..5874b31f09
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import com.google.common.collect.Lists;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase.SuppressFsync;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.PathMatcher;
+import java.nio.file.StandardCopyOption;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Runs the clients test suite against an elasticsearch cluster.
+ */
+@ElasticsearchRestTestCase.Rest
+@Slow
+@SuppressFsync // we aren't trying to test this here, and it can make the test slow
+@SuppressCodecs("*") // requires custom completion postings format
+@ClusterScope(randomDynamicTemplates = false)
+@TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // timeout the suite after 40min and fail the test.
+public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegrationTest {
+
+    /**
+     * Property that allows to control whether the REST tests are run (default) or not
+     */
+    public static final String TESTS_REST = "tests.rest";
+
+    /**
+     * Annotation for REST tests
+     */
+    @Inherited
+    @Retention(RetentionPolicy.RUNTIME)
+    @Target(ElementType.TYPE)
+    @TestGroup(enabled = true, sysProperty = ElasticsearchRestTestCase.TESTS_REST)
+    public @interface Rest {
+    }
+
+    /**
+     * Property that allows to control which REST tests get run. Supports comma separated list of tests
+     * or directories that contain tests e.g. -Dtests.rest.suite=index,get,create/10_with_id
+     */
+    public static final String REST_TESTS_SUITE = "tests.rest.suite";
+    /**
+     * Property that allows to blacklist some of the REST tests based on a comma separated list of globs
+     * e.g. -Dtests.rest.blacklist=get/10_basic/*
+     */
+    public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist";
+    /**
+     * Property that allows to control whether spec validation is enabled or not (default true).
+     */
+    public static final String REST_TESTS_VALIDATE_SPEC = "tests.rest.validate_spec";
+    /**
+     * Property that allows to control where the REST spec files need to be loaded from
+     */
+    public static final String REST_TESTS_SPEC = "tests.rest.spec";
+
+    private static final String DEFAULT_TESTS_PATH = "/rest-api-spec/test";
+    private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api";
+
+    private static final String PATHS_SEPARATOR = ",";
+
+    // compiled glob matchers built from REST_TESTS_BLACKLIST; empty when no blacklist is set
+    private final PathMatcher[] blacklistPathMatchers;
+    // shared across all tests of the suite; initialized in initExecutionContext()
+    private static RestTestExecutionContext restTestExecutionContext;
+
+    // the single yaml test section this instance runs
+    private final RestTestCandidate testCandidate;
+
+    public ElasticsearchRestTestCase(RestTestCandidate testCandidate) {
+        this.testCandidate = testCandidate;
+        String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null);
+        if (blacklist != null) {
+            blacklistPathMatchers = new PathMatcher[blacklist.length];
+            int i = 0;
+            for (String glob : blacklist) {
+                blacklistPathMatchers[i++] = PathUtils.getDefaultFileSystem().getPathMatcher("glob:" + glob);
+            }
+        } else {
+            blacklistPathMatchers = new PathMatcher[0];
+        }
+    }
+
+    @Override
+    protected void afterIfFailed(List<Throwable> errors) {
+        logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash()));
+        super.afterIfFailed(errors);
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+                .put(Node.HTTP_ENABLED, true)
+                .put(super.nodeSettings(nodeOrdinal)).build();
+    }
+
+    // Builds the constructor parameters (one RestTestCandidate each) for subset `id`
+    // out of `count` subsets. Returns an empty list when the REST test group is disabled.
+    public static Iterable<Object[]> createParameters(int id, int count) throws IOException, RestTestParseException {
+        TestGroup testGroup = Rest.class.getAnnotation(TestGroup.class);
+        String sysProperty = TestGroup.Utilities.getSysProperty(Rest.class);
+        boolean enabled;
+        try {
+            enabled = RandomizedTest.systemPropertyAsBoolean(sysProperty, testGroup.enabled());
+        } catch (IllegalArgumentException e) {
+            // Ignore malformed system property, disable the group if malformed though.
+            enabled = false;
+        }
+        if (!enabled) {
+            return Lists.newArrayList();
+        }
+        //parse tests only if rest test group is enabled, otherwise rest tests might not even be available on file system
+        List<RestTestCandidate> restTestCandidates = collectTestCandidates(id, count);
+        List<Object[]> objects = Lists.newArrayList();
+        for (RestTestCandidate restTestCandidate : restTestCandidates) {
+            objects.add(new Object[]{restTestCandidate});
+        }
+        return objects;
+    }
+
+    private static List<RestTestCandidate> collectTestCandidates(int id, int count) throws RestTestParseException, IOException {
+        List<RestTestCandidate> testCandidates = Lists.newArrayList();
+        FileSystem fileSystem = getFileSystem();
+        // don't make a try-with, getFileSystem returns null
+        // ... and you can't close() the default filesystem
+        try {
+            String[] paths = resolvePathsProperty(REST_TESTS_SUITE, DEFAULT_TESTS_PATH);
+            Map<String, Set<Path>> yamlSuites = FileUtils.findYamlSuites(fileSystem, DEFAULT_TESTS_PATH, paths);
+            RestTestSuiteParser restTestSuiteParser = new RestTestSuiteParser();
+            //yaml suites are grouped by directory (effectively by api)
+            for (String api : yamlSuites.keySet()) {
+                List<Path> yamlFiles = Lists.newArrayList(yamlSuites.get(api));
+                for (Path yamlFile : yamlFiles) {
+                    String key = api + yamlFile.getFileName().toString();
+                    // only parse the suites that belong to this shard
+                    if (mustExecute(key, id, count)) {
+                        RestTestSuite restTestSuite = restTestSuiteParser.parse(api, yamlFile);
+                        for (TestSection testSection : restTestSuite.getTestSections()) {
+                            testCandidates.add(new RestTestCandidate(restTestSuite, testSection));
+                        }
+                    }
+                }
+            }
+        } finally {
+            IOUtils.close(fileSystem);
+        }
+
+        //sort the candidates so they will always be in the same order before being shuffled, for repeatability
+        Collections.sort(testCandidates, new Comparator<RestTestCandidate>() {
+            @Override
+            public int compare(RestTestCandidate o1, RestTestCandidate o2) {
+                return o1.getTestPath().compareTo(o2.getTestPath());
+            }
+        });
+
+        return testCandidates;
+    }
+
+    // deterministic sharding: a suite belongs to subset `id` iff hash(test) % count == id;
+    // the cast to long makes Math.abs safe for Integer.MIN_VALUE hash codes
+    private static boolean mustExecute(String test, int id, int count) {
+        int hash = (int) (Math.abs((long)test.hashCode()) % count);
+        return hash == id;
+    }
+
+    // splits a comma separated path property; returns {defaultValue} (or null) when the property is unset/empty
+    private static String[] resolvePathsProperty(String propertyName, String defaultValue) {
+        String property = System.getProperty(propertyName);
+        if (!Strings.hasLength(property)) {
+            return defaultValue == null ? null : new String[]{defaultValue};
+        } else {
+            return property.split(PATHS_SEPARATOR);
+        }
+    }
+
+    /**
+     * Returns a new FileSystem to read REST resources, or null if they
+     * are available from classpath.
+     */
+    @SuppressForbidden(reason = "proper use of URL, hack around a JDK bug")
+    static FileSystem getFileSystem() throws IOException {
+        // REST suite handling is currently complicated, with lots of filtering and so on
+        // For now, to work embedded in a jar, return a ZipFileSystem over the jar contents.
+        URL codeLocation = FileUtils.class.getProtectionDomain().getCodeSource().getLocation();
+
+        if (codeLocation.getFile().endsWith(".jar")) {
+            try {
+                // hack around a bug in the zipfilesystem implementation before java 9,
+                // its checkWritable was incorrect and it won't work without write permissions.
+                // if we add the permission, it will open jars r/w, which is too scary! so copy to a safe r-w location.
+                Path tmp = Files.createTempFile(null, ".jar");
+                try (InputStream in = codeLocation.openStream()) {
+                    Files.copy(in, tmp, StandardCopyOption.REPLACE_EXISTING);
+                }
+                return FileSystems.newFileSystem(new URI("jar:" + tmp.toUri()), Collections.<String,Object>emptyMap());
+            } catch (URISyntaxException e) {
+                throw new IOException("couldn't open zipfilesystem: ", e);
+            }
+        } else {
+            return null;
+        }
+    }
+
+    @BeforeClass
+    public static void initExecutionContext() throws IOException, RestException {
+        String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
+        RestSpec restSpec = null;
+        FileSystem fileSystem = getFileSystem();
+        // don't make a try-with, getFileSystem returns null
+        // ... and you can't close() the default filesystem
+        try {
+            restSpec = RestSpec.parseFrom(fileSystem, DEFAULT_SPEC_PATH, specPaths);
+        } finally {
+            IOUtils.close(fileSystem);
+        }
+        validateSpec(restSpec);
+        restTestExecutionContext = new RestTestExecutionContext(restSpec);
+    }
+
+    // fails fast when an api in the spec supports GET with a body but not POST
+    private static void validateSpec(RestSpec restSpec) {
+        boolean validateSpec = RandomizedTest.systemPropertyAsBoolean(REST_TESTS_VALIDATE_SPEC, true);
+        if (validateSpec) {
+            StringBuilder errorMessage = new StringBuilder();
+            for (RestApi restApi : restSpec.getApis()) {
+                if (restApi.getMethods().contains("GET") && restApi.isBodySupported()) {
+                    if (!restApi.getMethods().contains("POST")) {
+                        errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST");
+                    }
+                }
+            }
+            if (errorMessage.length() > 0) {
+                throw new IllegalArgumentException(errorMessage.toString());
+            }
+        }
+    }
+
+    @AfterClass
+    public static void close() {
+        if (restTestExecutionContext != null) {
+            restTestExecutionContext.close();
+            restTestExecutionContext = null;
+        }
+    }
+
+    @Override
+    protected int maximumNumberOfShards() {
+        return 3; // never go crazy in the REST tests
+    }
+
+    @Override
+    protected int maximumNumberOfReplicas() {
+        // hardcoded 1 since this is what clients also do and our tests must expect that we have only node
+        // with replicas set to 1 ie. the cluster won't be green
+        return 1;
+
+    }
+
+    /**
+     * Used to obtain settings for the REST client that is used to send REST requests.
+     */
+    protected Settings restClientSettings() {
+        return Settings.EMPTY;
+    }
+
+    @Before
+    public void reset() throws IOException, RestException {
+        //skip test if it matches one of the blacklist globs
+        for (PathMatcher blacklistedPathMatcher : blacklistPathMatchers) {
+            //we need to replace a few characters otherwise the test section name can't be parsed as a path on windows
+            String testSection = testCandidate.getTestSection().getName().replace("*", "").replace("\\", "/").replaceAll("\\s+/", "/").replace(":", "").trim();
+            String testPath = testCandidate.getSuitePath() + "/" + testSection;
+            assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.matches(PathUtils.get(testPath)));
+        }
+        //The client needs non static info to get initialized, therefore it can't be initialized in the before class
+        restTestExecutionContext.initClient(cluster().httpAddresses(), restClientSettings());
+        restTestExecutionContext.clear();
+
+        //skip test if the whole suite (yaml file) is disabled
+        assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()),
+                testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
+        //skip test if test section is disabled
+        assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()),
+                testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
+    }
+
+    // human readable assume-message explaining why a suite/section is skipped
+    private static String buildSkipMessage(String description, SkipSection skipSection) {
+        StringBuilder messageBuilder = new StringBuilder();
+        if (skipSection.isVersionCheck()) {
+            messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] ");
+        } else {
+            messageBuilder.append("[").append(description).append("] skipped, reason: features ").append(skipSection.getFeatures()).append(" not supported");
+        }
+        return messageBuilder.toString();
+    }
+
+    @Test
+    public void test() throws IOException {
+        //let's check that there is something to run, otherwise there might be a problem with the test section
+        if (testCandidate.getTestSection().getExecutableSections().size() == 0) {
+            throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]");
+        }
+
+        if (!testCandidate.getSetupSection().isEmpty()) {
+            logger.info("start setup test [{}]", testCandidate.getTestPath());
+            for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
+                doSection.execute(restTestExecutionContext);
+            }
+            logger.info("end setup test [{}]", testCandidate.getTestPath());
+        }
+
+        restTestExecutionContext.clear();
+
+        for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) {
+            executableSection.execute(restTestExecutionContext);
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/FakeRestRequest.java b/core/src/test/java/org/elasticsearch/test/rest/FakeRestRequest.java
new file mode 100644
index 0000000000..a24869b40b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/FakeRestRequest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.rest.RestRequest;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class FakeRestRequest extends RestRequest {
+
+    // header name -> value, served verbatim by header()/headers()
+    private final Map<String, String> headers;
+
+    // request parameters; starts empty, mutable through the params() view
+    private final Map<String, String> params;
+
+    /** A request with no headers, no context entries and no params. */
+    public FakeRestRequest() {
+        this(new HashMap<String, String>(), new HashMap<String, String>());
+    }
+
+    /**
+     * @param headers header name to value map served by this fake request
+     * @param context key/value pairs copied into the request context via putInContext
+     */
+    public FakeRestRequest(Map<String, String> headers, Map<String, String> context) {
+        this.headers = headers;
+        for (Map.Entry<String, String> entry : context.entrySet()) {
+            putInContext(entry.getKey(), entry.getValue());
+        }
+        this.params = new HashMap<>();
+    }
+
+    // this fake always models a bodyless GET on "/"
+    @Override
+    public Method method() {
+        return Method.GET;
+    }
+
+    @Override
+    public String uri() {
+        return "/";
+    }
+
+    @Override
+    public String rawPath() {
+        return "/";
+    }
+
+    @Override
+    public boolean hasContent() {
+        return false;
+    }
+
+    @Override
+    public BytesReference content() {
+        return null;
+    }
+
+    @Override
+    public String header(String name) {
+        return headers.get(name);
+    }
+
+    @Override
+    public Iterable<Map.Entry<String, String>> headers() {
+        return headers.entrySet();
+    }
+
+    @Override
+    public boolean hasParam(String key) {
+        return params.containsKey(key);
+    }
+
+    @Override
+    public String param(String key) {
+        return params.get(key);
+    }
+
+    @Override
+    public String param(String key, String defaultValue) {
+        String value = params.get(key);
+        if (value == null) {
+            return defaultValue;
+        }
+        return value;
+    }
+
+    // live view: mutations here are visible to hasParam()/param()
+    @Override
+    public Map<String, String> params() {
+        return params;
+    }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java
new file mode 100644
index 0000000000..f86836876c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 0 */
+public class Rest0Tests extends ElasticsearchRestTestCase {
+    /** Each instance runs one yaml test candidate from shard 0. */
+    public Rest0Tests(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+    /** Supplies shard 0 of the 8 REST test candidate shards. */
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return createParameters(0, 8);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java
new file mode 100644
index 0000000000..d75444fe00
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 1 */
+public class Rest1Tests extends ElasticsearchRestTestCase {
+    /** Each instance runs one yaml test candidate from shard 1. */
+    public Rest1Tests(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+    /** Supplies shard 1 of the 8 REST test candidate shards. */
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return createParameters(1, 8);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java
new file mode 100644
index 0000000000..1d01ecc58e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 2 */
+public class Rest2Tests extends ElasticsearchRestTestCase {
+    /** Each instance runs one yaml test candidate from shard 2. */
+    public Rest2Tests(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+    /** Supplies shard 2 of the 8 REST test candidate shards. */
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return createParameters(2, 8);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java
new file mode 100644
index 0000000000..044e182e7a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 3 */
+public class Rest3Tests extends ElasticsearchRestTestCase {
+ public Rest3Tests(@Name("yaml") RestTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+ return createParameters(3, 8);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java
new file mode 100644
index 0000000000..75213143b9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 4 */
+public class Rest4Tests extends ElasticsearchRestTestCase {
+ public Rest4Tests(@Name("yaml") RestTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+ return createParameters(4, 8);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java
new file mode 100644
index 0000000000..a2c1af46dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 5 */
+public class Rest5Tests extends ElasticsearchRestTestCase {
+ public Rest5Tests(@Name("yaml") RestTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+ return createParameters(5, 8);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java
new file mode 100644
index 0000000000..bb7ccd1003
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 6 */
+public class Rest6Tests extends ElasticsearchRestTestCase {
+ public Rest6Tests(@Name("yaml") RestTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+ return createParameters(6, 8);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java b/core/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java
new file mode 100644
index 0000000000..aba7c03136
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+/** Rest API tests subset 7 */
+public class Rest7Tests extends ElasticsearchRestTestCase {
+ public Rest7Tests(@Name("yaml") RestTestCandidate testCandidate) {
+ super(testCandidate);
+ }
+ @ParametersFactory
+ public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+ return createParameters(7, 8);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/RestTestCandidate.java b/core/src/test/java/org/elasticsearch/test/rest/RestTestCandidate.java
new file mode 100644
index 0000000000..e454c396a3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/RestTestCandidate.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.elasticsearch.test.rest.section.TestSection;
+
+/**
+ * Wraps {@link org.elasticsearch.test.rest.section.TestSection}s ready to be run.
+ * Each test section is associated to its {@link org.elasticsearch.test.rest.section.RestTestSuite}.
+ */
+public class RestTestCandidate {
+
+ private final RestTestSuite restTestSuite;
+ private final TestSection testSection;
+
+ public RestTestCandidate(RestTestSuite restTestSuite, TestSection testSection) {
+ this.restTestSuite = restTestSuite;
+ this.testSection = testSection;
+ }
+
+ public String getApi() {
+ return restTestSuite.getApi();
+ }
+
+ public String getName() {
+ return restTestSuite.getName();
+ }
+
+ public String getSuitePath() {
+ return restTestSuite.getPath();
+ }
+
+ public String getTestPath() {
+ return restTestSuite.getPath() + "/" + testSection.getName();
+ }
+
+ public SetupSection getSetupSection() {
+ return restTestSuite.getSetupSection();
+ }
+
+ public TestSection getTestSection() {
+ return testSection;
+ }
+
+ @Override
+ public String toString() {
+ return getTestPath();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/core/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
new file mode 100644
index 0000000000..bf7116e169
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.client.RestClient;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Execution context passed across the REST tests.
+ * Holds the REST client used to communicate with elasticsearch.
+ * Caches the last obtained test response and allows to stash part of it within variables
+ * that can be used as input values in following requests.
+ */
+public class RestTestExecutionContext implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestExecutionContext.class);
+
+ private final Stash stash = new Stash();
+
+ private final RestSpec restSpec;
+
+ private RestClient restClient;
+
+ private RestResponse response;
+
+ public RestTestExecutionContext(RestSpec restSpec) {
+ this.restSpec = restSpec;
+ }
+
+ /**
+ * Calls an elasticsearch api with the parameters and request body provided as arguments.
+ * Saves the obtained response in the execution context.
+ * @throws RestException if the returned status code is non ok
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies) throws IOException, RestException {
+ //makes a copy of the parameters before modifying them for this specific request
+ HashMap<String, String> requestParams = Maps.newHashMap(params);
+ for (Map.Entry<String, String> entry : requestParams.entrySet()) {
+ if (stash.isStashedValue(entry.getValue())) {
+ entry.setValue(stash.unstashValue(entry.getValue()).toString());
+ }
+ }
+
+ String body = actualBody(bodies);
+
+ try {
+ response = callApiInternal(apiName, requestParams, body);
+ //we always stash the last response body
+ stash.stashValue("body", response.getBody());
+ return response;
+ } catch(RestException e) {
+ response = e.restResponse();
+ throw e;
+ }
+ }
+
+ private String actualBody(List<Map<String, Object>> bodies) throws IOException {
+ if (bodies.isEmpty()) {
+ return "";
+ }
+
+ if (bodies.size() == 1) {
+ return bodyAsString(stash.unstashMap(bodies.get(0)));
+ }
+
+ StringBuilder bodyBuilder = new StringBuilder();
+ for (Map<String, Object> body : bodies) {
+ bodyBuilder.append(bodyAsString(stash.unstashMap(body))).append("\n");
+ }
+ return bodyBuilder.toString();
+ }
+
+ private String bodyAsString(Map<String, Object> body) throws IOException {
+ return XContentFactory.jsonBuilder().map(body).string();
+ }
+
+ private RestResponse callApiInternal(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+ return restClient.callApi(apiName, params, body);
+ }
+
+ /**
+ * Extracts a specific value from the last saved response
+ */
+ public Object response(String path) throws IOException {
+ return response.evaluate(path, stash);
+ }
+
+ /**
+ * Creates the embedded REST client when needed. Needs to be called before each test.
+ */
+ public void initClient(InetSocketAddress[] addresses, Settings settings) throws IOException, RestException {
+ if (restClient == null) {
+ restClient = new RestClient(restSpec, settings, addresses);
+ }
+ }
+
+ /**
+ * Clears the last obtained response and the stashed fields
+ */
+ public void clear() {
+ logger.debug("resetting client, response and stash");
+ response = null;
+ stash.clear();
+ }
+
+ public Stash stash() {
+ return stash;
+ }
+
+ /**
+ * Returns the current es version as a string
+ */
+ public Version esVersion() {
+ return restClient.getEsVersion();
+ }
+
+ /**
+ * Closes the execution context and releases the underlying resources
+ */
+ @Override
+ public void close() {
+ if (restClient != null) {
+ restClient.close();
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/Stash.java b/core/src/test/java/org/elasticsearch/test/rest/Stash.java
new file mode 100644
index 0000000000..4d0a1fb7fe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/Stash.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Allows to cache the last obtained test response and/or part of it within variables
+ * that can be used as input values in following requests and assertions.
+ */
+public class Stash implements ToXContent {
+
+ private static final ESLogger logger = Loggers.getLogger(Stash.class);
+
+ public static final Stash EMPTY = new Stash();
+
+ private final Map<String, Object> stash = Maps.newHashMap();
+
+ /**
+     * Allows to save a specific field in the stash as a key-value pair
+ */
+ public void stashValue(String key, Object value) {
+ logger.trace("stashing [{}]=[{}]", key, value);
+ Object old = stash.put(key, value);
+ if (old != null && old != value) {
+ logger.trace("replaced stashed value [{}] with same key [{}]", old, key);
+ }
+ }
+
+ /**
+ * Clears the previously stashed values
+ */
+ public void clear() {
+ stash.clear();
+ }
+
+ /**
+ * Tells whether a particular value needs to be looked up in the stash
+ * The stash contains fields eventually extracted from previous responses that can be reused
+ * as arguments for following requests (e.g. scroll_id)
+ */
+ public boolean isStashedValue(Object key) {
+ if (key == null) {
+ return false;
+ }
+ String stashKey = key.toString();
+ return Strings.hasLength(stashKey) && stashKey.startsWith("$");
+ }
+
+ /**
+ * Extracts a value from the current stash
+ * The stash contains fields eventually extracted from previous responses that can be reused
+ * as arguments for following requests (e.g. scroll_id)
+ */
+ public Object unstashValue(String value) {
+ Object stashedValue = stash.get(value.substring(1));
+ if (stashedValue == null) {
+ throw new IllegalArgumentException("stashed value not found for key [" + value + "]");
+ }
+ return stashedValue;
+ }
+
+ /**
+ * Recursively unstashes map values if needed
+ */
+ public Map<String, Object> unstashMap(Map<String, Object> map) {
+ Map<String, Object> copy = Maps.newHashMap(map);
+ unstashObject(copy);
+ return copy;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void unstashObject(Object obj) {
+ if (obj instanceof List) {
+ List list = (List) obj;
+ for (int i = 0; i < list.size(); i++) {
+ Object o = list.get(i);
+ if (isStashedValue(o)) {
+ list.set(i, unstashValue(o.toString()));
+ } else {
+ unstashObject(o);
+ }
+ }
+ }
+ if (obj instanceof Map) {
+ Map<String, Object> map = (Map) obj;
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (isStashedValue(entry.getValue())) {
+ entry.setValue(unstashValue(entry.getValue().toString()));
+ } else {
+ unstashObject(entry.getValue());
+ }
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("stash", stash);
+ return builder;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/RestClient.java b/core/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
new file mode 100644
index 0000000000..b6222948fd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/RestClient.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * REST client used to test the elasticsearch REST layer
+ * Holds the {@link RestSpec} used to translate api calls into REST calls
+ */
+public class RestClient implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestClient.class);
+    //query_string params that don't need to be declared in the spec, they are supported by default
+ private static final Set<String> ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path");
+
+ private final RestSpec restSpec;
+ private final CloseableHttpClient httpClient;
+ private final Headers headers;
+ private final InetSocketAddress[] addresses;
+ private final Version esVersion;
+
+ public RestClient(RestSpec restSpec, Settings settings, InetSocketAddress[] addresses) throws IOException, RestException {
+ assert addresses.length > 0;
+ this.restSpec = restSpec;
+ this.headers = new Headers(settings);
+ this.httpClient = createHttpClient();
+ this.addresses = addresses;
+ this.esVersion = readAndCheckVersion();
+ logger.info("REST client initialized {}, elasticsearch version: [{}]", addresses, esVersion);
+ }
+
+ private Version readAndCheckVersion() throws IOException, RestException {
+ //we make a manual call here without using callApi method, mainly because we are initializing
+ //and the randomized context doesn't exist for the current thread (would be used to choose the method otherwise)
+ RestApi restApi = restApi("info");
+ assert restApi.getPaths().size() == 1;
+ assert restApi.getMethods().size() == 1;
+
+ String version = null;
+ for (InetSocketAddress address : addresses) {
+ RestResponse restResponse = new RestResponse(new HttpRequestBuilder(httpClient).addHeaders(headers)
+ .host(address.getHostName()).port(address.getPort())
+ .path(restApi.getPaths().get(0))
+ .method(restApi.getMethods().get(0)).execute());
+ checkStatusCode(restResponse);
+
+ Object latestVersion = restResponse.evaluate("version.number");
+ if (latestVersion == null) {
+ throw new RuntimeException("elasticsearch version not found in the response");
+ }
+ if (version == null) {
+ version = latestVersion.toString();
+ } else {
+ if (!latestVersion.equals(version)) {
+ throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions");
+ }
+ }
+ }
+ return Version.fromString(version);
+ }
+
+ public Version getEsVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Calls an api with the provided parameters and body
+ * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't get sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, String body) throws IOException, RestException {
+
+ List<Integer> ignores = Lists.newArrayList();
+ Map<String, String> requestParams = null;
+ if (params != null) {
+ //makes a copy of the parameters before modifying them for this specific request
+ requestParams = Maps.newHashMap(params);
+ //ignore is a special parameter supported by the clients, shouldn't be sent to es
+ String ignoreString = requestParams.remove("ignore");
+ if (Strings.hasLength(ignoreString)) {
+ try {
+ ignores.add(Integer.valueOf(ignoreString));
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead");
+ }
+ }
+ }
+
+ HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
+ logger.debug("calling api [{}]", apiName);
+ HttpResponse httpResponse = httpRequestBuilder.execute();
+
+ //http HEAD doesn't support response body
+ // For the few api (exists class of api) that use it we need to accept 404 too
+ if (!httpResponse.supportsBody()) {
+ ignores.add(404);
+ }
+
+ RestResponse restResponse = new RestResponse(httpResponse);
+ checkStatusCode(restResponse, ignores);
+ return restResponse;
+ }
+
+ private void checkStatusCode(RestResponse restResponse, List<Integer> ignores) throws RestException {
+ //ignore is a catch within the client, to prevent the client from throwing error if it gets non ok codes back
+ if (ignores.contains(restResponse.getStatusCode())) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("ignored non ok status codes {} as requested", ignores);
+ }
+ return;
+ }
+ checkStatusCode(restResponse);
+ }
+
+ private void checkStatusCode(RestResponse restResponse) throws RestException {
+ if (restResponse.isError()) {
+ throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse);
+ }
+ }
+
+ private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) {
+
+ //create doesn't exist in the spec but is supported in the clients (index with op_type=create)
+ boolean indexCreateApi = "create".equals(apiName);
+ String api = indexCreateApi ? "index" : apiName;
+ RestApi restApi = restApi(api);
+
+ HttpRequestBuilder httpRequestBuilder = httpRequestBuilder();
+
+ //divide params between ones that go within query string and ones that go within path
+ Map<String, String> pathParts = Maps.newHashMap();
+ if (params != null) {
+ for (Map.Entry<String, String> entry : params.entrySet()) {
+ if (restApi.getPathParts().contains(entry.getKey())) {
+ pathParts.put(entry.getKey(), entry.getValue());
+ } else {
+ if (restApi.getParams().contains(entry.getKey()) || ALWAYS_ACCEPTED_QUERY_STRING_PARAMS.contains(entry.getKey())) {
+ httpRequestBuilder.addParam(entry.getKey(), entry.getValue());
+ } else {
+ throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + restApi.getName() + "] api");
+ }
+ }
+ }
+ }
+
+ if (indexCreateApi) {
+ httpRequestBuilder.addParam("op_type", "create");
+ }
+
+ List<String> supportedMethods = restApi.getSupportedMethods(pathParts.keySet());
+ if (Strings.hasLength(body)) {
+ if (!restApi.isBodySupported()) {
+ throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api");
+ }
+ //test the GET with source param instead of GET/POST with body
+ if (supportedMethods.contains("GET") && RandomizedTest.rarely()) {
+ logger.debug("sending the request body as source param with GET method");
+ httpRequestBuilder.addParam("source", body).method("GET");
+ } else {
+ httpRequestBuilder.body(body).method(RandomizedTest.randomFrom(supportedMethods));
+ }
+ } else {
+ if (restApi.isBodyRequired()) {
+ throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.method(RandomizedTest.randomFrom(supportedMethods));
+ }
+
+ //the http method is randomized (out of the available ones with the chosen api)
+ return httpRequestBuilder.path(RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts)));
+ }
+
+ private RestApi restApi(String apiName) {
+ RestApi restApi = restSpec.getApi(apiName);
+ if (restApi == null) {
+ throw new IllegalArgumentException("rest api [" + apiName + "] doesn't exist in the rest spec");
+ }
+ return restApi;
+ }
+
+ protected HttpRequestBuilder httpRequestBuilder() {
+ //the address used is randomized between the available ones
+ InetSocketAddress address = RandomizedTest.randomFrom(addresses);
+ return new HttpRequestBuilder(httpClient).addHeaders(headers).host(address.getHostName()).port(address.getPort());
+ }
+
+ protected CloseableHttpClient createHttpClient() {
+ return HttpClients.createMinimal(new PoolingHttpClientConnectionManager(15, TimeUnit.SECONDS));
+ }
+
+ /**
+ * Closes the REST client and the underlying http client
+ */
+ @Override
+ public void close() {
+ IOUtils.closeWhileHandlingException(httpClient);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/RestException.java b/core/src/test/java/org/elasticsearch/test/rest/client/RestException.java
new file mode 100644
index 0000000000..2236134837
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/RestException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+/**
+ * Thrown when a status code that holds an error is received (unless needs to be ignored)
+ * Holds the original {@link RestResponse}
+ */
+public class RestException extends Exception {
+
+ private final RestResponse restResponse;
+
+ public RestException(String message, RestResponse restResponse) {
+ super(message);
+ this.restResponse = restResponse;
+ }
+
+ public RestResponse restResponse() {
+ return restResponse;
+ }
+
+ public int statusCode() {
+ return restResponse.getStatusCode();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java b/core/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
new file mode 100644
index 0000000000..3c7fcfdc64
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/RestResponse.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import org.elasticsearch.test.rest.Stash;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.json.JsonPath;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from a REST call
+ * Supports parsing the response body as json when needed and returning specific values extracted from it
+ */
+public class RestResponse {
+
+ private final HttpResponse response;
+ private JsonPath parsedResponse;
+
+ RestResponse(HttpResponse response) {
+ this.response = response;
+ }
+
+ public int getStatusCode() {
+ return response.getStatusCode();
+ }
+
+ public String getReasonPhrase() {
+ return response.getReasonPhrase();
+ }
+
+ /**
+ * Returns the body properly parsed depending on the content type.
+ * Might be a string or a json object parsed as a map.
+ */
+ public Object getBody() throws IOException {
+ if (isJson()) {
+ return parsedResponse().evaluate("");
+ }
+ return response.getBody();
+ }
+
+ /**
+ * Returns the body as a string
+ */
+ public String getBodyAsString() {
+ return response.getBody();
+ }
+
+ public boolean isError() {
+ return response.isError();
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path) throws IOException {
+ return evaluate(path, Stash.EMPTY);
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path, Stash stash) throws IOException {
+
+ if (response == null) {
+ return null;
+ }
+
+ JsonPath jsonPath = parsedResponse();
+
+ if (jsonPath == null) {
+ //special case: api that don't support body (e.g. exists) return true if 200, false if 404, even if no body
+ //is_true: '' means the response had no body but the client returned true (caused by 200)
+ //is_false: '' means the response had no body but the client returned false (caused by 404)
+ if ("".equals(path) && !response.supportsBody()) {
+ return !response.isError();
+ }
+ return null;
+ }
+
+ return jsonPath.evaluate(path, stash);
+ }
+
+ private boolean isJson() {
+ String contentType = response.getHeaders().get("Content-Type");
+ return contentType != null && contentType.contains("application/json");
+ }
+
+ private JsonPath parsedResponse() throws IOException {
+ if (parsedResponse != null) {
+ return parsedResponse;
+ }
+ if (response == null || !response.hasBody()) {
+ return null;
+ }
+ return parsedResponse = new JsonPath(response.getBody());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
new file mode 100644
index 0000000000..480fc7b2f0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows to send DELETE requests providing a body (not supported out of the box)
+ */
+public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "DELETE";
+
+ public HttpDeleteWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
new file mode 100644
index 0000000000..aa0129f466
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows to send GET requests providing a body (not supported out of the box)
+ */
+public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "GET";
+
+ public HttpGetWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
new file mode 100644
index 0000000000..09f79a0fc7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.*;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLEncoder;
+import java.nio.charset.Charset;
+import java.util.Map;
+
+/**
+ * Executable builder for an http request
+ * Holds an {@link org.apache.http.client.HttpClient} that is used to send the built http request
+ */
+public class HttpRequestBuilder {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpRequestBuilder.class);
+
+ static final Charset DEFAULT_CHARSET = Charset.forName("utf-8");
+
+ private final CloseableHttpClient httpClient;
+
+ private String protocol = "http";
+
+ private String host;
+
+ private int port;
+
+ private String path = "";
+
+ private final Map<String, String> params = Maps.newHashMap();
+
+ private final Map<String, String> headers = Maps.newHashMap();
+
+ private String method = HttpGetWithEntity.METHOD_NAME;
+
+ private String body;
+
+ public HttpRequestBuilder(CloseableHttpClient httpClient) {
+ this.httpClient = httpClient;
+ }
+
+ public HttpRequestBuilder host(String host) {
+ this.host = host;
+ return this;
+ }
+
+ public HttpRequestBuilder httpTransport(HttpServerTransport httpServerTransport) {
+ InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress();
+ return host(transportAddress.address().getHostName()).port(transportAddress.address().getPort());
+ }
+
+ public HttpRequestBuilder port(int port) {
+ this.port = port;
+ return this;
+ }
+
+ public HttpRequestBuilder path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ public HttpRequestBuilder addParam(String name, String value) {
+ try {
+ //manually url encode params, since URI does it only partially (e.g. '+' stays as is)
+ this.params.put(name, URLEncoder.encode(value, "utf-8"));
+ return this;
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public HttpRequestBuilder addHeaders(Headers headers) {
+ for (String header : headers.headers().names()) {
+ this.headers.put(header, headers.headers().get(header));
+ }
+ return this;
+ }
+
+ public HttpRequestBuilder addHeader(String name, String value) {
+ this.headers.put(name, value);
+ return this;
+ }
+
+ public HttpRequestBuilder protocol(String protocol) {
+ this.protocol = protocol;
+ return this;
+ }
+
+ public HttpRequestBuilder method(String method) {
+ this.method = method;
+ return this;
+ }
+
+ public HttpRequestBuilder body(String body) {
+ if (Strings.hasLength(body)) {
+ this.body = body;
+ }
+ return this;
+ }
+
+ public HttpResponse execute() throws IOException {
+ HttpUriRequest httpUriRequest = buildRequest();
+ if (logger.isTraceEnabled()) {
+ StringBuilder stringBuilder = new StringBuilder(httpUriRequest.getMethod()).append(" ").append(httpUriRequest.getURI());
+ if (Strings.hasLength(body)) {
+ stringBuilder.append("\n").append(body);
+ }
+ logger.trace("sending request \n{}", stringBuilder.toString());
+ }
+ for (Map.Entry<String, String> entry : this.headers.entrySet()) {
+ httpUriRequest.addHeader(entry.getKey(), entry.getValue());
+ }
+ try (CloseableHttpResponse closeableHttpResponse = httpClient.execute(httpUriRequest)) {
+ HttpResponse httpResponse = new HttpResponse(httpUriRequest, closeableHttpResponse);
+ logger.trace("got response \n{}\n{}", closeableHttpResponse, httpResponse.hasBody() ? httpResponse.getBody() : "");
+ return httpResponse;
+ }
+ }
+
+ private HttpUriRequest buildRequest() {
+
+ if (HttpGetWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpGetWithEntity(buildUri()));
+ }
+
+ if (HttpHead.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpHead(buildUri());
+ }
+
+ if (HttpOptions.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpOptions(buildUri());
+ }
+
+ if (HttpDeleteWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpDeleteWithEntity(buildUri()));
+ }
+
+ if (HttpPut.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPut(buildUri()));
+ }
+
+ if (HttpPost.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPost(buildUri()));
+ }
+
+ throw new UnsupportedOperationException("method [" + method + "] not supported");
+ }
+
+ private URI buildUri() {
+ try {
+ //url encode rules for path and query params are different. We use URI to encode the path, but we manually encode each query param through URLEncoder.
+ URI uri = new URI(protocol, null, host, port, path, null, null);
+ //String concatenation FTW. If we use the nicer multi argument URI constructor query parameters will get only partially encoded
+ //(e.g. '+' will stay as is) hence when trying to properly encode params manually they will end up double encoded (+ becomes %252B instead of %2B).
+ StringBuilder uriBuilder = new StringBuilder(protocol).append("://").append(host).append(":").append(port).append(uri.getRawPath());
+ if (params.size() > 0) {
+ uriBuilder.append("?").append(Joiner.on('&').withKeyValueSeparator("=").join(params));
+ }
+ return URI.create(uriBuilder.toString());
+ } catch(URISyntaxException e) {
+ throw new IllegalArgumentException("unable to build uri", e);
+ }
+ }
+
+ private HttpEntityEnclosingRequestBase addOptionalBody(HttpEntityEnclosingRequestBase requestBase) {
+ if (Strings.hasText(body)) {
+ requestBase.setEntity(new StringEntity(body, DEFAULT_CHARSET));
+ }
+ return requestBase;
+ }
+
+ private void checkBodyNotSupported() {
+ if (Strings.hasText(body)) {
+ throw new IllegalArgumentException("request body not supported with head request");
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder(method).append(" '")
+ .append(host).append(":").append(port).append(path).append("'");
+ if (!params.isEmpty()) {
+ stringBuilder.append(", params=").append(params);
+ }
+ if (Strings.hasLength(body)) {
+ stringBuilder.append(", body=\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
new file mode 100644
index 0000000000..9945edbefa
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.Header;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Response obtained from an http request
+ * Always consumes the whole response body loading it entirely into a string
+ */
+public class HttpResponse {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpResponse.class);
+
+ private final HttpUriRequest httpRequest;
+ private final int statusCode;
+ private final String reasonPhrase;
+ private final String body;
+ private final Map<String, String> headers = new HashMap<>();
+
+ HttpResponse(HttpUriRequest httpRequest, CloseableHttpResponse httpResponse) {
+ this.httpRequest = httpRequest;
+ this.statusCode = httpResponse.getStatusLine().getStatusCode();
+ this.reasonPhrase = httpResponse.getStatusLine().getReasonPhrase();
+ for (Header header : httpResponse.getAllHeaders()) {
+ this.headers.put(header.getName(), header.getValue());
+ }
+ if (httpResponse.getEntity() != null) {
+ try {
+ this.body = EntityUtils.toString(httpResponse.getEntity(), HttpRequestBuilder.DEFAULT_CHARSET);
+ } catch (IOException e) {
+ EntityUtils.consumeQuietly(httpResponse.getEntity());
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ httpResponse.close();
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+ } else {
+ this.body = null;
+ }
+ }
+
+ public boolean isError() {
+ return statusCode >= 400;
+ }
+
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ public String getReasonPhrase() {
+ return reasonPhrase;
+ }
+
+ public String getBody() {
+ return body;
+ }
+
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ public boolean supportsBody() {
+ return !HttpHead.METHOD_NAME.equals(httpRequest.getMethod());
+ }
+
+ public Map<String, String> getHeaders() {
+ return headers;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder(statusCode).append(" ").append(reasonPhrase);
+ if (hasBody()) {
+ stringBuilder.append("\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java b/core/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
new file mode 100644
index 0000000000..e2f1b7a3b2
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/json/JsonPath.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.json;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.Stash;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Holds a json object and allows to extract specific values from it
+ */
+public class JsonPath {
+
+ final String json;
+ final Map<String, Object> jsonMap;
+
+ public JsonPath(String json) throws IOException {
+ this.json = json;
+ this.jsonMap = convertToMap(json);
+ }
+
+ private static Map<String, Object> convertToMap(String json) throws IOException {
+ return JsonXContent.jsonXContent.createParser(json).mapOrderedAndClose();
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path) {
+ return evaluate(path, Stash.EMPTY);
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path, Stash stash) {
+ String[] parts = parsePath(path);
+ Object object = jsonMap;
+ for (String part : parts) {
+ object = evaluate(part, object, stash);
+ if (object == null) {
+ return null;
+ }
+ }
+ return object;
+ }
+
+ @SuppressWarnings("unchecked")
+ private Object evaluate(String key, Object object, Stash stash) {
+ if (stash.isStashedValue(key)) {
+ key = stash.unstashValue(key).toString();
+ }
+
+ if (object instanceof Map) {
+ return ((Map<String, Object>) object).get(key);
+ }
+ if (object instanceof List) {
+ List<Object> list = (List<Object>) object;
+ try {
+ return list.get(Integer.valueOf(key));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e);
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("element was a list with " + list.size() + " elements, but [" + key + "] was out of bounds", e);
+ }
+ }
+
+ throw new IllegalArgumentException("no object found for [" + key + "] within object of class [" + object.getClass() + "]");
+ }
+
+ private String[] parsePath(String path) {
+ List<String> list = Lists.newArrayList();
+ StringBuilder current = new StringBuilder();
+ boolean escape = false;
+ for (int i = 0; i < path.length(); i++) {
+ char c = path.charAt(i);
+ if (c == '\\') {
+ escape = true;
+ continue;
+ }
+
+ if (c == '.') {
+ if (escape) {
+ escape = false;
+ } else {
+ if (current.length() > 0) {
+ list.add(current.toString());
+ current.setLength(0);
+ }
+ continue;
+ }
+ }
+
+ current.append(c);
+ }
+
+ if (current.length() > 0) {
+ list.add(current.toString());
+ }
+
+ return list.toArray(new String[list.size()]);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
new file mode 100644
index 0000000000..ec5aef5445
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for do sections
+ */
+public class DoSectionParser implements RestTestFragmentParser<DoSection> {
+
+ @Override
+ public DoSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ DoSection doSection = new DoSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("catch".equals(currentFieldName)) {
+ doSection.setCatch(parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if (currentFieldName != null) {
+ ApiCallSection apiCallSection = new ApiCallSection(currentFieldName);
+ String paramName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ paramName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("body".equals(paramName)) {
+ String body = parser.text();
+ XContentType bodyContentType = XContentFactory.xContentType(body);
+ XContentParser bodyParser = XContentFactory.xContent(bodyContentType).createParser(body);
+ //multiple bodies are supported e.g. in case of bulk provided as a whole string
+ while(bodyParser.nextToken() != null) {
+ apiCallSection.addBody(bodyParser.mapOrdered());
+ }
+ } else {
+ apiCallSection.addParam(paramName, parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("body".equals(paramName)) {
+ apiCallSection.addBody(parser.mapOrdered());
+ }
+ }
+ }
+ doSection.setApiCallSection(apiCallSection);
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (doSection.getApiCallSection() == null) {
+ throw new RestTestParseException("client call section is mandatory within a do section");
+ }
+
+ return doSection;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
new file mode 100644
index 0000000000..68f833d35c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanEqualToAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gte assert sections
+ */
+public class GreaterThanEqualToParser implements RestTestFragmentParser<GreaterThanEqualToAssertion> {
+
+ @Override
+ public GreaterThanEqualToAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("gte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
new file mode 100644
index 0000000000..a66122138c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gt assert sections
+ */
+public class GreaterThanParser implements RestTestFragmentParser<GreaterThanAssertion> {
+
+ @Override
+ public GreaterThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("gt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
new file mode 100644
index 0000000000..81cade6d84
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsFalseAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_false assert sections
+ */
+public class IsFalseParser implements RestTestFragmentParser<IsFalseAssertion> {
+
+ @Override
+ public IsFalseAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsFalseAssertion(parseContext.parseField());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
new file mode 100644
index 0000000000..922629b47e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_true assert sections
+ */
+public class IsTrueParser implements RestTestFragmentParser<IsTrueAssertion> {
+
+ @Override
+ public IsTrueAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsTrueAssertion(parseContext.parseField());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
new file mode 100644
index 0000000000..414be59f4c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/LengthParser.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LengthAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for length assert sections
+ */
+public class LengthParser implements RestTestFragmentParser<LengthAssertion> {
+
+ @Override
+ public LengthAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ assert stringObjectTuple.v2() != null;
+ int value;
+ if (stringObjectTuple.v2() instanceof Number) {
+ value = ((Number) stringObjectTuple.v2()).intValue();
+ } else {
+ try {
+ value = Integer.valueOf(stringObjectTuple.v2().toString());
+ } catch(NumberFormatException e) {
+ throw new RestTestParseException("length is not a valid number", e);
+ }
+
+ }
+ return new LengthAssertion(stringObjectTuple.v1(), value);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
new file mode 100644
index 0000000000..f2d53d05a5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanOrEqualToAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lte assert section
+ */
+public class LessThanOrEqualToParser implements RestTestFragmentParser<LessThanOrEqualToAssertion> {
+
+ @Override
+ public LessThanOrEqualToAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("lte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanOrEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
new file mode 100644
index 0000000000..065dd19d6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/LessThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lt assert sections
+ */
+public class LessThanParser implements RestTestFragmentParser<LessThanAssertion> {
+
+ @Override
+ public LessThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (! (stringObjectTuple.v2() instanceof Comparable) ) {
+ throw new RestTestParseException("lt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
new file mode 100644
index 0000000000..30ee18a4e0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/MatchParser.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for match assert sections
+ */
+public class MatchParser implements RestTestFragmentParser<MatchAssertion> {
+
+ @Override
+ public MatchAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ return new MatchAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
new file mode 100644
index 0000000000..8d2bd8be76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import java.io.IOException;
+
+/**
+ * Base parser for a REST test suite fragment
+ * @param <T> the test fragment's type that gets parsed and returned
+ */
+public interface RestTestFragmentParser<T> {
+
+ /**
+ * Parses a test fragment given the current {@link RestTestSuiteParseContext}
+ */
+ T parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException;
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
new file mode 100644
index 0000000000..3e1af2cd74
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+/**
+ * Exception thrown whenever there is a problem parsing any of the REST test suite fragment
+ */
+public class RestTestParseException extends Exception {
+
+ RestTestParseException(String message) {
+ super(message);
+ }
+
+ RestTestParseException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
new file mode 100644
index 0000000000..0763615111
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for a complete test section
+ */
+public class RestTestSectionParser implements RestTestFragmentParser<TestSection> {
+
+ @Override
+ public TestSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+ parseContext.advanceToFieldName();
+ TestSection testSection = new TestSection(parser.currentName());
+ parser.nextToken();
+ testSection.setSkipSection(parseContext.parseSkipSection());
+
+ while ( parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ parseContext.advanceToFieldName();
+ testSection.addExecutableSection(parseContext.parseExecutableSection());
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return testSection;
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
new file mode 100644
index 0000000000..10110adf4b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.*;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Context shared across the whole tests parse phase.
+ * Provides shared parse methods and holds information needed to parse the test sections (e.g. es version)
+ */
+public class RestTestSuiteParseContext {
+
+ private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser();
+ private static final RestTestSectionParser TEST_SECTION_PARSER = new RestTestSectionParser();
+ private static final SkipSectionParser SKIP_SECTION_PARSER = new SkipSectionParser();
+ private static final DoSectionParser DO_SECTION_PARSER = new DoSectionParser();
+ private static final Map<String, RestTestFragmentParser<? extends ExecutableSection>> EXECUTABLE_SECTIONS_PARSERS = Maps.newHashMap();
+ static {
+ EXECUTABLE_SECTIONS_PARSERS.put("do", DO_SECTION_PARSER);
+ EXECUTABLE_SECTIONS_PARSERS.put("set", new SetSectionParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("match", new MatchParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_true", new IsTrueParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_false", new IsFalseParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gt", new GreaterThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gte", new GreaterThanEqualToParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lt", new LessThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lte", new LessThanOrEqualToParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("length", new LengthParser());
+ }
+
+ private final String api;
+ private final String suiteName;
+ private final XContentParser parser;
+
+ public RestTestSuiteParseContext(String api, String suiteName, XContentParser parser) {
+ this.api = api;
+ this.suiteName = suiteName;
+ this.parser = parser;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getSuiteName() {
+ return suiteName;
+ }
+
+ public XContentParser parser() {
+ return parser;
+ }
+
+ public SetupSection parseSetupSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("setup".equals(parser.currentName())) {
+ parser.nextToken();
+ SetupSection setupSection = SETUP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return setupSection;
+ }
+
+ return SetupSection.EMPTY;
+ }
+
+ public TestSection parseTestSection() throws IOException, RestTestParseException {
+ return TEST_SECTION_PARSER.parse(this);
+ }
+
+ public SkipSection parseSkipSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("skip".equals(parser.currentName())) {
+ SkipSection skipSection = SKIP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return skipSection;
+ }
+
+ return SkipSection.EMPTY;
+ }
+
+ public ExecutableSection parseExecutableSection() throws IOException, RestTestParseException {
+ advanceToFieldName();
+ String section = parser.currentName();
+ RestTestFragmentParser<? extends ExecutableSection> execSectionParser = EXECUTABLE_SECTIONS_PARSERS.get(section);
+ if (execSectionParser == null) {
+ throw new RestTestParseException("no parser found for executable section [" + section + "]");
+ }
+ ExecutableSection executableSection = execSectionParser.parse(this);
+ parser.nextToken();
+ return executableSection;
+ }
+
+ public DoSection parseDoSection() throws IOException, RestTestParseException {
+ return DO_SECTION_PARSER.parse(this);
+ }
+
+ public void advanceToFieldName() throws IOException, RestTestParseException {
+ XContentParser.Token token = parser.currentToken();
+ //we are in the beginning, haven't called nextToken yet
+ if (token == null) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_ARRAY) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new RestTestParseException("malformed test section: field name expected but found " + token);
+ }
+ }
+
+ public String parseField() throws IOException, RestTestParseException {
+ parser.nextToken();
+ assert parser.currentToken().isValue();
+ String field = parser.text();
+ parser.nextToken();
+ return field;
+ }
+
+ public Tuple<String, Object> parseTuple() throws IOException, RestTestParseException {
+ parser.nextToken();
+ advanceToFieldName();
+ Map<String,Object> map = parser.map();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ if (map.size() != 1) {
+ throw new RestTestParseException("expected key value pair but found " + map.size() + " ");
+ }
+
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ return Tuple.tuple(entry.getKey(), entry.getValue());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
new file mode 100644
index 0000000000..e8422887ad
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+/**
+ * Parser for a complete test suite (yaml file)
+ */
+public class RestTestSuiteParser implements RestTestFragmentParser<RestTestSuite> {
+
+ public RestTestSuite parse(String api, Path file) throws IOException, RestTestParseException {
+
+ if (!Files.isRegularFile(file)) {
+ throw new IllegalArgumentException(file.toAbsolutePath() + " is not a file");
+ }
+
+ String filename = file.getFileName().toString();
+ //remove the file extension
+ int i = filename.lastIndexOf('.');
+ if (i > 0) {
+ filename = filename.substring(0, i);
+ }
+
+ //our yaml parser seems to be too tolerant. Each yaml suite must end with \n, otherwise clients tests might break.
+ try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ channel.read(bb, channel.size() - 1);
+ if (bb.get(0) != 10) {
+ throw new RestTestParseException("test suite [" + api + "/" + filename + "] doesn't end with line feed (\\n)");
+ }
+ }
+
+ XContentParser parser = YamlXContent.yamlXContent.createParser(Files.newInputStream(file));
+ try {
+ RestTestSuiteParseContext testParseContext = new RestTestSuiteParseContext(api, filename, parser);
+ return parse(testParseContext);
+ } catch(Exception e) {
+ throw new RestTestParseException("Error parsing " + api + "/" + filename, e);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public RestTestSuite parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+
+ RestTestSuite restTestSuite = new RestTestSuite(parseContext.getApi(), parseContext.getSuiteName());
+
+ restTestSuite.setSetupSection(parseContext.parseSetupSection());
+
+ while(true) {
+ //the "---" section separator is not understood by the yaml parser. null is returned, same as when the parser is closed
+ //we need to somehow distinguish between a null in the middle of a test ("---")
+ // and a null at the end of the file (at least two consecutive null tokens)
+ if(parser.currentToken() == null) {
+ if (parser.nextToken() == null) {
+ break;
+ }
+ }
+
+ TestSection testSection = parseContext.parseTestSection();
+ if (!restTestSuite.addTestSection(testSection)) {
+ throw new RestTestParseException("duplicate test section [" + testSection.getName() + "] found in [" + restTestSuite.getPath() + "]");
+ }
+ }
+
+ return restTestSuite;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
new file mode 100644
index 0000000000..8afafc09f7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for set sections
+ */
+public class SetSectionParser implements RestTestFragmentParser<SetSection> {
+
+ @Override
+ public SetSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ SetSection setSection = new SetSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ setSection.addSet(currentFieldName, parser.text());
+ }
+ }
+
+ parser.nextToken();
+
+ if (setSection.getStash().isEmpty()) {
+ throw new RestTestParseException("set section must set at least a value");
+ }
+
+ return setSection;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
new file mode 100644
index 0000000000..2a2e39ea74
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for setup sections
+ */
+public class SetupSectionParser implements RestTestFragmentParser<SetupSection> {
+
+ @Override
+ public SetupSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ SetupSection setupSection = new SetupSection();
+ setupSection.setSkipSection(parseContext.parseSkipSection());
+
+ while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ parseContext.advanceToFieldName();
+ if (!"do".equals(parser.currentName())) {
+ throw new RestTestParseException("section [" + parser.currentName() + "] not supported within setup section");
+ }
+
+ parser.nextToken();
+ setupSection.addDoSection(parseContext.parseDoSection());
+ parser.nextToken();
+ }
+
+ parser.nextToken();
+
+ return setupSection;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/core/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
new file mode 100644
index 0000000000..0a81583cf3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * Parser for skip sections
+ */
+public class SkipSectionParser implements RestTestFragmentParser<SkipSection> {
+
+ @Override
+ public SkipSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ String version = null;
+ String reason = null;
+ List<String> features = Lists.newArrayList();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.text();
+ } else if ("reason".equals(currentFieldName)) {
+ reason = parser.text();
+ } else if ("features".equals(currentFieldName)) {
+ features.add(parser.text());
+ }
+ else {
+ throw new RestTestParseException("field " + currentFieldName + " not supported within skip section");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("features".equals(currentFieldName)) {
+ while(parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ features.add(parser.text());
+ }
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (!Strings.hasLength(version) && features.isEmpty()) {
+ throw new RestTestParseException("version or features is mandatory within skip section");
+ }
+ if (Strings.hasLength(version) && !features.isEmpty()) {
+ throw new RestTestParseException("version or features are mutually exclusive");
+ }
+ if (Strings.hasLength(version) && !Strings.hasLength(reason)) {
+ throw new RestTestParseException("reason is mandatory within skip version section");
+ }
+
+ return new SkipSection(version, features, reason);
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
new file mode 100644
index 0000000000..2a49cd4c59
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+
+/**
+ * Represents a test fragment that contains the information needed to call an api
+ */
+public class ApiCallSection {
+
+ private final String api;
+ private final Map<String, String> params = Maps.newHashMap();
+ private final List<Map<String, Object>> bodies = Lists.newArrayList();
+
+ public ApiCallSection(String api) {
+ this.api = api;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public Map<String, String> getParams() {
+ //make sure we never modify the parameters once returned
+ return ImmutableMap.copyOf(params);
+ }
+
+ public void addParam(String key, String value) {
+ String existingValue = params.get(key);
+ if (existingValue != null) {
+ value = Joiner.on(",").join(existingValue, value);
+ }
+ this.params.put(key, value);
+ }
+
+ public List<Map<String, Object>> getBodies() {
+ return ImmutableList.copyOf(bodies);
+ }
+
+ public void addBody(Map<String, Object> body) {
+ this.bodies.add(body);
+ }
+
+ public boolean hasBody() {
+ return bodies.size() > 0;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/Assertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
new file mode 100644
index 0000000000..c420309f20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/Assertion.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Base class for executable sections that hold assertions
+ */
+public abstract class Assertion implements ExecutableSection {
+
+ private final String field;
+ private final Object expectedValue;
+
+ protected Assertion(String field, Object expectedValue) {
+ this.field = field;
+ this.expectedValue = expectedValue;
+ }
+
+ public final String getField() {
+ return field;
+ }
+
+ public final Object getExpectedValue() {
+ return expectedValue;
+ }
+
+ protected final Object resolveExpectedValue(RestTestExecutionContext executionContext) throws IOException {
+ if (expectedValue instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> map = (Map<String, Object>) expectedValue;
+ return executionContext.stash().unstashMap(map);
+ }
+
+ if (executionContext.stash().isStashedValue(expectedValue)) {
+ return executionContext.stash().unstashValue(expectedValue.toString());
+ }
+ return expectedValue;
+ }
+
+ protected final Object getActualValue(RestTestExecutionContext executionContext) throws IOException {
+ if (executionContext.stash().isStashedValue(field)) {
+ return executionContext.stash().unstashValue(field);
+ }
+ return executionContext.response(field);
+ }
+
+ @Override
+ public final void execute(RestTestExecutionContext executionContext) throws IOException {
+ doAssert(getActualValue(executionContext), resolveExpectedValue(executionContext));
+ }
+
+ /**
+ * Executes the assertion comparing the actual value (parsed from the response) with the expected one
+ */
+ protected abstract void doAssert(Object actualValue, Object expectedValue);
+
+ /**
+ * a utility to get the class of an object, protecting for null (i.e., returning null if the input is null)
+ */
+ protected Class<?> safeClass(Object o) {
+ return o == null ? null : o.getClass();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/DoSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
new file mode 100644
index 0000000000..cf15029a9d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/DoSection.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.common.collect.Tuple.tuple;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a do section:
+ *
+ * - do:
+ * catch: missing
+ * update:
+ * index: test_1
+ * type: test
+ * id: 1
+ * body: { doc: { foo: bar } }
+ *
+ */
+public class DoSection implements ExecutableSection {
+
+ private static final ESLogger logger = Loggers.getLogger(DoSection.class);
+
+ private String catchParam;
+ private ApiCallSection apiCallSection;
+
+ public String getCatch() {
+ return catchParam;
+ }
+
+ public void setCatch(String catchParam) {
+ this.catchParam = catchParam;
+ }
+
+ public ApiCallSection getApiCallSection() {
+ return apiCallSection;
+ }
+
+ public void setApiCallSection(ApiCallSection apiCallSection) {
+ this.apiCallSection = apiCallSection;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+
+ if ("param".equals(catchParam)) {
+ //client should throw validation error before sending request
+ //lets just return without doing anything as we don't have any client to test here
+ logger.info("found [catch: param], no request sent");
+ return;
+ }
+
+ try {
+ RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies());
+ if (Strings.hasLength(catchParam)) {
+ String catchStatusCode;
+ if (catches.containsKey(catchParam)) {
+ catchStatusCode = catches.get(catchParam).v1();
+ } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ catchStatusCode = "4xx|5xx";
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ fail(formatStatusCodeMessage(restResponse, catchStatusCode));
+ }
+ } catch(RestException e) {
+ if (!Strings.hasLength(catchParam)) {
+ fail(formatStatusCodeMessage(e.restResponse(), "2xx"));
+ } else if (catches.containsKey(catchParam)) {
+ assertStatusCode(e.restResponse());
+ } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ //the text of the error message matches regular expression
+ assertThat(formatStatusCodeMessage(e.restResponse(), "4xx|5xx"), e.statusCode(), greaterThanOrEqualTo(400));
+ Object error = executionContext.response("error");
+ assertThat("error was expected in the response", error, notNullValue());
+ //remove delimiters from regex
+ String regex = catchParam.substring(1, catchParam.length() - 1);
+ assertThat("the error message was expected to match the provided regex but didn't",
+ error.toString(), matches(regex));
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ }
+ }
+
+ private void assertStatusCode(RestResponse restResponse) {
+ Tuple<String, org.hamcrest.Matcher<Integer>> stringMatcherTuple = catches.get(catchParam);
+ assertThat(formatStatusCodeMessage(restResponse, stringMatcherTuple.v1()),
+ restResponse.getStatusCode(), stringMatcherTuple.v2());
+ }
+
+ private String formatStatusCodeMessage(RestResponse restResponse, String expected) {
+ return "expected [" + expected + "] status code but api [" + apiCallSection.getApi() + "] returned ["
+ + restResponse.getStatusCode() + " " + restResponse.getReasonPhrase() + "] [" + restResponse.getBodyAsString() + "]";
+ }
+
+ private static Map<String, Tuple<String, org.hamcrest.Matcher<Integer>>> catches = Maps.newHashMap();
+
+ static {
+ catches.put("missing", tuple("404", equalTo(404)));
+ catches.put("conflict", tuple("409", equalTo(409)));
+ catches.put("forbidden", tuple("403", equalTo(403)));
+ catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(409)), not(equalTo(403)))));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
new file mode 100644
index 0000000000..669d82cdd7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/ExecutableSection.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+
+/**
+ * Represents a test fragment that can be executed (e.g. api call, assertion)
+ */
/**
 * Represents a test fragment that can be executed (e.g. api call, assertion)
 */
public interface ExecutableSection {

    /**
     * Executes the section passing in the execution context
     *
     * @throws IOException if reading the response or calling the api fails
     */
    void execute(RestTestExecutionContext executionContext) throws IOException;
}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
new file mode 100644
index 0000000000..a136056685
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a gt assert section:
+ * <p/>
+ * - gt: { fields._ttl: 0}
+ */
+public class GreaterThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class);
+
+ public GreaterThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, greaterThan((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
new file mode 100644
index 0000000000..cfdca7bc33
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a gte assert section:
+ *
+ * - gte: { fields._ttl: 0 }
+ */
+public class GreaterThanEqualToAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class);
+
+ public GreaterThanEqualToAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, greaterThanOrEqualTo((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than or equal to [" + getExpectedValue() + "]";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
new file mode 100644
index 0000000000..9f3a8b6df9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_false assert section:
+ *
+ * - is_false: get.fields.bar
+ *
+ */
+public class IsFalseAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
+
+ public IsFalseAssertion(String field) {
+ super(field, false);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] doesn't have a true value (field: [{}])", actualValue, getField());
+
+ if (actualValue == null) {
+ return;
+ }
+
+ String actualString = actualValue.toString();
+ assertThat(errorMessage(), actualString, anyOf(
+ equalTo(""),
+ equalToIgnoringCase(Boolean.FALSE.toString()),
+ equalTo("0")
+ ));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] has a true value but it shouldn't";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
new file mode 100644
index 0000000000..aacb5f0a3b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_true assert section:
+ *
+ * - is_true: get.fields.bar
+ *
+ */
+public class IsTrueAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
+
+ public IsTrueAssertion(String field) {
+ super(field, true);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has a true value (field [{}])", actualValue, getField());
+ String errorMessage = errorMessage();
+ assertThat(errorMessage, actualValue, notNullValue());
+ String actualString = actualValue.toString();
+ assertThat(errorMessage, actualString, not(equalTo("")));
+ assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
+ assertThat(errorMessage, actualString, not(equalTo("0")));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have a true value";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
new file mode 100644
index 0000000000..4e81618c76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/LengthAssertion.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a length assert section:
+ * <p/>
+ * - length: { hits.hits: 1 }
+ */
+public class LengthAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
+
+ public LengthAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has length [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("expected value of [" + getField() + "] is not numeric (got [" + expectedValue.getClass() + "]", expectedValue, instanceOf(Number.class));
+ int length = ((Number) expectedValue).intValue();
+ if (actualValue instanceof String) {
+ assertThat(errorMessage(), ((String) actualValue).length(), equalTo(length));
+ } else if (actualValue instanceof List) {
+ assertThat(errorMessage(), ((List) actualValue).size(), equalTo(length));
+ } else if (actualValue instanceof Map) {
+ assertThat(errorMessage(), ((Map) actualValue).keySet().size(), equalTo(length));
+ } else {
+ throw new UnsupportedOperationException("value is of unsupported type [" + safeClass(actualValue) + "]");
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have length [" + getExpectedValue() + "]";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
new file mode 100644
index 0000000000..89387ff895
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a lt assert section:
+ *
+ * - lt: { fields._ttl: 20000}
+ *
+ */
+public class LessThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class);
+
+ public LessThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, lessThan((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
new file mode 100644
index 0000000000..99cbf1155d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a lte assert section:
+ *
+ * - lte: { fields._ttl: 0 }
+ */
+public class LessThanOrEqualToAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class);
+
+ public LessThanOrEqualToAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, lessThanOrEqualTo((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than or equal to [" + getExpectedValue() + "]";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/core/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
new file mode 100644
index 0000000000..16efcae96c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/MatchAssertion.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
/**
 * Represents a match assert section:
 *
 *   - match: { get.fields._routing: "5" }
 *
 * Supports plain equality checks, regex matching (an expected string wrapped in
 * forward slashes), and numeric comparison across types (e.g. Double 1.0 vs Integer 1).
 */
public class MatchAssertion extends Assertion {

    private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class);

    public MatchAssertion(String field, Object expectedValue) {
        super(field, expectedValue);
    }

    @Override
    protected void doAssert(Object actualValue, Object expectedValue) {

        //if the value is wrapped into / it is a regexp (e.g. /s+d+/)
        if (expectedValue instanceof String) {
            String expValue = ((String) expectedValue).trim();
            if (expValue.length() > 2 && expValue.startsWith("/") && expValue.endsWith("/")) {
                // regex comparison only makes sense against a string actual value;
                // safeClass comes from Assertion (not visible here) — presumably a
                // null-safe getClass(), confirm in Assertion
                assertThat("field [" + getField() + "] was expected to be of type String but is an instanceof [" + safeClass(actualValue) + "]", actualValue, instanceOf(String.class));
                String stringValue = (String) actualValue;
                // strip the leading and trailing slash delimiters
                String regex = expValue.substring(1, expValue.length() - 1);
                logger.trace("assert that [{}] matches [{}]", stringValue, regex);
                // Pattern.COMMENTS lets patterns contain whitespace and #-comments
                assertThat("field [" + getField() + "] was expected to match the provided regex but didn't",
                        stringValue, matches(regex, Pattern.COMMENTS));
                return;
            }
        }

        assertThat(errorMessage(), actualValue, notNullValue());
        logger.trace("assert that [{}] matches [{}] (field [{}])", actualValue, expectedValue, getField());
        // when the classes differ, numbers are still compared by double value
        if (!actualValue.getClass().equals(safeClass(expectedValue))) {
            if (actualValue instanceof Number && expectedValue instanceof Number) {
                //Double 1.0 is equal to Integer 1
                assertThat(errorMessage(), ((Number) actualValue).doubleValue(), equalTo(((Number) expectedValue).doubleValue()));
                return;
            }
        }

        assertThat(errorMessage(), actualValue, equalTo(expectedValue));
    }

    private String errorMessage() {
        return "field [" + getField() + "] doesn't match the expected value";
    }
}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/core/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
new file mode 100644
index 0000000000..a9047c18db
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/RestTestSuite.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Holds a REST test suite loaded from a specific yaml file.
+ * Supports a setup section and multiple test sections.
+ */
+public class RestTestSuite {
+
+ private final String api;
+ private final String name;
+
+ private SetupSection setupSection;
+
+ private Set<TestSection> testSections = Sets.newTreeSet();
+
+ public RestTestSuite(String api, String name) {
+ this.api = api;
+ this.name = name;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getPath() {
+ return api + "/" + name;
+ }
+
+ public SetupSection getSetupSection() {
+ return setupSection;
+ }
+
+ public void setSetupSection(SetupSection setupSection) {
+ this.setupSection = setupSection;
+ }
+
+ /**
+ * Adds a {@link org.elasticsearch.test.rest.section.TestSection} to the REST suite
+ * @return true if the test section was not already present, false otherwise
+ */
+ public boolean addTestSection(TestSection testSection) {
+ return this.testSections.add(testSection);
+ }
+
+ public List<TestSection> getTestSections() {
+ return Lists.newArrayList(testSections);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/SetSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
new file mode 100644
index 0000000000..0a52a7798b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/SetSection.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Maps;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Represents a set section:
+ *
+ * - set: {_scroll_id: scroll_id}
+ *
+ */
+public class SetSection implements ExecutableSection {
+
+ private Map<String, String> stash = Maps.newHashMap();
+
+ public void addSet(String responseField, String stashedField) {
+ stash.put(responseField, stashedField);
+ }
+
+ public Map<String, String> getStash() {
+ return stash;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+ for (Map.Entry<String, String> entry : stash.entrySet()) {
+ Object actualValue = executionContext.response(entry.getKey());
+ executionContext.stash().stashValue(entry.getValue(), actualValue);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
new file mode 100644
index 0000000000..72f653e6c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/SetupSection.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
/**
 * Represents a setup section. Holds a skip section and multiple do sections.
 */
public class SetupSection {

    // Shared sentinel for "no setup section"; see isEmpty() below.
    public static final SetupSection EMPTY;

    static {
        EMPTY = new SetupSection();
        EMPTY.setSkipSection(SkipSection.EMPTY);
    }

    private SkipSection skipSection;

    private List<DoSection> doSections = Lists.newArrayList();

    public SkipSection getSkipSection() {
        return skipSection;
    }

    public void setSkipSection(SkipSection skipSection) {
        this.skipSection = skipSection;
    }

    public List<DoSection> getDoSections() {
        return doSections;
    }

    public void addDoSection(DoSection doSection) {
        this.doSections.add(doSection);
    }

    // NOTE: equals() is not overridden, so this is effectively an identity check —
    // it returns true only for the EMPTY sentinel itself, never for another
    // instance that merely happens to contain no do sections.
    public boolean isEmpty() {
        return EMPTY.equals(this);
    }
}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
new file mode 100644
index 0000000000..e7ab455577
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.support.Features;
+
+import java.util.List;
+
+/**
+ * Represents a skip section that tells whether a specific test section or suite needs to be skipped
+ * based on:
+ * - the elasticsearch version the tests are running against
+ * - a specific test feature required that might not be implemented yet by the runner
+ */
+public class SkipSection {
+
+ public static final SkipSection EMPTY = new SkipSection();
+
+ private final Version lowerVersion;
+ private final Version upperVersion;
+ private final List<String> features;
+ private final String reason;
+
+ private SkipSection() {
+ this.lowerVersion = null;
+ this.upperVersion = null;
+ this.features = Lists.newArrayList();
+ this.reason = null;
+ }
+
+ public SkipSection(String versionRange, List<String> features, String reason) {
+ assert features != null;
+ assert versionRange != null && features.isEmpty() || versionRange == null && features.isEmpty() == false;
+ Version[] versions = parseVersionRange(versionRange);
+ this.lowerVersion = versions[0];
+ this.upperVersion = versions[1];
+ this.features = features;
+ this.reason = reason;
+ }
+
+ public Version getLowerVersion() {
+ return lowerVersion;
+ }
+
+ public Version getUpperVersion() {
+ return upperVersion;
+ }
+
+ public List<String> getFeatures() {
+ return features;
+ }
+
+ public String getReason() {
+ return reason;
+ }
+
+ public boolean skip(Version currentVersion) {
+ if (isEmpty()) {
+ return false;
+ }
+ if (isVersionCheck()) {
+ return currentVersion.onOrAfter(lowerVersion) && currentVersion.onOrBefore(upperVersion);
+ } else {
+ return Features.areAllSupported(features) == false;
+ }
+ }
+
+ public boolean isVersionCheck() {
+ return features.isEmpty();
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+
+ private Version[] parseVersionRange(String versionRange) {
+ if (versionRange == null) {
+ return new Version[] { null, null };
+ }
+ if (versionRange.trim().equals("all")) {
+ return new Version[]{VersionUtils.getFirstVersion(), Version.CURRENT};
+ }
+ String[] skipVersions = versionRange.split("-");
+ if (skipVersions.length > 2) {
+ throw new IllegalArgumentException("version range malformed: " + versionRange);
+ }
+
+ String lower = skipVersions[0].trim();
+ String upper = skipVersions[1].trim();
+ return new Version[] {
+ lower.isEmpty() ? VersionUtils.getFirstVersion() : Version.fromString(lower),
+ upper.isEmpty() ? Version.CURRENT : Version.fromString(upper)
+ };
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/TestSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
new file mode 100644
index 0000000000..def613b1c5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/section/TestSection.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import com.google.common.collect.Lists;
+
+import java.util.List;
+
+/**
+ * Represents a test section, which is composed of a skip section and multiple executable sections.
+ */
+public class TestSection implements Comparable<TestSection> {
+ private final String name;
+ private SkipSection skipSection;
+ private final List<ExecutableSection> executableSections;
+
+ public TestSection(String name) {
+ this.name = name;
+ this.executableSections = Lists.newArrayList();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<ExecutableSection> getExecutableSections() {
+ return executableSections;
+ }
+
+ public void addExecutableSection(ExecutableSection executableSection) {
+ this.executableSections.add(executableSection);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TestSection that = (TestSection) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name != null ? name.hashCode() : 0;
+ }
+
+ @Override
+ public int compareTo(TestSection o) {
+ return name.compareTo(o.getName());
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java b/core/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
new file mode 100644
index 0000000000..0996df45b4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/spec/RestApi.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
/**
 * Represents an elasticsearch REST endpoint (api)
 */
public class RestApi {

    private final String name;
    private List<String> methods = Lists.newArrayList();
    private List<String> paths = Lists.newArrayList();
    private List<String> pathParts = Lists.newArrayList();
    private List<String> params = Lists.newArrayList();
    private BODY body = BODY.NOT_SUPPORTED;

    // whether the api accepts a request body
    public static enum BODY {
        NOT_SUPPORTED, OPTIONAL, REQUIRED
    }

    RestApi(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public List<String> getMethods() {
        return methods;
    }

    /**
     * Returns the supported http methods given the rest parameters provided
     */
    public List<String> getSupportedMethods(Set<String> restParams) {
        //we try to avoid hardcoded mappings but the index api is the exception
        if ("index".equals(name) || "create".equals(name)) {
            List<String> indexMethods = Lists.newArrayList();
            for (String method : methods) {
                if (restParams.contains("id")) {
                    //PUT when the id is provided
                    if (HttpPut.METHOD_NAME.equals(method)) {
                        indexMethods.add(method);
                    }
                } else {
                    //POST without id
                    if (HttpPost.METHOD_NAME.equals(method)) {
                        indexMethods.add(method);
                    }
                }
            }
            return indexMethods;
        }

        return methods;
    }

    void addMethod(String method) {
        this.methods.add(method);
    }

    public List<String> getPaths() {
        return paths;
    }

    void addPath(String path) {
        this.paths.add(path);
    }

    public List<String> getPathParts() {
        return pathParts;
    }

    void addPathPart(String pathPart) {
        this.pathParts.add(pathPart);
    }

    public List<String> getParams() {
        return params;
    }

    void addParam(String param) {
        this.params.add(param);
    }

    void setBodyOptional() {
        this.body = BODY.OPTIONAL;
    }

    void setBodyRequired() {
        this.body = BODY.REQUIRED;
    }

    public boolean isBodySupported() {
        return body != BODY.NOT_SUPPORTED;
    }

    public boolean isBodyRequired() {
        return body == BODY.REQUIRED;
    }

    /**
     * Finds the best matching rest path given the current parameters and replaces
     * placeholders with their corresponding values received as arguments
     *
     * @throws IllegalArgumentException when no path matches the given params, or when
     *         a placeholder in a matching path has no corresponding value
     */
    public String[] getFinalPaths(Map<String, String> pathParams) {

        List<RestPath> matchingRestPaths = findMatchingRestPaths(pathParams.keySet());
        if (matchingRestPaths == null || matchingRestPaths.isEmpty()) {
            throw new IllegalArgumentException("unable to find matching rest path for api [" + name + "] and path params " + pathParams);
        }

        String[] paths = new String[matchingRestPaths.size()];
        for (int i = 0; i < matchingRestPaths.size(); i++) {
            RestPath restPath = matchingRestPaths.get(i);
            String path = restPath.path;
            for (Map.Entry<String, String> paramEntry : restPath.parts.entrySet()) {
                // replace path placeholders with actual values
                String value = pathParams.get(paramEntry.getValue());
                if (value == null) {
                    throw new IllegalArgumentException("parameter [" + paramEntry.getValue() + "] missing");
                }
                path = path.replace(paramEntry.getKey(), value);
            }
            paths[i] = path;
        }
        return paths;
    }

    /**
     * Finds the matching rest paths out of the available ones with the current api (based on REST spec).
     *
     * The best path is the one that has exactly the same number of placeholders to replace
     * (e.g. /{index}/{type}/{id} when the path params are exactly index, type and id).
     */
    private List<RestPath> findMatchingRestPaths(Set<String> restParams) {

        List<RestPath> matchingRestPaths = Lists.newArrayList();
        RestPath[] restPaths = buildRestPaths();

        for (RestPath restPath : restPaths) {
            // a path matches when its placeholder names are exactly the provided param names
            if (restPath.parts.size() == restParams.size()) {
                if (restPath.parts.values().containsAll(restParams)) {
                    matchingRestPaths.add(restPath);
                }
            }
        }

        return matchingRestPaths;
    }

    private RestPath[] buildRestPaths() {
        RestPath[] restPaths = new RestPath[paths.size()];
        for (int i = 0; i < restPaths.length; i++) {
            restPaths[i] = new RestPath(paths.get(i));
        }
        return restPaths;
    }

    // A single path template (e.g. /{index}/{type}/{id}) together with its extracted placeholders.
    private static class RestPath {
        // group 1: the whole placeholder including braces, group 2: the bare name inside them
        private static final Pattern PLACEHOLDERS_PATTERN = Pattern.compile("(\\{(.*?)})");

        final String path;
        //contains param to replace (e.g. {index}) and param key to use for lookup in the current values map (e.g. index)
        final Map<String, String> parts;

        RestPath(String path) {
            this.path = path;
            this.parts = extractParts(path);
        }

        private static Map<String,String> extractParts(String input) {
            Map<String, String> parts = Maps.newHashMap();
            Matcher matcher = PLACEHOLDERS_PATTERN.matcher(input);
            while (matcher.find()) {
                //key is e.g. {index}
                String key = input.substring(matcher.start(), matcher.end());
                // groupCount() is a property of the pattern (always 2 here),
                // so this is a defensive check rather than a per-match one
                if (matcher.groupCount() != 2) {
                    throw new IllegalArgumentException("no lookup key found for param [" + key + "]");
                }
                //to be replaced with current value found with key e.g. index
                String value = matcher.group(2);
                parts.put(key, value);
            }
            return parts;
        }
    }
}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/core/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
new file mode 100644
index 0000000000..0328e4c87d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/spec/RestApiParser.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
/**
 * Parser for a REST api spec (single json file)
 */
public class RestApiParser {

    /**
     * Parses a single api spec into a {@link RestApi} instance.
     * The provided parser is always closed before returning (see the finally block).
     */
    public RestApi parse(XContentParser parser) throws IOException {

        try {
            while ( parser.nextToken() != XContentParser.Token.FIELD_NAME ) {
                //move to first field name
            }

            // the single top-level field name is the api name
            RestApi restApi = new RestApi(parser.currentName());

            // level tracks nested object depth so the loop only terminates at the
            // END_OBJECT that closes the api object itself
            int level = -1;
            while (parser.nextToken() != XContentParser.Token.END_OBJECT || level >= 0) {

                if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
                    if ("methods".equals(parser.currentName())) {
                        parser.nextToken();
                        while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
                            restApi.addMethod(parser.text());
                        }
                    }

                    if ("url".equals(parser.currentName())) {
                        String currentFieldName = "url";
                        // innerLevel plays the same depth-tracking role as level, scoped to the url object
                        int innerLevel = -1;
                        while(parser.nextToken() != XContentParser.Token.END_OBJECT || innerLevel >= 0) {
                            if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
                                currentFieldName = parser.currentName();
                            }

                            if (parser.currentToken() == XContentParser.Token.START_ARRAY && "paths".equals(currentFieldName)) {
                                while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
                                    restApi.addPath(parser.text());
                                }
                            }

                            if (parser.currentToken() == XContentParser.Token.START_OBJECT && "parts".equals(currentFieldName)) {
                                while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
                                    restApi.addPathPart(parser.currentName());
                                    parser.nextToken();
                                    if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
                                        throw new IOException("Expected parts field in rest api definition to contain an object");
                                    }
                                    // only the part names matter; skip each part's description object
                                    parser.skipChildren();
                                }
                            }

                            if (parser.currentToken() == XContentParser.Token.START_OBJECT && "params".equals(currentFieldName)) {
                                while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
                                    restApi.addParam(parser.currentName());
                                    parser.nextToken();
                                    if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
                                        throw new IOException("Expected params field in rest api definition to contain an object");
                                    }
                                    // only the param names matter; skip each param's description object
                                    parser.skipChildren();
                                }
                            }

                            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
                                innerLevel++;
                            }
                            if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
                                innerLevel--;
                            }
                        }
                    }

                    if ("body".equals(parser.currentName())) {
                        parser.nextToken();
                        if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
                            // the body object may declare "required": true/false;
                            // when absent the body is considered optional
                            boolean requiredFound = false;
                            while(parser.nextToken() != XContentParser.Token.END_OBJECT) {
                                if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
                                    if ("required".equals(parser.currentName())) {
                                        requiredFound = true;
                                        parser.nextToken();
                                        if (parser.booleanValue()) {
                                            restApi.setBodyRequired();
                                        } else {
                                            restApi.setBodyOptional();
                                        }
                                    }
                                }
                            }
                            if (!requiredFound) {
                                restApi.setBodyOptional();
                            }
                        }
                    }
                }

                if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
                    level++;
                }
                if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
                    level--;
                }

            }

            // consume the END_OBJECT closing the whole spec file
            parser.nextToken();
            assert parser.currentToken() == XContentParser.Token.END_OBJECT : "Expected [END_OBJECT] but was [" + parser.currentToken() +"]";
            parser.nextToken();

            return restApi;

        } finally {
            parser.close();
        }
    }

}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java b/core/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
new file mode 100644
index 0000000000..979bacc26c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/spec/RestSpec.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import com.google.common.collect.Maps;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.support.FileUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.FileSystem;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Holds the elasticsearch REST spec
+ */
+public class RestSpec {
+ Map<String, RestApi> restApiMap = Maps.newHashMap();
+
+ private RestSpec() {
+ }
+
+ void addApi(RestApi restApi) {
+ restApiMap.put(restApi.getName(), restApi);
+ }
+
+ public RestApi getApi(String api) {
+ return restApiMap.get(api);
+ }
+
+ public Collection<RestApi> getApis() {
+ return restApiMap.values();
+ }
+
+ /**
+ * Parses the complete set of REST spec available under the provided directories
+ */
+ public static RestSpec parseFrom(FileSystem fileSystem, String optionalPathPrefix, String... paths) throws IOException {
+ RestSpec restSpec = new RestSpec();
+ for (String path : paths) {
+ for (Path jsonFile : FileUtils.findJsonSpec(fileSystem, optionalPathPrefix, path)) {
+ try (InputStream stream = Files.newInputStream(jsonFile)) {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(stream);
+ RestApi restApi = new RestApiParser().parse(parser);
+ restSpec.addApi(restApi);
+ } catch (Throwable ex) {
+ throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
+ }
+ }
+ }
+ return restSpec;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/support/Features.java b/core/src/test/java/org/elasticsearch/test/rest/support/Features.java
new file mode 100644
index 0000000000..68e0848a1a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/support/Features.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.List;
+
+/**
+ * Allows to register additional features supported by the tests runner.
+ * This way any runner can add extra features and use proper skip sections to avoid
+ * breaking others runners till they have implemented the new feature as well.
+ *
+ * Once all runners have implemented the feature, it can be removed from the list
+ * and the related skip sections can be removed from the tests as well.
+ */
+public final class Features {
+
+ private static final List<String> SUPPORTED = Lists.newArrayList("stash_in_path", "groovy_scripting");
+
+ private Features() {
+
+ }
+
+ /**
+ * Tells whether all the features provided as argument are supported
+ */
+ public static boolean areAllSupported(List<String> features) {
+ for (String feature : features) {
+ if ("requires_replica".equals(feature) && ElasticsearchIntegrationTest.cluster().numDataNodes() >= 2) {
+ continue;
+ }
+ if (!SUPPORTED.contains(feature)) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java b/core/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
new file mode 100644
index 0000000000..5e230a6f99
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/support/FileUtils.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileSystem;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.NotDirectoryException;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+public final class FileUtils {
+
+ private static final String YAML_SUFFIX = ".yaml";
+ private static final String JSON_SUFFIX = ".json";
+
+ private FileUtils() {
+
+ }
+
+ /**
+ * Returns the json files found within the directory provided as argument.
+ * Files are looked up in the classpath, or optionally from {@code fileSystem} if its not null.
+ */
+ public static Set<Path> findJsonSpec(FileSystem fileSystem, String optionalPathPrefix, String path) throws IOException {
+ Path dir = resolveFile(fileSystem, optionalPathPrefix, path, null);
+
+ if (!Files.isDirectory(dir)) {
+ throw new NotDirectoryException(path);
+ }
+
+ Set<Path> jsonFiles = new HashSet<>();
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
+ for (Path item : stream) {
+ if (item.toString().endsWith(JSON_SUFFIX)) {
+ jsonFiles.add(item);
+ }
+ }
+ }
+
+ if (jsonFiles.isEmpty()) {
+ throw new NoSuchFileException(path, null, "no json files found");
+ }
+
+ return jsonFiles;
+ }
+
+ /**
+ * Returns the yaml files found within the paths provided.
+ * Each input path can either be a single file (the .yaml suffix is optional) or a directory.
+ * Each path is looked up in the classpath, or optionally from {@code fileSystem} if its not null.
+ */
+ public static Map<String, Set<Path>> findYamlSuites(FileSystem fileSystem, String optionalPathPrefix, final String... paths) throws IOException {
+ Map<String, Set<Path>> yamlSuites = Maps.newHashMap();
+ for (String path : paths) {
+ collectFiles(resolveFile(fileSystem, optionalPathPrefix, path, YAML_SUFFIX), YAML_SUFFIX, yamlSuites);
+ }
+ return yamlSuites;
+ }
+
+ private static Path resolveFile(FileSystem fileSystem, String optionalPathPrefix, String path, String optionalFileSuffix) throws IOException {
+ if (fileSystem != null) {
+ Path file = findFile(fileSystem, path, optionalFileSuffix);
+ if (!lenientExists(file)) {
+ // try with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ file = findFile(fileSystem, newPath, optionalFileSuffix);
+ if (!lenientExists(file)) {
+ throw new NoSuchFileException(path);
+ }
+ }
+ return file;
+ } else {
+ //try within classpath
+ URL resource = findResource(path, optionalFileSuffix);
+ if (resource == null) {
+ //try within classpath with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ resource = findResource(newPath, optionalFileSuffix);
+ if (resource == null) {
+ throw new NoSuchFileException(path);
+ }
+ }
+ try {
+ return PathUtils.get(resource.toURI());
+ } catch (Exception e) {
+ // some filesystems have REALLY useless exceptions here.
+ // ZipFileSystem I am looking at you.
+ throw new RuntimeException("couldn't retrieve URL: " + resource, e);
+ }
+ }
+ }
+
+ private static URL findResource(String path, String optionalFileSuffix) {
+ URL resource = FileUtils.class.getResource(path);
+ if (resource == null) {
+ //if not found we append the file suffix to the path (as it is optional)
+ if (Strings.hasLength(optionalFileSuffix) && !path.endsWith(optionalFileSuffix)) {
+ resource = FileUtils.class.getResource(path + optionalFileSuffix);
+ }
+ }
+ return resource;
+ }
+
+ // used because this test "guesses" from like 4 different places from the filesystem!
+ private static boolean lenientExists(Path file) {
+ boolean exists = false;
+ try {
+ exists = Files.exists(file);
+ } catch (SecurityException ok) {}
+ return exists;
+ }
+
+ private static Path findFile(FileSystem fileSystem, String path, String optionalFileSuffix) {
+ Path file = fileSystem.getPath(path);
+ if (!lenientExists(file)) {
+ file = fileSystem.getPath(path + optionalFileSuffix);
+ }
+ return file;
+ }
+
+ private static void collectFiles(final Path dir, final String fileSuffix, final Map<String, Set<Path>> files) throws IOException {
+ Files.walkFileTree(dir, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ if (file.toString().endsWith(fileSuffix)) {
+ String groupName = file.toAbsolutePath().getParent().getFileName().toString();
+ Set<Path> filesSet = files.get(groupName);
+ if (filesSet == null) {
+ filesSet = Sets.newHashSet();
+ files.put(groupName, filesSet);
+ }
+ filesSet.add(file);
+ }
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
new file mode 100644
index 0000000000..1a84178e3c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.After;
+import org.junit.Ignore;
+
+import static org.hamcrest.Matchers.nullValue;
+
+@Ignore
+public abstract class AbstractParserTests extends ElasticsearchTestCase {
+
+ protected XContentParser parser;
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ //this is the way to make sure that we consumed the whole yaml
+ assertThat(parser.currentToken(), nullValue());
+ parser.close();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
new file mode 100644
index 0000000000..ac1090e01d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.*;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class AssertionParsersTests extends AbstractParserTests {
+
+ @Test
+ public void testParseIsTrue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get.fields._timestamp"
+ );
+
+ IsTrueParser isTrueParser = new IsTrueParser();
+ IsTrueAssertion trueAssertion = isTrueParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(trueAssertion, notNullValue());
+ assertThat(trueAssertion.getField(), equalTo("get.fields._timestamp"));
+ }
+
+ @Test
+ public void testParseIsFalse() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "docs.1._source"
+ );
+
+ IsFalseParser isFalseParser = new IsFalseParser();
+ IsFalseAssertion falseAssertion = isFalseParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(falseAssertion, notNullValue());
+ assertThat(falseAssertion.getField(), equalTo("docs.1._source"));
+ }
+
+ @Test
+ public void testParseGreaterThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ GreaterThanParser greaterThanParser = new GreaterThanParser();
+ GreaterThanAssertion greaterThanAssertion = greaterThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ assertThat(greaterThanAssertion, notNullValue());
+ assertThat(greaterThanAssertion.getField(), equalTo("field"));
+ assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLessThan() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 3}"
+ );
+
+ LessThanParser lessThanParser = new LessThanParser();
+ LessThanAssertion lessThanAssertion = lessThanParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ assertThat(lessThanAssertion, notNullValue());
+ assertThat(lessThanAssertion.getField(), equalTo("field"));
+ assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(3));
+ }
+
+ @Test
+ public void testParseLength() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: 22}"
+ );
+
+ LengthParser lengthParser = new LengthParser();
+ LengthAssertion lengthAssertion = lengthParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ assertThat(lengthAssertion, notNullValue());
+ assertThat(lengthAssertion.getField(), equalTo("_id"));
+ assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(22));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleIntegerValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ field: 10 }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("field"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Integer.class));
+ assertThat((Integer) matchAssertion.getExpectedValue(), equalTo(10));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSimpleStringValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ foo: bar }"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("foo"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(String.class));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("bar"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchArray() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{'matches': ['test_percolator_1', 'test_percolator_2']}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("matches"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(List.class));
+ List strings = (List) matchAssertion.getExpectedValue();
+ assertThat(strings.size(), equalTo(2));
+ assertThat(strings.get(0).toString(), equalTo("test_percolator_1"));
+ assertThat(strings.get(1).toString(), equalTo("test_percolator_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testParseMatchSourceValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _source: { responses.0.hits.total: 3, foo: bar }}"
+ );
+
+ MatchParser matchParser = new MatchParser();
+ MatchAssertion matchAssertion = matchParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(matchAssertion, notNullValue());
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ Map<String, Object> expectedValue = (Map<String, Object>) matchAssertion.getExpectedValue();
+ assertThat(expectedValue.size(), equalTo(2));
+ Object o = expectedValue.get("responses.0.hits.total");
+ assertThat(o, instanceOf(Integer.class));
+ assertThat((Integer)o, equalTo(3));
+ o = expectedValue.get("foo");
+ assertThat(o, instanceOf(String.class));
+ assertThat(o.toString(), equalTo("bar"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
new file mode 100644
index 0000000000..911bb8999b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
@@ -0,0 +1,393 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.DoSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.hamcrest.MatcherAssert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class DoSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseDoSectionNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "get:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " id: 1"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("get"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_index"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test_type"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionNoParamsNoBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "cluster.node_info: {}"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("cluster.node_info"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(false));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonBody() throws Exception {
+ String body = "{ \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }";
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: " + body
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesAsLongString() throws Exception {
+ String bodies[] = new String[]{
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }\n",
+ "{ \"f1\":\"v1\", \"f2\":42 }\n",
+ "{ \"index\": { \"_index\":\"test_index2\", \"_type\":\"test_type2\", \"_id\":\"test_id2\" } }\n",
+ "{ \"f1\":\"v2\", \"f2\":47 }\n"
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: |\n" +
+ " " + bodies[0] +
+ " " + bodies[1] +
+ " " + bodies[2] +
+ " " + bodies[3]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(4));
+ }
+
+ @Test
+ public void testParseDoSectionWithJsonMultipleBodiesRepeatedProperty() throws Exception {
+ String[] bodies = new String[] {
+ "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }",
+ "{ \"f1\":\"v1\", \"f2\":42 }",
+ };
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body: \n" +
+ " " + bodies[0] + "\n" +
+ " body: \n" +
+ " " + bodies[1]
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBody() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "search:\n" +
+ " body:\n" +
+ " _source: [ include.field1, include.field2 ]\n" +
+ " query: { match_all: {} }"
+ );
+ String body = "{ \"_source\": [ \"include.field1\", \"include.field2\" ], \"query\": { \"match_all\": {} }}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("search"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodies() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " - index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " - f1: v1\n" +
+ " f2: 42\n" +
+ " - index:\n" +
+ " _index: test_index2\n" +
+ " _type: test_type2\n" +
+ " _id: test_id2\n" +
+ " - f1: v2\n" +
+ " f2: 47"
+ );
+ String[] bodies = new String[4];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+ bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_type\": \"test_type2\", \"_id\": \"test_id2\"}}";
+ bodies[3] = "{ \"f1\":\"v2\", \"f2\": 47 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlMultipleBodiesRepeatedProperty() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "bulk:\n" +
+ " refresh: true\n" +
+ " body:\n" +
+ " index:\n" +
+ " _index: test_index\n" +
+ " _type: test_type\n" +
+ " _id: test_id\n" +
+ " body:\n" +
+ " f1: v1\n" +
+ " f2: 42\n"
+ );
+ String[] bodies = new String[2];
+ bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}";
+ bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("bulk"));
+ assertThat(apiCallSection.getParams().size(), equalTo(1));
+ assertThat(apiCallSection.getParams().get("refresh"), equalTo("true"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(bodies.length));
+
+ for (int i = 0; i < bodies.length; i++) {
+ assertJsonEquals(apiCallSection.getBodies().get(i), bodies[i]);
+ }
+ }
+
+ @Test
+ public void testParseDoSectionWithYamlBodyMultiGet() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "mget:\n" +
+ " body:\n" +
+ " docs:\n" +
+ " - { _index: test_2, _type: test, _id: 1}\n" +
+ " - { _index: test_1, _type: none, _id: 1}"
+ );
+ String body = "{ \"docs\": [ " +
+ "{\"_index\": \"test_2\", \"_type\":\"test\", \"_id\":1}, " +
+ "{\"_index\": \"test_1\", \"_type\":\"none\", \"_id\":1} " +
+ "]}";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("mget"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ assertJsonEquals(apiCallSection.getBodies().get(0), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithBodyStringified() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: \"{ _source: true, query: { match_all: {} } }\""
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection, notNullValue());
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(3));
+ assertThat(apiCallSection.getParams().get("index"), equalTo("test_1"));
+ assertThat(apiCallSection.getParams().get("type"), equalTo("test"));
+ assertThat(apiCallSection.getParams().get("id"), equalTo("1"));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(1));
+ //stringified body is taken as is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ }
+
+ @Test
+ public void testParseDoSectionWithBodiesStringifiedAndNot() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "index:\n" +
+ " body:\n" +
+ " - \"{ _source: true, query: { match_all: {} } }\"\n" +
+ " - { size: 100, query: { match_all: {} } }"
+ );
+
+ String body = "{ \"size\": 100, \"query\": { \"match_all\": {} } }";
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ ApiCallSection apiCallSection = doSection.getApiCallSection();
+
+ assertThat(apiCallSection.getApi(), equalTo("index"));
+ assertThat(apiCallSection.getParams().size(), equalTo(0));
+ assertThat(apiCallSection.hasBody(), equalTo(true));
+ assertThat(apiCallSection.getBodies().size(), equalTo(2));
+ //stringified body is taken as is
+ assertJsonEquals(apiCallSection.getBodies().get(0), "{ _source: true, query: { match_all: {} } }");
+ assertJsonEquals(apiCallSection.getBodies().get(1), body);
+ }
+
+ @Test
+ public void testParseDoSectionWithCatch() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n" +
+ "indices.get_warmer:\n" +
+ " index: test_index\n" +
+ " name: test_warmer"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ }
+
+ @Test (expected = RestTestParseException.class)
+ public void testParseDoSectionWithoutClientCallSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "catch: missing\n"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+
+ @Test
+ public void testParseDoSectionMultivaluedField() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "indices.get_field_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ " field: [ text , text1 ]"
+ );
+
+ DoSectionParser doSectionParser = new DoSectionParser();
+ DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection(), notNullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(doSection.getApiCallSection().getParams().get("field"), equalTo("text,text1"));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0));
+ }
+
+ private static void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {
+ Map<String,Object> expectedMap = JsonXContent.jsonXContent.createParser(expected).mapOrderedAndClose();
+ MatcherAssert.assertThat(actual, equalTo(expectedMap));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
new file mode 100644
index 0000000000..10db051e2b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.Test;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.Matchers.greaterThan;
+
+// Tests for FileUtils.findYamlSuites: resolving yaml REST test suites from
+// the classpath and from the file system, addressed either as directories
+// or as single suite files (with an optional .yaml extension).
+public class FileUtilsTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testLoadSingleYamlSuite() throws Exception {
+ Map<String,Set<Path>> yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "/rest-api-spec/test/get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //the path prefix is optional
+ yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "get/10_basic.yaml");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+
+ //extension .yaml is optional
+ yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "get/10_basic");
+ assertSingleFile(yamlSuites, "get", "10_basic.yaml");
+ }
+
+ @Test
+ public void testLoadMultipleYamlSuites() throws Exception {
+ //single directory
+ Map<String,Set<Path>> yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "get");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+
+ //multiple directories
+ yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "get", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("get"), equalTo(true));
+ assertThat(yamlSuites.get("get").size(), greaterThan(1));
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //multiple paths, which can be both directories or yaml test suites (with optional file extension)
+ yamlSuites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "indices.optimize/10_basic", "index");
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(2));
+ assertThat(yamlSuites.containsKey("indices.optimize"), equalTo(true));
+ assertThat(yamlSuites.get("indices.optimize").size(), equalTo(1));
+ assertSingleFile(yamlSuites.get("indices.optimize"), "indices.optimize", "10_basic.yaml");
+ assertThat(yamlSuites.containsKey("index"), equalTo(true));
+ assertThat(yamlSuites.get("index").size(), greaterThan(1));
+
+ //files can be loaded from classpath and from file system too
+ Path dir = createTempDir();
+ Path file = dir.resolve("test_loading.yaml");
+ Files.createFile(file);
+
+ //load from directory outside of the classpath
+ yamlSuites = FileUtils.findYamlSuites(dir.getFileSystem(), "/rest-api-spec/test", dir.toAbsolutePath().toString());
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey(dir.getFileName().toString()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getFileName().toString()), dir.getFileName().toString(), file.getFileName().toString());
+
+ //load from external file (optional extension)
+ yamlSuites = FileUtils.findYamlSuites(dir.getFileSystem(), "/rest-api-spec/test", dir.resolve("test_loading").toAbsolutePath().toString());
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey(dir.getFileName().toString()), equalTo(true));
+ assertSingleFile(yamlSuites.get(dir.getFileName().toString()), dir.getFileName().toString(), file.getFileName().toString());
+ }
+
+ // Asserts the map holds exactly one directory entry (dirName) containing
+ // a single suite file named fileName.
+ private static void assertSingleFile(Map<String, Set<Path>> yamlSuites, String dirName, String fileName) {
+ assertThat(yamlSuites, notNullValue());
+ assertThat(yamlSuites.size(), equalTo(1));
+ assertThat(yamlSuites.containsKey(dirName), equalTo(true));
+ assertSingleFile(yamlSuites.get(dirName), dirName, fileName);
+ }
+
+ // Asserts the set holds exactly one file with the given name whose parent
+ // directory is dirName.
+ private static void assertSingleFile(Set<Path> files, String dirName, String fileName) {
+ assertThat(files.size(), equalTo(1));
+ Path file = files.iterator().next();
+ assertThat(file.getFileName().toString(), equalTo(fileName));
+ assertThat(file.toAbsolutePath().getParent().getFileName().toString(), equalTo(dirName));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
new file mode 100644
index 0000000000..73892ee4c7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.Stash;
+import org.elasticsearch.test.rest.json.JsonPath;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.*;
+
+// Tests for JsonPath: dot-separated path evaluation over parsed JSON,
+// covering escaped dots, empty/trailing path segments, scalar types,
+// arrays (by index), object key sets, the empty path, and stash
+// ($placeholder) resolution in property names.
+public class JsonPathTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testEvaluateObjectPathEscape() throws Exception {
+ // a backslash-escaped dot ("field2\.field3") addresses a key that
+ // itself contains a dot
+ String json = "{ \"field1\": { \"field2.field3\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2\\.field3");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathWithDoubleDot() throws Exception {
+ // the empty segment produced by ".." is ignored
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1..field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateObjectPathEndsWithDot() throws Exception {
+ // a trailing dot is tolerated and ignored
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2.");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateString() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : \"value2\" } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateInteger() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 333 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Integer.class));
+ assertThat((Integer)object, equalTo(333));
+ }
+
+ @Test
+ public void testEvaluateDouble() throws Exception {
+ String json = "{ \"field1\": { \"field2\" : 3.55 } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.field2");
+ assertThat(object, instanceOf(Double.class));
+ assertThat((Double)object, equalTo(3.55));
+ }
+
+ @Test
+ public void testEvaluateArray() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1");
+ assertThat(object, instanceOf(List.class));
+ List list = (List) object;
+ assertThat(list.size(), equalTo(2));
+ assertThat(list.get(0), instanceOf(String.class));
+ assertThat((String)list.get(0), equalTo("value1"));
+ assertThat(list.get(1), instanceOf(String.class));
+ assertThat((String)list.get(1), equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElement() throws Exception {
+ // a numeric path segment indexes into an array (0-based)
+ String json = "{ \"field1\": { \"array1\" : [ \"value1\", \"value2\" ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObject() throws Exception {
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array1.1.element");
+ assertThat(object, instanceOf(String.class));
+ assertThat((String)object, equalTo("value2"));
+ }
+
+ @Test
+ public void testEvaluateArrayElementObjectWrongPath() throws Exception {
+ // a non-existent path yields null rather than throwing
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("field1.array2.1.element");
+ assertThat(object, nullValue());
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateObjectKeys() throws Exception {
+ String json = "{ \"metadata\": { \"templates\" : {\"template_1\": { \"field\" : \"value\"}, \"template_2\": { \"field\" : \"value\"} } } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("metadata.templates");
+ assertThat(object, instanceOf(Map.class));
+ Map<String, Object> map = (Map<String, Object>)object;
+ assertThat(map.size(), equalTo(2));
+ Set<String> strings = map.keySet();
+ assertThat(strings, contains("template_1", "template_2"));
+ }
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testEvaluateEmptyPath() throws Exception {
+ // the empty path returns the whole parsed document
+ String json = "{ \"field1\": { \"array1\" : [ {\"element\": \"value1\"}, {\"element\":\"value2\"} ] } }";
+ JsonPath jsonPath = new JsonPath(json);
+ Object object = jsonPath.evaluate("");
+ assertThat(object, notNullValue());
+ assertThat(object, instanceOf(Map.class));
+ assertThat(((Map<String, Object>)object).containsKey("field1"), equalTo(true));
+ }
+
+ @Test
+ public void testEvaluateStashInPropertyName() throws Exception {
+ // a $placeholder segment must fail without a stash entry, and resolve
+ // through the Stash once the value is stashed
+ String json = "{ \"field1\": { \"elements\" : {\"element1\": \"value1\"}}}";
+ JsonPath jsonPath = new JsonPath(json);
+ try {
+ jsonPath.evaluate("field1.$placeholder.element1");
+ fail("evaluate should have failed due to unresolved placeholder");
+ } catch(IllegalArgumentException e) {
+ assertThat(e.getMessage(), containsString("stashed value not found for key [$placeholder]"));
+ }
+
+ Stash stash = new Stash();
+ stash.stashValue("placeholder", "elements");
+ Object object = jsonPath.evaluate("field1.$placeholder.element1", stash);
+ assertThat(object, notNullValue());
+ assertThat(object.toString(), equalTo("value1"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
new file mode 100644
index 0000000000..c86660f5c8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.containsString;
+
+/**
+ *
+ */
+/**
+ * Checks that RestApiParser fails with a useful error message when fed a
+ * broken REST api spec (malformed "params" or "parts" sections).
+ */
+public class RestApiParserFailingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParams() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARAMS, "Expected params field in rest api definition to contain an object");
+ }
+
+ @Test
+ public void brokenSpecShouldThrowUsefulExceptionWhenParsingFailsOnParts() throws Exception {
+ parseAndExpectFailure(BROKEN_SPEC_PARTS, "Expected parts field in rest api definition to contain an object");
+ }
+
+ // Parses the given broken spec and asserts that parsing fails with an
+ // IOException whose message contains expectedErrorMessage.
+ private void parseAndExpectFailure(String brokenJson, String expectedErrorMessage) throws Exception {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(brokenJson);
+ try {
+ new RestApiParser().parse(parser);
+ fail("Expected to fail parsing but did not happen");
+ } catch (IOException e) {
+ assertThat(e.getMessage(), containsString(expectedErrorMessage));
+ }
+
+ }
+
+ // broken params section: the values appear directly under "params"
+ // instead of inside a named param object
+ private static final String BROKEN_SPEC_PARAMS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " }," +
+ " \"params\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " }" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+ // broken parts section: the values appear directly under "parts"
+ // instead of inside a named part object
+ private static final String BROKEN_SPEC_PARTS = "{\n" +
+ " \"ping\": {" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/\"," +
+ " \"methods\": [\"HEAD\"]," +
+ " \"url\": {" +
+ " \"path\": \"/\"," +
+ " \"paths\": [\"/\"]," +
+ " \"parts\": {" +
+ " \"type\" : \"boolean\",\n" +
+ " }," +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }," +
+ " \"body\": null" +
+ " }" +
+ "}";
+
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
new file mode 100644
index 0000000000..5558041505
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestApiParser;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+// Tests for RestApiParser against three representative REST api specs
+// (index, indices.get_template, count), checking name, methods, paths,
+// path parts, params and body support/requirement.
+// NOTE(review): the "parser" field is inherited from AbstractParserTests.
+public class RestApiParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseRestSpecIndexApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_INDEX_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("index"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("PUT"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/{index}/{type}"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/{type}/{id}"));
+ assertThat(restApi.getPathParts().size(), equalTo(3));
+ assertThat(restApi.getPathParts().get(0), equalTo("id"));
+ assertThat(restApi.getPathParts().get(1), equalTo("index"));
+ assertThat(restApi.getPathParts().get(2), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(4));
+ assertThat(restApi.getParams(), contains("consistency", "op_type", "parent", "refresh"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(true));
+ }
+
+ @Test
+ public void testParseRestSpecGetTemplateApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_GET_TEMPLATE_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("indices.get_template"));
+ assertThat(restApi.getMethods().size(), equalTo(1));
+ assertThat(restApi.getMethods().get(0), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(2));
+ assertThat(restApi.getPaths().get(0), equalTo("/_template"));
+ assertThat(restApi.getPaths().get(1), equalTo("/_template/{name}"));
+ assertThat(restApi.getPathParts().size(), equalTo(1));
+ assertThat(restApi.getPathParts().get(0), equalTo("name"));
+ assertThat(restApi.getParams().size(), equalTo(0));
+ assertThat(restApi.isBodySupported(), equalTo(false));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ @Test
+ public void testParseRestSpecCountApi() throws Exception {
+ parser = JsonXContent.jsonXContent.createParser(REST_SPEC_COUNT_API);
+ RestApi restApi = new RestApiParser().parse(parser);
+ assertThat(restApi, notNullValue());
+ assertThat(restApi.getName(), equalTo("count"));
+ assertThat(restApi.getMethods().size(), equalTo(2));
+ assertThat(restApi.getMethods().get(0), equalTo("POST"));
+ assertThat(restApi.getMethods().get(1), equalTo("GET"));
+ assertThat(restApi.getPaths().size(), equalTo(3));
+ assertThat(restApi.getPaths().get(0), equalTo("/_count"));
+ assertThat(restApi.getPaths().get(1), equalTo("/{index}/_count"));
+ assertThat(restApi.getPaths().get(2), equalTo("/{index}/{type}/_count"));
+ assertThat(restApi.getPathParts().size(), equalTo(2));
+ assertThat(restApi.getPathParts().get(0), equalTo("index"));
+ assertThat(restApi.getPathParts().get(1), equalTo("type"));
+ assertThat(restApi.getParams().size(), equalTo(1));
+ assertThat(restApi.getParams().get(0), equalTo("ignore_unavailable"));
+ assertThat(restApi.isBodySupported(), equalTo(true));
+ assertThat(restApi.isBodyRequired(), equalTo(false));
+ }
+
+ // count api fixture: two methods, three paths, two path parts, one
+ // param, optional body
+ private static final String REST_SPEC_COUNT_API = "{\n" +
+ " \"count\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-count.html\",\n" +
+ " \"methods\": [\"POST\", \"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_count\",\n" +
+ " \"paths\": [\"/_count\", \"/{index}/_count\", \"/{index}/{type}/_count\"],\n" +
+ " \"parts\": {\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of indices to restrict the results\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"list\",\n" +
+ " \"description\" : \"A comma-separated list of types to restrict the results\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " \"ignore_unavailable\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Whether specified concrete indices should be ignored when unavailable (missing or closed)\"\n" +
+ " } \n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"A query to restrict the results specified with the Query DSL (optional)\"\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+
+ // indices.get_template fixture: single method, one path part, empty
+ // params object, body explicitly null
+ private static final String REST_SPEC_GET_TEMPLATE_API = "{\n" +
+ " \"indices.get_template\": {\n" +
+ " \"documentation\": \"http://www.elasticsearch.org/guide/reference/api/admin-indices-templates/\",\n" +
+ " \"methods\": [\"GET\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/_template/{name}\",\n" +
+ " \"paths\": [\"/_template\", \"/_template/{name}\"],\n" +
+ " \"parts\": {\n" +
+ " \"name\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : false,\n" +
+ " \"description\" : \"The name of the template\"\n" +
+ " }\n" +
+ " },\n" +
+ " \"params\": {\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": null\n" +
+ " }\n" +
+ "}";
+
+ // index api fixture: POST/PUT, three path parts, four params, body
+ // supported and required
+ private static final String REST_SPEC_INDEX_API = "{\n" +
+ " \"index\": {\n" +
+ " \"documentation\": \"http://elasticsearch.org/guide/reference/api/index_/\",\n" +
+ " \"methods\": [\"POST\", \"PUT\"],\n" +
+ " \"url\": {\n" +
+ " \"path\": \"/{index}/{type}\",\n" +
+ " \"paths\": [\"/{index}/{type}\", \"/{index}/{type}/{id}\"],\n" +
+ " \"parts\": {\n" +
+ " \"id\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"Document ID\"\n" +
+ " },\n" +
+ " \"index\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The name of the index\"\n" +
+ " },\n" +
+ " \"type\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"required\" : true,\n" +
+ " \"description\" : \"The type of the document\"\n" +
+ " }\n" +
+ " } ,\n" +
+ " \"params\": {\n" +
+ " \"consistency\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"one\", \"quorum\", \"all\"],\n" +
+ " \"description\" : \"Explicit write consistency setting for the operation\"\n" +
+ " },\n" +
+ " \"op_type\": {\n" +
+ " \"type\" : \"enum\",\n" +
+ " \"options\" : [\"index\", \"create\"],\n" +
+ " \"default\" : \"index\",\n" +
+ " \"description\" : \"Explicit operation type\"\n" +
+ " },\n" +
+ " \"parent\": {\n" +
+ " \"type\" : \"string\",\n" +
+ " \"description\" : \"ID of the parent document\"\n" +
+ " },\n" +
+ " \"refresh\": {\n" +
+ " \"type\" : \"boolean\",\n" +
+ " \"description\" : \"Refresh the index after performing the operation\"\n" +
+ " }\n" +
+ " }\n" +
+ " },\n" +
+ " \"body\": {\n" +
+ " \"description\" : \"The document\",\n" +
+ " \"required\" : true\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
new file mode 100644
index 0000000000..34b2e9462f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.junit.After;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
+public class RestTestParserTests extends ElasticsearchTestCase {
+
+ private XContentParser parser;
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ // Ensure each test consumed the whole YAML stream: XContentParser doesn't expose an isClosed method,
+ // so we probe tokens instead. nextToken() can legitimately be null mid-document (e.g. at a "---"
+ // separator), but not many consecutive times, so two consecutive null checks suffice.
+ assertThat(parser.currentToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ assertThat(parser.nextToken(), nullValue());
+ parser.close();
+ }
+
+ /**
+  * Parses a suite made of a setup section followed by two test sections and
+  * verifies that the setup, skip and executable sections come back as parsed.
+  */
+ @Test
+ public void testParseTestSetupAndSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "setup:\n" +
+ " - do:\n" +
+ " indices.create:\n" +
+ " index: test_index\n" +
+ "\n" +
+ "---\n" +
+ "\"Get index mapping\":\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ "\n" +
+ " - match: {test_index.test_type.properties.text.type: string}\n" +
+ " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" +
+ "\n" +
+ "---\n" +
+ "\"Get type mapping - pre 1.0\":\n" +
+ "\n" +
+ " - skip:\n" +
+ " version: \"0.90.9 - \"\n" +
+ " reason: \"for newer versions the index name is always returned\"\n" +
+ "\n" +
+ " - do:\n" +
+ " indices.get_mapping:\n" +
+ " index: test_index\n" +
+ " type: test_type\n" +
+ "\n" +
+ " - match: {test_type.properties.text.type: string}\n" +
+ " - match: {test_type.properties.text.analyzer: whitespace}\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+ assertThat(restTestSuite.getSetupSection(), notNullValue());
+ assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getSetupSection().getDoSections().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getApi(), equalTo("indices.create"));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(restTestSuite.getSetupSection().getDoSections().get(0).getApiCallSection().getParams().get("index"), equalTo("test_index"));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_9));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
+ assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index"));
+ assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(MatchAssertion.class)); // fixed: was get(0), a copy-paste from the first section; the cast on the next line reads section 1
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string"));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.analyzer"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace"));
+ }
+
+ /**
+  * Single test section containing do sections, an is_true assertion and match
+  * assertions (including a map-valued match on _source); verifies each
+  * executable section in document order.
+  */
+ @Test
+ public void testParseTestSingleTestSection() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Index with ID\":\n" +
+ "\n" +
+ " - do:\n" +
+ " index:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ " body: { foo: bar }\n" +
+ "\n" +
+ " - is_true: ok\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ "\n" +
+ " - do:\n" +
+ " get:\n" +
+ " index: test-weird-index-中文\n" +
+ " type: weird.type\n" +
+ " id: 1\n" +
+ "\n" +
+ " - match: { _index: test-weird-index-中文 }\n" +
+ " - match: { _type: weird.type }\n" +
+ " - match: { _id: \"1\"}\n" +
+ " - match: { _version: 1}\n" +
+ " - match: { _source: { foo: bar }}"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(1));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(IsTrueAssertion.class));
+ IsTrueAssertion trueAssertion = (IsTrueAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(trueAssertion.getField(), equalTo("ok"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class));
+ MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(3), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(3);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(5);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(6);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7);
+ assertThat(matchAssertion.getField(), equalTo("_index"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8);
+ assertThat(matchAssertion.getField(), equalTo("_type"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9);
+ assertThat(matchAssertion.getField(), equalTo("_id"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(10), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(10);
+ assertThat(matchAssertion.getField(), equalTo("_version"));
+ assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1"));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(11), instanceOf(MatchAssertion.class));
+ matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(11);
+ assertThat(matchAssertion.getField(), equalTo("_source"));
+ assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
+ assertThat(((Map) matchAssertion.getExpectedValue()).get("foo").toString(), equalTo("bar"));
+ }
+
+ /**
+  * Two test sections, each made of two do sections (with catch / ignore
+  * variants); verifies catch value, api name, params and body presence.
+  */
+ @Test
+ public void testParseTestMultipleTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (partial doc)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ " ignore: 404\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n" +
+ " - do:\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " ignore: 404\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ RestTestSuite restTestSuite = testParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(restTestSuite, notNullValue());
+ assertThat(restTestSuite.getName(), equalTo("suite"));
+
+ assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Missing document (partial doc)"));
+ assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(2));
+
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class));
+ DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+
+ assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)"));
+ assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(2));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class));
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0);
+ assertThat(doSection.getCatch(), equalTo("missing"));
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class)); // fixed: was get(0), a copy-paste from the first section (and already asserted above); the cast on the next line reads section 1
+ doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(1);
+ assertThat(doSection.getCatch(), nullValue());
+ assertThat(doSection.getApiCallSection().getApi(), equalTo("update"));
+ assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4));
+ assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
+ }
+
+ /**
+  * Two test sections sharing the same name ("Missing document (script)") must
+  * be rejected by the parser with a RestTestParseException.
+  */
+ @Test(expected = RestTestParseException.class)
+ public void testParseTestDuplicateTestSections() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { doc: { foo: bar } }\n" +
+ "\n" +
+ "---\n" +
+ "\"Missing document (script)\":\n" +
+ "\n" +
+ "\n" +
+ " - do:\n" +
+ " catch: missing\n" +
+ " update:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body:\n" +
+ " script: \"ctx._source.foo = bar\"\n" +
+ " params: { bar: 'xxx' }\n" +
+ "\n"
+ );
+
+ RestTestSuiteParser testParser = new RestTestSuiteParser();
+ testParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
new file mode 100644
index 0000000000..7e2724a167
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetSectionParser;
+import org.elasticsearch.test.rest.section.SetSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class SetSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSetSectionSingleValue() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(1));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ }
+
+ @Test
+ public void testParseSetSectionMultipleValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ _id: id, _type: type, _index: index }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ SetSection setSection = setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(setSection, notNullValue());
+ assertThat(setSection.getStash(), notNullValue());
+ assertThat(setSection.getStash().size(), equalTo(3));
+ assertThat(setSection.getStash().get("_id"), equalTo("id"));
+ assertThat(setSection.getStash().get("_type"), equalTo("type"));
+ assertThat(setSection.getStash().get("_index"), equalTo("index"));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSetSectionNoValues() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "{ }"
+ );
+
+ SetSectionParser setSectionParser = new SetSectionParser();
+
+ setSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
new file mode 100644
index 0000000000..d2427e0bb4
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SetupSectionParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class SetupSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSetupSection() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(true));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+
+ @Test
+ public void testParseSetupAndSkipSectionNoSkip() throws Exception {
+
+ parser = YamlXContent.yamlXContent.createParser(
+ " - skip:\n" +
+ " version: \"0.90.0 - 0.90.7\"\n" +
+ " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
+ " - do:\n" +
+ " index1:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 1\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n" +
+ " - do:\n" +
+ " index2:\n" +
+ " index: test_1\n" +
+ " type: test\n" +
+ " id: 2\n" +
+ " body: { \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }\n"
+ );
+
+ SetupSectionParser setupSectionParser = new SetupSectionParser();
+ SetupSection setupSection = setupSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(setupSection, notNullValue());
+ assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false));
+ assertThat(setupSection.getSkipSection(), notNullValue());
+ assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_0));
+ assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_0_90_7));
+ assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
+ assertThat(setupSection.getDoSections().size(), equalTo(2));
+ assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1"));
+ assertThat(setupSection.getDoSections().get(1).getApiCallSection().getApi(), equalTo("index2"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
new file mode 100644
index 0000000000..1e71ae9181
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.parser.SkipSectionParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.*;
+
+public class SkipSectionParserTests extends AbstractParserTests {
+
+ @Test
+ public void testParseSkipSectionVersionNoFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \" - 0.90.2\"\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion()));
+ assertThat(skipSection.getUpperVersion(), equalTo(Version.V_0_90_2));
+ assertThat(skipSection.getFeatures().size(), equalTo(0));
+ assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
+ }
+
+ /**
+  * version: " all " must skip on every version: lower bound is the first known
+  * version and upper bound is the current version.
+  */
+ @Test // fixed: the annotation was missing; unlike every sibling test in this class, the JUnit 4 runner would silently never execute this method
+ public void testParseSkipSectionAllVersions() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \" all \"\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion()));
+ assertThat(skipSection.getUpperVersion(), equalTo(Version.CURRENT));
+ assertThat(skipSection.getFeatures().size(), equalTo(0));
+ assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param"));
+ }
+
+ @Test
+ public void testParseSkipSectionFeatureNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: regex"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.isVersionCheck(), equalTo(false));
+ assertThat(skipSection.getFeatures().size(), equalTo(1));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test
+ public void testParseSkipSectionFeaturesNoVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "features: [regex1,regex2,regex3]"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+
+ assertThat(skipSection, notNullValue());
+ assertThat(skipSection.isVersionCheck(), equalTo(false));
+ assertThat(skipSection.getFeatures().size(), equalTo(3));
+ assertThat(skipSection.getFeatures().get(0), equalTo("regex1"));
+ assertThat(skipSection.getFeatures().get(1), equalTo("regex2"));
+ assertThat(skipSection.getFeatures().get(2), equalTo("regex3"));
+ assertThat(skipSection.getReason(), nullValue());
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionBothFeatureAndVersion() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \" - 0.90.2\"\n" +
+ "features: regex\n" +
+ "reason: Delete ignores the parent param"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoReason() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "version: \" - 0.90.2\"\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+
+ @Test(expected = RestTestParseException.class)
+ public void testParseSkipSectionNoVersionNorFeature() throws Exception {
+ parser = YamlXContent.yamlXContent.createParser(
+ "reason: Delete ignores the parent param\n"
+ );
+
+ SkipSectionParser skipSectionParser = new SkipSectionParser();
+ skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/core/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
new file mode 100644
index 0000000000..374ade56c9
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
@@ -0,0 +1,247 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.parser.RestTestSectionParser;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParseContext;
+import org.elasticsearch.test.rest.section.*;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.*;
+
/**
 * Tests for {@code RestTestSectionParser}: each test feeds an inline YAML REST
 * test section to the parser and verifies the structure of the resulting
 * {@code TestSection} — its skip section, do sections and assertions.
 *
 * NOTE(review): the indentation inside the YAML string fixtures below appears
 * whitespace-collapsed in this view of the file; verify the literals' internal
 * spacing against the original source before editing them.
 */
public class TestSectionParserTests extends AbstractParserTests {

    // Single do-section with a catch clause; no skip section expected.
    @Test
    public void testParseTestSectionWithDoSection() throws Exception {
        parser = YamlXContent.yamlXContent.createParser(
                "\"First test section\": \n" +
                " - do :\n" +
                " catch: missing\n" +
                " indices.get_warmer:\n" +
                " index: test_index\n" +
                " name: test_warmer"
        );

        RestTestSectionParser testSectionParser = new RestTestSectionParser();
        TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));

        assertThat(testSection, notNullValue());
        assertThat(testSection.getName(), equalTo("First test section"));
        // No skip section in the YAML → parser must fall back to SkipSection.EMPTY.
        assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
        assertThat(testSection.getExecutableSections().size(), equalTo(1));
        DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
        assertThat(doSection.getCatch(), equalTo("missing"));
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
    }

    // Skip section with a version range, followed by a do section and a set section.
    @Test
    public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exception {
        String yaml =
                "\"First test section\": \n" +
                " - skip:\n" +
                " version: \"0.90.0 - 0.90.7\"\n" +
                " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" +
                " - do :\n" +
                " catch: missing\n" +
                " indices.get_warmer:\n" +
                " index: test_index\n" +
                " name: test_warmer\n" +
                " - set: {_scroll_id: scroll_id}";


        RestTestSectionParser testSectionParser = new RestTestSectionParser();
        parser = YamlXContent.yamlXContent.createParser(yaml);
        TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));

        assertThat(testSection, notNullValue());
        assertThat(testSection.getName(), equalTo("First test section"));
        // The version range "0.90.0 - 0.90.7" must be split into lower/upper bounds.
        assertThat(testSection.getSkipSection(), notNullValue());
        assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_0_90_0));
        assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.V_0_90_7));
        assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259"));
        // The skip section is not executable: only the do and set sections count.
        assertThat(testSection.getExecutableSections().size(), equalTo(2));
        DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
        assertThat(doSection.getCatch(), equalTo("missing"));
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
        SetSection setSection = (SetSection) testSection.getExecutableSections().get(1);
        assertThat(setSection.getStash().size(), equalTo(1));
        assertThat(setSection.getStash().get("_scroll_id"), equalTo("scroll_id"));
    }

    // Two consecutive do sections, including non-ASCII ids and a request body.
    @Test
    public void testParseTestSectionWithMultipleDoSections() throws Exception {
        parser = YamlXContent.yamlXContent.createParser(
                "\"Basic\":\n" +
                "\n" +
                " - do:\n" +
                " index:\n" +
                " index: test_1\n" +
                " type: test\n" +
                " id: 中文\n" +
                " body: { \"foo\": \"Hello: 中文\" }\n" +
                " - do:\n" +
                " get:\n" +
                " index: test_1\n" +
                " type: test\n" +
                " id: 中文"
        );

        RestTestSectionParser testSectionParser = new RestTestSectionParser();
        TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));

        assertThat(testSection, notNullValue());
        assertThat(testSection.getName(), equalTo("Basic"));
        assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
        assertThat(testSection.getExecutableSections().size(), equalTo(2));
        DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
        assertThat(doSection.getCatch(), nullValue());
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
        // Only the first call carries a body; the get call has none.
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));
        doSection = (DoSection)testSection.getExecutableSections().get(1);
        assertThat(doSection.getCatch(), nullValue());
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
    }

    // Full mix: do sections interleaved with every assertion type
    // (match, is_true, is_false, length, gt, lt), 10 executable sections total.
    @Test
    public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception {
        parser = YamlXContent.yamlXContent.createParser(
                "\"Basic\":\n" +
                "\n" +
                " - do:\n" +
                " index:\n" +
                " index: test_1\n" +
                " type: test\n" +
                " id: 中文\n" +
                " body: { \"foo\": \"Hello: 中文\" }\n" +
                "\n" +
                " - do:\n" +
                " get:\n" +
                " index: test_1\n" +
                " type: test\n" +
                " id: 中文\n" +
                "\n" +
                " - match: { _index: test_1 }\n" +
                " - is_true: _source\n" +
                " - match: { _source: { foo: \"Hello: 中文\" } }\n" +
                "\n" +
                " - do:\n" +
                " get:\n" +
                " index: test_1\n" +
                " id: 中文\n" +
                "\n" +
                " - length: { _index: 6 }\n" +
                " - is_false: whatever\n" +
                " - gt: { size: 5 }\n" +
                " - lt: { size: 10 }"
        );

        RestTestSectionParser testSectionParser = new RestTestSectionParser();
        TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));

        assertThat(testSection, notNullValue());
        assertThat(testSection.getName(), equalTo("Basic"));
        assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY));
        assertThat(testSection.getExecutableSections().size(), equalTo(10));

        DoSection doSection = (DoSection)testSection.getExecutableSections().get(0);
        assertThat(doSection.getCatch(), nullValue());
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("index"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(true));

        doSection = (DoSection)testSection.getExecutableSections().get(1);
        assertThat(doSection.getCatch(), nullValue());
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));

        MatchAssertion matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(2);
        assertThat(matchAssertion.getField(), equalTo("_index"));
        assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test_1"));

        IsTrueAssertion trueAssertion = (IsTrueAssertion)testSection.getExecutableSections().get(3);
        assertThat(trueAssertion.getField(), equalTo("_source"));

        // A match against an object literal must be parsed into a Map, not a String.
        matchAssertion = (MatchAssertion)testSection.getExecutableSections().get(4);
        assertThat(matchAssertion.getField(), equalTo("_source"));
        assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class));
        Map map = (Map) matchAssertion.getExpectedValue();
        assertThat(map.size(), equalTo(1));
        assertThat(map.get("foo").toString(), equalTo("Hello: 中文"));

        // Third do section omits the type param → only 2 params this time.
        doSection = (DoSection)testSection.getExecutableSections().get(5);
        assertThat(doSection.getCatch(), nullValue());
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("get"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));

        LengthAssertion lengthAssertion = (LengthAssertion) testSection.getExecutableSections().get(6);
        assertThat(lengthAssertion.getField(), equalTo("_index"));
        assertThat(lengthAssertion.getExpectedValue(), instanceOf(Integer.class));
        assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(6));

        IsFalseAssertion falseAssertion = (IsFalseAssertion)testSection.getExecutableSections().get(7);
        assertThat(falseAssertion.getField(), equalTo("whatever"));

        GreaterThanAssertion greaterThanAssertion = (GreaterThanAssertion) testSection.getExecutableSections().get(8);
        assertThat(greaterThanAssertion.getField(), equalTo("size"));
        assertThat(greaterThanAssertion.getExpectedValue(), instanceOf(Integer.class));
        assertThat((Integer) greaterThanAssertion.getExpectedValue(), equalTo(5));

        LessThanAssertion lessThanAssertion = (LessThanAssertion) testSection.getExecutableSections().get(9);
        assertThat(lessThanAssertion.getField(), equalTo("size"));
        assertThat(lessThanAssertion.getExpectedValue(), instanceOf(Integer.class));
        assertThat((Integer) lessThanAssertion.getExpectedValue(), equalTo(10));
    }

    // Minimal section: one do call with an empty body plus two is_true assertions.
    @Test
    public void testSmallSection() throws Exception {

        parser = YamlXContent.yamlXContent.createParser(
                "\"node_info test\":\n" +
                " - do:\n" +
                " cluster.node_info: {}\n" +
                " \n" +
                " - is_true: nodes\n" +
                " - is_true: cluster_name\n");
        RestTestSectionParser testSectionParser = new RestTestSectionParser();
        TestSection testSection = testSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));
        assertThat(testSection, notNullValue());
        assertThat(testSection.getName(), equalTo("node_info test"));
        assertThat(testSection.getExecutableSections().size(), equalTo(3));
    }
}
diff --git a/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java
new file mode 100644
index 0000000000..684a66f1b0
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.search;
+
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.IndicesWarmer;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.dfs.DfsPhase;
+import org.elasticsearch.search.fetch.FetchPhase;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.query.QueryPhase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class MockSearchService extends SearchService {
+
+ private static final Map<SearchContext, Throwable> ACTIVE_SEARCH_CONTEXTS = new ConcurrentHashMap<>();
+
+ /** Throw an {@link AssertionError} if there are still in-flight contexts. */
+ public static void assertNoInFLightContext() {
+ final Map<SearchContext, Throwable> copy = new HashMap<>(ACTIVE_SEARCH_CONTEXTS);
+ if (copy.isEmpty() == false) {
+ throw new AssertionError("There are still " + copy.size() + " in-flight contexts", copy.values().iterator().next());
+ }
+ }
+
+ @Inject
+ public MockSearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer,
+ ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
+ DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesQueryCache indicesQueryCache) {
+ super(settings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase,
+ queryPhase, fetchPhase, indicesQueryCache);
+ }
+
+ @Override
+ protected void putContext(SearchContext context) {
+ super.putContext(context);
+ ACTIVE_SEARCH_CONTEXTS.put(context, new RuntimeException());
+ }
+
+ @Override
+ protected SearchContext removeContext(long id) {
+ final SearchContext removed = super.removeContext(id);
+ if (removed != null) {
+ ACTIVE_SEARCH_CONTEXTS.remove(removed);
+ }
+ return removed;
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/search/MockSearchServiceModule.java b/core/src/test/java/org/elasticsearch/test/search/MockSearchServiceModule.java
new file mode 100644
index 0000000000..896c403bd1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/search/MockSearchServiceModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.search;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.search.SearchService;
+
/**
 * Guice module that swaps the production {@code SearchService} binding for
 * {@link MockSearchService}, enabling leaked-search-context detection in tests.
 */
public class MockSearchServiceModule extends AbstractModule {

    @Override
    protected void configure() {
        // Eager singleton so the mock is instantiated at injector startup.
        bind(SearchService.class).to(MockSearchService.class).asEagerSingleton();
    }

}
diff --git a/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
new file mode 100644
index 0000000000..036d98cc18
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -0,0 +1,305 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Charsets;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestRuleMarkFailure;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.store.FsDirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.IndexStoreModule;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.IndicesLifecycle;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.reflect.Field;
+import java.nio.file.Path;
+import java.util.*;
+
+public class MockFSDirectoryService extends FsDirectoryService {
+
+ public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close";
+ public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open";
+ public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write";
+ public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file";
+ public static final String CRASH_INDEX = "index.store.mock.random.crash_index";
+
+ private static final EnumSet<IndexShardState> validCheckIndexStates = EnumSet.of(
+ IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY
+ );
+
+ private final FsDirectoryService delegateService;
+ private final boolean checkIndexOnClose;
+ private final Random random;
+ private final double randomIOExceptionRate;
+ private final double randomIOExceptionRateOnOpen;
+ private final MockDirectoryWrapper.Throttling throttle;
+ private final Settings indexSettings;
+ private final boolean preventDoubleWrite;
+ private final boolean noDeleteOpenFile;
+ private final boolean crashIndex;
+
+ @Inject
+ public MockFSDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) {
+ super(indexSettings, indexStore, path);
+ final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l);
+ this.random = new Random(seed);
+ checkIndexOnClose = indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, true);
+ randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d);
+ randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d);
+ preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW
+ noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW
+ random.nextInt(shardId.getId() + 1); // some randomness per shard
+ throttle = MockDirectoryWrapper.Throttling.NEVER;
+ crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true);
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed),
+ throttle, crashIndex);
+ }
+ this.indexSettings = indexSettings;
+ delegateService = randomDirectorService(indexStore, path);
+ if (checkIndexOnClose) {
+ final IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() {
+
+ boolean canRun = false;
+
+ @Override
+ public void beforeIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
+ @IndexSettings Settings indexSettings) {
+ if (indexShard != null && shardId.equals(sid)) {
+ if (validCheckIndexStates.contains(indexShard.state()) && IndexMetaData.isOnSharedFilesystem(indexSettings) == false) {
+ canRun = true;
+ }
+ }
+ }
+
+ @Override
+ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard,
+ @IndexSettings Settings indexSettings) {
+ if (shardId.equals(sid) && indexShard != null && canRun) {
+ assert indexShard.state() == IndexShardState.CLOSED : "Current state must be closed";
+ checkIndex(indexShard.store(), sid);
+ }
+ service.indicesLifecycle().removeListener(this);
+ }
+ };
+ service.indicesLifecycle().addListener(listener);
+ }
+ }
+
+
+ @Override
+ public Directory newDirectory() throws IOException {
+ return wrap(delegateService.newDirectory());
+ }
+
+ @Override
+ protected synchronized Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ public void checkIndex(Store store, ShardId shardId) throws IndexShardException {
+ if (store.tryIncRef()) {
+ logger.info("start check index");
+ try {
+ Directory dir = store.directory();
+ if (!Lucene.indexExists(dir)) {
+ return;
+ }
+ if (IndexWriter.isLocked(dir)) {
+ ElasticsearchTestCase.checkIndexFailed = true;
+ throw new IllegalStateException("IndexWriter is still open on shard " + shardId);
+ }
+ try (CheckIndex checkIndex = new CheckIndex(dir)) {
+ BytesStreamOutput os = new BytesStreamOutput();
+ PrintStream out = new PrintStream(os, false, Charsets.UTF_8.name());
+ checkIndex.setInfoStream(out);
+ out.flush();
+ CheckIndex.Status status = checkIndex.checkIndex();
+ if (!status.clean) {
+ ElasticsearchTestCase.checkIndexFailed = true;
+ logger.warn("check index [failure] index files={}\n{}",
+ Arrays.toString(dir.listAll()),
+ new String(os.bytes().toBytes(), Charsets.UTF_8));
+ throw new IndexShardException(shardId, "index check failure");
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8));
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to check index", e);
+ } finally {
+ logger.info("end check index");
+ store.decRef();
+ }
+ }
+ }
+
+ @Override
+ public void onPause(long nanos) {
+ delegateService.onPause(nanos);
+ }
+
+ @Override
+ public StoreRateLimiting rateLimiting() {
+ return delegateService.rateLimiting();
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return delegateService.throttleTimeInNanos();
+ }
+
+ public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate";
+
+ private Directory wrap(Directory dir) {
+ final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex);
+ w.setRandomIOExceptionRate(randomIOExceptionRate);
+ w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
+ w.setThrottling(throttle);
+ w.setCheckIndexOnClose(false); // we do this on the index level
+ w.setPreventDoubleWrite(preventDoubleWrite);
+ // TODO: make this test robust to virus scanner
+ w.setEnableVirusScanner(false);
+ w.setNoDeleteOpenFile(noDeleteOpenFile);
+ w.setUseSlowOpenClosers(false);
+ LuceneTestCase.closeAfterSuite(new CloseableDirectory(w));
+ return w;
+ }
+
+ private FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) {
+ Settings.Builder builder = Settings.settingsBuilder();
+ builder.put(indexSettings);
+ builder.put(IndexStoreModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexStoreModule.Type.values()));
+ return new FsDirectoryService(builder.build(), indexStore, path);
+ }
+
+ public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
+
+ private final boolean crash;
+ private final Set<String> superUnSyncedFiles;
+ private final Random superRandomState;
+
+ public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, boolean crash) {
+ super(random, delegate);
+ this.crash = crash;
+
+ // TODO: remove all this and cutover to MockFS (DisableFsyncFS) instead
+ try {
+ Field field = MockDirectoryWrapper.class.getDeclaredField("unSyncedFiles");
+ field.setAccessible(true);
+ superUnSyncedFiles = (Set<String>) field.get(this);
+
+ field = MockDirectoryWrapper.class.getDeclaredField("randomState");
+ field.setAccessible(true);
+ superRandomState = (Random) field.get(this);
+ } catch (ReflectiveOperationException roe) {
+ throw new RuntimeException(roe);
+ }
+ }
+
+ /**
+ * Returns true if {@link #in} must sync its files.
+ * Currently, only {@link org.apache.lucene.store.NRTCachingDirectory} requires sync'ing its files
+ * because otherwise they are cached in an internal {@link org.apache.lucene.store.RAMDirectory}. If
+ * other directories require that too, they should be added to this method.
+ */
+ private boolean mustSync() {
+ Directory delegate = in;
+ while (delegate instanceof FilterDirectory) {
+ if (delegate instanceof NRTCachingDirectory) {
+ return true;
+ }
+ delegate = ((FilterDirectory) delegate).getDelegate();
+ }
+ return delegate instanceof NRTCachingDirectory;
+ }
+
+ @Override
+ public synchronized void sync(Collection<String> names) throws IOException {
+ // don't wear out our hardware so much in tests.
+ if (superRandomState.nextInt(100) == 0 || mustSync()) {
+ super.sync(names);
+ } else {
+ superUnSyncedFiles.removeAll(names);
+ }
+ }
+
+ @Override
+ public synchronized void crash() throws IOException {
+ if (crash) {
+ super.crash();
+ }
+ }
+ }
+
+ final class CloseableDirectory implements Closeable {
+ private final BaseDirectoryWrapper dir;
+ private final TestRuleMarkFailure failureMarker;
+
+ public CloseableDirectory(BaseDirectoryWrapper dir) {
+ this.dir = dir;
+ try {
+ final Field suiteFailureMarker = LuceneTestCase.class.getDeclaredField("suiteFailureMarker");
+ suiteFailureMarker.setAccessible(true);
+ this.failureMarker = (TestRuleMarkFailure) suiteFailureMarker.get(LuceneTestCase.class);
+ } catch (Throwable e) {
+ throw new ElasticsearchException("foo", e);
+ }
+ }
+
+ @Override
+ public void close() {
+ // We only attempt to check open/closed state if there were no other test
+ // failures.
+ try {
+ if (failureMarker.wasSuccessful() && dir.isOpen()) {
+ Assert.fail("Directory not closed: " + dir);
+ }
+ } finally {
+ // TODO: perform real close of the delegate: LUCENE-4058
+ // dir.close();
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java b/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
new file mode 100644
index 0000000000..2020c9f271
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.settings.IndexSettingsService;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.indices.store.IndicesStore;
+
/**
 * Test {@code IndexStore} whose shards use {@link MockFSDirectoryService},
 * enabling randomized IO-failure injection and close-time index checking.
 */
public class MockFSIndexStore extends IndexStore {

    @Inject
    public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService,
                            IndicesStore indicesStore) {
        super(index, indexSettings, indexSettingsService, indicesStore);
    }

    // Route every shard-level directory service to the mock implementation.
    @Override
    public Class<? extends DirectoryService> shardDirectory() {
        return MockFSDirectoryService.class;
    }

}
diff --git a/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java b/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
new file mode 100644
index 0000000000..c4f9d2046a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/store/MockFSIndexStoreModule.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.index.store.IndexStore;
+
/**
 * Guice module that swaps the production {@code IndexStore} binding for
 * {@link MockFSIndexStore} so tests get the mock directory behavior.
 */
public class MockFSIndexStoreModule extends AbstractModule {

    @Override
    protected void configure() {
        // Eager singleton mirrors the production IndexStore binding semantics.
        bind(IndexStore.class).to(MockFSIndexStore.class).asEagerSingleton();
    }

}
diff --git a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
new file mode 100644
index 0000000000..d7ffab9311
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.test;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.SettingsSource;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
+
+/**
+ * Basic test that ensures that the internal cluster reproduces the same
+ * configuration given the same seed / input.
+ */
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
+public class InternalTestClusterTests extends ElasticsearchTestCase {
+
+ public void testInitializiationIsConsistent() {
+ long clusterSeed = randomLong();
+ int minNumDataNodes = randomIntBetween(0, 9);
+ int maxNumDataNodes = randomIntBetween(minNumDataNodes, 10);
+ String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
+ SettingsSource settingsSource = SettingsSource.EMPTY;
+ int numClientNodes = randomIntBetween(0, 10);
+ boolean enableHttpPipelining = randomBoolean();
+ String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
+
+ Path baseDir = createTempDir();
+ InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix);
+ InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix);
+ assertClusters(cluster0, cluster1, true);
+
+ }
+
+ public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean assertClusterName) {
+ Settings defaultSettings0 = cluster0.getDefaultSettings();
+ Settings defaultSettings1 = cluster1.getDefaultSettings();
+ assertSettings(defaultSettings0, defaultSettings1, assertClusterName);
+ assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes()));
+ if (assertClusterName) {
+ assertThat(cluster0.getClusterName(), equalTo(cluster1.getClusterName()));
+ }
+ }
+
+ public static void assertSettings(Settings left, Settings right, boolean compareClusterName) {
+ ImmutableSet<Map.Entry<String, String>> entries0 = left.getAsMap().entrySet();
+ Map<String, String> entries1 = right.getAsMap();
+ assertThat(entries0.size(), equalTo(entries1.size()));
+ for (Map.Entry<String, String> entry : entries0) {
+ if(entry.getKey().equals(ClusterName.SETTING) && compareClusterName == false) {
+ continue;
+ }
+ assertThat(entries1, hasEntry(entry.getKey(), entry.getValue()));
+ }
+ }
+
+ public void testBeforeTest() throws IOException {
+ long clusterSeed = randomLong();
+ int minNumDataNodes = randomIntBetween(0, 3);
+ int maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
+ final String clusterName1 = "shared1";//clusterName("shared1", clusterSeed);
+ final String clusterName2 = "shared2";//clusterName("shared", Integer.toString(CHILD_JVM_ID), clusterSeed);
+ /*while (clusterName.equals(clusterName1)) {
+ clusterName1 = clusterName("shared", Integer.toString(CHILD_JVM_ID), clusterSeed); // spin until the time changes
+ }*/
+ SettingsSource settingsSource = SettingsSource.EMPTY;
+ int numClientNodes = randomIntBetween(0, 2);
+ boolean enableHttpPipelining = randomBoolean();
+ int jvmOrdinal = randomIntBetween(0, 10);
+ String nodePrefix = "foobar";
+
+ Path baseDir = createTempDir();
+ InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix);
+ InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, settingsSource, numClientNodes, enableHttpPipelining, nodePrefix);
+
+ assertClusters(cluster0, cluster1, false);
+ long seed = randomLong();
+ try {
+ {
+ Random random = new Random(seed);
+ cluster0.beforeTest(random, random.nextDouble());
+ }
+ {
+ Random random = new Random(seed);
+ cluster1.beforeTest(random, random.nextDouble());
+ }
+ assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames());
+ Iterator<Client> iterator1 = cluster1.iterator();
+ for (Client client : cluster0) {
+ assertTrue(iterator1.hasNext());
+ Client other = iterator1.next();
+ assertSettings(client.settings(), other.settings(), false);
+ }
+ assertArrayEquals(cluster0.getNodeNames(), cluster1.getNodeNames());
+ cluster0.afterTest();
+ cluster1.afterTest();
+ } finally {
+
+ IOUtils.close(cluster0, cluster1);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/core/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
new file mode 100644
index 0000000000..70de553c76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.test;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.junit.Test;
+import org.junit.runner.Description;
+import org.junit.runner.Result;
+
+import java.lang.reflect.Method;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.nullValue;
+
+public class LoggingListenerTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testCustomLevelPerMethod() throws Exception {
+
+ LoggingListener loggingListener = new LoggingListener();
+
+ Description suiteDescription = Description.createSuiteDescription(TestClass.class);
+
+ ESLogger abcLogger = Loggers.getLogger("abc");
+ ESLogger xyzLogger = Loggers.getLogger("xyz");
+
+ assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), nullValue());
+ loggingListener.testRunStarted(suiteDescription);
+ assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(abcLogger.getLevel(), nullValue());
+
+ Method method = TestClass.class.getDeclaredMethod("annotatedTestMethod");
+ TestLogging annotation = method.getAnnotation(TestLogging.class);
+ Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
+ loggingListener.testStarted(testDescription);
+ assertThat(xyzLogger.getLevel(), equalTo("TRACE"));
+ assertThat(abcLogger.getLevel(), nullValue());
+
+ loggingListener.testFinished(testDescription);
+ assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(abcLogger.getLevel(), nullValue());
+
+ loggingListener.testRunFinished(new Result());
+ assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(abcLogger.getLevel(), nullValue());
+ }
+
+ @Test
+ public void testCustomLevelPerClass() throws Exception {
+
+ LoggingListener loggingListener = new LoggingListener();
+
+ Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class);
+
+ ESLogger abcLogger = Loggers.getLogger("abc");
+ ESLogger xyzLogger = Loggers.getLogger("xyz");
+
+ assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(abcLogger.getLevel(), nullValue());
+ loggingListener.testRunStarted(suiteDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "test");
+ loggingListener.testStarted(testDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ loggingListener.testFinished(testDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ loggingListener.testRunFinished(new Result());
+ assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), nullValue());
+ }
+
+ @Test
+ public void testCustomLevelPerClassAndPerMethod() throws Exception {
+
+ LoggingListener loggingListener = new LoggingListener();
+
+ Description suiteDescription = Description.createSuiteDescription(AnnotatedTestClass.class);
+
+ ESLogger abcLogger = Loggers.getLogger("abc");
+ ESLogger xyzLogger = Loggers.getLogger("xyz");
+
+ assertThat(xyzLogger.getLevel(), nullValue());
+ assertThat(abcLogger.getLevel(), nullValue());
+ loggingListener.testRunStarted(suiteDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ Method method = TestClass.class.getDeclaredMethod("annotatedTestMethod");
+ TestLogging annotation = method.getAnnotation(TestLogging.class);
+ Description testDescription = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod", annotation);
+ loggingListener.testStarted(testDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), equalTo("TRACE"));
+
+ loggingListener.testFinished(testDescription);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ Method method2 = TestClass.class.getDeclaredMethod("annotatedTestMethod2");
+ TestLogging annotation2 = method2.getAnnotation(TestLogging.class);
+ Description testDescription2 = Description.createTestDescription(LoggingListenerTests.class, "annotatedTestMethod2", annotation2);
+ loggingListener.testStarted(testDescription2);
+ assertThat(abcLogger.getLevel(), equalTo("TRACE"));
+ assertThat(xyzLogger.getLevel(), equalTo("DEBUG"));
+
+ loggingListener.testFinished(testDescription2);
+ assertThat(abcLogger.getLevel(), equalTo("ERROR"));
+ assertThat(xyzLogger.getLevel(), nullValue());
+
+ loggingListener.testRunFinished(new Result());
+ assertThat(abcLogger.getLevel(), nullValue());
+ assertThat(xyzLogger.getLevel(), nullValue());
+ }
+
+ @TestLogging("abc:ERROR")
+ public static class AnnotatedTestClass {
+ //dummy class used to create a junit suite description that has the @TestLogging annotation
+ }
+
+ public static class TestClass {
+ //dummy class used to create a junit suite description that doesn't have the @TestLogging annotation, but its test methods have it
+
+ @SuppressWarnings("unused")
+ @TestLogging("xyz:TRACE")
+ public void annotatedTestMethod() {}
+
+ @SuppressWarnings("unused")
+ @TestLogging("abc:TRACE,xyz:DEBUG")
+ public void annotatedTestMethod2() {}
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterTests.java
new file mode 100644
index 0000000000..bb6e4e976e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterTests.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.test;
+
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * This test ensures that the cluster initialization for suite scope is not influencing
+ * the test's random sequence due to initialization using the same random instance.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+public class SuiteScopeClusterTests extends ElasticsearchIntegrationTest {
+ private static int ITER = 0;
+ private static long[] SEQUENCE = new long[100];
+ private static Long CLUSTER_SEED = null;
+
+ @Test
+ @Repeat(iterations = 10, useConstantSeed = true)
+ public void testReproducible() throws IOException {
+ if (ITER++ == 0) {
+ CLUSTER_SEED = cluster().seed();
+ for (int i = 0; i < SEQUENCE.length; i++) {
+ SEQUENCE[i] = randomLong();
+ }
+ } else {
+ assertEquals(CLUSTER_SEED, new Long(cluster().seed()));
+ for (int i = 0; i < SEQUENCE.length; i++) {
+ assertThat(SEQUENCE[i], equalTo(randomLong()));
+ }
+ }
+ }
+
+ @Override
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ // produce some randomness
+ int iters = between(1, 100);
+ for (int i = 0; i < iters; i++) {
+ randomLong();
+ }
+ return super.buildTestCluster(scope, seed);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/test/TestScopeClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/TestScopeClusterTests.java
new file mode 100644
index 0000000000..310be735bd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/test/TestScopeClusterTests.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.test;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.TestCluster;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * This test ensures that the cluster initialization for TEST scope is not influencing
+ * the test's random sequence due to initialization using the same random instance.
+ */
+@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class TestScopeClusterTests extends ElasticsearchIntegrationTest {
+ private static int ITER = 0;
+ private static long[] SEQUENCE = new long[100];
+ private static Long CLUSTER_SEED = null;
+
+ @Test
+ public void testReproducible() throws IOException {
+ if (ITER++ == 0) {
+ CLUSTER_SEED = cluster().seed();
+ for (int i = 0; i < SEQUENCE.length; i++) {
+ SEQUENCE[i] = randomLong();
+ }
+ } else {
+ assertEquals(CLUSTER_SEED, new Long(cluster().seed()));
+ for (int i = 0; i < SEQUENCE.length; i++) {
+ assertThat(SEQUENCE[i], equalTo(randomLong()));
+ }
+ }
+ }
+
+ @Override
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ // produce some randomness
+ int iters = between(1, 100);
+ for (int i = 0; i < iters; i++) {
+ randomLong();
+ }
+ return super.buildTestCluster(scope, seed);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/core/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
new file mode 100644
index 0000000000..e368a063b6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.VersionUtils;
+
+import java.util.List;
+
+public class VersionUtilsTests extends ElasticsearchTestCase {
+
+ public void testAllVersionsSorted() {
+ List<Version> allVersions = VersionUtils.allVersions();
+ for (int i = 0, j = 1; j < allVersions.size(); ++i, ++j) {
+ assertTrue(allVersions.get(i).before(allVersions.get(j)));
+ }
+ }
+
+ public void testRandomVersionBetween() {
+ // full range
+ Version got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), Version.CURRENT);
+ assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
+ assertTrue(got.onOrBefore(Version.CURRENT));
+ got = VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
+ assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
+ assertTrue(got.onOrBefore(Version.CURRENT));
+ got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null);
+ assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
+ assertTrue(got.onOrBefore(Version.CURRENT));
+
+ // sub range
+ got = VersionUtils.randomVersionBetween(random(), Version.V_0_90_12, Version.V_1_4_5);
+ assertTrue(got.onOrAfter(Version.V_0_90_12));
+ assertTrue(got.onOrBefore(Version.V_1_4_5));
+
+ // unbounded lower
+ got = VersionUtils.randomVersionBetween(random(), null, Version.V_1_4_5);
+ assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
+ assertTrue(got.onOrBefore(Version.V_1_4_5));
+ got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allVersions().get(0));
+ assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
+ assertTrue(got.onOrBefore(VersionUtils.allVersions().get(0)));
+
+ // unbounded upper
+ got = VersionUtils.randomVersionBetween(random(), Version.V_0_90_12, null);
+ assertTrue(got.onOrAfter(Version.V_0_90_12));
+ assertTrue(got.onOrBefore(Version.CURRENT));
+ got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null);
+ assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion()));
+ assertTrue(got.onOrBefore(Version.CURRENT));
+
+ // range of one
+ got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getFirstVersion());
+ assertEquals(got, VersionUtils.getFirstVersion());
+ got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
+ assertEquals(got, Version.CURRENT);
+ got = VersionUtils.randomVersionBetween(random(), Version.V_1_2_4, Version.V_1_2_4);
+ assertEquals(got, Version.V_1_2_4);
+
+ // implicit range of one
+ got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
+ assertEquals(got, VersionUtils.getFirstVersion());
+ got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, null);
+ assertEquals(got, Version.CURRENT);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/core/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
new file mode 100644
index 0000000000..f5dc92ba3d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.local.LocalTransport;
+
+import java.io.IOException;
+import java.util.Random;
+
+/**
+ *
+ */
+public class AssertingLocalTransport extends LocalTransport {
+
+ public static final String ASSERTING_TRANSPORT_MIN_VERSION_KEY = "transport.asserting.version.min";
+ public static final String ASSERTING_TRANSPORT_MAX_VERSION_KEY = "transport.asserting.version.max";
+ private final Random random;
+ private final Version minVersion;
+ private final Version maxVersion;
+
+ @Inject
+ public AssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version) {
+ super(settings, threadPool, version);
+ final long seed = settings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l);
+ random = new Random(seed);
+ minVersion = settings.getAsVersion(ASSERTING_TRANSPORT_MIN_VERSION_KEY, Version.V_0_18_0);
+ maxVersion = settings.getAsVersion(ASSERTING_TRANSPORT_MAX_VERSION_KEY, Version.CURRENT);
+ }
+
+ @Override
+ protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) {
+ ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), response);
+ super.handleParsedResponse(response, handler);
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), request);
+ super.sendRequest(node, requestId, action, request, options);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java b/core/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java
new file mode 100644
index 0000000000..8cb1f620c3
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+
+/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */
+public class CapturingTransport implements Transport {
+ private TransportServiceAdapter adapter;
+
+ static public class CapturedRequest {
+ final public DiscoveryNode node;
+ final public long requestId;
+ final public String action;
+ final public TransportRequest request;
+
+ public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) {
+ this.node = node;
+ this.requestId = requestId;
+ this.action = action;
+ this.request = request;
+ }
+ }
+
+ private BlockingQueue<CapturedRequest> capturedRequests = ConcurrentCollections.newBlockingQueue();
+
+ /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */
+ public CapturedRequest[] capturedRequests() {
+ return capturedRequests.toArray(new CapturedRequest[0]);
+ }
+
+ /**
+ * returns all requests captured so far, grouped by target node.
+ * Doesn't clear the captured request list. See {@link #clear()}
+ */
+ public Map<String, List<CapturedRequest>> capturedRequestsByTargetNode() {
+ Map<String, List<CapturedRequest>> map = new HashMap<>();
+ for (CapturedRequest request : capturedRequests) {
+ List<CapturedRequest> nodeList = map.get(request.node.id());
+ if (nodeList == null) {
+ nodeList = new ArrayList<>();
+ map.put(request.node.id(), nodeList);
+ }
+ nodeList.add(request);
+ }
+ return map;
+ }
+
+ /** clears captured requests */
+ public void clear() {
+ capturedRequests.clear();
+ }
+
+ /** simulate a response for the given requestId */
+ public void handleResponse(final long requestId, final TransportResponse response) {
+ adapter.onResponseReceived(requestId).handleResponse(response);
+ }
+
+ /** simulate a remote error for the given requestId */
+ public void handleResponse(final long requestId, final Throwable t) {
+ adapter.onResponseReceived(requestId).handleException(new RemoteTransportException("remote failure", t));
+ }
+
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ capturedRequests.add(new CapturedRequest(node, requestId, action, request));
+ }
+
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter adapter) {
+ this.adapter = adapter;
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return null;
+ }
+
+ @Override
+ public Map<String, BoundTransportAddress> profileBoundAddresses() {
+ return null;
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address) throws Exception {
+ return new TransportAddress[0];
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return false;
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return true;
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+
+ }
+
+ @Override
+ public long serverOpen() {
+ return 0;
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return null;
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public Transport start() {
+ return null;
+ }
+
+ @Override
+ public Transport stop() {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/transport/MockTransportService.java b/core/src/test/java/org/elasticsearch/test/transport/MockTransportService.java
new file mode 100644
index 0000000000..2a566a1ff5
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -0,0 +1,497 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * A mock transport service that allows to simulate different network topology failures.
+ */
+public class MockTransportService extends TransportService {
+
+ private final Transport original;
+
+ @Inject
+ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ super(settings, new LookupTestTransport(transport), threadPool);
+ this.original = transport;
+
+ }
+
+ /**
+ * Clears all the registered rules.
+ */
+ public void clearAllRules() {
+ transport().transports.clear();
+ }
+
+ /**
+ * Clears the rule associated with the provided node.
+ */
+ public void clearRule(DiscoveryNode node) {
+ transport().transports.remove(node.getAddress());
+ }
+
+ /**
+ * Returns the original Transport service wrapped by this mock transport service.
+ */
+ public Transport original() {
+ return original;
+ }
+
+ /**
+ * Adds a rule that causes every send request to fail, and every new connection
+ * attempt made after the rule is added to fail as well.
+ */
+ public void addFailToSendNoConnectRule(DiscoveryNode node) {
+ addDelegate(node, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(DiscoveryNode node, final String... blockedActions) {
+ addFailToSendNoConnectRule(node, new HashSet<>(Arrays.asList(blockedActions)));
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(DiscoveryNode node, final Set<String> blockedActions) {
+
+ addDelegate(node, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ original.connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ original.connectToNodeLight(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (blockedActions.contains(action)) {
+ logger.info("--> preventing {} request", action);
+ throw new ConnectTransportException(node, "DISCONNECT: prevented " + action + " request");
+ }
+ original.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that ignores each send request, simulating an unresponsive node,
+ * and fails to connect once the rule is added.
+ */
+ public void addUnresponsiveRule(DiscoveryNode node) {
+ addDelegate(node, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ // don't send anything, the receiving node is unresponsive
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that ignores each send request, simulating an unresponsive node,
+ * and fails to connect once the rule is added.
+ *
+ * @param duration the amount of time to delay sending and connecting.
+ */
+ public void addUnresponsiveRule(DiscoveryNode node, final TimeValue duration) {
+ final long startTime = System.currentTimeMillis();
+
+ addDelegate(node, new DelegateTransport(original) {
+
+ TimeValue getDelay() {
+ return new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime));
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.connectToNode(node);
+ return;
+ }
+
+ // TODO: Replace with proper setting
+ TimeValue connectingTimeout = NetworkService.TcpSettings.TCP_DEFAULT_CONNECT_TIMEOUT;
+ try {
+ if (delay.millis() < connectingTimeout.millis()) {
+ Thread.sleep(delay.millis());
+ original.connectToNode(node);
+ } else {
+ Thread.sleep(connectingTimeout.millis());
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+ } catch (InterruptedException e) {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: interrupted while sleeping", e);
+ }
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.connectToNodeLight(node);
+ return;
+ }
+
+ // TODO: Replace with proper setting
+ TimeValue connectingTimeout = NetworkService.TcpSettings.TCP_DEFAULT_CONNECT_TIMEOUT;
+ try {
+ if (delay.millis() < connectingTimeout.millis()) {
+ Thread.sleep(delay.millis());
+ original.connectToNodeLight(node);
+ } else {
+ Thread.sleep(connectingTimeout.millis());
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+ } catch (InterruptedException e) {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: interrupted while sleeping", e);
+ }
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, TransportRequest request, final TransportRequestOptions options) throws IOException, TransportException {
+ // delayed sending - even if larger than the request timeout, to simulate a potential late response from the target node
+
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.sendRequest(node, requestId, action, request, options);
+ return;
+ }
+
+ // poor man's request cloning...
+ RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action);
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ request.writeTo(bStream);
+ final TransportRequest clonedRequest = reg.newRequest();
+ clonedRequest.readFrom(StreamInput.wrap(bStream.bytes()));
+
+ threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
+ @Override
+ public void onFailure(Throwable e) {
+ logger.debug("failed to send delayed request", e);
+ }
+
+ @Override
+ protected void doRun() throws IOException {
+ original.sendRequest(node, requestId, action, clonedRequest, options);
+ }
+ });
+ }
+ });
+ }
+
+ /**
+ * Adds a new delegate transport that is used for communication with the given node.
+ *
+ * @return <tt>true</tt> iff no other delegate was registered for this node before, otherwise <tt>false</tt>
+ */
+ public boolean addDelegate(DiscoveryNode node, DelegateTransport transport) {
+ return transport().transports.put(node.getAddress(), transport) == null;
+ }
+
+ private LookupTestTransport transport() {
+ return (LookupTestTransport) transport;
+ }
+
+ /**
+ * A lookup transport that has a list of potential Transport implementations to delegate to for node operations,
+ * if none is registered, then the default one is used.
+ */
+ private static class LookupTestTransport extends DelegateTransport {
+
+ final ConcurrentMap<TransportAddress, Transport> transports = ConcurrentCollections.newConcurrentMap();
+
+ LookupTestTransport(Transport transport) {
+ super(transport);
+ }
+
+ private Transport getTransport(DiscoveryNode node) {
+ Transport transport = transports.get(node.getAddress());
+ if (transport != null) {
+ return transport;
+ }
+ return this.transport;
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return getTransport(node).nodeConnected(node);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ getTransport(node).connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ getTransport(node).connectToNodeLight(node);
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ getTransport(node).disconnectFromNode(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ getTransport(node).sendRequest(node, requestId, action, request, options);
+ }
+ }
+
+ /**
+ * A pure delegate transport.
+ * Can be extracted to a common class if needed in other places in the codebase.
+ */
+ public static class DelegateTransport implements Transport {
+
+ protected final Transport transport;
+
+
+ public DelegateTransport(Transport transport) {
+ this.transport = transport;
+ }
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter service) {
+ transport.transportServiceAdapter(service);
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return transport.boundAddress();
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address) throws Exception {
+ return transport.addressesFromString(address);
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return transport.addressSupported(address);
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return transport.nodeConnected(node);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNodeLight(node);
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ transport.disconnectFromNode(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ transport.sendRequest(node, requestId, action, request, options);
+ }
+
+ @Override
+ public long serverOpen() {
+ return transport.serverOpen();
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return transport.lifecycleState();
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ transport.addLifecycleListener(listener);
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ transport.removeLifecycleListener(listener);
+ }
+
+ @Override
+ public Transport start() {
+ transport.start();
+ return this;
+ }
+
+ @Override
+ public Transport stop() {
+ transport.stop();
+ return this;
+ }
+
+ @Override
+ public void close() {
+ transport.close();
+ }
+
+ @Override
+ public Map<String, BoundTransportAddress> profileBoundAddresses() {
+ return transport.profileBoundAddresses();
+ }
+ }
+
+
+ List<Tracer> activeTracers = new CopyOnWriteArrayList<>();
+
+ public static class Tracer {
+ public void receivedRequest(long requestId, String action) {
+ }
+
+ public void responseSent(long requestId, String action) {
+ }
+
+ public void responseSent(long requestId, String action, Throwable t) {
+ }
+
+ public void receivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
+ }
+
+ public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
+ }
+ }
+
+ public void addTracer(Tracer tracer) {
+ activeTracers.add(tracer);
+ }
+
+ public boolean removeTracer(Tracer tracer) {
+ return activeTracers.remove(tracer);
+ }
+
+ public void clearTracers() {
+ activeTracers.clear();
+ }
+
+ @Override
+ protected Adapter createAdapter() {
+ return new MockAdapter();
+ }
+
+ class MockAdapter extends Adapter {
+
+ @Override
+ protected boolean traceEnabled() {
+ return super.traceEnabled() || activeTracers.isEmpty() == false;
+ }
+
+ @Override
+ protected void traceReceivedRequest(long requestId, String action) {
+ super.traceReceivedRequest(requestId, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.receivedRequest(requestId, action);
+ }
+ }
+
+ @Override
+ protected void traceResponseSent(long requestId, String action) {
+ super.traceResponseSent(requestId, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.responseSent(requestId, action);
+ }
+ }
+
+ @Override
+ protected void traceResponseSent(long requestId, String action, Throwable t) {
+ super.traceResponseSent(requestId, action, t);
+ for (Tracer tracer : activeTracers) {
+ tracer.responseSent(requestId, action, t);
+ }
+ }
+
+ @Override
+ protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
+ super.traceReceivedResponse(requestId, sourceNode, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.receivedResponse(requestId, sourceNode, action);
+ }
+ }
+
+ @Override
+ protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
+ super.traceRequestSent(node, requestId, action, options);
+ for (Tracer tracer : activeTracers) {
+ tracer.requestSent(node, requestId, action, options);
+ }
+ }
+ }
+
+
+}
diff --git a/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
new file mode 100644
index 0000000000..2e4ef0fe20
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/threadpool/SimpleThreadPoolTests.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import com.google.common.collect.Sets;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.common.network.MulticastChannel;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.hamcrest.RegexMatcher;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.elasticsearch.tribe.TribeTests;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.*;
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0)
+public class SimpleThreadPoolTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("threadpool.search.type", "cached").build();
+ }
+
+ @Test
+ public void verifyThreadNames() throws Exception {
+
+ ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
+ Set<String> preNodeStartThreadNames = Sets.newHashSet();
+ for (long l : threadBean.getAllThreadIds()) {
+ ThreadInfo threadInfo = threadBean.getThreadInfo(l);
+ if (threadInfo != null) {
+ preNodeStartThreadNames.add(threadInfo.getThreadName());
+ }
+ }
+ logger.info("pre node threads are {}", preNodeStartThreadNames);
+ String node = internalCluster().startNode();
+ logger.info("do some indexing, flushing, optimize, and searches");
+ int numDocs = randomIntBetween(2, 100);
+ IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; ++i) {
+ builders[i] = client().prepareIndex("idx", "type").setSource(jsonBuilder()
+ .startObject()
+ .field("str_value", "s" + i)
+ .field("str_values", new String[]{"s" + (i * 2), "s" + (i * 2 + 1)})
+ .field("l_value", i)
+ .field("l_values", new int[]{i * 2, i * 2 + 1})
+ .field("d_value", i)
+ .field("d_values", new double[]{i * 2, i * 2 + 1})
+ .endObject());
+ }
+ indexRandom(true, builders);
+ int numSearches = randomIntBetween(2, 100);
+ for (int i = 0; i < numSearches; i++) {
+ assertNoFailures(client().prepareSearch("idx").setQuery(QueryBuilders.termQuery("str_value", "s" + i)).get());
+ assertNoFailures(client().prepareSearch("idx").setQuery(QueryBuilders.termQuery("l_value", i)).get());
+ }
+ Set<String> threadNames = Sets.newHashSet();
+ for (long l : threadBean.getAllThreadIds()) {
+ ThreadInfo threadInfo = threadBean.getThreadInfo(l);
+ if (threadInfo != null) {
+ threadNames.add(threadInfo.getThreadName());
+ }
+ }
+ logger.info("post node threads are {}", threadNames);
+ threadNames.removeAll(preNodeStartThreadNames);
+ logger.info("post node *new* threads are {}", threadNames);
+ for (String threadName : threadNames) {
+ // ignore some shared threads we know are created within the same VM, like the shared discovery one
+ // or the ones that occasionally come up from ElasticsearchSingleNodeTest
+ if (threadName.contains("[" + MulticastChannel.SHARED_CHANNEL_NAME + "]")
+ || threadName.contains("[" + ElasticsearchSingleNodeTest.nodeName() + "]")
+ || threadName.contains("Keep-Alive-Timer")) {
+ continue;
+ }
+ String nodePrefix = "(" + Pattern.quote(InternalTestCluster.TRANSPORT_CLIENT_PREFIX) + ")?(" +
+ Pattern.quote(ElasticsearchIntegrationTest.SUITE_CLUSTER_NODE_PREFIX) + "|" +
+ Pattern.quote(ElasticsearchIntegrationTest.TEST_CLUSTER_NODE_PREFIX) + "|" +
+ Pattern.quote(TribeTests.SECOND_CLUSTER_NODE_PREFIX) + ")";
+ assertThat(threadName, RegexMatcher.matches("\\[" + nodePrefix + "\\d+\\]"));
+ }
+ }
+
+ @Test(timeout = 20000)
+ public void testUpdatingThreadPoolSettings() throws Exception {
+ internalCluster().startNodesAsync(2).get();
+ ThreadPool threadPool = internalCluster().getDataNodeInstance(ThreadPool.class);
+ // Check that settings are changed
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(5L));
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.keep_alive", "10m").build()).execute().actionGet();
+ assertThat(((ThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Make sure that threads continue executing when executor is replaced
+ final CyclicBarrier barrier = new CyclicBarrier(2);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ barrier.await();
+
+ // Make sure that new thread executor is functional
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ barrier.await();
+ } catch (InterruptedException ex) {
+ Thread.currentThread().interrupt();
+ } catch (BrokenBarrierException ex) {
+ //
+ }
+ }
+ });
+ client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put("threadpool.search.type", "fixed").build()).execute().actionGet();
+ barrier.await();
+ Thread.sleep(200);
+
+ // Check that node info is correct
+ NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().all().execute().actionGet();
+ for (int i = 0; i < 2; i++) {
+ NodeInfo nodeInfo = nodesInfoResponse.getNodes()[i];
+ boolean found = false;
+ for (ThreadPool.Info info : nodeInfo.getThreadPool()) {
+ if (info.getName().equals(Names.SEARCH)) {
+ assertThat(info.getType(), equalTo("fixed"));
+ found = true;
+ break;
+ }
+ }
+ assertThat(found, equalTo(true));
+
+ Map<String, Object> poolMap = getPoolSettingsThroughJson(nodeInfo.getThreadPool(), Names.SEARCH);
+ }
+ }
+
+ @Test
+ public void testThreadPoolLeakingThreadsWithTribeNode() {
+ Settings settings = Settings.builder()
+ .put("node.name", "thread_pool_leaking_threads_tribe_node")
+ .put("path.home", createTempDir())
+ .put("tribe.t1.cluster.name", "non_existing_cluster")
+ //trigger initialization failure of one of the tribes (doesn't require starting the node)
+ .put("tribe.t1.plugin.mandatory", "non_existing").build();
+
+ try {
+ NodeBuilder.nodeBuilder().settings(settings).build();
+ fail("The node startup is supposed to fail");
+ } catch(Throwable t) {
+ //all good
+ assertThat(t.getMessage(), containsString("mandatory plugins [non_existing]"));
+ }
+ }
+
+ private Map<String, Object> getPoolSettingsThroughJson(ThreadPoolInfo info, String poolName) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ info.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+ builder.close();
+ XContentParser parser = JsonXContent.jsonXContent.createParser(builder.string());
+ Map<String, Object> poolsMap = parser.mapAndClose();
+ return (Map<String, Object>) ((Map<String, Object>) poolsMap.get("thread_pool")).get(poolName);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java
new file mode 100644
index 0000000000..1984406306
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolSerializationTests.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.SizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ThreadPoolSerializationTests extends ElasticsearchTestCase {
+
+ BytesStreamOutput output = new BytesStreamOutput();
+
+ @Test
+ public void testThatQueueSizeSerializationWorks() throws Exception {
+ ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("10k"));
+ output.setVersion(Version.CURRENT);
+ info.writeTo(output);
+
+ StreamInput input = StreamInput.wrap(output.bytes());
+ ThreadPool.Info newInfo = new ThreadPool.Info();
+ newInfo.readFrom(input);
+
+ assertThat(newInfo.getQueueSize().singles(), is(10000l));
+ }
+
+ @Test
+ public void testThatNegativeQueueSizesCanBeSerialized() throws Exception {
+ ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), null);
+ output.setVersion(Version.CURRENT);
+ info.writeTo(output);
+
+ StreamInput input = StreamInput.wrap(output.bytes());
+ ThreadPool.Info newInfo = new ThreadPool.Info();
+ newInfo.readFrom(input);
+
+ assertThat(newInfo.getQueueSize(), is(nullValue()));
+ }
+
+ @Test
+ public void testThatToXContentWritesOutUnboundedCorrectly() throws Exception {
+ ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), null);
+ XContentBuilder builder = jsonBuilder();
+ builder.startObject();
+ info.toXContent(builder, ToXContent.EMPTY_PARAMS);
+ builder.endObject();
+
+ BytesReference bytesReference = builder.bytes();
+ XContentParser parser = XContentFactory.xContent(bytesReference).createParser(bytesReference);
+ Map<String, Object> map = parser.mapAndClose();
+ assertThat(map, hasKey("foo"));
+ map = (Map<String, Object>) map.get("foo");
+ assertThat(map, hasKey("queue_size"));
+ assertThat(map.get("queue_size").toString(), is("-1"));
+ }
+
+ @Test
+ public void testThatNegativeSettingAllowsToStart() throws InterruptedException {
+ Settings settings = settingsBuilder().put("name", "index").put("threadpool.index.queue_size", "-1").build();
+ ThreadPool threadPool = new ThreadPool(settings);
+ assertThat(threadPool.info("index").getQueueSize(), is(nullValue()));
+ terminate(threadPool);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
new file mode 100644
index 0000000000..27a379e5de
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java
@@ -0,0 +1,300 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.threadpool;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool.Names;
+import org.junit.Test;
+
+import java.lang.reflect.Field;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.*;
+
+/**
+ */
+public class UpdateThreadPoolSettingsTests extends ElasticsearchTestCase {
+
+ private ThreadPool.Info info(ThreadPool threadPool, String name) {
+ for (ThreadPool.Info info : threadPool.info()) {
+ if (info.getName().equals(name)) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ @Test
+ public void testCachedExecutorType() throws InterruptedException {
+ ThreadPool threadPool = new ThreadPool(
+ Settings.settingsBuilder()
+ .put("threadpool.search.type", "cached")
+ .put("name","testCachedExecutorType").build());
+
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "same").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("same"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(MoreExecutors.directExecutor().getClass()));
+
+ // Replace with different type again
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(1));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "cached").build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ // Make sure keep alive value reused
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change keep alive
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Set the same keep alive
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.keep_alive", "1m").build());
+ // Make sure keep alive value didn't change
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(1L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("cached"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+ terminate(threadPool);
+ }
+
+ @Test
+ public void testFixedExecutorType() throws InterruptedException {
+ ThreadPool threadPool = new ThreadPool(settingsBuilder()
+ .put("threadpool.search.type", "fixed")
+ .put("name","testCachedExecutorType").build());
+
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Replace with different type
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+
+ // Put old type back
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "fixed")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ // Make sure keep alive value is not used
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive(), nullValue());
+ // Make sure keep pool size value were reused
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(15));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+
+ // Change size
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.size", "10").build());
+ // Make sure size values changed
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(10));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(10));
+ // Make sure executor didn't change
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("fixed"));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ // Change queue capacity
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.queue", "500")
+ .build());
+
+ terminate(threadPool);
+ }
+
+
+ @Test
+ public void testScalingExecutorType() throws InterruptedException {
+ ThreadPool threadPool = new ThreadPool(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.size", 10)
+ .put("name","testCachedExecutorType").build());
+
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(1));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(10));
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(5L));
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+
+ // Change settings that doesn't require pool replacement
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.updateSettings(settingsBuilder()
+ .put("threadpool.search.type", "scaling")
+ .put("threadpool.search.keep_alive", "10m")
+ .put("threadpool.search.min", "2")
+ .put("threadpool.search.size", "15")
+ .build());
+ assertThat(info(threadPool, Names.SEARCH).getType(), equalTo("scaling"));
+ assertThat(threadPool.executor(Names.SEARCH), instanceOf(EsThreadPoolExecutor.class));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getCorePoolSize(), equalTo(2));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getMaximumPoolSize(), equalTo(15));
+ assertThat(info(threadPool, Names.SEARCH).getMin(), equalTo(2));
+ assertThat(info(threadPool, Names.SEARCH).getMax(), equalTo(15));
+ // Make sure keep alive value changed
+ assertThat(info(threadPool, Names.SEARCH).getKeepAlive().minutes(), equalTo(10L));
+ assertThat(((EsThreadPoolExecutor) threadPool.executor(Names.SEARCH)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(10L));
+ assertThat(threadPool.executor(Names.SEARCH), sameInstance(oldExecutor));
+
+ terminate(threadPool);
+ }
+
+ @Test(timeout = 10000)
+ public void testShutdownDownNowDoesntBlock() throws Exception {
+ ThreadPool threadPool = new ThreadPool(Settings.settingsBuilder()
+ .put("threadpool.search.type", "cached")
+ .put("name","testCachedExecutorType").build());
+
+ final CountDownLatch latch = new CountDownLatch(1);
+ Executor oldExecutor = threadPool.executor(Names.SEARCH);
+ threadPool.executor(Names.SEARCH).execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Thread.sleep(20000);
+ } catch (InterruptedException ex) {
+ latch.countDown();
+ Thread.currentThread().interrupt();
+ }
+ }
+ });
+ threadPool.updateSettings(settingsBuilder().put("threadpool.search.type", "fixed").build());
+ assertThat(threadPool.executor(Names.SEARCH), not(sameInstance(oldExecutor)));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isShutdown(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminating(), equalTo(true));
+ assertThat(((ThreadPoolExecutor) oldExecutor).isTerminated(), equalTo(false));
+ threadPool.shutdownNow(); // interrupt the thread
+ latch.await();
+ terminate(threadPool);
+ }
+
+ @Test
+ public void testCustomThreadPool() throws Exception {
+ ThreadPool threadPool = new ThreadPool(Settings.settingsBuilder()
+ .put("threadpool.my_pool1.type", "cached")
+ .put("threadpool.my_pool2.type", "fixed")
+ .put("threadpool.my_pool2.size", "1")
+ .put("threadpool.my_pool2.queue_size", "1")
+ .put("name", "testCustomThreadPool").build());
+
+ ThreadPoolInfo groups = threadPool.info();
+ boolean foundPool1 = false;
+ boolean foundPool2 = false;
+ outer: for (ThreadPool.Info info : groups) {
+ if ("my_pool1".equals(info.getName())) {
+ foundPool1 = true;
+ assertThat(info.getType(), equalTo("cached"));
+ } else if ("my_pool2".equals(info.getName())) {
+ foundPool2 = true;
+ assertThat(info.getType(), equalTo("fixed"));
+ assertThat(info.getMin(), equalTo(1));
+ assertThat(info.getMax(), equalTo(1));
+ assertThat(info.getQueueSize().singles(), equalTo(1l));
+ } else {
+ for (Field field : Names.class.getFields()) {
+ if (info.getName().equalsIgnoreCase(field.getName())) {
+ // This is ok it is a default thread pool
+ continue outer;
+ }
+ }
+ fail("Unexpected pool name: " + info.getName());
+ }
+ }
+ assertThat(foundPool1, is(true));
+ assertThat(foundPool2, is(true));
+
+ // Updating my_pool2
+ Settings settings = Settings.builder()
+ .put("threadpool.my_pool2.size", "10")
+ .build();
+ threadPool.updateSettings(settings);
+
+ groups = threadPool.info();
+ foundPool1 = false;
+ foundPool2 = false;
+ outer: for (ThreadPool.Info info : groups) {
+ if ("my_pool1".equals(info.getName())) {
+ foundPool1 = true;
+ assertThat(info.getType(), equalTo("cached"));
+ } else if ("my_pool2".equals(info.getName())) {
+ foundPool2 = true;
+ assertThat(info.getMax(), equalTo(10));
+ assertThat(info.getMin(), equalTo(10));
+ assertThat(info.getQueueSize().singles(), equalTo(1l));
+ assertThat(info.getType(), equalTo("fixed"));
+ } else {
+ for (Field field : Names.class.getFields()) {
+ if (info.getName().equalsIgnoreCase(field.getName())) {
+ // This is ok it is a default thread pool
+ continue outer;
+ }
+ }
+ fail("Unexpected pool name: " + info.getName());
+ }
+ }
+ assertThat(foundPool1, is(true));
+ assertThat(foundPool2, is(true));
+ terminate(threadPool);
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
new file mode 100644
index 0000000000..8f10547096
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/timestamp/SimpleTimestampTests.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.timestamp;
+
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ */
+public class SimpleTimestampTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testSimpleTimestamp() throws Exception {
+
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("_timestamp").field("enabled", true).field("store", "yes").endObject().endObject().endObject())
+ .execute().actionGet();
+ client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
+
+ logger.info("--> check with automatic timestamp");
+ long now1 = System.currentTimeMillis();
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setRefresh(true).execute().actionGet();
+ long now2 = System.currentTimeMillis();
+
+ // we check both realtime get and non realtime get
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify its the same timestamp when going the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(true).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ // non realtime get (stored)
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, greaterThanOrEqualTo(now1));
+ assertThat(timestamp, lessThanOrEqualTo(now2));
+ // verify its the same timestamp when going the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (numeric)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("10").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(10l));
+ // verify its the same timestamp when going the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+
+ logger.info("--> check with custom timestamp (string)");
+ client().prepareIndex("test", "type1", "1").setSource("field1", "value1").setTimestamp("1970-01-01T00:00:00.020").setRefresh(true).execute().actionGet();
+
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(20l));
+ // verify its the same timestamp when going the replica
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_timestamp").setRealtime(false).execute().actionGet();
+ assertThat(((Number) getResponse.getField("_timestamp").getValue()).longValue(), equalTo(timestamp));
+ }
+
+ @Test // issue 5053
+ public void testThatUpdatingMappingShouldNotRemoveTimestampConfiguration() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_timestamp").field("enabled", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertTimestampMappingEnabled(index, type, true);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "string").endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+ // make sure timestamp field is still in mapping
+ assertTimestampMappingEnabled(index, type, true);
+ }
+
+ @Test
+ public void testThatTimestampCanBeSwitchedOnAndOff() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_timestamp").field("enabled", true).field("store", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertTimestampMappingEnabled(index, type, true);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("_timestamp").field("enabled", false).field("store", true).endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+ // make sure timestamp field is still in mapping
+ assertTimestampMappingEnabled(index, type, false);
+ }
+
+ private void assertTimestampMappingEnabled(String index, String type, boolean enabled) {
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get();
+ MappingMetaData.Timestamp timestamp = getMappingsResponse.getMappings().get(index).get(type).timestamp();
+ assertThat(timestamp, is(notNullValue()));
+ String errMsg = String.format(Locale.ROOT, "Expected timestamp field mapping to be "+ (enabled ? "enabled" : "disabled") +" for %s/%s", index, type);
+ assertThat(errMsg, timestamp.enabled(), is(enabled));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
new file mode 100644
index 0000000000..cf70f6ad2f
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java
@@ -0,0 +1,1220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.transport.TransportRequestOptions.options;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase {
+
+ protected ThreadPool threadPool;
+
+ protected static final Version version0 = Version.fromId(/*0*/99);
+ protected DiscoveryNode nodeA;
+ protected MockTransportService serviceA;
+
+ protected static final Version version1 = Version.fromId(199);
+ protected DiscoveryNode nodeB;
+ protected MockTransportService serviceB;
+
+ protected abstract MockTransportService build(Settings settings, Version version);
+
+ @Override
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+ threadPool = new ThreadPool(getClass().getName());
+ serviceA = build(
+ Settings.builder().put("name", "TS_A", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(),
+ version0
+ );
+ nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version0);
+ serviceB = build(
+ Settings.builder().put("name", "TS_B", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(),
+ version1
+ );
+ nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version1);
+
+ // wait till all nodes are properly connected and the event has been sent, so tests in this class
+ // will not get this callback called on the connections done in this setup
+ final boolean useLocalNode = randomBoolean();
+ final CountDownLatch latch = new CountDownLatch(useLocalNode ? 2 : 4);
+ TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ fail("disconnect should not be called " + node);
+ }
+ };
+ serviceA.addConnectionListener(waitForConnection);
+ serviceB.addConnectionListener(waitForConnection);
+
+ if (useLocalNode) {
+ logger.info("--> using local node optimization");
+ serviceA.setLocalNode(nodeA);
+ serviceB.setLocalNode(nodeB);
+ } else {
+ logger.info("--> actively connecting to local node");
+ serviceA.connectToNode(nodeA);
+ serviceB.connectToNode(nodeB);
+ }
+
+ serviceA.connectToNode(nodeB);
+ serviceB.connectToNode(nodeA);
+
+ assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ serviceA.removeConnectionListener(waitForConnection);
+ serviceB.removeConnectionListener(waitForConnection);
+ }
+
+ @Override
+ @After
+ public void tearDown() throws Exception {
+ super.tearDown();
+ serviceA.close();
+ serviceB.close();
+ terminate(threadPool);
+ }
+
+ @Test
+ public void testHelloWorld() {
+ serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ @Test
+ public void testLocalNodeConnection() throws InterruptedException {
+ assertTrue("serviceA is not connected to nodeA", serviceA.nodeConnected(nodeA));
+ if (((TransportService) serviceA).getLocalNode() != null) {
+ // this should be a noop
+ serviceA.disconnectFromNode(nodeA);
+ }
+ final AtomicReference<Exception> exception = new AtomicReference<>();
+ serviceA.registerRequestHandler("localNode", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ try {
+ channel.sendResponse(new StringMessageResponse(request.message));
+ } catch (IOException e) {
+ exception.set(e);
+ }
+ }
+ });
+ final AtomicReference<String> responseString = new AtomicReference<>();
+ final CountDownLatch responseLatch = new CountDownLatch(1);
+ serviceA.sendRequest(nodeA, "localNode", new StringMessageRequest("test"), new TransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ responseString.set(response.message);
+ responseLatch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exception.set(exp);
+ responseLatch.countDown();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+ });
+ responseLatch.await();
+ assertNull(exception.get());
+ assertThat(responseString.get(), equalTo("test"));
+ }
+
+ @Test
+ public void testVoidMessageCompressed() {
+ serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<TransportRequest.Empty>() {
+ @Override
+ public void messageReceived(TransportRequest.Empty request, TransportChannel channel) {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<TransportResponse.Empty> res = serviceB.submitRequest(nodeA, "sayHello",
+ TransportRequest.Empty.INSTANCE, TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<TransportResponse.Empty>() {
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ TransportResponse.Empty message = res.get();
+ assertThat(message, notNullValue());
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ // Same as testVoidMessageCompressed but with a string payload: verifies the
+ // compressed response body ("hello moshe") survives the round trip intact.
+ @Test
+ public void testHelloWorldCompressed() {
+ serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message), TransportResponseOptions.options().withCompress(true));
+ } catch (IOException e) {
+ e.printStackTrace();
+ // an IOException while sending the response fails the test
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withCompress(true), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello moshe", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.get();
+ assertThat("hello moshe", equalTo(message.message));
+ } catch (Exception e) {
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ // A handler that throws must propagate the failure to the remote caller: both the
+ // async handleException callback and the blocking txGet() must see the original
+ // RuntimeException message as the cause.
+ @Test
+ public void testErrorMessage() {
+ serviceA.registerRequestHandler("sayHelloException", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ assertThat("moshe", equalTo(request.message));
+ throw new RuntimeException("bad message !!!");
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloException",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ fail("got response instead of exception");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat("bad message !!!", equalTo(exp.getCause().getMessage()));
+ }
+ });
+
+ try {
+ res.txGet();
+ fail("exception should be thrown");
+ } catch (Exception e) {
+ assertThat(e.getCause().getMessage(), equalTo("bad message !!!"));
+ }
+
+ serviceA.removeHandler("sayHelloException");
+ }
+
+ // Closing serviceB must fire onNodeDisconnected on a listener registered with
+ // serviceA (within 5s); onNodeConnected must not fire since all connections
+ // were established before the listener was added.
+ @Test
+ public void testDisconnectListener() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportConnectionListener disconnectListener = new TransportConnectionListener() {
+ @Override
+ public void onNodeConnected(DiscoveryNode node) {
+ fail("node connected should not be called, all connection have been done previously, node: " + node);
+ }
+
+ @Override
+ public void onNodeDisconnected(DiscoveryNode node) {
+ latch.countDown();
+ }
+ };
+ serviceA.addConnectionListener(disconnectListener);
+ serviceB.close();
+ assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true));
+ }
+
+ // Registers a "foobar" handler that stops serviceB while the request is still in
+ // flight, then verifies the pending future is failed with a TransportException
+ // (rather than hanging) once the sending service shuts down.
+ @Test
+ public void testNotifyOnShutdown() throws Exception {
+ final CountDownLatch latch2 = new CountDownLatch(1);
+
+ serviceA.registerRequestHandler("foobar", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ try {
+ latch2.await();
+ logger.info("Stop ServiceB now");
+ serviceB.stop();
+ } catch (Exception e) {
+ fail(e.getMessage());
+ }
+ }
+ });
+ TransportFuture<TransportResponse.Empty> foobar = serviceB.submitRequest(nodeA, "foobar",
+ new StringMessageRequest(""), options(), EmptyTransportResponseHandler.INSTANCE_SAME);
+ latch2.countDown();
+ try {
+ foobar.txGet();
+ fail("TransportException expected");
+ } catch (TransportException ex) {
+
+ }
+ // fix: remove the handler that was actually registered ("foobar"); the previous
+ // argument ("sayHelloTimeoutDelayedResponse") was a copy-paste leftover from
+ // testTimeoutSendExceptionWithDelayedResponse and left the real handler registered
+ serviceA.removeHandler("foobar");
+ }
+
+ // A handler that deliberately never responds must trip the client-side request
+ // timeout (100ms): both the async callback and txGet() must observe a
+ // ReceiveTimeoutTransportException.
+ @Test
+ public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception {
+ serviceA.registerRequestHandler("sayHelloTimeoutNoResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ assertThat("moshe", equalTo(request.message));
+ // don't send back a response
+// try {
+// channel.sendResponse(new StringMessage("hello " + request.message));
+// } catch (IOException e) {
+// e.printStackTrace();
+// assertThat(e.getMessage(), false, equalTo(true));
+// }
+ }
+ });
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutNoResponse",
+ new StringMessageRequest("moshe"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ fail("got response instead of exception");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ fail("exception should be thrown");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutNoResponse");
+ }
+
+ // The handler sleeps for the duration encoded in the request message before
+ // responding. A 300ms delay against a 100ms client timeout must produce a
+ // ReceiveTimeoutTransportException; afterwards the channel must still be usable:
+ // ten quick follow-up requests with a generous timeout must all succeed.
+ @Test
+ public void testTimeoutSendExceptionWithDelayedResponse() throws Exception {
+ serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) {
+ // the request message itself encodes the sleep duration, e.g. "300ms"
+ TimeValue sleep = TimeValue.parseTimeValue(request.message, null, "sleep");
+ try {
+ Thread.sleep(sleep.millis());
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ try {
+ channel.sendResponse(new StringMessageResponse("hello " + request.message));
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+ final CountDownLatch latch = new CountDownLatch(1);
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest("300ms"), options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ latch.countDown();
+ fail("got response instead of exception");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ latch.countDown();
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ StringMessageResponse message = res.txGet();
+ fail("exception should be thrown");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ latch.await();
+
+ for (int i = 0; i < 10; i++) {
+ final int counter = i;
+ // now send more requests, this time with tiny handler delays (i ms) and a
+ // generous 3s timeout, so they should all complete successfully
+ res = serviceB.submitRequest(nodeA, "sayHelloTimeoutDelayedResponse",
+ new StringMessageRequest(counter + "ms"), options().withTimeout(3000), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ assertThat("hello " + counter + "ms", equalTo(response.message));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail("got exception instead of a response for " + counter + ": " + exp.getDetailedMessage());
+ }
+ });
+
+ StringMessageResponse message = res.txGet();
+ assertThat(message.message, equalTo("hello " + counter + "ms"));
+ }
+
+ serviceA.removeHandler("sayHelloTimeoutDelayedResponse");
+ }
+
+
+ // Exercises the transport tracer: registers a plain handler ("test") and a failing
+ // one ("testError") on both services, attaches a shared Tracer, and verifies which
+ // trace events fire for success vs. error paths, before and after applying trace
+ // include/exclude settings to serviceA.
+ // NOTE(review): the logger name "test. transport.tracer" below contains a stray
+ // space — presumably intended as "test.transport.tracer"; confirm before relying
+ // on this annotation actually enabling TRACE logging for the tracer.
+ @Test
+ @TestLogging(value = "test. transport.tracer:TRACE")
+ public void testTracerLog() throws InterruptedException {
+ TransportRequestHandler handler = new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ channel.sendResponse(new StringMessageResponse(""));
+ }
+ };
+
+ TransportRequestHandler handlerWithError = new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ // optional artificial delay before failing, driven by the request's timeout field
+ if (request.timeout() > 0) {
+ Thread.sleep(request.timeout);
+ }
+ channel.sendResponse(new RuntimeException(""));
+
+ }
+ };
+
+ final Semaphore requestCompleted = new Semaphore(0);
+ TransportResponseHandler noopResponseHandler = new BaseTransportResponseHandler<StringMessageResponse>() {
+
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ requestCompleted.release();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ requestCompleted.release();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ };
+
+ serviceA.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler);
+ serviceA.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError);
+ serviceB.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler);
+ serviceB.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError);
+
+ final Tracer tracer = new Tracer();
+ serviceA.addTracer(tracer);
+ serviceB.addTracer(tracer);
+
+ // success path: expect 4 events (request sent/received, response sent/received)
+ tracer.reset(4);
+ boolean timeout = randomBoolean();
+ TransportRequestOptions options = timeout ? new TransportRequestOptions().withTimeout(1) : TransportRequestOptions.EMPTY;
+ serviceA.sendRequest(nodeB, "test", new StringMessageRequest("", 10), options, noopResponseHandler);
+ requestCompleted.acquire();
+ tracer.expectedEvents.get().await();
+ assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
+ assertThat("didn't see request received", tracer.sawRequestReceived, equalTo(true));
+ assertThat("didn't see response sent", tracer.sawResponseSent, equalTo(true));
+ assertThat("didn't see response received", tracer.sawResponseReceived, equalTo(true));
+ assertThat("saw error sent", tracer.sawErrorSent, equalTo(false));
+
+ // error path: expect an error-sent event instead of a response-sent event
+ tracer.reset(4);
+ serviceA.sendRequest(nodeB, "testError", new StringMessageRequest(""), noopResponseHandler);
+ requestCompleted.acquire();
+ tracer.expectedEvents.get().await();
+ assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
+ assertThat("didn't see request received", tracer.sawRequestReceived, equalTo(true));
+ assertThat("saw response sent", tracer.sawResponseSent, equalTo(false));
+ assertThat("didn't see response received", tracer.sawResponseReceived, equalTo(true));
+ assertThat("didn't see error sent", tracer.sawErrorSent, equalTo(true));
+
+ // now apply include/exclude settings that keep "test" traced but drop "testError"
+ String includeSettings;
+ String excludeSettings;
+ if (randomBoolean()) {
+ // sometimes leave include empty (default)
+ includeSettings = randomBoolean() ? "*" : "";
+ excludeSettings = "*Error";
+ } else {
+ includeSettings = "test";
+ excludeSettings = "DOESN'T_MATCH";
+ }
+
+ serviceA.applySettings(Settings.builder()
+ .put(TransportService.SETTING_TRACE_LOG_INCLUDE, includeSettings, TransportService.SETTING_TRACE_LOG_EXCLUDE, excludeSettings)
+ .build());
+
+ // "test" still matches the include list: all 4 events as before
+ tracer.reset(4);
+ serviceA.sendRequest(nodeB, "test", new StringMessageRequest(""), noopResponseHandler);
+ requestCompleted.acquire();
+ tracer.expectedEvents.get().await();
+ assertThat("didn't see request sent", tracer.sawRequestSent, equalTo(true));
+ assertThat("didn't see request received", tracer.sawRequestReceived, equalTo(true));
+ assertThat("didn't see response sent", tracer.sawResponseSent, equalTo(true));
+ assertThat("didn't see response received", tracer.sawResponseReceived, equalTo(true));
+ assertThat("saw error sent", tracer.sawErrorSent, equalTo(false));
+
+ // "testError" is now filtered on serviceA's side: only the two serviceB-side
+ // events (request received, error sent) are expected
+ tracer.reset(2);
+ serviceA.sendRequest(nodeB, "testError", new StringMessageRequest(""), noopResponseHandler);
+ requestCompleted.acquire();
+ tracer.expectedEvents.get().await();
+ assertThat("saw request sent", tracer.sawRequestSent, equalTo(false));
+ assertThat("didn't see request received", tracer.sawRequestReceived, equalTo(true));
+ assertThat("saw response sent", tracer.sawResponseSent, equalTo(false));
+ assertThat("saw response received", tracer.sawResponseReceived, equalTo(false));
+ assertThat("didn't see error sent", tracer.sawErrorSent, equalTo(true));
+ }
+
+ // Test tracer that records which transport events were observed and counts them
+ // down on a latch so the test can wait until the expected number have fired.
+ private static class Tracer extends MockTransportService.Tracer {
+ public volatile boolean sawRequestSent;
+ public volatile boolean sawRequestReceived;
+ public volatile boolean sawResponseSent;
+ public volatile boolean sawErrorSent;
+ public volatile boolean sawResponseReceived;
+
+ // latch sized by reset(); each traced event counts it down once
+ public AtomicReference<CountDownLatch> expectedEvents = new AtomicReference<>();
+
+
+ @Override
+ public void receivedRequest(long requestId, String action) {
+ super.receivedRequest(requestId, action);
+ sawRequestReceived = true;
+ expectedEvents.get().countDown();
+ }
+
+ @Override
+ public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
+ super.requestSent(node, requestId, action, options);
+ sawRequestSent = true;
+ expectedEvents.get().countDown();
+ }
+
+ @Override
+ public void responseSent(long requestId, String action) {
+ super.responseSent(requestId, action);
+ sawResponseSent = true;
+ expectedEvents.get().countDown();
+ }
+
+ // this overload is invoked when an error (Throwable) is sent back instead of a response
+ @Override
+ public void responseSent(long requestId, String action, Throwable t) {
+ super.responseSent(requestId, action, t);
+ sawErrorSent = true;
+ expectedEvents.get().countDown();
+ }
+
+ @Override
+ public void receivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
+ super.receivedResponse(requestId, sourceNode, action);
+ sawResponseReceived = true;
+ expectedEvents.get().countDown();
+ }
+
+ // clears all flags and arms a fresh latch expecting expectedCount events
+ public void reset(int expectedCount) {
+ sawRequestSent = false;
+ sawRequestReceived = false;
+ sawResponseSent = false;
+ sawErrorSent = false;
+ sawResponseReceived = false;
+ expectedEvents.set(new CountDownLatch(expectedCount));
+ }
+ }
+
+
+ // Simple test request carrying a text payload plus a timeout value (used by some
+ // handlers as an artificial delay in milliseconds; -1 means none).
+ static class StringMessageRequest extends TransportRequest {
+
+ private String message;
+ private long timeout;
+
+ StringMessageRequest(String message, long timeout) {
+ this.message = message;
+ this.timeout = timeout;
+ }
+
+ StringMessageRequest() {
+ }
+
+ public StringMessageRequest(String message) {
+ this(message, -1);
+ }
+
+ public long timeout() {
+ return timeout;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ timeout = in.readLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ out.writeLong(timeout);
+ }
+ }
+
+ // Simple test response carrying a single text payload.
+ static class StringMessageResponse extends TransportResponse {
+
+ private String message;
+
+ StringMessageResponse(String message) {
+ this.message = message;
+ }
+
+ StringMessageResponse() {
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ message = in.readString();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeString(message);
+ }
+ }
+
+
+ // Baseline request for the version compatibility tests: serializes only value1.
+ static class Version0Request extends TransportRequest {
+
+ int value1;
+
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ // Extends Version0Request with value2, which is only (de)serialized when the
+ // remote side's stream version is on or after version1 — the core of the
+ // backward/forward compatibility tests below.
+ static class Version1Request extends Version0Request {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ // Baseline response for the version compatibility tests: serializes only value1.
+ static class Version0Response extends TransportResponse {
+
+ int value1;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ value1 = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeInt(value1);
+ }
+ }
+
+ // Extends Version0Response with value2, gated on the stream version being on or
+ // after version1, mirroring Version1Request.
+ static class Version1Response extends Version0Response {
+
+ int value2;
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ if (in.getVersion().onOrAfter(version1)) {
+ value2 = in.readInt();
+ }
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ if (out.getVersion().onOrAfter(version1)) {
+ out.writeInt(value2);
+ }
+ }
+ }
+
+ // A version-0 sender (serviceA) talks to a version-1 handler (serviceB): the
+ // handler must see value2 at its default (0, never sent by A), and A must still
+ // be able to read the response as a Version0Response.
+ @Test
+ public void testVersion_from0to1() throws Exception {
+ serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler<Version1Request>() {
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(0)); // not set, coming from service A
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeB, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+
+ // A version-1 sender (serviceB) talks to a version-0 handler (serviceA): value2
+ // is silently dropped on the wire in both directions, so the response comes back
+ // with value2 at its default (0).
+ @Test
+ public void testVersion_from1to0() throws Exception {
+ serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler<Version0Request>() {
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeA, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(0)); // initial values, cause its serialized from version 0
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(0));
+ }
+
+ // Version-1 to version-1 (serviceB talking to itself): both value1 and value2
+ // must survive the round trip unchanged.
+ @Test
+ public void testVersion_from1to1() throws Exception {
+ serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler<Version1Request>() {
+ @Override
+ public void messageReceived(Version1Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ assertThat(request.value2, equalTo(2));
+ Version1Response response = new Version1Response();
+ response.value1 = 1;
+ response.value2 = 2;
+ channel.sendResponse(response);
+ }
+ });
+
+ Version1Request version1Request = new Version1Request();
+ version1Request.value1 = 1;
+ version1Request.value2 = 2;
+ Version1Response version1Response = serviceB.submitRequest(nodeB, "/version", version1Request, new BaseTransportResponseHandler<Version1Response>() {
+ @Override
+ public Version1Response newInstance() {
+ return new Version1Response();
+ }
+
+ @Override
+ public void handleResponse(Version1Response response) {
+ assertThat(response.value1, equalTo(1));
+ assertThat(response.value2, equalTo(2));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version1Response.value1, equalTo(1));
+ assertThat(version1Response.value2, equalTo(2));
+ }
+
+ // Version-0 to version-0 (serviceA talking to itself): the baseline value1 field
+ // must round-trip unchanged.
+ @Test
+ public void testVersion_from0to0() throws Exception {
+ serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler<Version0Request>() {
+ @Override
+ public void messageReceived(Version0Request request, TransportChannel channel) throws Exception {
+ assertThat(request.value1, equalTo(1));
+ Version0Response response = new Version0Response();
+ response.value1 = 1;
+ channel.sendResponse(response);
+ }
+ });
+
+ Version0Request version0Request = new Version0Request();
+ version0Request.value1 = 1;
+ Version0Response version0Response = serviceA.submitRequest(nodeA, "/version", version0Request, new BaseTransportResponseHandler<Version0Response>() {
+ @Override
+ public Version0Response newInstance() {
+ return new Version0Response();
+ }
+
+ @Override
+ public void handleResponse(Version0Response response) {
+ assertThat(response.value1, equalTo(1));
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ fail();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ }).txGet();
+
+ assertThat(version0Response.value1, equalTo(1));
+ }
+
+ // With a mock fail-to-send (no-connect) rule installed on serviceB for nodeA,
+ // requests must fail with a simulated DISCONNECT and subsequent connect attempts
+ // (full and light) must throw ConnectTransportException.
+ @Test
+ public void testMockFailToSendNoConnectRule() {
+ serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ assertThat("moshe", equalTo(request.message));
+ throw new RuntimeException("bad message !!!");
+ }
+ });
+
+ serviceB.addFailToSendNoConnectRule(nodeA);
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ fail("got response instead of exception");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp.getCause().getMessage(), endsWith("DISCONNECT: simulated"));
+ }
+ });
+
+ try {
+ res.txGet();
+ fail("exception should be thrown");
+ } catch (Exception e) {
+ assertThat(e.getCause().getMessage(), endsWith("DISCONNECT: simulated"));
+ }
+
+ try {
+ serviceB.connectToNode(nodeA);
+ fail("exception should be thrown");
+ } catch (ConnectTransportException e) {
+ // all is well
+ }
+
+ try {
+ serviceB.connectToNodeLight(nodeA);
+ fail("exception should be thrown");
+ } catch (ConnectTransportException e) {
+ // all is well
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+ // With a mock unresponsive rule installed on serviceB for nodeA, requests must
+ // time out (ReceiveTimeoutTransportException) rather than fail fast, and connect
+ // attempts must throw ConnectTransportException.
+ @Test
+ public void testMockUnresponsiveRule() {
+ serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<StringMessageRequest>() {
+ @Override
+ public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception {
+ assertThat("moshe", equalTo(request.message));
+ throw new RuntimeException("bad message !!!");
+ }
+ });
+
+ serviceB.addUnresponsiveRule(nodeA);
+
+ TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "sayHello",
+ new StringMessageRequest("moshe"), TransportRequestOptions.options().withTimeout(100), new BaseTransportResponseHandler<StringMessageResponse>() {
+ @Override
+ public StringMessageResponse newInstance() {
+ return new StringMessageResponse();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(StringMessageResponse response) {
+ fail("got response instead of exception");
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ assertThat(exp, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+ });
+
+ try {
+ res.txGet();
+ fail("exception should be thrown");
+ } catch (Exception e) {
+ assertThat(e, instanceOf(ReceiveTimeoutTransportException.class));
+ }
+
+ try {
+ serviceB.connectToNode(nodeA);
+ fail("exception should be thrown");
+ } catch (ConnectTransportException e) {
+ // all is well
+ }
+
+ try {
+ serviceB.connectToNodeLight(nodeA);
+ fail("exception should be thrown");
+ } catch (ConnectTransportException e) {
+ // all is well
+ }
+
+ serviceA.removeHandler("sayHello");
+ }
+
+
+ // Verifies that the remote address stamped on incoming messages matches the
+ // sending node's host in both directions: the handler sees A's address on the
+ // request, and A sees B's address on the response.
+ @Test
+ public void testHostOnMessages() throws InterruptedException {
+ final CountDownLatch latch = new CountDownLatch(2);
+ final AtomicReference<TransportAddress> addressA = new AtomicReference<>();
+ final AtomicReference<TransportAddress> addressB = new AtomicReference<>();
+ serviceB.registerRequestHandler("action1", TestRequest.class, ThreadPool.Names.SAME, new TransportRequestHandler<TestRequest>() {
+ @Override
+ public void messageReceived(TestRequest request, TransportChannel channel) throws Exception {
+ addressA.set(request.remoteAddress());
+ channel.sendResponse(new TestResponse());
+ latch.countDown();
+ }
+ });
+ serviceA.sendRequest(nodeB, "action1", new TestRequest(), new TransportResponseHandler<TestResponse>() {
+ @Override
+ public TestResponse newInstance() {
+ return new TestResponse();
+ }
+
+ @Override
+ public void handleResponse(TestResponse response) {
+ addressB.set(response.remoteAddress());
+ latch.countDown();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ latch.countDown();
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+
+ if (!latch.await(10, TimeUnit.SECONDS)) {
+ fail("message round trip did not complete within a sensible time frame");
+ }
+
+ assertTrue(nodeA.address().sameHost(addressA.get()));
+ assertTrue(nodeB.address().sameHost(addressB.get()));
+ }
+
+ // Payload-less request used by testHostOnMessages.
+ private static class TestRequest extends TransportRequest {
+ }
+
+ // Payload-less response used by testHostOnMessages.
+ private static class TestResponse extends TransportResponse {
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/ActionNamesTests.java b/core/src/test/java/org/elasticsearch/transport/ActionNamesTests.java
new file mode 100644
index 0000000000..69be9f8fdf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/ActionNamesTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.startsWith;
+
+/**
+ * This test verifies that all of the action names follow our defined naming conventions.
+ * The identified categories are:
+ * - indices:admin: apis that allow to perform administration tasks against indices
+ * - indices:monitor: apis that allow to monitor indices
+ * - indices:data/read: apis that read data from indices
+ * - indices:data/write: apis that write data to indices
+ * - cluster:admin: cluster apis that allow to perform administration tasks
+ * - cluster:monitor: cluster apis that allow to monitor the system
+ * - internal: internal actions that are used from node to node but not directly exposed to users
+ *
+ * Any transport action belongs to one of the above categories and its name starts with its category, followed by a '/'
+ * and the name of the api itself (e.g. cluster:admin/nodes/restart).
+ * When an api exposes multiple transport handlers, some of which are invoked internally during the execution of the api,
+ * we use the `[n]` suffix to identify node actions and the `[s]` suffix to identify shard actions.
+ */
+public class ActionNamesTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testActionNamesCategories() throws NoSuchFieldException, IllegalAccessException {
+ // inspect every registered transport handler on a live node and check its
+ // action name starts with one of the recognized category prefixes
+ TransportService transportService = internalCluster().getInstance(TransportService.class);
+ for (String action : transportService.requestHandlers.keySet()) {
+ assertThat("action doesn't belong to known category", action, either(startsWith("indices:admin")).or(startsWith("indices:monitor"))
+ .or(startsWith("indices:data/read")).or(startsWith("indices:data/write"))
+ .or(startsWith("cluster:admin")).or(startsWith("cluster:monitor"))
+ .or(startsWith("internal:")));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportTests.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportTests.java
new file mode 100644
index 0000000000..289659867d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportTests.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableList;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.elasticsearch.action.*;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest;
+import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.ActionFilter;
+import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.PreProcessModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.plugins.AbstractPlugin;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.groovy.GroovyScriptEngineService;
+import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.*;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.node.Node.HTTP_ENABLED;
+import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = SUITE)
+public class ContextAndHeaderTransportTests extends ElasticsearchIntegrationTest {
+
+ private static final List<ActionRequest> requests = Collections.synchronizedList(new ArrayList<ActionRequest>());
+ private String randomHeaderKey = randomAsciiOfLength(10);
+ private String randomHeaderValue = randomAsciiOfLength(20);
+ private String queryIndex = "query-" + randomAsciiOfLength(10).toLowerCase(Locale.ROOT);
+ private String lookupIndex = "lookup-" + randomAsciiOfLength(10).toLowerCase(Locale.ROOT);
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("plugin.types", ActionLoggingPlugin.class.getName())
+ .put("script.indexed", "on")
+ .put(HTTP_ENABLED, true)
+ .build();
+ }
+
+ @Before
+ public void createIndices() throws Exception {
+ String mapping = jsonBuilder().startObject().startObject("type")
+ .startObject("properties")
+ .startObject("location").field("type", "geo_shape").endObject()
+ .startObject("name").field("type", "string").endObject()
+ .endObject()
+ .endObject().endObject().string();
+
+ Settings settings = settingsBuilder()
+ .put(indexSettings())
+ .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable.
+ .build();
+ assertAcked(transportClient().admin().indices().prepareCreate(lookupIndex)
+ .setSettings(settings).addMapping("type", mapping));
+ assertAcked(transportClient().admin().indices().prepareCreate(queryIndex)
+ .setSettings(settings).addMapping("type", mapping));
+ ensureGreen(queryIndex, lookupIndex);
+
+ requests.clear();
+ }
+
+ @After
+ public void checkAllRequestsContainHeaders() {
+ assertRequestsContainHeader(IndexRequest.class);
+ assertRequestsContainHeader(RefreshRequest.class);
+ }
+
+ @Test
+ public void testThatTermsLookupGetRequestContainsContextAndHeaders() throws Exception {
+ transportClient().prepareIndex(lookupIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().array("followers", "foo", "bar", "baz").endObject()).get();
+ transportClient().prepareIndex(queryIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("username", "foo").endObject()).get();
+ transportClient().admin().indices().prepareRefresh(queryIndex, lookupIndex).get();
+
+ TermsLookupQueryBuilder termsLookupFilterBuilder = QueryBuilders.termsLookupQuery("username").lookupIndex(lookupIndex).lookupType("type").lookupId("1").lookupPath("followers");
+ BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery().must(QueryBuilders.matchAllQuery()).must(termsLookupFilterBuilder);
+
+ SearchResponse searchResponse = transportClient()
+ .prepareSearch(queryIndex)
+ .setQuery(queryBuilder)
+ .get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1);
+
+ assertGetRequestsContainHeaders();
+ }
+
+ @Test
+ public void testThatGeoShapeQueryGetRequestContainsContextAndHeaders() throws Exception {
+ transportClient().prepareIndex(lookupIndex, "type", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Munich Suburban Area")
+ .startObject("location")
+ .field("type", "polygon")
+ .startArray("coordinates").startArray()
+ .startArray().value(11.34).value(48.25).endArray()
+ .startArray().value(11.68).value(48.25).endArray()
+ .startArray().value(11.65).value(48.06).endArray()
+ .startArray().value(11.37).value(48.13).endArray()
+ .startArray().value(11.34).value(48.25).endArray() // close the polygon
+ .endArray().endArray()
+ .endObject()
+ .endObject())
+ .get();
+ // second document
+ transportClient().prepareIndex(queryIndex, "type", "1").setSource(jsonBuilder().startObject()
+ .field("name", "Munich Center")
+ .startObject("location")
+ .field("type", "point")
+ .startArray("coordinates").value(11.57).value(48.13).endArray()
+ .endObject()
+ .endObject())
+ .get();
+ transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get();
+
+ GeoShapeQueryBuilder queryBuilder = QueryBuilders.geoShapeQuery("location", "1", "type")
+ .indexedShapeIndex(lookupIndex)
+ .indexedShapePath("location");
+
+ SearchResponse searchResponse = transportClient()
+ .prepareSearch(queryIndex)
+ .setQuery(queryBuilder)
+ .get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1);
+ assertThat(requests, hasSize(greaterThan(0)));
+
+ assertGetRequestsContainHeaders();
+ }
+
+ @Test
+ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHeaders() throws Exception {
+ transportClient().prepareIndex(lookupIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject())
+ .get();
+ transportClient().prepareIndex(queryIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("name", "Jar Jar Binks - A horrible mistake").endObject())
+ .get();
+ transportClient().prepareIndex(queryIndex, "type", "2")
+ .setSource(jsonBuilder().startObject().field("name", "Star Wars - Return of the jedi").endObject())
+ .get();
+ transportClient().admin().indices().prepareRefresh(lookupIndex, queryIndex).get();
+
+ MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = QueryBuilders.moreLikeThisQuery("name")
+ .addItem(new MoreLikeThisQueryBuilder.Item(lookupIndex, "type", "1"))
+ .minTermFreq(1)
+ .minDocFreq(1);
+
+ SearchResponse searchResponse = transportClient()
+ .prepareSearch(queryIndex)
+ .setQuery(moreLikeThisQueryBuilder)
+ .get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1);
+
+ assertRequestsContainHeader(MultiTermVectorsRequest.class);
+ }
+
+ @Test
+ public void testThatPercolatingExistingDocumentGetRequestContainsContextAndHeaders() throws Exception {
+ transportClient().prepareIndex(lookupIndex, ".percolator", "1")
+ .setSource(jsonBuilder().startObject().startObject("query").startObject("match").field("name", "star wars").endObject().endObject().endObject())
+ .get();
+ transportClient().prepareIndex(lookupIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject())
+ .get();
+ transportClient().admin().indices().prepareRefresh(lookupIndex).get();
+
+ GetRequest getRequest = transportClient().prepareGet(lookupIndex, "type", "1").request();
+ PercolateResponse response = transportClient().preparePercolate().setDocumentType("type").setGetRequest(getRequest).get();
+ assertThat(response.getCount(), is(1l));
+
+ assertGetRequestsContainHeaders();
+ }
+
+ @Test
+ public void testThatIndexedScriptGetRequestContainsContextAndHeaders() throws Exception {
+ PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(GroovyScriptEngineService.NAME, "my_script",
+ jsonBuilder().startObject().field("script", "_score * 10").endObject().string()
+ ).get();
+ assertThat(scriptResponse.isCreated(), is(true));
+
+ transportClient().prepareIndex(queryIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject())
+ .get();
+ transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+ // custom content, not sure how to specify "script_id" otherwise in the API
+ XContentBuilder builder = jsonBuilder().startObject().startObject("function_score").field("boost_mode", "replace").startArray("functions")
+ .startObject().startObject("script_score").field("script_id", "my_script").field("lang", "groovy").endObject().endObject().endArray().endObject().endObject();
+
+ SearchResponse searchResponse = transportClient()
+ .prepareSearch(queryIndex)
+ .setQuery(builder)
+ .get();
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1);
+ assertThat(searchResponse.getHits().getMaxScore(), is(10.0f));
+
+ assertGetRequestsContainHeaders(".scripts");
+ assertRequestsContainHeader(PutIndexedScriptRequest.class);
+ }
+
+ @Test
+ public void testThatSearchTemplatesWithIndexedTemplatesGetRequestContainsContextAndHeaders() throws Exception {
+ PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(MustacheScriptEngineService.NAME, "the_template",
+ jsonBuilder().startObject().startObject("template").startObject("query").startObject("match")
+ .field("name", "{{query_string}}").endObject().endObject().endObject().endObject().string()
+ ).get();
+ assertThat(scriptResponse.isCreated(), is(true));
+
+ transportClient().prepareIndex(queryIndex, "type", "1")
+ .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject())
+ .get();
+ transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+ Map<String, Object> params = new HashMap<>();
+ params.put("query_string", "star wars");
+
+ SearchResponse searchResponse = transportClient().prepareSearch(queryIndex)
+ .setTemplateName("the_template")
+ .setTemplateParams(params)
+ .setTemplateType(ScriptService.ScriptType.INDEXED)
+ .get();
+
+ assertNoFailures(searchResponse);
+ assertHitCount(searchResponse, 1);
+
+ assertGetRequestsContainHeaders(".scripts");
+ assertRequestsContainHeader(PutIndexedScriptRequest.class);
+ }
+
+ @Test
+ public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception {
+ String releventHeaderName = "relevant_" + randomHeaderKey;
+ for (RestController restController : internalCluster().getDataNodeInstances(RestController.class)) {
+ restController.registerRelevantHeaders(releventHeaderName);
+ }
+
+ CloseableHttpClient httpClient = HttpClients.createDefault();
+ HttpResponse response = new HttpRequestBuilder(httpClient)
+ .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class))
+ .addHeader(randomHeaderKey, randomHeaderValue)
+ .addHeader(releventHeaderName, randomHeaderValue)
+ .path("/" + queryIndex + "/_search")
+ .execute();
+
+ assertThat(response, hasStatus(OK));
+ List<SearchRequest> searchRequests = getRequests(SearchRequest.class);
+ assertThat(searchRequests, hasSize(greaterThan(0)));
+ for (SearchRequest searchRequest : searchRequests) {
+ assertThat(searchRequest.hasHeader(releventHeaderName), is(true));
+ // was not specified, thus is not included
+ assertThat(searchRequest.hasHeader(randomHeaderKey), is(false));
+ }
+ }
+
+ private <T> List<T> getRequests(Class<T> clazz) {
+ List<T> results = new ArrayList<>();
+ for (ActionRequest request : requests) {
+ if (request.getClass().equals(clazz)) {
+ results.add((T) request);
+ }
+ }
+
+ return results;
+ }
+
+ private void assertRequestsContainHeader(Class<? extends ActionRequest> clazz) {
+ List<? extends ActionRequest> classRequests = getRequests(clazz);
+ for (ActionRequest request : classRequests) {
+ assertRequestContainsHeader(request);
+ }
+ }
+
+ private void assertGetRequestsContainHeaders() {
+ assertGetRequestsContainHeaders(this.lookupIndex);
+ }
+
+ private void assertGetRequestsContainHeaders(String index) {
+ List<GetRequest> getRequests = getRequests(GetRequest.class);
+ assertThat(getRequests, hasSize(greaterThan(0)));
+
+ for (GetRequest request : getRequests) {
+ if (!request.index().equals(index)) {
+ continue;
+ }
+ assertRequestContainsHeader(request);
+ }
+ }
+
+ private void assertRequestContainsHeader(ActionRequest request) {
+ String msg = String.format(Locale.ROOT, "Expected header %s to be in request %s", randomHeaderKey, request.getClass().getName());
+ if (request instanceof IndexRequest) {
+ IndexRequest indexRequest = (IndexRequest) request;
+ msg = String.format(Locale.ROOT, "Expected header %s to be in index request %s/%s/%s", randomHeaderKey,
+ indexRequest.index(), indexRequest.type(), indexRequest.id());
+ }
+ assertThat(msg, request.hasHeader(randomHeaderKey), is(true));
+ assertThat(request.getHeader(randomHeaderKey).toString(), is(randomHeaderValue));
+ }
+
+ /**
+ * a transport client that adds our random header
+ */
+ private Client transportClient() {
+ Client transportClient = internalCluster().transportClient();
+ FilterClient filterClient = new FilterClient(transportClient) {
+ @Override
+ protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+ request.putHeader(randomHeaderKey, randomHeaderValue);
+ super.doExecute(action, request, listener);
+ }
+ };
+
+ return filterClient;
+ }
+
+ public static class ActionLoggingPlugin extends AbstractPlugin {
+
+ @Override
+ public String name() {
+ return "test-action-logging";
+ }
+
+ @Override
+ public String description() {
+ return "Test action logging";
+ }
+
+ @Override
+ public Collection<Class<? extends Module>> modules() {
+ return ImmutableList.of(ActionLoggingModule.class);
+ }
+ }
+
+ public static class ActionLoggingModule extends AbstractModule implements PreProcessModule {
+
+
+ @Override
+ protected void configure() {
+ bind(LoggingFilter.class).asEagerSingleton();
+ }
+
+ @Override
+ public void processModule(Module module) {
+ if (module instanceof ActionModule) {
+ ((ActionModule)module).registerFilter(LoggingFilter.class);
+ }
+ }
+ }
+
+ public static class LoggingFilter extends ActionFilter.Simple {
+
+ @Inject
+ public LoggingFilter(Settings settings) {
+ super(settings);
+ }
+
+ @Override
+ public int order() {
+ return 999;
+ }
+
+ @Override
+ protected boolean apply(String action, ActionRequest request, ActionListener listener) {
+ requests.add(request);
+ return true;
+ }
+
+ @Override
+ protected boolean apply(String action, ActionResponse response, ActionListener listener) {
+ return true;
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java
new file mode 100644
index 0000000000..0002c5f46a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import com.google.common.base.Charsets;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.cache.recycler.MockBigArrays;
+import org.elasticsearch.test.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.Socket;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.is;
+
+/**
+ * This test checks whether an HTTP look-alike request (starting with an HTTP method and a space)
+ * actually returns a text response instead of the connection simply being dropped
+ */
+public class NettySizeHeaderFrameDecoderTests extends ElasticsearchTestCase {
+
+ private final Settings settings = settingsBuilder().put("name", "foo").put("transport.host", "127.0.0.1").build();
+
+ private ThreadPool threadPool;
+ private NettyTransport nettyTransport;
+ private int port;
+ private String host;
+
+ @Before
+ public void startThreadPool() {
+ threadPool = new ThreadPool(settings);
+ threadPool.setNodeSettingsService(new NodeSettingsService(settings));
+ NetworkService networkService = new NetworkService(settings);
+ BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService());
+ nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT);
+ nettyTransport.start();
+ TransportService transportService = new TransportService(nettyTransport, threadPool);
+ nettyTransport.transportServiceAdapter(transportService.createAdapter());
+
+ InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) nettyTransport.boundAddress().boundAddress();
+ port = transportAddress.address().getPort();
+ host = transportAddress.address().getHostString();
+
+ }
+
+ @After
+ public void terminateThreadPool() throws InterruptedException {
+ nettyTransport.stop();
+ terminate(threadPool);
+ }
+
+ @Test
+ public void testThatTextMessageIsReturnedOnHTTPLikeRequest() throws Exception {
+ String randomMethod = randomFrom("GET", "POST", "PUT", "DELETE", "HEAD", "OPTIONS", "PATCH");
+ String data = randomMethod + " / HTTP/1.1";
+
+ try (Socket socket = new Socket(host, port)) {
+ socket.getOutputStream().write(data.getBytes(Charsets.UTF_8));
+ socket.getOutputStream().flush();
+
+ try (BufferedReader reader = new BufferedReader(new InputStreamReader(socket.getInputStream(), Charsets.UTF_8))) {
+ assertThat(reader.readLine(), is("This is not a HTTP port"));
+ }
+ }
+ }
+
+ @Test
+ public void testThatNothingIsReturnedForOtherInvalidPackets() throws Exception {
+ try (Socket socket = new Socket(host, port)) {
+ socket.getOutputStream().write("FOOBAR".getBytes(Charsets.UTF_8));
+ socket.getOutputStream().flush();
+
+ // end of stream
+ assertThat(socket.getInputStream().read(), is(-1));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/TransportMessageTests.java b/core/src/test/java/org/elasticsearch/transport/TransportMessageTests.java
new file mode 100644
index 0000000000..4fdd076f7c
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/TransportMessageTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+public class TransportMessageTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSerialization() throws Exception {
+ Message message = new Message();
+ message.putHeader("key1", "value1");
+ message.putHeader("key2", "value2");
+ message.putInContext("key3", "value3");
+
+ BytesStreamOutput out = new BytesStreamOutput();
+ out.setVersion(Version.CURRENT);
+ message.writeTo(out);
+ StreamInput in = StreamInput.wrap(out.bytes());
+ in.setVersion(Version.CURRENT);
+ message = new Message();
+ message.readFrom(in);
+ assertThat(message.getHeaders().size(), is(2));
+ assertThat((String) message.getHeader("key1"), equalTo("value1"));
+ assertThat((String) message.getHeader("key2"), equalTo("value2"));
+ assertThat(message.isContextEmpty(), is(true));
+
+ // ensure that casting is not needed
+ String key1 = message.getHeader("key1");
+ assertThat(key1, is("value1"));
+ }
+
+ @Test
+ public void testCopyHeadersAndContext() throws Exception {
+ Message m1 = new Message();
+ m1.putHeader("key1", "value1");
+ m1.putHeader("key2", "value2");
+ m1.putInContext("key3", "value3");
+
+ Message m2 = new Message(m1);
+
+ assertThat(m2.getHeaders().size(), is(2));
+ assertThat((String) m2.getHeader("key1"), equalTo("value1"));
+ assertThat((String) m2.getHeader("key2"), equalTo("value2"));
+ assertThat((String) m2.getFromContext("key3"), equalTo("value3"));
+
+ // ensure that casting is not needed
+ String key3 = m2.getFromContext("key3");
+ assertThat(key3, is("value3"));
+ testContext(m2, "key3", "value3");
+ }
+
+ // ensure that generic arg like this is not needed: TransportMessage<?> transportMessage
+ private void testContext(TransportMessage transportMessage, String key, String expectedValue) {
+ String result = transportMessage.getFromContext(key);
+ assertThat(result, is(expectedValue));
+
+ }
+
+ private static class Message extends TransportMessage<Message> {
+
+ private Message() {
+ }
+
+ private Message(Message message) {
+ super(message);
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java b/core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
new file mode 100644
index 0000000000..4c04c79ff7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/local/SimpleLocalTransportTests.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.local;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+
+public class SimpleLocalTransportTests extends AbstractSimpleTransportTests {
+
+ @Override
+ protected MockTransportService build(Settings settings, Version version) {
+ MockTransportService transportService = new MockTransportService(Settings.EMPTY, new LocalTransport(settings, threadPool, version), threadPool);
+ transportService.start();
+ return transportService;
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
new file mode 100644
index 0000000000..1f5e5f9c9e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.common.util.concurrent.KeyedLock;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.not;
+
+public class KeyedLockTests extends ElasticsearchTestCase {
+
+ @Test
+ public void checkIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException {
+ ConcurrentHashMap<String, Integer> counter = new ConcurrentHashMap<>();
+ ConcurrentHashMap<String, AtomicInteger> safeCounter = new ConcurrentHashMap<>();
+ KeyedLock<String> connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable<String>(randomBoolean()) : new KeyedLock<String>(randomBoolean());
+ String[] names = new String[randomIntBetween(1, 40)];
+ for (int i = 0; i < names.length; i++) {
+ names[i] = randomRealisticUnicodeOfLengthBetween(10, 20);
+ }
+ CountDownLatch startLatch = new CountDownLatch(1);
+ int numThreads = randomIntBetween(3, 10);
+ AcquireAndReleaseThread[] threads = new AcquireAndReleaseThread[numThreads];
+ for (int i = 0; i < numThreads; i++) {
+ threads[i] = new AcquireAndReleaseThread(startLatch, connectionLock, names, counter, safeCounter);
+ }
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].start();
+ }
+ startLatch.countDown();
+ for (int i = 0; i < numThreads; i++) {
+ if (randomBoolean()) {
+ threads[i].incWithGlobal();
+ }
+ }
+
+ for (int i = 0; i < numThreads; i++) {
+ threads[i].join();
+ }
+ assertThat(connectionLock.hasLockedKeys(), equalTo(false));
+
+ Set<Entry<String, Integer>> entrySet = counter.entrySet();
+ assertThat(counter.size(), equalTo(safeCounter.size()));
+ for (Entry<String, Integer> entry : entrySet) {
+ AtomicInteger atomicInteger = safeCounter.get(entry.getKey());
+ assertThat(atomicInteger, not(Matchers.nullValue()));
+ assertThat(atomicInteger.get(), equalTo(entry.getValue()));
+ }
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void checkCannotAcquireTwoLocksGlobal() throws InterruptedException {
+ KeyedLock.GlobalLockable<String> connectionLock = new KeyedLock.GlobalLockable<>();
+ String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50));
+ connectionLock.acquire(name);
+ try {
+ connectionLock.acquire(name);
+ } finally {
+ connectionLock.release(name);
+ connectionLock.globalLock().lock();
+ connectionLock.globalLock().unlock();
+ }
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void checkCannotAcquireTwoLocks() throws InterruptedException {
+ KeyedLock<String> connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable<String>() : new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50));
+ connectionLock.acquire(name);
+ connectionLock.acquire(name);
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void checkCannotReleaseUnacquiredLock() throws InterruptedException {
+ KeyedLock<String> connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable<String>() : new KeyedLock<String>();
+ String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50));
+ connectionLock.release(name);
+ }
+
+ public static class AcquireAndReleaseThread extends Thread {
+ private CountDownLatch startLatch;
+ KeyedLock<String> connectionLock;
+ String[] names;
+ ConcurrentHashMap<String, Integer> counter;
+ ConcurrentHashMap<String, AtomicInteger> safeCounter;
+
+ public AcquireAndReleaseThread(CountDownLatch startLatch, KeyedLock<String> connectionLock, String[] names,
+ ConcurrentHashMap<String, Integer> counter, ConcurrentHashMap<String, AtomicInteger> safeCounter) {
+ this.startLatch = startLatch;
+ this.connectionLock = connectionLock;
+ this.names = names;
+ this.counter = counter;
+ this.safeCounter = safeCounter;
+ }
+
+ @Override
+ public void run() {
+ try {
+ startLatch.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException();
+ }
+ int numRuns = scaledRandomIntBetween(5000, 50000);
+ for (int i = 0; i < numRuns; i++) {
+ String curName = names[randomInt(names.length - 1)];
+ connectionLock.acquire(curName);
+ try {
+ Integer integer = counter.get(curName);
+ if (integer == null) {
+ counter.put(curName, 1);
+ } else {
+ counter.put(curName, integer.intValue() + 1);
+ }
+ } finally {
+ connectionLock.release(curName);
+ }
+ AtomicInteger atomicInteger = new AtomicInteger(0);
+ AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger);
+ if (value == null) {
+ atomicInteger.incrementAndGet();
+ } else {
+ value.incrementAndGet();
+ }
+ }
+ }
+
+ public void incWithGlobal() {
+ if (connectionLock instanceof KeyedLock.GlobalLockable) {
+ final int iters = randomIntBetween(10, 200);
+ for (int i = 0; i < iters; i++) {
+ ((KeyedLock.GlobalLockable) connectionLock).globalLock().lock();
+ try {
+ String curName = names[randomInt(names.length - 1)];
+ Integer integer = counter.get(curName);
+ if (integer == null) {
+ counter.put(curName, 1);
+ } else {
+ counter.put(curName, integer.intValue() + 1);
+ }
+ AtomicInteger atomicInteger = new AtomicInteger(0);
+ AtomicInteger value = safeCounter.putIfAbsent(curName, atomicInteger);
+ if (value == null) {
+ atomicInteger.incrementAndGet();
+ } else {
+ value.incrementAndGet();
+ }
+ } finally {
+ ((KeyedLock.GlobalLockable) connectionLock).globalLock().unlock();
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java
new file mode 100644
index 0000000000..6b885c21a7
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport.netty;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ */
+public class NettyScheduledPingTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testScheduledPing() throws Exception {
+ ThreadPool threadPool = new ThreadPool(getClass().getName());
+
+ int startPort = 11000 + randomIntBetween(0, 255);
+ int endPort = startPort + 10;
+ Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", startPort + "-" + endPort).build();
+
+ final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT);
+ MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
+ serviceA.start();
+
+ final NettyTransport nettyB = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT);
+ MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
+ serviceB.start();
+
+ DiscoveryNode nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), Version.CURRENT);
+ DiscoveryNode nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), Version.CURRENT);
+
+ serviceA.connectToNode(nodeB);
+ serviceB.connectToNode(nodeA);
+
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(nettyA.scheduledPing.successfulPings.count(), greaterThan(100l));
+ assertThat(nettyB.scheduledPing.successfulPings.count(), greaterThan(100l));
+ }
+ });
+ assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0l));
+ assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0l));
+
+ serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler<TransportRequest.Empty>() {
+ @Override
+ public void messageReceived(TransportRequest.Empty request, TransportChannel channel) {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.options());
+ } catch (IOException e) {
+ e.printStackTrace();
+ assertThat(e.getMessage(), false, equalTo(true));
+ }
+ }
+ });
+
+ // send some messages while ping requests are going around
+ int rounds = scaledRandomIntBetween(100, 5000);
+ for (int i = 0; i < rounds; i++) {
+ serviceB.submitRequest(nodeA, "sayHello",
+ TransportRequest.Empty.INSTANCE, TransportRequestOptions.options().withCompress(randomBoolean()), new BaseTransportResponseHandler<TransportResponse.Empty>() {
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.GENERIC;
+ }
+
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ exp.printStackTrace();
+ assertThat("got exception instead of a response: " + exp.getMessage(), false, equalTo(true));
+ }
+ }).txGet();
+ }
+
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat(nettyA.scheduledPing.successfulPings.count(), greaterThan(200l));
+ assertThat(nettyB.scheduledPing.successfulPings.count(), greaterThan(200l));
+ }
+ });
+ assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0l));
+ assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0l));
+
+ Releasables.close(serviceA, serviceB);
+ terminate(threadPool);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java
new file mode 100644
index 0000000000..637a65f1df
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.junit.annotations.Network;
+import org.elasticsearch.transport.TransportModule;
+import org.junit.Test;
+
+import java.util.Locale;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1, numClientNodes = 0)
+public class NettyTransportMultiPortIntegrationTests extends ElasticsearchIntegrationTest {
+
+ private static int randomPort = -1;
+ private static String randomPortRange;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ if (randomPort == -1) {
+ randomPort = randomIntBetween(49152, 65525);
+ randomPortRange = String.format(Locale.ROOT, "%s-%s", randomPort, randomPort+10);
+ }
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("network.host", "127.0.0.1")
+ .put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransport.class.getName())
+ .put("node.mode", "network")
+ .put("transport.profiles.client1.port", randomPortRange)
+ .put("transport.profiles.client1.publish_host", "127.0.0.7")
+ .put("transport.profiles.client1.publish_port", "4321")
+ .put("transport.profiles.client1.reuse_address", true)
+ .build();
+ }
+
+ @Test
+ public void testThatTransportClientCanConnect() throws Exception {
+ Settings settings = settingsBuilder()
+ .put("cluster.name", internalCluster().getClusterName())
+ .put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransport.class.getName())
+ .put("path.home", createTempDir().toString())
+ .build();
+ try (TransportClient transportClient = TransportClient.builder().settings(settings).loadConfigSettings(false).build()) {
+ transportClient.addTransportAddress(new InetSocketTransportAddress("127.0.0.1", randomPort));
+ ClusterHealthResponse response = transportClient.admin().cluster().prepareHealth().get();
+ assertThat(response.getStatus(), is(ClusterHealthStatus.GREEN));
+ }
+ }
+
+ @Test
+ @Network
+ public void testThatInfosAreExposed() throws Exception {
+ NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setTransport(true).get();
+ for (NodeInfo nodeInfo : response.getNodes()) {
+ assertThat(nodeInfo.getTransport().getProfileAddresses().keySet(), hasSize(1));
+ assertThat(nodeInfo.getTransport().getProfileAddresses(), hasKey("client1"));
+ assertThat(nodeInfo.getTransport().getProfileAddresses().get("client1").boundAddress(), instanceOf(InetSocketTransportAddress.class));
+
+ // bound address
+ InetSocketTransportAddress inetSocketTransportAddress = (InetSocketTransportAddress) nodeInfo.getTransport().getProfileAddresses().get("client1").boundAddress();
+ assertThat(inetSocketTransportAddress.address().getPort(), is(allOf(greaterThanOrEqualTo(randomPort), lessThanOrEqualTo(randomPort + 10))));
+
+ // publish address
+ assertThat(nodeInfo.getTransport().getProfileAddresses().get("client1").publishAddress(), instanceOf(InetSocketTransportAddress.class));
+ InetSocketTransportAddress publishAddress = (InetSocketTransportAddress) nodeInfo.getTransport().getProfileAddresses().get("client1").publishAddress();
+ assertThat(publishAddress.address().getHostName(), is("127.0.0.7"));
+ assertThat(publishAddress.address().getPort(), is(4321));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java
new file mode 100644
index 0000000000..213500dc4d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport.netty;
+
+import com.carrotsearch.hppc.IntHashSet;
+import com.google.common.base.Charsets;
+import org.elasticsearch.Version;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.junit.rule.RepeatOnExceptionRule;
+import org.elasticsearch.test.cache.recycler.MockBigArrays;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.BindTransportException;
+import org.elasticsearch.transport.TransportService;
+import org.junit.Rule;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.is;
+
+public class NettyTransportMultiPortTests extends ElasticsearchTestCase {
+
+ private static final int MAX_RETRIES = 10;
+
+ @Rule
+ public RepeatOnExceptionRule repeatOnBindExceptionRule = new RepeatOnExceptionRule(logger, MAX_RETRIES, BindTransportException.class);
+
+ @Test
+ public void testThatNettyCanBindToMultiplePorts() throws Exception {
+ int[] ports = getRandomPorts(3);
+
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ .put("transport.profiles.default.port", ports[1])
+ .put("transport.profiles.client1.port", ports[2])
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertConnectionRefused(ports[0]);
+ assertPortIsBound(ports[1]);
+ assertPortIsBound(ports[2]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testThatDefaultProfileInheritsFromStandardSettings() throws Exception {
+ int[] ports = getRandomPorts(2);
+
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ .put("transport.profiles.client1.port", ports[1])
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertPortIsBound(ports[0]);
+ assertPortIsBound(ports[1]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testThatProfileWithoutPortSettingsFails() throws Exception {
+ int[] ports = getRandomPorts(1);
+
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ .put("transport.profiles.client1.whatever", "foo")
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertPortIsBound(ports[0]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exception {
+ int[] ports = getRandomPorts(3);
+
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ .put("transport.netty.port", ports[1])
+ .put("transport.profiles.default.port", ports[2])
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertConnectionRefused(ports[0]);
+ assertConnectionRefused(ports[1]);
+ assertPortIsBound(ports[2]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testThatBindingOnDifferentHostsWorks() throws Exception {
+ int[] ports = getRandomPorts(2);
+ InetAddress firstNonLoopbackAddress = NetworkUtils.getFirstNonLoopbackAddress(NetworkUtils.StackType.IPv4);
+ assumeTrue("No IP-v4 non-loopback address available - are you on a plane?", firstNonLoopbackAddress != null);
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ .put("transport.profiles.default.bind_host", "127.0.0.1")
+ .put("transport.profiles.client1.bind_host", firstNonLoopbackAddress.getHostAddress())
+ .put("transport.profiles.client1.port", ports[1])
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertPortIsBound("127.0.0.1", ports[0]);
+ assertPortIsBound(firstNonLoopbackAddress.getHostAddress(), ports[1]);
+ assertConnectionRefused(ports[1]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ @Test
+ public void testThatProfileWithoutValidNameIsIgnored() throws Exception {
+ int[] ports = getRandomPorts(3);
+
+ Settings settings = settingsBuilder()
+ .put("network.host", "127.0.0.1")
+ .put("transport.tcp.port", ports[0])
+ // mimics someone trying to define a profile for .local which is the profile for a node request to itself
+ .put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", ports[1])
+ .put("transport.profiles..port", ports[2])
+ .build();
+
+ ThreadPool threadPool = new ThreadPool("tst");
+ try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
+ assertPortIsBound(ports[0]);
+ assertConnectionRefused(ports[1]);
+ assertConnectionRefused(ports[2]);
+ } finally {
+ terminate(threadPool);
+ }
+ }
+
+ private int[] getRandomPorts(int numberOfPorts) {
+ IntHashSet ports = new IntHashSet();
+
+ for (int i = 0; i < numberOfPorts; i++) {
+ int port = randomIntBetween(49152, 65535);
+ while (ports.contains(port)) {
+ port = randomIntBetween(49152, 65535);
+ }
+ ports.add(port);
+ }
+
+ return ports.toArray();
+ }
+
+ private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) {
+ BigArrays bigArrays = new MockBigArrays(new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService());
+
+ NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT);
+ nettyTransport.start();
+
+ assertThat(nettyTransport.lifecycleState(), is(Lifecycle.State.STARTED));
+ return nettyTransport;
+ }
+
+ private void assertConnectionRefused(int port) throws Exception {
+ try {
+ trySocketConnection(new InetSocketTransportAddress("localhost", port).address());
+ fail("Expected to get exception when connecting to port " + port);
+ } catch (IOException e) {
+ // expected
+ logger.info("Got expected connection message {}", e.getMessage());
+ }
+ }
+
+ private void assertPortIsBound(int port) throws Exception {
+ assertPortIsBound("localhost", port);
+ }
+
+ private void assertPortIsBound(String host, int port) throws Exception {
+ logger.info("Trying to connect to [{}]:[{}]", host, port);
+ trySocketConnection(new InetSocketTransportAddress(host, port).address());
+ }
+
+ private void trySocketConnection(InetSocketAddress address) throws Exception {
+ try (Socket socket = new Socket()) {
+ logger.info("Connecting to {}", address);
+ socket.connect(address, 500);
+
+ assertThat(socket.isConnected(), is(true));
+ try (OutputStream os = socket.getOutputStream()) {
+ os.write("foo".getBytes(Charsets.UTF_8));
+ os.flush();
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java
new file mode 100644
index 0000000000..403f2a7a2e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport.netty;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.*;
+import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelPipeline;
+import org.jboss.netty.channel.ChannelPipelineFactory;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope = Scope.TEST, numDataNodes = 1)
+public class NettyTransportTests extends ElasticsearchIntegrationTest {
+
+ // static so we can use it in anonymous classes
+ private static String channelProfileName = null;
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+ .put("node.mode", "network")
+ .put(TransportModule.TRANSPORT_TYPE_KEY, ExceptionThrowingNettyTransport.class.getName()).build();
+ }
+
+ @Test
+ public void testThatConnectionFailsAsIntended() throws Exception {
+ Client transportClient = internalCluster().transportClient();
+ ClusterHealthResponse clusterIndexHealths = transportClient.admin().cluster().prepareHealth().get();
+ assertThat(clusterIndexHealths.getStatus(), is(ClusterHealthStatus.GREEN));
+
+ try {
+ transportClient.admin().cluster().prepareHealth().putHeader("ERROR", "MY MESSAGE").get();
+ fail("Expected exception, but didnt happen");
+ } catch (ElasticsearchException e) {
+ assertThat(e.getMessage(), containsString("MY MESSAGE"));
+ assertThat(channelProfileName, is(NettyTransport.DEFAULT_PROFILE));
+ }
+ }
+
+ public static final class ExceptionThrowingNettyTransport extends NettyTransport {
+
+ @Inject
+ public ExceptionThrowingNettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version) {
+ super(settings, threadPool, networkService, bigArrays, version);
+ }
+
+ @Override
+ public ChannelPipelineFactory configureServerChannelPipelineFactory(String name, Settings groupSettings) {
+ return new ErrorPipelineFactory(this, name, groupSettings);
+ }
+
+ private static class ErrorPipelineFactory extends ServerChannelPipelineFactory {
+
+ private final ESLogger logger;
+
+ public ErrorPipelineFactory(ExceptionThrowingNettyTransport exceptionThrowingNettyTransport, String name, Settings groupSettings) {
+ super(exceptionThrowingNettyTransport, name, groupSettings);
+ this.logger = exceptionThrowingNettyTransport.logger;
+ }
+
+ @Override
+ public ChannelPipeline getPipeline() throws Exception {
+ ChannelPipeline pipeline = super.getPipeline();
+ pipeline.replace("dispatcher", "dispatcher", new MessageChannelHandler(nettyTransport, logger, NettyTransport.DEFAULT_PROFILE) {
+
+ @Override
+ protected String handleRequest(Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
+ final String action = buffer.readString();
+
+ final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, name);
+ try {
+ final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
+ if (reg == null) {
+ throw new ActionNotFoundTransportException(action);
+ }
+ final TransportRequest request = reg.newRequest();
+ request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress()));
+ request.readFrom(buffer);
+ if (request.hasHeader("ERROR")) {
+ throw new ElasticsearchException((String) request.getHeader("ERROR"));
+ }
+ if (reg.getExecutor() == ThreadPool.Names.SAME) {
+ //noinspection unchecked
+ reg.getHandler().messageReceived(request, transportChannel);
+ } else {
+ threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel));
+ }
+ } catch (Throwable e) {
+ try {
+ transportChannel.sendResponse(e);
+ } catch (IOException e1) {
+ logger.warn("Failed to send error message back to client for action [" + action + "]", e);
+ logger.warn("Actual Exception", e1);
+ }
+ }
+ channelProfileName = transportChannel.getProfileName();
+ return action;
+ }
+
+ class RequestHandler extends AbstractRunnable {
+ private final RequestHandlerRegistry reg;
+ private final TransportRequest request;
+ private final NettyTransportChannel transportChannel;
+
+ public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) {
+ this.reg = reg;
+ this.request = request;
+ this.transportChannel = transportChannel;
+ }
+
+ @SuppressWarnings({"unchecked"})
+ @Override
+ protected void doRun() throws Exception {
+ reg.getHandler().messageReceived(request, transportChannel);
+ }
+
+ @Override
+ public boolean isForceExecution() {
+ return reg.isForceExecution();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ if (transport.lifecycleState() == Lifecycle.State.STARTED) {
+ // we can only send a response transport is started....
+ try {
+ transportChannel.sendResponse(e);
+ } catch (Throwable e1) {
+ logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1);
+ logger.warn("Actual Exception", e);
+ }
+ } }
+ }
+ });
+ return pipeline;
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java b/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
new file mode 100644
index 0000000000..663e928e52
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/transport/netty/SimpleNettyTransportTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.netty;
+
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.AbstractSimpleTransportTests;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.junit.Test;
+
+@Slow
+public class SimpleNettyTransportTests extends AbstractSimpleTransportTests {
+
+ @Override
+ protected MockTransportService build(Settings settings, Version version) {
+ int startPort = 11000 + randomIntBetween(0, 255);
+ int endPort = startPort + 10;
+ settings = Settings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+ MockTransportService transportService = new MockTransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, version), threadPool);
+ transportService.start();
+ return transportService;
+ }
+
+ @Test(expected = ConnectTransportException.class)
+ public void testConnectException() {
+ serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress("localhost", 9876), Version.CURRENT));
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeTests.java
new file mode 100644
index 0000000000..7a0a8f769b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/tribe/TribeTests.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.TestCluster;
+import org.apache.lucene.util.LuceneTestCase.Slow;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Note, when talking to the tribe client, there is no need to set the local flag on master read operations; it
+ * does it by default.
+ */
+@Slow
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
+public class TribeTests extends ElasticsearchIntegrationTest {
+
+ public static final String SECOND_CLUSTER_NODE_PREFIX = "node_tribe2";
+
+ private static InternalTestCluster cluster2;
+
+ private Node tribeNode;
+ private Client tribeClient;
+
+ @BeforeClass
+ public static void setupSecondCluster() throws Exception {
+ ElasticsearchIntegrationTest.beforeClass();
+ // create another cluster
+ cluster2 = new InternalTestCluster(randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), 0, false, SECOND_CLUSTER_NODE_PREFIX);
+ cluster2.beforeTest(getRandom(), 0.1);
+ cluster2.ensureAtLeastNumDataNodes(2);
+ }
+
+ @AfterClass
+ public static void tearDownSecondCluster() {
+ if (cluster2 != null) {
+ try {
+ cluster2.close();
+ } finally {
+ cluster2 = null;
+ }
+ }
+ }
+
+ @After
+ public void tearDownTribeNode() throws IOException {
+ if (cluster2 != null) {
+ try {
+ cluster2.wipe();
+ } finally {
+ cluster2.afterTest();
+ }
+ }
+ if (tribeNode != null) {
+ tribeNode.close();
+ tribeNode = null;
+ }
+ }
+
+ private void setupTribeNode(Settings settings) {
+ ImmutableMap<String,String> asMap = internalCluster().getDefaultSettings().getAsMap();
+ Settings.Builder tribe1Defaults = Settings.builder();
+ Settings.Builder tribe2Defaults = Settings.builder();
+ for (Map.Entry<String, String> entry : asMap.entrySet()) {
+ tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue());
+ tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue());
+ }
+ Settings merged = Settings.builder()
+ .put("tribe.t1.cluster.name", internalCluster().getClusterName())
+ .put("tribe.t2.cluster.name", cluster2.getClusterName())
+ .put("tribe.blocks.write", false)
+ .put("tribe.blocks.read", false)
+ .put(settings)
+ .put(tribe1Defaults.build())
+ .put(tribe2Defaults.build())
+ .put(internalCluster().getDefaultSettings())
+ .put("node.name", "tribe_node") // make sure we can identify threads from this node
+ .build();
+
+ tribeNode = NodeBuilder.nodeBuilder()
+ .settings(merged)
+ .node();
+ tribeClient = tribeNode.client();
+ }
+
+ @Test
+ public void testGlobalReadWriteBlocks() throws Exception {
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ internalCluster().client().admin().indices().prepareCreate("test1").get();
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+
+
+ setupTribeNode(Settings.builder()
+ .put("tribe.blocks.write", true)
+ .put("tribe.blocks.metadata", true)
+ .build());
+
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+        // wait till the tribe node has connected to the cluster, by checking if the index exists in the cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitIndicesInClusterState("test1", "test2");
+
+ try {
+ tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").execute().actionGet();
+ fail("cluster block should be thrown");
+ } catch (ClusterBlockException e) {
+ // all is well!
+ }
+ try {
+ tribeClient.admin().indices().prepareOptimize("test1").execute().actionGet();
+ fail("cluster block should be thrown");
+ } catch (ClusterBlockException e) {
+ // all is well!
+ }
+ try {
+ tribeClient.admin().indices().prepareOptimize("test2").execute().actionGet();
+ fail("cluster block should be thrown");
+ } catch (ClusterBlockException e) {
+ // all is well!
+ }
+ }
+
+ @Test
+ public void testIndexWriteBlocks() throws Exception {
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("block_test1"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("block_test2"));
+
+ setupTribeNode(Settings.builder()
+ .put("tribe.blocks.write.indices", "block_*")
+ .build());
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+        // wait till the tribe node has connected to the cluster, by checking if the index exists in the cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitIndicesInClusterState("test1", "test2", "block_test1", "block_test2");
+
+ tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
+ try {
+ tribeClient.prepareIndex("block_test1", "type1", "1").setSource("field1", "value1").get();
+ fail("cluster block should be thrown");
+ } catch (ClusterBlockException e) {
+ // all is well!
+ }
+
+ tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
+ try {
+ tribeClient.prepareIndex("block_test2", "type1", "1").setSource("field1", "value1").get();
+ fail("cluster block should be thrown");
+ } catch (ClusterBlockException e) {
+ // all is well!
+ }
+ }
+
+ @Test
+ public void testOnConflictDrop() throws Exception {
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ assertAcked(cluster().client().admin().indices().prepareCreate("conflict"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
+ assertAcked(cluster().client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+
+ setupTribeNode(Settings.builder()
+ .put("tribe.on_conflict", "drop")
+ .build());
+
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+
+        // wait till the tribe node has connected to the cluster, by checking if the index exists in the cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitIndicesInClusterState("test1", "test2");
+
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test1").getSettings().get(TribeService.TRIBE_NAME), equalTo("t1"));
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test2").getSettings().get(TribeService.TRIBE_NAME), equalTo("t2"));
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().hasIndex("conflict"), equalTo(false));
+ }
+
+ @Test
+ public void testOnConflictPrefer() throws Exception {
+ testOnConflictPrefer(randomBoolean() ? "t1" : "t2");
+ }
+
+ private void testOnConflictPrefer(String tribe) throws Exception {
+ logger.info("testing preference for tribe {}", tribe);
+
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("conflict"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+
+ setupTribeNode(Settings.builder()
+ .put("tribe.on_conflict", "prefer_" + tribe)
+ .build());
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+        // wait till the tribe node has connected to the cluster, by checking if the index exists in the cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitIndicesInClusterState("test1", "test2", "conflict");
+
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test1").getSettings().get(TribeService.TRIBE_NAME), equalTo("t1"));
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("test2").getSettings().get(TribeService.TRIBE_NAME), equalTo("t2"));
+ assertThat(tribeClient.admin().cluster().prepareState().get().getState().getMetaData().index("conflict").getSettings().get(TribeService.TRIBE_NAME), equalTo(tribe));
+ }
+
+ @Test
+ public void testTribeOnOneCluster() throws Exception {
+ setupTribeNode(Settings.EMPTY);
+ logger.info("create 2 indices, test1 on t1, and test2 on t2");
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+
+
+        // wait till the tribe node has connected to the cluster, by checking if the index exists in the cluster state
+ logger.info("wait till test1 and test2 exists in the tribe node state");
+ awaitIndicesInClusterState("test1", "test2");
+
+ logger.info("wait till tribe has the same nodes as the 2 clusters");
+ awaitSameNodeCounts();
+
+ assertThat(tribeClient.admin().cluster().prepareHealth().setWaitForGreenStatus().get().getStatus(), equalTo(ClusterHealthStatus.GREEN));
+
+ logger.info("create 2 docs through the tribe node");
+ tribeClient.prepareIndex("test1", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type1", "1").setSource("field1", "value1").get();
+ tribeClient.admin().indices().prepareRefresh().get();
+
+ logger.info("verify they are there");
+ assertHitCount(tribeClient.prepareCount().get(), 2l);
+ assertHitCount(tribeClient.prepareSearch().get(), 2l);
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ assertThat(tribeState.getMetaData().index("test1").mapping("type1"), notNullValue());
+ assertThat(tribeState.getMetaData().index("test2").mapping("type1"), notNullValue());
+ }
+ });
+
+
+ logger.info("write to another type");
+ tribeClient.prepareIndex("test1", "type2", "1").setSource("field1", "value1").get();
+ tribeClient.prepareIndex("test2", "type2", "1").setSource("field1", "value1").get();
+ assertNoFailures(tribeClient.admin().indices().prepareRefresh().get());
+
+
+ logger.info("verify they are there");
+ assertHitCount(tribeClient.prepareCount().get(), 4l);
+ assertHitCount(tribeClient.prepareSearch().get(), 4l);
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ assertThat(tribeState.getMetaData().index("test1").mapping("type1"), notNullValue());
+ assertThat(tribeState.getMetaData().index("test1").mapping("type2"), notNullValue());
+ assertThat(tribeState.getMetaData().index("test2").mapping("type1"), notNullValue());
+ assertThat(tribeState.getMetaData().index("test2").mapping("type2"), notNullValue());
+ }
+ });
+
+ logger.info("make sure master level write operations fail... (we don't really have a master)");
+ try {
+ tribeClient.admin().indices().prepareCreate("tribe_index").setMasterNodeTimeout("10ms").get();
+ fail();
+ } catch (MasterNotDiscoveredException e) {
+ // all is well!
+ }
+
+ logger.info("delete an index, and make sure its reflected");
+ cluster2.client().admin().indices().prepareDelete("test2").get();
+ awaitIndicesNotInClusterState("test2");
+
+ try {
+ logger.info("stop a node, make sure its reflected");
+ cluster2.stopRandomDataNode();
+ awaitSameNodeCounts();
+ } finally {
+ cluster2.startNode();
+ awaitSameNodeCounts();
+ }
+ }
+
+ @Test
+ public void testCloseAndOpenIndex() throws Exception {
+ //create an index and close it even before starting the tribe node
+ assertAcked(internalCluster().client().admin().indices().prepareCreate("test1"));
+ ensureGreen(internalCluster());
+ assertAcked(internalCluster().client().admin().indices().prepareClose("test1"));
+
+ setupTribeNode(Settings.EMPTY);
+ awaitSameNodeCounts();
+
+ //the closed index is not part of the tribe node cluster state
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ assertThat(tribeState.getMetaData().hasIndex("test1"), equalTo(false));
+
+ //open the index, it becomes part of the tribe node cluster state
+ assertAcked(internalCluster().client().admin().indices().prepareOpen("test1"));
+ awaitIndicesInClusterState("test1");
+ ensureGreen(internalCluster());
+
+ //create a second index, wait till it is seen from within the tribe node
+ assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
+ awaitIndicesInClusterState("test1", "test2");
+ ensureGreen(cluster2);
+
+ //close the second index, wait till it gets removed from the tribe node cluster state
+ assertAcked(cluster2.client().admin().indices().prepareClose("test2"));
+ awaitIndicesNotInClusterState("test2");
+
+ //open the second index, wait till it gets added back to the tribe node cluster state
+ assertAcked(cluster2.client().admin().indices().prepareOpen("test2"));
+ awaitIndicesInClusterState("test1", "test2");
+ ensureGreen(cluster2);
+ }
+
+ private void awaitIndicesInClusterState(final String... indices) throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ for (String index : indices) {
+ assertTrue(tribeState.getMetaData().hasIndex(index));
+ assertTrue(tribeState.getRoutingTable().hasIndex(index));
+ }
+ }
+ });
+ }
+
+ private void awaitIndicesNotInClusterState(final String... indices) throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState tribeState = tribeNode.client().admin().cluster().prepareState().get().getState();
+ for (String index : indices) {
+ assertFalse(tribeState.getMetaData().hasIndex(index));
+ assertFalse(tribeState.getRoutingTable().hasIndex(index));
+ }
+ }
+ });
+ }
+
+ private void ensureGreen(TestCluster testCluster) {
+ ClusterHealthResponse actionGet = testCluster.client().admin().cluster()
+ .health(Requests.clusterHealthRequest().waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", testCluster.client().admin().cluster().prepareState().get().getState().prettyPrint(), testCluster.client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ }
+
+ private void awaitSameNodeCounts() throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ DiscoveryNodes tribeNodes = tribeNode.client().admin().cluster().prepareState().get().getState().getNodes();
+ assertThat(countDataNodesForTribe("t1", tribeNodes), equalTo(internalCluster().client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()));
+ assertThat(countDataNodesForTribe("t2", tribeNodes), equalTo(cluster2.client().admin().cluster().prepareState().get().getState().getNodes().dataNodes().size()));
+ }
+ });
+ }
+
+ private int countDataNodesForTribe(String tribeName, DiscoveryNodes nodes) {
+ int count = 0;
+ for (DiscoveryNode node : nodes) {
+ if (!node.dataNode()) {
+ continue;
+ }
+ if (tribeName.equals(node.getAttributes().get(TribeService.TRIBE_NAME))) {
+ count++;
+ }
+ }
+ return count;
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/core/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
new file mode 100644
index 0000000000..1eb962bb86
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.tribe;
+
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeBuilder;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.nio.file.Path;
+
+import static org.hamcrest.CoreMatchers.either;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * This test doesn't extend {@link org.elasticsearch.test.ElasticsearchIntegrationTest} as the internal cluster ignores system properties
+ * all the time, while we need to make the tribe node accept them in this case, so that we can verify that they are not read again as part
+ * of the tribe client nodes initialization. Note that the started nodes will obey the 'node.mode' settings as the internal cluster does.
+ */
+public class TribeUnitTests extends ElasticsearchTestCase {
+
+ private static Node tribe1;
+ private static Node tribe2;
+
+ private static final String NODE_MODE = InternalTestCluster.nodeMode();
+
+ @BeforeClass
+ public static void createTribes() {
+ Settings baseSettings = Settings.builder()
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put("http.enabled", false)
+ .put("node.mode", NODE_MODE)
+ .put("path.home", createTempDir()).build();
+
+ tribe1 = NodeBuilder.nodeBuilder().settings(Settings.builder().put(baseSettings).put("cluster.name", "tribe1").put("node.name", "tribe1_node")).node();
+ tribe2 = NodeBuilder.nodeBuilder().settings(Settings.builder().put(baseSettings).put("cluster.name", "tribe2").put("node.name", "tribe2_node")).node();
+ }
+
+ @AfterClass
+ public static void closeTribes() {
+ tribe1.close();
+ tribe1 = null;
+ tribe2.close();
+ tribe2 = null;
+ }
+
+ @Test
+ public void testThatTribeClientsIgnoreGlobalSysProps() throws Exception {
+ System.setProperty("es.cluster.name", "tribe_node_cluster");
+ System.setProperty("es.tribe.t1.cluster.name", "tribe1");
+ System.setProperty("es.tribe.t2.cluster.name", "tribe2");
+
+ try {
+ assertTribeNodeSuccesfullyCreated(Settings.EMPTY);
+ } finally {
+ System.clearProperty("es.cluster.name");
+ System.clearProperty("es.tribe.t1.cluster.name");
+ System.clearProperty("es.tribe.t2.cluster.name");
+ }
+ }
+
+ @Test
+ public void testThatTribeClientsIgnoreGlobalConfig() throws Exception {
+ Path pathConf = getDataPath("elasticsearch.yml").getParent();
+ Settings settings = Settings.builder().put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true).put("path.conf", pathConf).build();
+ assertTribeNodeSuccesfullyCreated(settings);
+ }
+
+ private static void assertTribeNodeSuccesfullyCreated(Settings extraSettings) throws Exception {
+        //tribe node doesn't need the node.mode setting, as it's forced local internally anyway. The tribe clients do need it to make sure
+ //they can find their corresponding tribes using the proper transport
+ Settings settings = Settings.builder().put("http.enabled", false).put("node.name", "tribe_node")
+ .put("tribe.t1.node.mode", NODE_MODE).put("tribe.t2.node.mode", NODE_MODE)
+ .put("path.home", createTempDir()).put(extraSettings).build();
+
+ try (Node node = NodeBuilder.nodeBuilder().settings(settings).node()) {
+ try (Client client = node.client()) {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ ClusterState state = client.admin().cluster().prepareState().clear().setNodes(true).get().getState();
+ assertThat(state.getClusterName().value(), equalTo("tribe_node_cluster"));
+ assertThat(state.getNodes().getSize(), equalTo(5));
+ for (DiscoveryNode discoveryNode : state.getNodes()) {
+ assertThat(discoveryNode.getName(), either(equalTo("tribe1_node")).or(equalTo("tribe2_node")).or(equalTo("tribe_node"))
+ .or(equalTo("tribe_node/t1")).or(equalTo("tribe_node/t2")));
+ }
+ }
+ });
+ }
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/tribe/elasticsearch.yml b/core/src/test/java/org/elasticsearch/tribe/elasticsearch.yml
new file mode 100644
index 0000000000..89f4922a6a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/tribe/elasticsearch.yml
@@ -0,0 +1,3 @@
+cluster.name: tribe_node_cluster
+tribe.t1.cluster.name: tribe1
+tribe.t2.cluster.name: tribe2 \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java b/core/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
new file mode 100644
index 0000000000..dde0202f15
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/ttl/SimpleTTLTests.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.ttl;
+
+import com.google.common.base.Predicate;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.ElasticsearchIntegrationTest.*;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.*;
+
+@ClusterScope(scope= Scope.SUITE, numDataNodes = 1)
+public class SimpleTTLTests extends ElasticsearchIntegrationTest {
+
+ static private final long PURGE_INTERVAL = 200;
+
+ @Override
+ protected int numberOfShards() {
+ return 2;
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("indices.ttl.interval", PURGE_INTERVAL, TimeUnit.MILLISECONDS)
+ .put("cluster.routing.operation.use_type", false) // make sure we control the shard computation
+ .put("cluster.routing.operation.hash.type", "djb")
+ .build();
+ }
+
+ @Test
+ public void testSimpleTTL() throws Exception {
+ assertAcked(prepareCreate("test")
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject()
+ .endObject())
+ .addMapping("type2", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type2")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).field("default", "1d").endObject()
+ .endObject()
+ .endObject()));
+ ensureYellow("test");
+
+ final NumShards test = getNumShards("test");
+
+ long providedTTLValue = 3000;
+ logger.info("--> checking ttl");
+        // Index one doc without routing, one doc with routing, one doc with no TTL and no default and one doc with default TTL
+ long now = System.currentTimeMillis();
+ IndexResponse indexResponse = client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
+ .setTimestamp(String.valueOf(now)).setTTL(providedTTLValue).setRefresh(true).get();
+ assertThat(indexResponse.isCreated(), is(true));
+ indexResponse = client().prepareIndex("test", "type1", "with_routing").setSource("field1", "value1")
+ .setTimestamp(String.valueOf(now)).setTTL(providedTTLValue).setRouting("routing").setRefresh(true).get();
+ assertThat(indexResponse.isCreated(), is(true));
+ indexResponse = client().prepareIndex("test", "type1", "no_ttl").setSource("field1", "value1").get();
+ assertThat(indexResponse.isCreated(), is(true));
+ indexResponse = client().prepareIndex("test", "type2", "default_ttl").setSource("field1", "value1").get();
+ assertThat(indexResponse.isCreated(), is(true));
+
+ // realtime get check
+ long currentTime = System.currentTimeMillis();
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
+ long ttl0;
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0l));
+ }
+ // verify the ttl is still decreasing when going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").get();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0l));
+ }
+ // non realtime get (stored)
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0l));
+ }
+        // non realtime get going to the replica
+ currentTime = System.currentTimeMillis();
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).get();
+ if (getResponse.isExists()) {
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, lessThanOrEqualTo(providedTTLValue - (currentTime - now)));
+ } else {
+ assertThat(providedTTLValue - (currentTime - now), lessThanOrEqualTo(0l));
+ }
+
+ // no TTL provided so no TTL fetched
+ getResponse = client().prepareGet("test", "type1", "no_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.getField("_ttl"), nullValue());
+ // no TTL provided make sure it has default TTL
+ getResponse = client().prepareGet("test", "type2", "default_ttl").setFields("_ttl").setRealtime(true).execute().actionGet();
+ ttl0 = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl0, greaterThan(0L));
+
+ IndicesStatsResponse response = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
+ assertThat(response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount(), equalTo(0L));
+
+ // make sure the purger has done its job for all indexed docs that are expired
+ long shouldBeExpiredDate = now + providedTTLValue + PURGE_INTERVAL + 2000;
+ currentTime = System.currentTimeMillis();
+ if (shouldBeExpiredDate - currentTime > 0) {
+ Thread.sleep(shouldBeExpiredDate - currentTime);
+ }
+
+        // We can't assume that after waiting for ttl + purgeInterval (waitTime) the documents have actually been deleted.
+ // The ttl purging happens in the background in a different thread, and might not have been completed after waiting for waitTime.
+ // But we can use index statistics' delete count to be sure that deletes have been executed, that must be incremented before
+ // ttl purging has finished.
+ logger.info("--> checking purger");
+ assertThat(awaitBusy(new Predicate<Object>() {
+ @Override
+ public boolean apply(Object input) {
+ if (rarely()) {
+ client().admin().indices().prepareFlush("test").get();
+ } else if (rarely()) {
+ client().admin().indices().prepareOptimize("test").setMaxNumSegments(1).get();
+ }
+ IndicesStatsResponse response = client().admin().indices().prepareStats("test").clear().setIndexing(true).get();
+                // TTL deletes two docs, and each delete is counted once per data copy (primary and every replica).
+ return response.getIndices().get("test").getTotal().getIndexing().getTotal().getDeleteCount() == 2L * test.dataCopies;
+ }
+ }, 5, TimeUnit.SECONDS), equalTo(true));
+
+ // realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ // replica realtime get check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(true).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+
+ // Need to run a refresh, in order for the non realtime get to work.
+ client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+ // non realtime get (stored) check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+        // non realtime get going to the replica check
+ getResponse = client().prepareGet("test", "type1", "1").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ getResponse = client().prepareGet("test", "type1", "with_routing").setRouting("routing").setFields("_ttl").setRealtime(false).execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+
+ @Test // issue 5053
+ public void testThatUpdatingMappingShouldNotRemoveTTLConfiguration() throws Exception {
+ String index = "foo";
+ String type = "mytype";
+
+ XContentBuilder builder = jsonBuilder().startObject().startObject("_ttl").field("enabled", true).endObject().endObject();
+ assertAcked(client().admin().indices().prepareCreate(index).addMapping(type, builder));
+
+ // check mapping again
+ assertTTLMappingEnabled(index, type);
+
+ // update some field in the mapping
+ XContentBuilder updateMappingBuilder = jsonBuilder().startObject().startObject("properties").startObject("otherField").field("type", "string").endObject().endObject();
+ PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping(index).setType(type).setSource(updateMappingBuilder).get();
+ assertAcked(putMappingResponse);
+
+        // make sure the _ttl mapping is still enabled after the update
+ assertTTLMappingEnabled(index, type);
+ }
+
+ private void assertTTLMappingEnabled(String index, String type) throws IOException {
+ String errMsg = String.format(Locale.ROOT, "Expected ttl field mapping to be enabled for %s/%s", index, type);
+
+ GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes(type).get();
+ Map<String, Object> mappingSource = getMappingsResponse.getMappings().get(index).get(type).getSourceAsMap();
+ assertThat(errMsg, mappingSource, hasKey("_ttl"));
+ String ttlAsString = mappingSource.get("_ttl").toString();
+ assertThat(ttlAsString, is(notNullValue()));
+ assertThat(errMsg, ttlAsString, is("{enabled=true}"));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
new file mode 100644
index 0000000000..71b4cb803d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/update/UpdateByNativeScriptTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.update;
+
+import com.google.common.collect.Maps;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.AbstractExecutableScript;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptEngineService;
+import org.elasticsearch.script.NativeScriptFactory;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+
+/**
+ *
+ */
+@ClusterScope(scope= Scope.SUITE, numDataNodes =1)
+public class UpdateByNativeScriptTests extends ElasticsearchIntegrationTest {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.settingsBuilder()
+ .put(super.nodeSettings(nodeOrdinal))
+ .put("script.native.custom.type", CustomNativeScriptFactory.class.getName())
+ .build();
+ }
+
+ @Test
+ public void testThatUpdateUsingNativeScriptWorks() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ index("test", "type", "1", "text", "value");
+
+ Map<String, Object> params = Maps.newHashMap();
+ params.put("foo", "SETVALUE");
+ client().prepareUpdate("test", "type", "1")
+ .setScript(new Script("custom", ScriptService.ScriptType.INLINE, NativeScriptEngineService.NAME, params)).get();
+
+ Map<String, Object> data = client().prepareGet("test", "type", "1").get().getSource();
+ assertThat(data, hasKey("foo"));
+ assertThat(data.get("foo").toString(), is("SETVALUE"));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testThatUpdateUsingNativeScriptWorksOldScriptAPI() throws Exception {
+ createIndex("test");
+ ensureYellow();
+
+ index("test", "type", "1", "text", "value");
+
+ Map<String, Object> params = Maps.newHashMap();
+ params.put("foo", "SETVALUE");
+ client().prepareUpdate("test", "type", "1")
+ .setScript("custom", ScriptService.ScriptType.INLINE)
+ .setScriptLang(NativeScriptEngineService.NAME).setScriptParams(params).get();
+
+ Map<String, Object> data = client().prepareGet("test", "type", "1").get().getSource();
+ assertThat(data, hasKey("foo"));
+ assertThat(data.get("foo").toString(), is("SETVALUE"));
+ }
+
+ static class CustomNativeScriptFactory implements NativeScriptFactory {
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new CustomScript(params);
+ }
+ }
+
+ static class CustomScript extends AbstractExecutableScript {
+ private Map<String, Object> params;
+ private Map<String, Object> vars = Maps.newHashMapWithExpectedSize(2);
+
+ public CustomScript(Map<String, Object> params) {
+ this.params = params;
+ }
+
+ @Override
+ public Object run() {
+ if (vars.containsKey("ctx") && vars.get("ctx") instanceof Map) {
+ Map ctx = (Map) vars.get("ctx");
+ if (ctx.containsKey("_source") && ctx.get("_source") instanceof Map) {
+ Map source = (Map) ctx.get("_source");
+ source.putAll(params);
+ }
+ }
+ // return value does not matter, the UpdateHelper class reads the mutated ctx/_source directly
+ return null;
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
+ vars.put(name, value);
+ }
+
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/update/UpdateNoopTests.java b/core/src/test/java/org/elasticsearch/update/UpdateNoopTests.java
new file mode 100644
index 0000000000..9f4f203b29
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/update/UpdateNoopTests.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.update;
+
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Tests for noop updates.
+ */
+public class UpdateNoopTests extends ElasticsearchIntegrationTest {
+ @Test
+ public void singleField() throws Exception {
+ updateAndCheckSource(1, fields("bar", "baz"));
+ updateAndCheckSource(1, fields("bar", "baz"));
+ updateAndCheckSource(2, fields("bar", "bir"));
+ updateAndCheckSource(2, fields("bar", "bir"));
+ updateAndCheckSource(3, fields("bar", "foo"));
+ updateAndCheckSource(4, fields("bar", null));
+ updateAndCheckSource(4, fields("bar", null));
+ updateAndCheckSource(5, fields("bar", "foo"));
+
+ assertEquals(3, totalNoopUpdates());
+ }
+
+ @Test
+ public void twoFields() throws Exception {
+ // Use random keys so we get random iteration order.
+ String key1 = 1 + randomAsciiOfLength(3);
+ String key2 = 2 + randomAsciiOfLength(3);
+ String key3 = 3 + randomAsciiOfLength(3);
+ updateAndCheckSource(1, fields(key1, "foo", key2, "baz"));
+ updateAndCheckSource(1, fields(key1, "foo", key2, "baz"));
+ updateAndCheckSource(2, fields(key1, "foo", key2, "bir"));
+ updateAndCheckSource(2, fields(key1, "foo", key2, "bir"));
+ updateAndCheckSource(3, fields(key1, "foo", key2, "foo"));
+ updateAndCheckSource(4, fields(key1, "foo", key2, null));
+ updateAndCheckSource(4, fields(key1, "foo", key2, null));
+ updateAndCheckSource(5, fields(key1, "foo", key2, "foo"));
+ updateAndCheckSource(6, fields(key1, null, key2, "foo"));
+ updateAndCheckSource(6, fields(key1, null, key2, "foo"));
+ updateAndCheckSource(7, fields(key1, null, key2, null));
+ updateAndCheckSource(7, fields(key1, null, key2, null));
+ updateAndCheckSource(8, fields(key1, null, key2, null, key3, null));
+
+ assertEquals(5, totalNoopUpdates());
+ }
+
+ @Test
+ public void arrayField() throws Exception {
+ updateAndCheckSource(1, fields("bar", "baz"));
+ updateAndCheckSource(2, fields("bar", new String[] {"baz", "bort"}));
+ updateAndCheckSource(2, fields("bar", new String[] {"baz", "bort"}));
+ updateAndCheckSource(3, fields("bar", "bir"));
+ updateAndCheckSource(3, fields("bar", "bir"));
+ updateAndCheckSource(4, fields("bar", new String[] {"baz", "bort"}));
+ updateAndCheckSource(4, fields("bar", new String[] {"baz", "bort"}));
+ updateAndCheckSource(5, fields("bar", new String[] {"bir", "bort"}));
+ updateAndCheckSource(5, fields("bar", new String[] {"bir", "bort"}));
+ updateAndCheckSource(6, fields("bar", new String[] {"bir", "for"}));
+ updateAndCheckSource(6, fields("bar", new String[] {"bir", "for"}));
+ updateAndCheckSource(7, fields("bar", new String[] {"bir", "for", "far"}));
+
+ assertEquals(5, totalNoopUpdates());
+ }
+
+ @Test
+ public void map() throws Exception {
+ // Use random keys so we get variable iteration order.
+ String key1 = 1 + randomAsciiOfLength(3);
+ String key2 = 2 + randomAsciiOfLength(3);
+ String key3 = 3 + randomAsciiOfLength(3);
+ updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, "baz")
+ .endObject().endObject());
+ updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, "baz")
+ .endObject().endObject());
+ updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, "bir")
+ .endObject().endObject());
+ updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, "bir")
+ .endObject().endObject());
+ updateAndCheckSource(3, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, "foo")
+ .endObject().endObject());
+ updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, (Object) null)
+ .endObject().endObject());
+ updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, (Object) null)
+ .endObject().endObject());
+ updateAndCheckSource(5, XContentFactory.jsonBuilder().startObject()
+ .startObject("test")
+ .field(key1, "foo")
+ .field(key2, (Object) null)
+ .field(key3, (Object) null)
+ .endObject().endObject());
+
+ assertEquals(3, totalNoopUpdates());
+ }
+
+ @Test
+ public void mapAndField() throws Exception {
+ updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "baz")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "baz")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "bir")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(2, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "bir")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(3, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "foo")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject()
+ .field("f", "bar")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "foo")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(4, XContentFactory.jsonBuilder().startObject()
+ .field("f", "bar")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "foo")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(5, XContentFactory.jsonBuilder().startObject()
+ .field("f", "baz")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "foo")
+ .endObject()
+ .endObject());
+ updateAndCheckSource(6, XContentFactory.jsonBuilder().startObject()
+ .field("f", "bop")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "foo")
+ .endObject()
+ .endObject());
+
+ assertEquals(3, totalNoopUpdates());
+ }
+
+ /**
+ * Totally empty requests are noop if and only if detect noops is true.
+ */
+ @Test
+ public void totallyEmpty() throws Exception {
+ updateAndCheckSource(1, XContentFactory.jsonBuilder().startObject()
+ .field("f", "foo")
+ .startObject("m")
+ .field("mf1", "foo")
+ .field("mf2", "baz")
+ .endObject()
+ .endObject());
+ update(true, 1, XContentFactory.jsonBuilder().startObject().endObject());
+ update(false, 2, XContentFactory.jsonBuilder().startObject().endObject());
+ }
+
+ private XContentBuilder fields(Object... fields) throws IOException {
+ assertEquals("Fields must field1, value1, field2, value2, etc", 0, fields.length % 2);
+
+ XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
+ for (int i = 0; i < fields.length; i += 2) {
+ builder.field((String) fields[i], fields[i + 1]);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+ private void updateAndCheckSource(long expectedVersion, XContentBuilder xContentBuilder) {
+ UpdateResponse updateResponse = update(true, expectedVersion, xContentBuilder);
+ assertEquals(updateResponse.getGetResult().sourceRef().toUtf8(), xContentBuilder.bytes().toUtf8());
+ }
+
+ private UpdateResponse update(boolean detectNoop, long expectedVersion, XContentBuilder xContentBuilder) {
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setDoc(xContentBuilder.bytes().toUtf8())
+ .setDocAsUpsert(true)
+ .setDetectNoop(detectNoop)
+ .setFields("_source")
+ .execute().actionGet();
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertEquals(expectedVersion, updateResponse.getVersion());
+ return updateResponse;
+ }
+
+ private long totalNoopUpdates() {
+ return client().admin().indices().prepareStats("test").setIndexing(true).get().getIndex("test").getTotal().getIndexing().getTotal()
+ .getNoopUpdateCount();
+ }
+
+ @Before
+ public void setup() {
+ createIndex("test");
+ ensureGreen();
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/update/UpdateTests.java b/core/src/test/java/org/elasticsearch/update/UpdateTests.java
new file mode 100644
index 0000000000..63741575af
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/update/UpdateTests.java
@@ -0,0 +1,1585 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.update;
+
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.update.UpdateRequest;
+import org.elasticsearch.action.update.UpdateRequestBuilder;
+import org.elasticsearch.action.update.UpdateResponse;
+import org.elasticsearch.client.transport.NoNodeAvailableException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentMissingException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.merge.policy.MergePolicyModule;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.index.merge.NoMergePolicyProvider;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class UpdateTests extends ElasticsearchIntegrationTest {
+
+ private void createTestIndex() throws Exception {
+ logger.info("--> creating index test");
+
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject()
+ .endObject()));
+ }
+
+ @Test
+ public void testUpsert() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+ .execute().actionGet();
+ assertTrue(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ }
+
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+ .execute().actionGet();
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+ }
+
+ @Test
+ public void testScriptedUpsert() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ // Script logic is
+ // 1) New accounts take balance from "balance" in upsert doc and first payment is charged at 50%
+ // 2) Existing accounts subtract full payment from balance stored in elasticsearch
+
+ String script="int oldBalance=ctx._source.balance;"+
+ "int deduction=ctx.op == \"create\" ? (payment/2) : payment;"+
+ "ctx._source.balance=oldBalance-deduction;";
+ int openingBalance=10;
+
+ Map<String, Object> params = new HashMap<>();
+ params.put("payment", 2);
+
+ // Pay money from what will be a new account and opening balance comes from upsert doc
+ // provided by client
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject())
+ .setScriptedUpsert(true)
+.setScript(new Script(script, ScriptService.ScriptType.INLINE, null, params))
+ .execute().actionGet();
+ assertTrue(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9"));
+ }
+
+ // Now pay money for an existing account where balance is stored in es
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject())
+ .setScriptedUpsert(true)
+.setScript(new Script(script, ScriptService.ScriptType.INLINE, null, params))
+ .execute().actionGet();
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("7"));
+ }
+ }
+
+ @Test
+ public void testUpsertDoc() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(true)
+ .setFields("_source")
+ .execute().actionGet();
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ }
+
+ @Test
+ // See: https://github.com/elasticsearch/elasticsearch/issues/3265
+ public void testNotUpsertDoc() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ assertThrows(client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setDocAsUpsert(false)
+ .setFields("_source")
+ .execute(), DocumentMissingException.class);
+ }
+
+ @Test
+ public void testUpsertFields() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null))
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null))
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra").toString(), equalTo("foo"));
+ }
+
+ @Test
+ public void testVersionedUpdate() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+ ensureGreen();
+
+ index("test", "type", "1", "text", "value"); // version is now 1
+
+ assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2)
+ .execute(),
+ VersionConflictEngineException.class);
+
+ client().prepareUpdate(indexOrAlias(), "type", "1")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(1).get();
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l));
+
+ // and again with a higher version..
+ client().prepareUpdate(indexOrAlias(), "type", "1")
+ .setScript(new Script("ctx._source.text = 'v3'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2).get();
+
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l));
+
+ // after delete
+ client().prepareDelete("test", "type", "1").get();
+ assertThrows(client().prepareUpdate("test", "type", "1")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(3)
+ .execute(),
+ DocumentMissingException.class);
+
+ // external versioning
+ client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get();
+
+ assertThrows(client().prepareUpdate(indexOrAlias(), "type", "2")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null)).setVersion(2)
+ .setVersionType(VersionType.EXTERNAL).execute(),
+ ActionRequestValidationException.class);
+
+ // upserts - the combination with versions is a bit weird. Tests are here to ensure we do not change our behavior unintentionally
+
+ // With internal versions, it means "if the object is there with version X, update it or explode; if it is not there, index it".
+ client().prepareUpdate(indexOrAlias(), "type", "3")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null))
+ .setVersion(10).setUpsert("{ \"text\": \"v0\" }").get();
+ GetResponse get = get("test", "type", "3");
+ assertThat(get.getVersion(), equalTo(1l));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+ // With force version
+ client().prepareUpdate(indexOrAlias(), "type", "4")
+ .setScript(new Script("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE, null, null))
+ .setVersion(10).setVersionType(VersionType.FORCE).setUpsert("{ \"text\": \"v0\" }").get();
+
+ get = get("test", "type", "4");
+ assertThat(get.getVersion(), equalTo(10l));
+ assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+
+ // retry on conflict is rejected:
+ assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1").setVersion(10).setRetryOnConflict(5), ActionRequestValidationException.class);
+ }
+
+ @Test
+ public void testIndexAutoCreation() throws Exception {
+ UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+ .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+ .setScript(new Script("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE, null, null))
+ .setFields("_source")
+ .execute().actionGet();
+
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+ assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+ }
+
+ @Test
+ public void testUpdate() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx._source.field++", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ fail();
+ } catch (DocumentMissingException e) {
+ // all is well
+ }
+
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+
+ UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(2L));
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+ }
+
+ Map<String, Object> params = new HashMap<>();
+ params.put("count", 3);
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx._source.field += count", ScriptService.ScriptType.INLINE, null, params)).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check noop
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx.op = 'none'", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(3L));
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+ }
+
+ // check delete
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx.op = 'delete'", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ assertThat(updateResponse.getVersion(), equalTo(4L));
+ assertFalse(updateResponse.isCreated());
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+
+ for (int i = 0; i < 5; i++) {
+ GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.isExists(), equalTo(false));
+ }
+
+ // check TTL is kept after an update without TTL
+ client().prepareIndex("test", "type1", "2").setSource("field", 1).setTTL(86400000L).setRefresh(true).execute().actionGet();
+ GetResponse getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ client().prepareUpdate(indexOrAlias(), "type1", "2")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+
+ // check TTL update
+ client().prepareUpdate(indexOrAlias(), "type1", "2")
+ .setScript(new Script("ctx._ttl = 3600000", ScriptService.ScriptType.INLINE, null, null)).execute().actionGet();
+ getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+ ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+ assertThat(ttl, greaterThan(0L));
+ assertThat(ttl, lessThanOrEqualTo(3600000L));
+
+ // check timestamp update
+ client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefresh(true).execute().actionGet();
+ client().prepareUpdate(indexOrAlias(), "type1", "3")
+ .setScript(new Script("ctx._timestamp = \"2009-11-15T14:12:12\"", ScriptService.ScriptType.INLINE, null, null)).execute()
+ .actionGet();
+ getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
+ long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+ assertThat(timestamp, equalTo(1258294332000L));
+
+ // check fields parameter
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).setFields("_source", "field")
+ .execute().actionGet();
+ assertThat(updateResponse.getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult(), notNullValue());
+ assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+ assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
+ assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue());
+
+ // check updates without script
+ // add new field
+ client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // change existing field
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3"));
+ assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+ }
+
+ // recursive map
+ Map<String, Object> testMap = new HashMap<>();
+ Map<String, Object> testMap2 = new HashMap<>();
+ Map<String, Object> testMap3 = new HashMap<>();
+ testMap3.put("commonkey", testMap);
+ testMap3.put("map3", 5);
+ testMap2.put("map2", 6);
+ testMap.put("commonkey", testMap2);
+ testMap.put("map1", 8);
+
+ client().prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet();
+ updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet();
+ for (int i = 0; i < 5; i++) {
+ getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+ Map map1 = (Map) getResponse.getSourceAsMap().get("map");
+ assertThat(map1.size(), equalTo(3));
+ assertThat(map1.containsKey("map1"), equalTo(true));
+ assertThat(map1.containsKey("map3"), equalTo(true));
+ assertThat(map1.containsKey("commonkey"), equalTo(true));
+ Map map2 = (Map) map1.get("commonkey");
+ assertThat(map2.size(), equalTo(3));
+ assertThat(map2.containsKey("map1"), equalTo(true));
+ assertThat(map2.containsKey("map2"), equalTo(true));
+ assertThat(map2.containsKey("commonkey"), equalTo(true));
+ }
+ }
+
+    // Verifies request validation: an update request may carry EITHER a script OR a doc,
+    // never both. The request must fail client-side with ActionRequestValidationException
+    // before reaching any shard.
+    @Test
+    public void testUpdateRequestWithBothScriptAndDoc() throws Exception {
+        createTestIndex();
+        ensureGreen();
+
+        try {
+            client().prepareUpdate(indexOrAlias(), "type1", "1")
+                    .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+                    .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                    .execute().actionGet();
+            fail("Should have thrown ActionRequestValidationException");
+        } catch (ActionRequestValidationException e) {
+            // exactly one validation error is expected, and its text must be surfaced
+            // both in the error list and in the exception message
+            assertThat(e.validationErrors().size(), equalTo(1));
+            assertThat(e.validationErrors().get(0), containsString("can't provide both script and doc"));
+            assertThat(e.getMessage(), containsString("can't provide both script and doc"));
+        }
+    }
+
+    // Verifies request validation: enabling doc_as_upsert on a scripted update without
+    // supplying a doc is invalid and must be rejected with ActionRequestValidationException.
+    @Test
+    public void testUpdateRequestWithScriptAndShouldUpsertDoc() throws Exception {
+        createTestIndex();
+        ensureGreen();
+        try {
+            client().prepareUpdate(indexOrAlias(), "type1", "1")
+                    .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                    .setDocAsUpsert(true)
+                    .execute().actionGet();
+            fail("Should have thrown ActionRequestValidationException");
+        } catch (ActionRequestValidationException e) {
+            // a single validation error with the doc_as_upsert message, in both the list and the message
+            assertThat(e.validationErrors().size(), equalTo(1));
+            assertThat(e.validationErrors().get(0), containsString("doc must be specified if doc_as_upsert is enabled"));
+            assertThat(e.getMessage(), containsString("doc must be specified if doc_as_upsert is enabled"));
+        }
+    }
+
+    // Verifies the ctx.* variables exposed to update scripts (_index, _type, _id, _version,
+    // _parent, _routing, _timestamp, _ttl). Uses two documents: a child (subtype1, with
+    // parent/routing/ttl set) and its parent (type1, without them) so both the populated and
+    // the null cases are asserted. The assertions run INSIDE the script via Groovy 'assert';
+    // a failed assertion makes the update request itself fail.
+    @Test
+    public void testContextVariables() throws Exception {
+        // both mappings enable _timestamp (stored) and _ttl; subtype1 declares type1 as its _parent
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+                        .addMapping("type1", XContentFactory.jsonBuilder()
+                                .startObject()
+                                .startObject("type1")
+                                .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+                                .startObject("_ttl").field("enabled", true).endObject()
+                                .endObject()
+                                .endObject())
+                        .addMapping("subtype1", XContentFactory.jsonBuilder()
+                                .startObject()
+                                .startObject("subtype1")
+                                .startObject("_parent").field("type", "type1").endObject()
+                                .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+                                .startObject("_ttl").field("enabled", true).endObject()
+                                .endObject()
+                                .endObject())
+        );
+        ensureGreen();
+
+        // Index some documents
+        // parent gets timestamp-1 so the two docs have distinct, assertable timestamps
+        long timestamp = System.currentTimeMillis();
+        client().prepareIndex()
+                .setIndex("test")
+                .setType("type1")
+                .setId("parentId1")
+                .setTimestamp(String.valueOf(timestamp-1))
+                .setSource("field1", 0, "content", "bar")
+                .execute().actionGet();
+
+        long ttl = 10000;
+        client().prepareIndex()
+                .setIndex("test")
+                .setType("subtype1")
+                .setId("id1")
+                .setParent("parentId1")
+                .setRouting("routing1")
+                .setTimestamp(String.valueOf(timestamp))
+                .setTTL(ttl)
+                .setSource("field1", 1, "content", "foo")
+                .execute().actionGet();
+
+        // Update the first object and note context variables values
+        Map<String, Object> scriptParams = new HashMap<>();
+        scriptParams.put("delim", "_");
+        UpdateResponse updateResponse = client().prepareUpdate("test", "subtype1", "id1")
+                .setRouting("routing1")
+                .setScript(
+                        new Script("assert ctx._index == \"test\" : \"index should be \\\"test\\\"\"\n"
+                                +
+                                "assert ctx._type == \"subtype1\" : \"type should be \\\"subtype1\\\"\"\n" +
+                                "assert ctx._id == \"id1\" : \"id should be \\\"id1\\\"\"\n" +
+                                "assert ctx._version == 1 : \"version should be 1\"\n" +
+                                "assert ctx._parent == \"parentId1\" : \"parent should be \\\"parentId1\\\"\"\n" +
+                                "assert ctx._routing == \"routing1\" : \"routing should be \\\"routing1\\\"\"\n" +
+                                "assert ctx._timestamp == " + timestamp + " : \"timestamp should be " + timestamp + "\"\n" +
+                                // ttl has a 3-second leeway, because it's always counting down
+                                "assert ctx._ttl <= " + ttl + " : \"ttl should be <= " + ttl + " but was \" + ctx._ttl\n" +
+                                "assert ctx._ttl >= " + (ttl-3000) + " : \"ttl should be <= " + (ttl-3000) + " but was \" + ctx._ttl\n" +
+                                "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n" +
+                                "ctx._source.field1 += 1;\n",
+                                ScriptService.ScriptType.INLINE, null, scriptParams))
+                .execute().actionGet();
+
+        assertEquals(2, updateResponse.getVersion());
+
+        // the script also mutated the source: field1 incremented, content doubled with delimiter
+        GetResponse getResponse = client().prepareGet("test", "subtype1", "id1").setRouting("routing1").execute().actionGet();
+        assertEquals(2, getResponse.getSourceAsMap().get("field1"));
+        assertEquals("foo_foo", getResponse.getSourceAsMap().get("content"));
+
+        // Idem with the second object
+        // parent doc: _parent/_routing/_ttl are expected to be null here
+        scriptParams = new HashMap<>();
+        scriptParams.put("delim", "_");
+        updateResponse = client().prepareUpdate("test", "type1", "parentId1")
+                .setScript(
+                        new Script(
+                                "assert ctx._index == \"test\" : \"index should be \\\"test\\\"\"\n" +
+                                        "assert ctx._type == \"type1\" : \"type should be \\\"type1\\\"\"\n" +
+                                        "assert ctx._id == \"parentId1\" : \"id should be \\\"parentId1\\\"\"\n" +
+                                        "assert ctx._version == 1 : \"version should be 1\"\n" +
+                                        "assert ctx._parent == null : \"parent should be null\"\n" +
+                                        "assert ctx._routing == null : \"routing should be null\"\n" +
+                                        "assert ctx._timestamp == " + (timestamp - 1) + " : \"timestamp should be " + (timestamp - 1) + "\"\n" +
+                                        "assert ctx._ttl == null : \"ttl should be null\"\n" +
+                                        "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n" +
+                                        "ctx._source.field1 += 1;\n",
+                                ScriptService.ScriptType.INLINE, null, scriptParams))
+                .execute().actionGet();
+
+        assertEquals(2, updateResponse.getVersion());
+
+        getResponse = client().prepareGet("test", "type1", "parentId1").execute().actionGet();
+        assertEquals(1, getResponse.getSourceAsMap().get("field1"));
+        assertEquals("bar_bar", getResponse.getSourceAsMap().get("content"));
+    }
+
+    // Concurrency test: N threads each perform the same sequence of scripted upserts
+    // (increment "field") on the same document ids, with retryOnConflict set to
+    // Integer.MAX_VALUE so every version conflict is retried until it succeeds.
+    // Afterwards each doc must have version == numberOfThreads and field == numberOfThreads,
+    // i.e. no update was lost. Randomly exercises both the single-update and the bulk path.
+    @Test
+    @Slow
+    public void testConcurrentUpdateWithRetryOnConflict() throws Exception {
+        final boolean useBulkApi = randomBoolean();
+        createTestIndex();
+        ensureGreen();
+
+        int numberOfThreads = scaledRandomIntBetween(2,5);
+        // latch: all workers done; startLatch: release all workers at once for max contention
+        final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+        final CountDownLatch startLatch = new CountDownLatch(1);
+        final int numberOfUpdatesPerThread = scaledRandomIntBetween(100, 10000);
+        final List<Throwable> failures = new CopyOnWriteArrayList<>();
+        for (int i = 0; i < numberOfThreads; i++) {
+            Runnable r = new Runnable() {
+
+                @Override
+                public void run() {
+                    try {
+                        startLatch.await();
+                        for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+                            if (useBulkApi) {
+                                UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i))
+                                        .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                                        .setRetryOnConflict(Integer.MAX_VALUE)
+                                        .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
+                                client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
+                            } else {
+                                client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i))
+                                        .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                                        .setRetryOnConflict(Integer.MAX_VALUE)
+                                        .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
+                                        .execute().actionGet();
+                            }
+                        }
+                    } catch (Throwable e) {
+                        // collected, not rethrown: asserted empty on the main thread below
+                        failures.add(e);
+                    } finally {
+                        latch.countDown();
+                    }
+                }
+
+            };
+            new Thread(r).start();
+        }
+        startLatch.countDown();
+        latch.await();
+        for (Throwable throwable : failures) {
+            logger.info("Captured failure on concurrent update:", throwable);
+        }
+        assertThat(failures.size(), equalTo(0));
+        // every id was upserted once and incremented (numberOfThreads - 1) more times in total
+        for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+            GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+            assertThat(response.getId(), equalTo(Integer.toString(i)));
+            assertThat(response.isExists(), equalTo(true));
+            assertThat(response.getVersion(), equalTo((long) numberOfThreads));
+            assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
+        }
+    }
+
+    // Stress test interleaving async scripted upserts and deletes on the same ids from
+    // several threads. Each thread tracks per-id failures; semaphores bound the number of
+    // in-flight async requests and double as a completion barrier (all permits returned ==
+    // all responses received). The final version bookkeeping asserts that
+    // successes + failures == total attempts per id, i.e. no request was lost.
+    @Test
+    @Slow
+    public void stressUpdateDeleteConcurrency() throws Exception {
+        //We create an index with merging disabled so that deletes don't get merged away
+        assertAcked(prepareCreate("test")
+                .addMapping("type1", XContentFactory.jsonBuilder()
+                        .startObject()
+                        .startObject("type1")
+                        .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+                        .startObject("_ttl").field("enabled", true).endObject()
+                        .endObject()
+                        .endObject())
+                .setSettings(Settings.builder().put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)));
+        ensureGreen();
+
+        final int numberOfThreads = scaledRandomIntBetween(3,5);
+        final int numberOfIdsPerThread = scaledRandomIntBetween(3,10);
+        final int numberOfUpdatesPerId = scaledRandomIntBetween(10,100);
+        // retryOnConflict is deliberately low (0 or 1): conflicts ARE expected and counted
+        final int retryOnConflict = randomIntBetween(0,1);
+        final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+        final CountDownLatch startLatch = new CountDownLatch(1);
+        final List<Throwable> failures = new CopyOnWriteArrayList<>();
+
+        final class UpdateThread extends Thread {
+            // per-id count of failed update/delete requests; guarded by synchronized(failedMap)
+            final Map<Integer,Integer> failedMap = new HashMap<>();
+            final int numberOfIds;
+            final int updatesPerId;
+            final int maxUpdateRequests = numberOfIdsPerThread*numberOfUpdatesPerId;
+            final int maxDeleteRequests = numberOfIdsPerThread*numberOfUpdatesPerId;
+            // one permit per planned request; a permit is released when its response arrives,
+            // so "all permits available" means "all requests answered"
+            private final Semaphore updateRequestsOutstanding = new Semaphore(maxUpdateRequests);
+            private final Semaphore deleteRequestsOutstanding = new Semaphore(maxDeleteRequests);
+
+            public UpdateThread(int numberOfIds, int updatesPerId) {
+                this.numberOfIds = numberOfIds;
+                this.updatesPerId = updatesPerId;
+            }
+
+            // async callback for updates: success just returns the permit,
+            // failure additionally records the id in failedMap
+            final class UpdateListener implements ActionListener<UpdateResponse> {
+                int id;
+
+                public UpdateListener(int id) {
+                    this.id = id;
+                }
+
+                @Override
+                public void onResponse(UpdateResponse updateResponse) {
+                    updateRequestsOutstanding.release(1);
+                }
+
+                @Override
+                public void onFailure(Throwable e) {
+                    synchronized (failedMap) {
+                        incrementMapValue(id, failedMap);
+                    }
+                    updateRequestsOutstanding.release(1);
+                }
+
+            }
+
+            // async callback for deletes, mirror of UpdateListener
+            final class DeleteListener implements ActionListener<DeleteResponse> {
+                int id;
+
+                public DeleteListener(int id) {
+                    this.id = id;
+                }
+
+                @Override
+                public void onResponse(DeleteResponse deleteResponse) {
+                    deleteRequestsOutstanding.release(1);
+                }
+
+                @Override
+                public void onFailure(Throwable e) {
+                    synchronized (failedMap) {
+                        incrementMapValue(id, failedMap);
+                    }
+                    deleteRequestsOutstanding.release(1);
+                }
+            }
+
+            @Override
+            public void run(){
+                try {
+                    startLatch.await();
+                    // NoNodeAvailableException is tolerated once (cluster hiccup); twice is fatal
+                    boolean hasWaitedForNoNode = false;
+                    for (int j = 0; j < numberOfIds; j++) {
+                        for (int k = 0; k < numberOfUpdatesPerId; ++k) {
+                            updateRequestsOutstanding.acquire();
+                            try {
+                                UpdateRequest ur = client().prepareUpdate("test", "type1", Integer.toString(j))
+                                        .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                                        .setRetryOnConflict(retryOnConflict)
+                                        .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
+                                        .request();
+                                client().update(ur, new UpdateListener(j));
+                            } catch (NoNodeAvailableException nne) {
+                                // request never went out: return the permit and count it as a failure
+                                updateRequestsOutstanding.release();
+                                synchronized (failedMap) {
+                                    incrementMapValue(j, failedMap);
+                                }
+                                if (hasWaitedForNoNode) {
+                                    throw nne;
+                                }
+                                logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
+                                hasWaitedForNoNode = true;
+                                Thread.sleep(1000);
+                            }
+
+                            try {
+                                deleteRequestsOutstanding.acquire();
+                                DeleteRequest dr = client().prepareDelete("test", "type1", Integer.toString(j)).request();
+                                client().delete(dr, new DeleteListener(j));
+                            } catch (NoNodeAvailableException nne) {
+                                deleteRequestsOutstanding.release();
+                                synchronized (failedMap) {
+                                    incrementMapValue(j, failedMap);
+                                }
+                                if (hasWaitedForNoNode) {
+                                    throw nne;
+                                }
+                                logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
+                                hasWaitedForNoNode = true;
+                                Thread.sleep(1000); //Wait for no-node to clear
+                            }
+                        }
+                    }
+                } catch (Throwable e) {
+                    logger.error("Something went wrong", e);
+                    failures.add(e);
+                } finally {
+                    try {
+                        // block until every async response came back (i.e. all permits returned)
+                        waitForOutstandingRequests(TimeValue.timeValueSeconds(60), updateRequestsOutstanding, maxUpdateRequests, "Update");
+                        waitForOutstandingRequests(TimeValue.timeValueSeconds(60), deleteRequestsOutstanding, maxDeleteRequests, "Delete");
+                    } catch (ElasticsearchTimeoutException ete) {
+                        failures.add(ete);
+                    }
+                    latch.countDown();
+                }
+            }
+
+            private void incrementMapValue(int j, Map<Integer,Integer> map) {
+                if (!map.containsKey(j)) {
+                    map.put(j, 0);
+                }
+                map.put(j, map.get(j) + 1);
+            }
+
+            // Tries to re-acquire all maxRequests permits within timeOut; retries across
+            // InterruptedException until the deadline, then throws ElasticsearchTimeoutException.
+            private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOutstanding, int maxRequests, String name) {
+                long start = System.currentTimeMillis();
+                do {
+                    long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
+                    logger.info("[{}] going to try and acquire [{}] in [{}]ms [{}] available to acquire right now",name, maxRequests,msRemaining, requestsOutstanding.availablePermits());
+                    try {
+                        requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS );
+                        return;
+                    } catch (InterruptedException ie) {
+                        //Just keep swimming
+                    }
+                } while ((System.currentTimeMillis() - start) < timeOut.getMillis());
+                throw new ElasticsearchTimeoutException("Requests were still outstanding after the timeout [" + timeOut + "] for type [" + name + "]" );
+            }
+        }
+        final List<UpdateThread> threads = new ArrayList<>();
+
+        for (int i = 0; i < numberOfThreads; i++) {
+            UpdateThread ut = new UpdateThread(numberOfIdsPerThread, numberOfUpdatesPerId);
+            ut.start();
+            threads.add(ut);
+        }
+
+        startLatch.countDown();
+        latch.await();
+
+        for (UpdateThread ut : threads){
+            ut.join(); //Threads should have finished because of the latch.await
+        }
+
+        //If are no errors every request received a response otherwise the test would have timedout
+        //aquiring the request outstanding semaphores.
+        for (Throwable throwable : failures) {
+            logger.info("Captured failure on concurrent update:", throwable);
+        }
+
+        assertThat(failures.size(), equalTo(0));
+
+        //Upsert all the ids one last time to make sure they are available at get time
+        //This means that we add 1 to the expected versions and attempts
+        //All the previous operations should be complete or failed at this point
+        for (int i = 0; i < numberOfIdsPerThread; ++i) {
+            UpdateResponse ur = client().prepareUpdate("test", "type1", Integer.toString(i))
+                    .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null))
+                    .setRetryOnConflict(Integer.MAX_VALUE)
+                    .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
+                    .execute().actionGet();
+        }
+
+        refresh();
+
+        for (int i = 0; i < numberOfIdsPerThread; ++i) {
+            int totalFailures = 0;
+            GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+            if (response.isExists()) {
+                assertThat(response.getId(), equalTo(Integer.toString(i)));
+                // each thread issued numberOfUpdatesPerId updates AND deletes per id (hence *2),
+                // plus the final upsert above (+1); every failure lowers the reached version by one
+                int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1;
+                for (UpdateThread ut : threads) {
+                    if (ut.failedMap.containsKey(i)) {
+                        totalFailures += ut.failedMap.get(i);
+                    }
+                }
+                expectedVersion -= totalFailures;
+                logger.error("Actual version [{}] Expected version [{}] Total failures [{}]", response.getVersion(), expectedVersion, totalFailures);
+                assertThat(response.getVersion(), equalTo((long) expectedVersion));
+                assertThat(response.getVersion() + totalFailures,
+                        equalTo(
+                                (long)((numberOfUpdatesPerId * numberOfThreads * 2) + 1)
+                        ));
+            }
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy (pre-Script-object) API variant of the upsert test: first call creates the
+    // doc from the upsert body (isCreated == true, script not applied to it), second call
+    // runs the increment script against the existing doc (isCreated == false).
+    @Test
+    public void testUpsertOldScriptAPI() throws Exception {
+        createTestIndex();
+        ensureGreen();
+
+        UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+                .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).execute().actionGet();
+        assertTrue(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        // field stays at the upsert value 1: the script did not run on creation
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+        }
+
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+                .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).execute().actionGet();
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        // doc existed this time, so the script incremented field to 2
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy-API variant of scripted_upsert: with setScriptedUpsert(true) the script DOES
+    // run on document creation, with ctx.op == "create" distinguishing the create path
+    // (half payment) from the update path (full payment).
+    @Test
+    public void testScriptedUpsertOldScriptAPI() throws Exception {
+        createTestIndex();
+        ensureGreen();
+
+        // Script logic is
+        // 1) New accounts take balance from "balance" in upsert doc and first payment is charged at 50%
+        // 2) Existing accounts subtract full payment from balance stored in elasticsearch
+
+        String script = "int oldBalance=ctx._source.balance;" + "int deduction=ctx.op == \"create\" ? (payment/2) : payment;"
+                + "ctx._source.balance=oldBalance-deduction;";
+        int openingBalance = 10;
+
+        // Pay money from what will be a new account and opening balance comes from upsert doc
+        // provided by client
+        UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject())
+                .setScriptedUpsert(true).addScriptParam("payment", 2).setScript(script, ScriptService.ScriptType.INLINE).execute()
+                .actionGet();
+        assertTrue(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        // create path: 10 - (2/2) = 9
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("9"));
+        }
+
+        // Now pay money for an existing account where balance is stored in es
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("balance", openingBalance).endObject())
+                .setScriptedUpsert(true).addScriptParam("payment", 2).setScript(script, ScriptService.ScriptType.INLINE).execute()
+                .actionGet();
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        // update path: 9 - 2 = 7
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("balance").toString(), equalTo("7"));
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy-API variant: verifies that setFields("_source") returns the post-operation
+    // source in the UpdateResponse's GetResult. On the creating call the upsert doc is
+    // returned ("extra" absent, script not applied); on the second call the script ran,
+    // so "extra" is present.
+    @Test
+    public void testUpsertFieldsOldScriptAPI() throws Exception {
+        createTestIndex();
+        ensureGreen();
+
+        UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+                .setScript("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE).setFields("_source").execute().actionGet();
+
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult(), notNullValue());
+        assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+                .setScript("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE).setFields("_source").execute().actionGet();
+
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult(), notNullValue());
+        assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("extra").toString(), equalTo("foo"));
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy-API variant of versioned updates: covers internal-version conflicts and
+    // matches, DocumentMissingException after delete, rejection of EXTERNAL versioning,
+    // upsert-with-version semantics, FORCE versioning, and the retry_on_conflict +
+    // version combination being rejected at validation time.
+    @Test
+    public void testVersionedUpdateOldScriptAPI() throws Exception {
+        assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
+        ensureGreen();
+
+        index("test", "type", "1", "text", "value"); // version is now 1
+
+        // wrong expected version -> conflict
+        assertThrows(
+                client().prepareUpdate(indexOrAlias(), "type", "1").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                        .setVersion(2).execute(), VersionConflictEngineException.class);
+
+        // matching expected version -> update succeeds, version bumps to 2
+        client().prepareUpdate(indexOrAlias(), "type", "1").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                .setVersion(1).get();
+        assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l));
+
+        // and again with a higher version..
+        client().prepareUpdate(indexOrAlias(), "type", "1").setScript("ctx._source.text = 'v3'", ScriptService.ScriptType.INLINE)
+                .setVersion(2).get();
+
+        assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l));
+
+        // after delete
+        client().prepareDelete("test", "type", "1").get();
+        assertThrows(client().prepareUpdate("test", "type", "1").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                .setVersion(3).execute(), DocumentMissingException.class);
+
+        // external versioning
+        client().prepareIndex("test", "type", "2").setSource("text", "value").setVersion(10).setVersionType(VersionType.EXTERNAL).get();
+
+        // EXTERNAL version type is not allowed on updates at all
+        assertThrows(
+                client().prepareUpdate(indexOrAlias(), "type", "2").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                        .setVersion(2).setVersionType(VersionType.EXTERNAL).execute(), ActionRequestValidationException.class);
+
+        // upserts - the combination with versions is a bit weird. Test are here to ensure we do not change our behavior unintentionally
+
+        // With internal versions, tt means "if object is there with version X, update it or explode. If it is not there, index.
+        client().prepareUpdate(indexOrAlias(), "type", "3").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                .setVersion(10).setUpsert("{ \"text\": \"v0\" }").get();
+        GetResponse get = get("test", "type", "3");
+        assertThat(get.getVersion(), equalTo(1l));
+        assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+        // With force version
+        client().prepareUpdate(indexOrAlias(), "type", "4").setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
+                .setVersion(10).setVersionType(VersionType.FORCE).setUpsert("{ \"text\": \"v0\" }").get();
+
+        // FORCE makes the document carry exactly the supplied version
+        get = get("test", "type", "4");
+        assertThat(get.getVersion(), equalTo(10l));
+        assertThat((String) get.getSource().get("text"), equalTo("v0"));
+
+        // retry on conflict is rejected:
+        assertThrows(client().prepareUpdate(indexOrAlias(), "type", "1").setVersion(10).setRetryOnConflict(5),
+                ActionRequestValidationException.class);
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy-API variant: an upsert against a non-existent index auto-creates the index
+    // and indexes the upsert doc; the script does not run on creation, so "extra" is null.
+    @Test
+    public void testIndexAutoCreationOldScriptAPI() throws Exception {
+        UpdateResponse updateResponse = client().prepareUpdate("test", "type1", "1")
+                .setUpsert(XContentFactory.jsonBuilder().startObject().field("bar", "baz").endObject())
+                .setScript("ctx._source.extra = \"foo\"", ScriptService.ScriptType.INLINE).setFields("_source").execute().actionGet();
+
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult(), notNullValue());
+        assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("bar").toString(), equalTo("baz"));
+        assertThat(updateResponse.getGetResult().sourceAsMap().get("extra"), nullValue());
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+    // Legacy-API umbrella test for the update endpoint: missing-document error, scripted
+    // increments (with and without params), ctx.op noop/delete, TTL preservation and
+    // modification, timestamp modification, the fields parameter, doc-merge updates
+    // (new field, changed field, recursive map merge).
+    @Test
+    public void testUpdateOldScriptAPI() throws Exception {
+        createTestIndex();
+        ensureGreen();
+
+        // updating a missing document (no upsert) must fail
+        try {
+            client().prepareUpdate(indexOrAlias(), "type1", "1").setScript("ctx._source.field++", ScriptService.ScriptType.INLINE)
+                    .execute().actionGet();
+            fail();
+        } catch (DocumentMissingException e) {
+            // all is well
+        }
+
+        client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+
+        UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).execute().actionGet();
+        assertThat(updateResponse.getVersion(), equalTo(2L));
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("2"));
+        }
+
+        // script parameters are passed via addScriptParam with the legacy API
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setScript("ctx._source.field += count", ScriptService.ScriptType.INLINE).addScriptParam("count", 3).execute().actionGet();
+        assertThat(updateResponse.getVersion(), equalTo(3L));
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+        }
+
+        // check noop
+        // ctx.op = 'none' leaves the document untouched: version stays at 3
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setScript("ctx.op = 'none'", ScriptService.ScriptType.INLINE)
+                .execute().actionGet();
+        assertThat(updateResponse.getVersion(), equalTo(3L));
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("5"));
+        }
+
+        // check delete
+        // ctx.op = 'delete' removes the document and still bumps the version
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setScript("ctx.op = 'delete'", ScriptService.ScriptType.INLINE).execute().actionGet();
+        assertThat(updateResponse.getVersion(), equalTo(4L));
+        assertFalse(updateResponse.isCreated());
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+
+        for (int i = 0; i < 5; i++) {
+            GetResponse getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.isExists(), equalTo(false));
+        }
+
+        // check TTL is kept after an update without TTL
+        client().prepareIndex("test", "type1", "2").setSource("field", 1).setTTL(86400000L).setRefresh(true).execute().actionGet();
+        GetResponse getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+        long ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+        assertThat(ttl, greaterThan(0L));
+        client().prepareUpdate(indexOrAlias(), "type1", "2").setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).execute()
+                .actionGet();
+        getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+        ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+        assertThat(ttl, greaterThan(0L));
+
+        // check TTL update
+        // scripts can lower the TTL via ctx._ttl; the stored TTL counts down, hence <= bound
+        client().prepareUpdate(indexOrAlias(), "type1", "2").setScript("ctx._ttl = 3600000", ScriptService.ScriptType.INLINE).execute()
+                .actionGet();
+        getResponse = client().prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
+        ttl = ((Number) getResponse.getField("_ttl").getValue()).longValue();
+        assertThat(ttl, greaterThan(0L));
+        assertThat(ttl, lessThanOrEqualTo(3600000L));
+
+        // check timestamp update
+        // 1258294332000L is the epoch-millis value of the date string set in the script
+        client().prepareIndex("test", "type1", "3").setSource("field", 1).setRefresh(true).execute().actionGet();
+        client().prepareUpdate(indexOrAlias(), "type1", "3")
+                .setScript("ctx._timestamp = \"2009-11-15T14:12:12\"", ScriptService.ScriptType.INLINE).execute().actionGet();
+        getResponse = client().prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
+        long timestamp = ((Number) getResponse.getField("_timestamp").getValue()).longValue();
+        assertThat(timestamp, equalTo(1258294332000L));
+
+        // check fields parameter
+        client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).setFields("_source", "field").execute().actionGet();
+        assertThat(updateResponse.getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult(), notNullValue());
+        assertThat(updateResponse.getGetResult().getIndex(), equalTo("test"));
+        assertThat(updateResponse.getGetResult().sourceRef(), notNullValue());
+        assertThat(updateResponse.getGetResult().field("field").getValue(), notNullValue());
+
+        // check updates without script
+        // add new field
+        client().prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject()).execute().actionGet();
+        for (int i = 0; i < 5; i++) {
+            getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("1"));
+            assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+        }
+
+        // change existing field
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject()).execute().actionGet();
+        for (int i = 0; i < 5; i++) {
+            getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            assertThat(getResponse.getSourceAsMap().get("field").toString(), equalTo("3"));
+            assertThat(getResponse.getSourceAsMap().get("field2").toString(), equalTo("2"));
+        }
+
+        // recursive map
+        // doc-merge must merge nested maps key-by-key rather than replace them wholesale:
+        // "commonkey" contents from both testMap and testMap3 must end up combined
+        Map<String, Object> testMap = new HashMap<>();
+        Map<String, Object> testMap2 = new HashMap<>();
+        Map<String, Object> testMap3 = new HashMap<>();
+        testMap3.put("commonkey", testMap);
+        testMap3.put("map3", 5);
+        testMap2.put("map2", 6);
+        testMap.put("commonkey", testMap2);
+        testMap.put("map1", 8);
+
+        client().prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet();
+        updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1")
+                .setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject()).execute().actionGet();
+        for (int i = 0; i < 5; i++) {
+            getResponse = client().prepareGet("test", "type1", "1").execute().actionGet();
+            Map map1 = (Map) getResponse.getSourceAsMap().get("map");
+            assertThat(map1.size(), equalTo(3));
+            assertThat(map1.containsKey("map1"), equalTo(true));
+            assertThat(map1.containsKey("map3"), equalTo(true));
+            assertThat(map1.containsKey("commonkey"), equalTo(true));
+            Map map2 = (Map) map1.get("commonkey");
+            assertThat(map2.size(), equalTo(3));
+            assertThat(map2.containsKey("map1"), equalTo(true));
+            assertThat(map2.containsKey("map2"), equalTo(true));
+            assertThat(map2.containsKey("commonkey"), equalTo(true));
+        }
+    }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testUpdateRequestWithBothScriptAndDocOldScriptAPI() throws Exception {
+ createTestIndex();
+ ensureGreen();
+
+ try {
+ client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 1).endObject())
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("can't provide both script and doc"));
+ assertThat(e.getMessage(), containsString("can't provide both script and doc"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testUpdateRequestWithScriptAndShouldUpsertDocOldScriptAPI() throws Exception {
+ createTestIndex();
+ ensureGreen();
+ try {
+ client().prepareUpdate(indexOrAlias(), "type1", "1")
+ .setScript(new Script("ctx._source.field += 1", ScriptService.ScriptType.INLINE, null, null)).setDocAsUpsert(true)
+ .execute().actionGet();
+ fail("Should have thrown ActionRequestValidationException");
+ } catch (ActionRequestValidationException e) {
+ assertThat(e.validationErrors().size(), equalTo(1));
+ assertThat(e.validationErrors().get(0), containsString("doc must be specified if doc_as_upsert is enabled"));
+ assertThat(e.getMessage(), containsString("doc must be specified if doc_as_upsert is enabled"));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ public void testContextVariablesOldScriptAPI() throws Exception {
+ assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
+ .addMapping("type1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("type1")
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject()
+ .endObject())
+ .addMapping("subtype1", XContentFactory.jsonBuilder()
+ .startObject()
+ .startObject("subtype1")
+ .startObject("_parent").field("type", "type1").endObject()
+ .startObject("_timestamp").field("enabled", true).field("store", "yes").endObject()
+ .startObject("_ttl").field("enabled", true).endObject()
+ .endObject()
+ .endObject())
+ );
+ ensureGreen();
+
+ // Index some documents
+ long timestamp = System.currentTimeMillis();
+ client().prepareIndex().setIndex("test").setType("type1").setId("parentId1").setTimestamp(String.valueOf(timestamp - 1))
+ .setSource("field1", 0, "content", "bar").execute().actionGet();
+
+ long ttl = 10000;
+ client().prepareIndex().setIndex("test").setType("subtype1").setId("id1").setParent("parentId1").setRouting("routing1")
+ .setTimestamp(String.valueOf(timestamp)).setTTL(ttl).setSource("field1", 1, "content", "foo").execute().actionGet();
+
+ // Update the first object and note context variables values
+ Map<String, Object> scriptParams = new HashMap<>();
+ scriptParams.put("delim", "_");
+ UpdateResponse updateResponse = client()
+ .prepareUpdate("test", "subtype1", "id1")
+ .setRouting("routing1")
+ .setScript(
+ "assert ctx._index == \"test\" : \"index should be \\\"test\\\"\"\n"
+ + "assert ctx._type == \"subtype1\" : \"type should be \\\"subtype1\\\"\"\n"
+ + "assert ctx._id == \"id1\" : \"id should be \\\"id1\\\"\"\n"
+ + "assert ctx._version == 1 : \"version should be 1\"\n"
+ + "assert ctx._parent == \"parentId1\" : \"parent should be \\\"parentId1\\\"\"\n"
+ + "assert ctx._routing == \"routing1\" : \"routing should be \\\"routing1\\\"\"\n"
+ + "assert ctx._timestamp == " + timestamp + " : \"timestamp should be "
+ + timestamp
+ + "\"\n"
+ +
+ // ttl has a 3-second leeway, because it's always counting down
+ "assert ctx._ttl <= " + ttl + " : \"ttl should be <= " + ttl + " but was \" + ctx._ttl\n"
+ + "assert ctx._ttl >= " + (ttl - 3000) + " : \"ttl should be <= " + (ttl - 3000)
+ + " but was \" + ctx._ttl\n" + "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n"
+ + "ctx._source.field1 += 1;\n", ScriptService.ScriptType.INLINE).setScriptParams(scriptParams).execute()
+ .actionGet();
+
+ assertEquals(2, updateResponse.getVersion());
+
+ GetResponse getResponse = client().prepareGet("test", "subtype1", "id1").setRouting("routing1").execute().actionGet();
+ assertEquals(2, getResponse.getSourceAsMap().get("field1"));
+ assertEquals("foo_foo", getResponse.getSourceAsMap().get("content"));
+
+ // Idem with the second object
+ scriptParams = new HashMap<>();
+ scriptParams.put("delim", "_");
+ updateResponse = client()
+ .prepareUpdate("test", "type1", "parentId1")
+ .setScript(
+ "assert ctx._index == \"test\" : \"index should be \\\"test\\\"\"\n"
+ + "assert ctx._type == \"type1\" : \"type should be \\\"type1\\\"\"\n"
+ + "assert ctx._id == \"parentId1\" : \"id should be \\\"parentId1\\\"\"\n"
+ + "assert ctx._version == 1 : \"version should be 1\"\n"
+ + "assert ctx._parent == null : \"parent should be null\"\n"
+ + "assert ctx._routing == null : \"routing should be null\"\n" + "assert ctx._timestamp == "
+ + (timestamp - 1) + " : \"timestamp should be " + (timestamp - 1) + "\"\n"
+ + "assert ctx._ttl == null : \"ttl should be null\"\n"
+ + "ctx._source.content = ctx._source.content + delim + ctx._source.content;\n"
+ + "ctx._source.field1 += 1;\n", ScriptService.ScriptType.INLINE).setScriptParams(scriptParams).execute()
+ .actionGet();
+
+ assertEquals(2, updateResponse.getVersion());
+
+ getResponse = client().prepareGet("test", "type1", "parentId1").execute().actionGet();
+ assertEquals(1, getResponse.getSourceAsMap().get("field1"));
+ assertEquals("bar_bar", getResponse.getSourceAsMap().get("content"));
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ @Slow
+ public void testConcurrentUpdateWithRetryOnConflictOldScriptAPI() throws Exception {
+ final boolean useBulkApi = randomBoolean();
+ createTestIndex();
+ ensureGreen();
+
+ int numberOfThreads = scaledRandomIntBetween(2, 5);
+ final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+ final CountDownLatch startLatch = new CountDownLatch(1);
+ final int numberOfUpdatesPerThread = scaledRandomIntBetween(100, 10000);
+ final List<Throwable> failures = new CopyOnWriteArrayList<>();
+ for (int i = 0; i < numberOfThreads; i++) {
+ Runnable r = new Runnable() {
+
+ @Override
+ public void run() {
+ try {
+ startLatch.await();
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ if (useBulkApi) {
+ UpdateRequestBuilder updateRequestBuilder = client()
+ .prepareUpdate(indexOrAlias(), "type1", Integer.toString(i))
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
+ client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
+ } else {
+ client().prepareUpdate(indexOrAlias(), "type1", Integer.toString(i))
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
+ .setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()).execute().actionGet();
+ }
+ }
+ } catch (Throwable e) {
+ failures.add(e);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ };
+ new Thread(r).start();
+ }
+ startLatch.countDown();
+ latch.await();
+ for (Throwable throwable : failures) {
+ logger.info("Captured failure on concurrent update:", throwable);
+ }
+ assertThat(failures.size(), equalTo(0));
+ for (int i = 0; i < numberOfUpdatesPerThread; i++) {
+ GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ assertThat(response.getId(), equalTo(Integer.toString(i)));
+ assertThat(response.isExists(), equalTo(true));
+ assertThat(response.getVersion(), equalTo((long) numberOfThreads));
+ assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
+ }
+ }
+
+ /*
+ * TODO Remove in 2.0
+ */
+ @Test
+ @Slow
+ public void stressUpdateDeleteConcurrencyOldScriptAPI() throws Exception {
+ //We create an index with merging disabled so that deletes don't get merged away
+ assertAcked(prepareCreate("test").addMapping(
+ "type1",
+ XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_timestamp").field("enabled", true)
+ .field("store", "yes").endObject().startObject("_ttl").field("enabled", true).endObject().endObject().endObject())
+ .setSettings(Settings.builder().put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class)));
+ ensureGreen();
+
+ final int numberOfThreads = scaledRandomIntBetween(3, 5);
+ final int numberOfIdsPerThread = scaledRandomIntBetween(3, 10);
+ final int numberOfUpdatesPerId = scaledRandomIntBetween(10, 100);
+ final int retryOnConflict = randomIntBetween(0, 1);
+ final CountDownLatch latch = new CountDownLatch(numberOfThreads);
+ final CountDownLatch startLatch = new CountDownLatch(1);
+ final List<Throwable> failures = new CopyOnWriteArrayList<>();
+
+ final class UpdateThread extends Thread {
+ final Map<Integer, Integer> failedMap = new HashMap<>();
+ final int numberOfIds;
+ final int updatesPerId;
+ final int maxUpdateRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
+ final int maxDeleteRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
+ private final Semaphore updateRequestsOutstanding = new Semaphore(maxUpdateRequests);
+ private final Semaphore deleteRequestsOutstanding = new Semaphore(maxDeleteRequests);
+
+ public UpdateThread(int numberOfIds, int updatesPerId) {
+ this.numberOfIds = numberOfIds;
+ this.updatesPerId = updatesPerId;
+ }
+
+ final class UpdateListener implements ActionListener<UpdateResponse> {
+ int id;
+
+ public UpdateListener(int id) {
+ this.id = id;
+ }
+
+ @Override
+ public void onResponse(UpdateResponse updateResponse) {
+ updateRequestsOutstanding.release(1);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ synchronized (failedMap) {
+ incrementMapValue(id, failedMap);
+ }
+ updateRequestsOutstanding.release(1);
+ }
+
+ }
+
+ final class DeleteListener implements ActionListener<DeleteResponse> {
+ int id;
+
+ public DeleteListener(int id) {
+ this.id = id;
+ }
+
+ @Override
+ public void onResponse(DeleteResponse deleteResponse) {
+ deleteRequestsOutstanding.release(1);
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ synchronized (failedMap) {
+ incrementMapValue(id, failedMap);
+ }
+ deleteRequestsOutstanding.release(1);
+ }
+ }
+
+ @Override
+ public void run() {
+ try {
+ startLatch.await();
+ boolean hasWaitedForNoNode = false;
+ for (int j = 0; j < numberOfIds; j++) {
+ for (int k = 0; k < numberOfUpdatesPerId; ++k) {
+ updateRequestsOutstanding.acquire();
+ try {
+ UpdateRequest ur = client().prepareUpdate("test", "type1", Integer.toString(j))
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
+ .setRetryOnConflict(retryOnConflict)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()).request();
+ client().update(ur, new UpdateListener(j));
+ } catch (NoNodeAvailableException nne) {
+ updateRequestsOutstanding.release();
+ synchronized (failedMap) {
+ incrementMapValue(j, failedMap);
+ }
+ if (hasWaitedForNoNode) {
+ throw nne;
+ }
+ logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
+ hasWaitedForNoNode = true;
+ Thread.sleep(1000);
+ }
+
+ try {
+ deleteRequestsOutstanding.acquire();
+ DeleteRequest dr = client().prepareDelete("test", "type1", Integer.toString(j)).request();
+ client().delete(dr, new DeleteListener(j));
+ } catch (NoNodeAvailableException nne) {
+ deleteRequestsOutstanding.release();
+ synchronized (failedMap) {
+ incrementMapValue(j, failedMap);
+ }
+ if (hasWaitedForNoNode) {
+ throw nne;
+ }
+ logger.warn("Got NoNodeException waiting for 1 second for things to recover.");
+ hasWaitedForNoNode = true;
+ Thread.sleep(1000); //Wait for no-node to clear
+ }
+ }
+ }
+ } catch (Throwable e) {
+ logger.error("Something went wrong", e);
+ failures.add(e);
+ } finally {
+ try {
+ waitForOutstandingRequests(TimeValue.timeValueSeconds(60), updateRequestsOutstanding, maxUpdateRequests, "Update");
+ waitForOutstandingRequests(TimeValue.timeValueSeconds(60), deleteRequestsOutstanding, maxDeleteRequests, "Delete");
+ } catch (ElasticsearchTimeoutException ete) {
+ failures.add(ete);
+ }
+ latch.countDown();
+ }
+ }
+
+ private void incrementMapValue(int j, Map<Integer, Integer> map) {
+ if (!map.containsKey(j)) {
+ map.put(j, 0);
+ }
+ map.put(j, map.get(j) + 1);
+ }
+
+ private void waitForOutstandingRequests(TimeValue timeOut, Semaphore requestsOutstanding, int maxRequests, String name) {
+ long start = System.currentTimeMillis();
+ do {
+ long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
+ logger.info("[{}] going to try and acquire [{}] in [{}]ms [{}] available to acquire right now", name, maxRequests,
+ msRemaining, requestsOutstanding.availablePermits());
+ try {
+ requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS);
+ return;
+ } catch (InterruptedException ie) {
+ //Just keep swimming
+ }
+ } while ((System.currentTimeMillis() - start) < timeOut.getMillis());
+ throw new ElasticsearchTimeoutException("Requests were still outstanding after the timeout [" + timeOut + "] for type ["
+ + name + "]");
+ }
+ }
+ final List<UpdateThread> threads = new ArrayList<>();
+
+ for (int i = 0; i < numberOfThreads; i++) {
+ UpdateThread ut = new UpdateThread(numberOfIdsPerThread, numberOfUpdatesPerId);
+ ut.start();
+ threads.add(ut);
+ }
+
+ startLatch.countDown();
+ latch.await();
+
+ for (UpdateThread ut : threads) {
+ ut.join(); //Threads should have finished because of the latch.await
+ }
+
+        //If there are no errors every request received a response otherwise the test would have timed out
+        //acquiring the request outstanding semaphores.
+ for (Throwable throwable : failures) {
+ logger.info("Captured failure on concurrent update:", throwable);
+ }
+
+ assertThat(failures.size(), equalTo(0));
+
+ //Upsert all the ids one last time to make sure they are available at get time
+ //This means that we add 1 to the expected versions and attempts
+ //All the previous operations should be complete or failed at this point
+ for (int i = 0; i < numberOfIdsPerThread; ++i) {
+ UpdateResponse ur = client().prepareUpdate("test", "type1", Integer.toString(i))
+ .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE).setRetryOnConflict(Integer.MAX_VALUE)
+ .setUpsert(jsonBuilder().startObject().field("field", 1).endObject()).execute().actionGet();
+ }
+
+ refresh();
+
+ for (int i = 0; i < numberOfIdsPerThread; ++i) {
+ int totalFailures = 0;
+ GetResponse response = client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
+ if (response.isExists()) {
+ assertThat(response.getId(), equalTo(Integer.toString(i)));
+ int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1;
+ for (UpdateThread ut : threads) {
+ if (ut.failedMap.containsKey(i)) {
+ totalFailures += ut.failedMap.get(i);
+ }
+ }
+ expectedVersion -= totalFailures;
+ logger.error("Actual version [{}] Expected version [{}] Total failures [{}]", response.getVersion(), expectedVersion,
+ totalFailures);
+ assertThat(response.getVersion(), equalTo((long) expectedVersion));
+ assertThat(response.getVersion() + totalFailures, equalTo((long) ((numberOfUpdatesPerId * numberOfThreads * 2) + 1)));
+ }
+ }
+ }
+
+ private static String indexOrAlias() {
+ return randomBoolean() ? "test" : "alias";
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
new file mode 100644
index 0000000000..a00a773ad8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryTests.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.validate;
+
+import com.google.common.base.Charsets;
+
+import org.elasticsearch.action.admin.indices.alias.Alias;
+import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.indices.IndexMissingException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
+import org.hamcrest.Matcher;
+import org.joda.time.DateTime;
+import org.joda.time.DateTimeZone;
+import org.joda.time.format.ISODateTimeFormat;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+@ClusterScope(randomDynamicTemplates = false, scope = Scope.SUITE)
+public class SimpleValidateQueryTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void simpleValidateQuery() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setSource("foo".getBytes(Charsets.UTF_8)).execute().actionGet().isValid(), equalTo(false));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_id:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("_i:d:1")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1")).execute().actionGet().isValid(), equalTo(true));
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("bar:hey")).execute().actionGet().isValid(), equalTo(false));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("nonexistent:hello")).execute().actionGet().isValid(), equalTo(true));
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setQuery(QueryBuilders.queryStringQuery("foo:1 AND")).execute().actionGet().isValid(), equalTo(false));
+ }
+
+ @Test
+ public void explainValidateQueryTwoNodes() throws IOException {
+ createIndex("test");
+ ensureGreen();
+ client().admin().indices().preparePutMapping("test").setType("type1")
+ .setSource(XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
+ .startObject("foo").field("type", "string").endObject()
+ .startObject("bar").field("type", "integer").endObject()
+ .startObject("baz").field("type", "string").field("analyzer", "snowball").endObject()
+ .startObject("pin").startObject("properties").startObject("location").field("type", "geo_point").endObject().endObject().endObject()
+ .endObject().endObject().endObject())
+ .execute().actionGet();
+
+ refresh();
+
+ for (Client client : internalCluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setSource("foo".getBytes(Charsets.UTF_8))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(false));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), containsString("Failed to parse"));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), nullValue());
+
+ }
+
+ for (Client client : internalCluster()) {
+ ValidateQueryResponse response = client.admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.queryStringQuery("foo"))
+ .setExplain(true)
+ .execute().actionGet();
+ assertThat(response.isValid(), equalTo(true));
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("_all:foo"));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ }
+ }
+
+ @Test //https://github.com/elasticsearch/elasticsearch/issues/3629
+ public void explainDateRangeInQueryString() {
+ assertAcked(prepareCreate("test").setSettings(Settings.settingsBuilder()
+ .put(indexSettings())
+ .put("index.number_of_shards", 1)));
+
+ String aMonthAgo = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).minusMonths(1));
+ String aMonthFromNow = ISODateTimeFormat.yearMonthDay().print(new DateTime(DateTimeZone.UTC).plusMonths(1));
+
+ client().prepareIndex("test", "type", "1").setSource("past", aMonthAgo, "future", aMonthFromNow).get();
+
+ refresh();
+
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery()
+ .setQuery(queryStringQuery("past:[now-2M/d TO now/d]")).setExplain(true).get();
+
+ assertNoFailures(response);
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ DateTime twoMonthsAgo = new DateTime(DateTimeZone.UTC).minusMonths(2).withTimeAtStartOfDay();
+ DateTime now = new DateTime(DateTimeZone.UTC).plusDays(1).withTimeAtStartOfDay().minusMillis(1);
+ assertThat(response.getQueryExplanation().get(0).getExplanation(),
+ equalTo("past:[" + twoMonthsAgo.getMillis() + " TO " + now.getMillis() + "]"));
+ assertThat(response.isValid(), equalTo(true));
+ }
+
+ @Test(expected = IndexMissingException.class)
+ public void validateEmptyCluster() {
+ client().admin().indices().prepareValidateQuery().get();
+ }
+
+ @Test
+ public void explainNoQuery() {
+ createIndex("test");
+ ensureGreen();
+
+ ValidateQueryResponse validateQueryResponse = client().admin().indices().prepareValidateQuery().setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getIndex(), equalTo("test"));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), equalTo("*:*"));
+ }
+
+ @Test
+ public void explainFilteredAlias() {
+ assertAcked(prepareCreate("test")
+ .addMapping("test", "field", "type=string")
+ .addAlias(new Alias("alias").filter(QueryBuilders.termQuery("field", "value1"))));
+ ensureGreen();
+
+ ValidateQueryResponse validateQueryResponse = client().admin().indices().prepareValidateQuery("alias")
+ .setQuery(QueryBuilders.matchAllQuery()).setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getIndex(), equalTo("test"));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:value1"));
+ }
+
+ @Test
+ public void explainMatchPhrasePrefix() {
+ assertAcked(prepareCreate("test").setSettings(
+ Settings.settingsBuilder().put(indexSettings())
+ .put("index.analysis.filter.syns.type", "synonym")
+ .putArray("index.analysis.filter.syns.synonyms", "one,two")
+ .put("index.analysis.analyzer.syns.tokenizer", "standard")
+ .putArray("index.analysis.analyzer.syns.filter", "syns")
+ ).addMapping("test", "field","type=string,analyzer=syns"));
+ ensureGreen();
+
+ ValidateQueryResponse validateQueryResponse = client().admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.matchPhrasePrefixQuery("field", "foo")).setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:\"foo*\""));
+
+ validateQueryResponse = client().admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.matchPhrasePrefixQuery("field", "foo bar")).setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:\"foo bar*\""));
+
+ // Stacked tokens
+ validateQueryResponse = client().admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.matchPhrasePrefixQuery("field", "one bar")).setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:\"(one two) bar*\""));
+
+ validateQueryResponse = client().admin().indices().prepareValidateQuery("test")
+ .setQuery(QueryBuilders.matchPhrasePrefixQuery("field", "foo one")).setExplain(true).get();
+ assertThat(validateQueryResponse.isValid(), equalTo(true));
+ assertThat(validateQueryResponse.getQueryExplanation().size(), equalTo(1));
+ assertThat(validateQueryResponse.getQueryExplanation().get(0).getExplanation(), containsString("field:\"foo (one* two*)\""));
+ }
+
+ @Test
+ public void explainWithRewriteValidateQuery() throws Exception {
+ client().admin().indices().prepareCreate("test")
+ .addMapping("type1", "field", "type=string,analyzer=whitespace")
+ .setSettings(SETTING_NUMBER_OF_SHARDS, 1).get();
+ client().prepareIndex("test", "type1", "1").setSource("field", "quick lazy huge brown pidgin").get();
+ client().prepareIndex("test", "type1", "2").setSource("field", "the quick brown fox").get();
+ client().prepareIndex("test", "type1", "3").setSource("field", "the quick lazy huge brown fox jumps over the tree").get();
+ client().prepareIndex("test", "type1", "4").setSource("field", "the lazy dog quacks like a duck").get();
+ refresh();
+
+ // prefix queries
+ assertExplanation(QueryBuilders.matchPhrasePrefixQuery("field", "qu"),
+ containsString("field:quick"), true);
+ assertExplanation(QueryBuilders.matchPhrasePrefixQuery("field", "ju"),
+ containsString("field:jumps"), true);
+
+ // common terms queries
+ assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1),
+ containsString("(field:huge field:brown) +field:pidgin"), true);
+ assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"),
+ containsString("field:brown"), true);
+
+ // match queries with cutoff frequency
+ assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1),
+ containsString("(field:huge field:brown) +field:pidgin"), true);
+ assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"),
+ containsString("field:brown"), true);
+
+ // fuzzy queries
+ assertExplanation(QueryBuilders.fuzzyQuery("field", "the").fuzziness(Fuzziness.fromEdits(2)),
+ containsString("field:the field:tree^0.3333333"), true);
+ assertExplanation(QueryBuilders.fuzzyQuery("field", "jump"),
+ containsString("field:jumps^0.75"), true);
+
+ // more like this queries
+ assertExplanation(QueryBuilders.moreLikeThisQuery("field").ids("1")
+ .include(true).minTermFreq(1).minDocFreq(1).maxQueryTerms(2),
+ containsString("field:huge field:pidgin"), true);
+ assertExplanation(QueryBuilders.moreLikeThisQuery("field").like("the huge pidgin")
+ .minTermFreq(1).minDocFreq(1).maxQueryTerms(2),
+ containsString("field:huge field:pidgin"), true);
+ }
+
+ @Test
+ public void irrelevantPropertiesBeforeQuery() throws IOException {
+ createIndex("test");
+ ensureGreen();
+ refresh();
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setSource(new BytesArray("{\"foo\": \"bar\", \"query\": {\"term\" : { \"user\" : \"kimchy\" }}}")).get().isValid(), equalTo(false));
+ }
+
+ @Test
+ public void irrelevantPropertiesAfterQuery() throws IOException {
+ createIndex("test");
+ ensureGreen();
+ refresh();
+
+ assertThat(client().admin().indices().prepareValidateQuery("test").setSource(new BytesArray("{\"query\": {\"term\" : { \"user\" : \"kimchy\" }}, \"foo\": \"bar\"}")).get().isValid(), equalTo(false));
+ }
+
+ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher) {
+ assertExplanation(queryBuilder, matcher, false);
+ }
+
+ private void assertExplanation(QueryBuilder queryBuilder, Matcher<String> matcher, boolean withRewrite) {
+ ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("test")
+ .setTypes("type1")
+ .setQuery(queryBuilder)
+ .setExplain(true)
+ .setRewrite(withRewrite)
+ .execute().actionGet();
+ assertThat(response.getQueryExplanation().size(), equalTo(1));
+ assertThat(response.getQueryExplanation().get(0).getError(), nullValue());
+ assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher);
+ assertThat(response.isValid(), equalTo(true));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
new file mode 100644
index 0000000000..11d49cef82
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/versioning/ConcurrentDocumentOperationTests.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.versioning;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ *
+ */
+public class ConcurrentDocumentOperationTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void concurrentOperationOnSameDocTest() throws Exception {
+
+ logger.info("--> create an index with 1 shard and max replicas based on nodes");
+ assertAcked(prepareCreate("test")
+ .setSettings(settingsBuilder().put(indexSettings()).put("index.number_of_shards", 1)));
+
+ logger.info("execute concurrent updates on the same doc");
+ int numberOfUpdates = 100;
+ final AtomicReference<Throwable> failure = new AtomicReference<>();
+ final CountDownLatch latch = new CountDownLatch(numberOfUpdates);
+ for (int i = 0; i < numberOfUpdates; i++) {
+ client().prepareIndex("test", "type1", "1").setSource("field1", i).execute(new ActionListener<IndexResponse>() {
+ @Override
+ public void onResponse(IndexResponse response) {
+ latch.countDown();
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ e.printStackTrace();
+ failure.set(e);
+ latch.countDown();
+ }
+ });
+ }
+
+ latch.await();
+
+ assertThat(failure.get(), nullValue());
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+
+ logger.info("done indexing, check all have the same field value");
+ Map masterSource = client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap();
+ for (int i = 0; i < (cluster().size() * 5); i++) {
+ assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource));
+ }
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
new file mode 100644
index 0000000000..d857163e76
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/versioning/SimpleVersioningTests.java
@@ -0,0 +1,889 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.versioning;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.DocumentAlreadyExistsException;
+import org.elasticsearch.index.engine.FlushNotAllowedEngineException;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ *
+ */
+public class SimpleVersioningTests extends ElasticsearchIntegrationTest {
+
+ @Test
+ public void testExternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+        // Note - external version doesn't throw version conflicts on deletes of non-existent records. This is different from internal versioning
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+
+ // this should conflict with the delete command transaction which told us that the object was deleted at version 17.
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class
+ );
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(18).
+ setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(18L));
+ }
+
+ @Test
+ public void testForce() throws Exception {
+ createIndex("test");
+ ensureGreen("test"); // we are testing force here which doesn't work if we are recovering at the same time - zzzzz...
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.FORCE).get();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(12).setVersionType(VersionType.FORCE).get();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(14).setVersionType(VersionType.FORCE).get();
+ assertThat(indexResponse.getVersion(), equalTo(14l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.FORCE).get();
+ assertThat(indexResponse.getVersion(), equalTo(13l));
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ if (randomBoolean()) {
+ refresh();
+ }
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(13l));
+ }
+
+ // deleting with a lower version works.
+ long v= randomIntBetween(12,14);
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(v).setVersionType(VersionType.FORCE).get();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(v));
+ }
+
+ @Test
+ public void testExternalGTE() throws Exception {
+ createIndex("test");
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.EXTERNAL_GTE).get();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(12).setVersionType(VersionType.EXTERNAL_GTE).get();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(14).setVersionType(VersionType.EXTERNAL_GTE).get();
+ assertThat(indexResponse.getVersion(), equalTo(14l));
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL_GTE),
+ VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ if (randomBoolean()) {
+ refresh();
+ }
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(14l));
+ }
+
+ // deleting with a lower version fails.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE),
+ VersionConflictEngineException.class);
+
+ // Delete with a higher or equal version deletes all versions up to the given one.
+ long v= randomIntBetween(14,17);
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(v).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(v));
+
+ // Deleting with a lower version keeps on failing after a delete.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE).execute(),
+ VersionConflictEngineException.class);
+
+
+ // But delete with a higher version is OK.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(18).setVersionType(VersionType.EXTERNAL_GTE).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(18l));
+ }
+
+ @Test
+ public void testExternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(12).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(12l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(14).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(14l));
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(13).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ if (randomBoolean()) {
+ refresh();
+ }
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(14l));
+ }
+
+ // deleting with a lower version fails.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+ // Delete with a higher version deletes all versions up to the given one.
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(17).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(17l));
+
+ // Deleting with a lower version keeps on failing after a delete.
+ assertThrows(
+ client().prepareDelete("test", "type", "1").setVersion(2).setVersionType(VersionType.EXTERNAL).execute(),
+ VersionConflictEngineException.class);
+
+
+ // But delete with a higher version is OK.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(18).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(18l));
+
+
+        // TODO: This behavior breaks rest api returning http status 201, good news is that this is only the case until deletes GC kicks in.
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(19).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(19l));
+
+
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(20l));
+
+ // Make sure that the next delete will be GC. Note we do it on the index settings so it will be cleaned up
+ HashMap<String,Object> newSettings = new HashMap<>();
+ newSettings.put("index.gc_deletes",-1);
+ client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet();
+
+ Thread.sleep(300); // gc works based on estimated sampled time. Give it a chance...
+
+ // And now we have previous version return -1
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(20).setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(20l));
+ }
+
+ @Test
+ public void testRequireUnitsOnUpdateSettings() throws Exception {
+ createIndex("test");
+ ensureGreen();
+ HashMap<String,Object> newSettings = new HashMap<>();
+ newSettings.put("index.gc_deletes", "42");
+ try {
+ client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet();
+ fail("did not hit expected exception");
+ } catch (IllegalArgumentException iae) {
+ // expected
+ assertTrue(iae.getMessage().contains("Failed to parse setting [index.gc_deletes] with value [42] as a time value: unit is missing or unrecognized"));
+ }
+ }
+
+ @Test
+ public void testInternalVersioningInitialDelete() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(17).execute(),
+ VersionConflictEngineException.class);
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")
+ .setCreate(true).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+ }
+
+
+ @Test
+ public void testInternalVersioning() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2l));
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+ assertThrows(
+ client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(2).execute(),
+ DocumentAlreadyExistsException.class);
+
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2l));
+ }
+
+ // search with versioning
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2l));
+ }
+
+ // search without versioning
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(Versions.NOT_FOUND));
+ }
+
+ DeleteResponse deleteResponse = client().prepareDelete("test", "type", "1").setVersion(2).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(true));
+ assertThat(deleteResponse.getVersion(), equalTo(3l));
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(2).execute(), VersionConflictEngineException.class);
+
+
+        // This is intricate - the object was deleted but a delete transaction was issued with the right version. We add another one
+ // and thus the transaction is increased.
+ deleteResponse = client().prepareDelete("test", "type", "1").setVersion(3).execute().actionGet();
+ assertThat(deleteResponse.isFound(), equalTo(false));
+ assertThat(deleteResponse.getVersion(), equalTo(4l));
+ }
+
+ @Test
+ public void testSimpleVersioningWithFlush() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ IndexResponse indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ indexResponse = client().prepareIndex("test", "type", "1").setSource("field1", "value1_2").setVersion(1).execute().actionGet();
+ assertThat(indexResponse.getVersion(), equalTo(2l));
+
+ client().admin().indices().prepareFlush().execute().actionGet();
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareIndex("test", "type", "1").setCreate(true).setSource("field1", "value1_1").setVersion(1).execute(),
+ VersionConflictEngineException.class);
+
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+ assertThrows(client().prepareDelete("test", "type", "1").setVersion(1).execute(), VersionConflictEngineException.class);
+
+ client().admin().indices().prepareRefresh().execute().actionGet();
+ for (int i = 0; i < 10; i++) {
+ assertThat(client().prepareGet("test", "type", "1").execute().actionGet().getVersion(), equalTo(2l));
+ }
+
+ for (int i = 0; i < 10; i++) {
+ SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setVersion(true).execute().actionGet();
+ assertThat(searchResponse.getHits().getAt(0).version(), equalTo(2l));
+ }
+ }
+
+ @Test
+ public void testVersioningWithBulk() {
+ createIndex("test");
+ ensureGreen();
+
+ BulkResponse bulkResponse = client().prepareBulk().add(client().prepareIndex("test", "type", "1").setSource("field1", "value1_1")).execute().actionGet();
+ assertThat(bulkResponse.hasFailures(), equalTo(false));
+ assertThat(bulkResponse.getItems().length, equalTo(1));
+ IndexResponse indexResponse = bulkResponse.getItems()[0].getResponse();
+ assertThat(indexResponse.getVersion(), equalTo(1l));
+ }
+
+
+ // Poached from Lucene's TestIDVersionPostingsFormat:
+
+ private interface IDSource {
+ String next();
+ }
+
+ private IDSource getRandomIDs() {
+ IDSource ids;
+ final Random random = getRandom();
+ switch (random.nextInt(6)) {
+ case 0:
+ // random simple
+ if (VERBOSE) {
+ System.out.println("TEST: use random simple ids");
+ }
+ ids = new IDSource() {
+ @Override
+ public String next() {
+ return TestUtil.randomSimpleString(random);
+ }
+ };
+ break;
+ case 1:
+ // random realistic unicode
+ if (VERBOSE) {
+ System.out.println("TEST: use random realistic unicode ids");
+ }
+ ids = new IDSource() {
+ @Override
+ public String next() {
+ return TestUtil.randomRealisticUnicodeString(random);
+ }
+ };
+ break;
+ case 2:
+ // sequential
+ if (VERBOSE) {
+                System.out.println("TEST: use sequential ids");
+ }
+ ids = new IDSource() {
+ int upto;
+ @Override
+ public String next() {
+ return Integer.toString(upto++);
+ }
+ };
+ break;
+ case 3:
+ // zero-pad sequential
+ if (VERBOSE) {
+                System.out.println("TEST: use zero-pad sequential ids");
+ }
+ ids = new IDSource() {
+ final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX);
+ final String zeroPad = String.format(Locale.ROOT, "%0" + TestUtil.nextInt(random, 4, 20) + "d", 0);
+ int upto;
+ @Override
+ public String next() {
+ String s = Integer.toString(upto++);
+ return zeroPad.substring(zeroPad.length() - s.length()) + s;
+ }
+ };
+ break;
+ case 4:
+ // random long
+ if (VERBOSE) {
+ System.out.println("TEST: use random long ids");
+ }
+ ids = new IDSource() {
+ final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX);
+ int upto;
+ @Override
+ public String next() {
+ return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix);
+ }
+ };
+ break;
+ case 5:
+ // zero-pad random long
+ if (VERBOSE) {
+ System.out.println("TEST: use zero-pad random long ids");
+ }
+ ids = new IDSource() {
+ final int radix = TestUtil.nextInt(random, Character.MIN_RADIX, Character.MAX_RADIX);
+ final String zeroPad = String.format(Locale.ROOT, "%015d", 0);
+ int upto;
+ @Override
+ public String next() {
+ return Long.toString(random.nextLong() & 0x3ffffffffffffffL, radix);
+ }
+ };
+ break;
+ default:
+ throw new AssertionError();
+ }
+
+ return ids;
+ }
+
+
+ private static class IDAndVersion {
+ public String id;
+ public long version;
+ public boolean delete;
+ public int threadID = -1;
+ public long indexStartTime;
+ public long indexFinishTime;
+ public boolean versionConflict;
+ public boolean alreadyExists;
+ public ActionResponse response;
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("id=");
+ sb.append(id);
+ sb.append(" version=");
+ sb.append(version);
+ sb.append(" delete?=");
+ sb.append(delete);
+ sb.append(" threadID=");
+ sb.append(threadID);
+ sb.append(" indexStartTime=");
+ sb.append(indexStartTime);
+ sb.append(" indexFinishTime=");
+ sb.append(indexFinishTime);
+ sb.append(" versionConflict=");
+ sb.append(versionConflict);
+ sb.append(" alreadyExists?=");
+ sb.append(alreadyExists);
+
+ if (response != null) {
+ if (response instanceof DeleteResponse) {
+ DeleteResponse deleteResponse = (DeleteResponse) response;
+ sb.append(" response:");
+ sb.append(" index=");
+ sb.append(deleteResponse.getIndex());
+ sb.append(" id=");
+ sb.append(deleteResponse.getId());
+ sb.append(" type=");
+ sb.append(deleteResponse.getType());
+ sb.append(" version=");
+ sb.append(deleteResponse.getVersion());
+ sb.append(" found=");
+ sb.append(deleteResponse.isFound());
+ } else if (response instanceof IndexResponse) {
+ IndexResponse indexResponse = (IndexResponse) response;
+ sb.append(" index=");
+ sb.append(indexResponse.getIndex());
+ sb.append(" id=");
+ sb.append(indexResponse.getId());
+ sb.append(" type=");
+ sb.append(indexResponse.getType());
+ sb.append(" version=");
+ sb.append(indexResponse.getVersion());
+ sb.append(" created=");
+ sb.append(indexResponse.isCreated());
+ } else {
+ sb.append(" response: " + response);
+ }
+ } else {
+ sb.append(" response: null");
+ }
+
+ return sb.toString();
+ }
+ }
+
+
+ @Test
+ @Slow
+ public void testRandomIDsAndVersions() throws Exception {
+ createIndex("test");
+ ensureGreen();
+
+ // TODO: sometimes use _bulk API
+ // TODO: test non-aborting exceptions (Rob suggested field where positions overflow)
+
+ // TODO: not great we don't test deletes GC here:
+
+ // We test deletes, but can't rely on wall-clock delete GC:
+ HashMap<String,Object> newSettings = new HashMap<>();
+ newSettings.put("index.gc_deletes", "1000000h");
+ assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(newSettings).execute().actionGet());
+
+ Random random = getRandom();
+
+ // Generate random IDs:
+ IDSource idSource = getRandomIDs();
+ Set<String> idsSet = new HashSet<>();
+
+ String idPrefix;
+ if (randomBoolean()) {
+ idPrefix = "";
+ } else {
+ idPrefix = TestUtil.randomSimpleString(random);
+ if (VERBOSE) {
+ System.out.println("TEST: use id prefix: " + idPrefix);
+ }
+ }
+
+ int numIDs;
+ if (TEST_NIGHTLY) {
+ numIDs = scaledRandomIntBetween(300, 1000);
+ } else {
+ numIDs = scaledRandomIntBetween(50, 100);
+ }
+
+ while (idsSet.size() < numIDs) {
+ idsSet.add(idPrefix + idSource.next());
+ }
+
+ String[] ids = idsSet.toArray(new String[numIDs]);
+
+ boolean useMonotonicVersion = randomBoolean();
+
+ // Attach random versions to them:
+ long version = 0;
+ final IDAndVersion[] idVersions = new IDAndVersion[TestUtil.nextInt(random, numIDs/2, numIDs*(TEST_NIGHTLY ? 8 : 2))];
+ final Map<String,IDAndVersion> truth = new HashMap<>();
+
+ if (VERBOSE) {
+ System.out.println("TEST: use " + numIDs + " ids; " + idVersions.length + " operations");
+ }
+
+ for(int i=0;i<idVersions.length;i++) {
+
+ if (useMonotonicVersion) {
+ version += TestUtil.nextInt(random, 1, 10);
+ } else {
+ version = random.nextLong() & 0x3fffffffffffffffL;
+ }
+
+ idVersions[i] = new IDAndVersion();
+ idVersions[i].id = ids[random.nextInt(numIDs)];
+ idVersions[i].version = version;
+ // 20% of the time we delete:
+ idVersions[i].delete = random.nextInt(5) == 2;
+ IDAndVersion curVersion = truth.get(idVersions[i].id);
+ if (curVersion == null || idVersions[i].version > curVersion.version) {
+ // Save highest version per id:
+ truth.put(idVersions[i].id, idVersions[i]);
+ }
+ }
+
+ // Shuffle
+ for(int i = idVersions.length - 1; i > 0; i--) {
+ int index = random.nextInt(i + 1);
+ IDAndVersion x = idVersions[index];
+ idVersions[index] = idVersions[i];
+ idVersions[i] = x;
+ }
+
+ if (VERBOSE) {
+ for(IDAndVersion idVersion : idVersions) {
+ System.out.println("id=" + idVersion.id + " version=" + idVersion.version + " delete?=" + idVersion.delete + " truth?=" + (truth.get(idVersion.id) == idVersion));
+ }
+ }
+
+ final AtomicInteger upto = new AtomicInteger();
+ final CountDownLatch startingGun = new CountDownLatch(1);
+ Thread[] threads = new Thread[TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 20 : 5)];
+ final long startTime = System.nanoTime();
+ for(int i=0;i<threads.length;i++) {
+ final int threadID = i;
+ threads[i] = new Thread() {
+ @Override
+ public void run() {
+ try {
+ //final Random threadRandom = RandomizedContext.current().getRandom();
+ final Random threadRandom = getRandom();
+ startingGun.await();
+ while (true) {
+
+ // TODO: sometimes use bulk:
+
+ int index = upto.getAndIncrement();
+ if (index >= idVersions.length) {
+ break;
+ }
+ if (VERBOSE && index % 100 == 0) {
+ System.out.println(Thread.currentThread().getName() + ": index=" + index);
+ }
+ IDAndVersion idVersion = idVersions[index];
+
+ String id = idVersion.id;
+ idVersion.threadID = threadID;
+ idVersion.indexStartTime = System.nanoTime()-startTime;
+ long version = idVersion.version;
+ if (idVersion.delete) {
+ try {
+ idVersion.response = client().prepareDelete("test", "type", id)
+ .setVersion(version)
+ .setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ } catch (VersionConflictEngineException vcee) {
+ // OK: our version is too old
+ assertThat(version, lessThanOrEqualTo(truth.get(id).version));
+ idVersion.versionConflict = true;
+ }
+ } else {
+ for (int x=0;x<2;x++) {
+ // Try create first:
+
+ IndexRequest.OpType op;
+ if (x == 0) {
+ op = IndexRequest.OpType.CREATE;
+ } else {
+ op = IndexRequest.OpType.INDEX;
+ }
+
+ // index document
+ try {
+ idVersion.response = client().prepareIndex("test", "type", id)
+ .setSource("foo", "bar")
+ .setOpType(op)
+ .setVersion(version)
+ .setVersionType(VersionType.EXTERNAL).execute().actionGet();
+ break;
+ } catch (DocumentAlreadyExistsException daee) {
+ if (x == 0) {
+ // OK: id was already indexed by another thread, now use index:
+ idVersion.alreadyExists = true;
+ } else {
+ // Should not happen with op=INDEX:
+ throw daee;
+ }
+ } catch (VersionConflictEngineException vcee) {
+ // OK: our version is too old
+ assertThat(version, lessThanOrEqualTo(truth.get(id).version));
+ idVersion.versionConflict = true;
+ }
+ }
+ }
+ idVersion.indexFinishTime = System.nanoTime()-startTime;
+
+ if (threadRandom.nextInt(100) == 7) {
+ System.out.println(threadID + ": TEST: now refresh at " + (System.nanoTime()-startTime));
+ refresh();
+ System.out.println(threadID + ": TEST: refresh done at " + (System.nanoTime()-startTime));
+ }
+ if (threadRandom.nextInt(100) == 7) {
+ System.out.println(threadID + ": TEST: now flush at " + (System.nanoTime()-startTime));
+ try {
+ flush();
+ } catch (FlushNotAllowedEngineException fnaee) {
+ // OK
+ }
+ System.out.println(threadID + ": TEST: flush done at " + (System.nanoTime()-startTime));
+ }
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ };
+ threads[i].start();
+ }
+
+ startingGun.countDown();
+ for(Thread thread : threads) {
+ thread.join();
+ }
+
+ // Verify against truth:
+ boolean failed = false;
+ for(String id : ids) {
+ long expected;
+ IDAndVersion idVersion = truth.get(id);
+ if (idVersion != null && idVersion.delete == false) {
+ expected = idVersion.version;
+ } else {
+ expected = -1;
+ }
+ long actualVersion = client().prepareGet("test", "type", id).execute().actionGet().getVersion();
+ if (actualVersion != expected) {
+ System.out.println("FAILED: idVersion=" + idVersion + " actualVersion=" + actualVersion);
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ System.out.println("All versions:");
+ for(int i=0;i<idVersions.length;i++) {
+ System.out.println("i=" + i + " " + idVersions[i]);
+ }
+ fail("wrong versions for some IDs");
+ }
+ }
+
+ @Test
+ @Slow
+ public void testDeleteNotLost() throws Exception {
+
+ // We require only one shard for this test, so that the 2nd delete provokes pruning the deletes map:
+ client()
+ .admin()
+ .indices()
+ .prepareCreate("test")
+ .setSettings(Settings.settingsBuilder()
+ .put("index.number_of_shards", 1))
+ .execute().
+ actionGet();
+
+ ensureGreen();
+
+ HashMap<String,Object> newSettings = new HashMap<>();
+ newSettings.put("index.gc_deletes", "10ms");
+ newSettings.put("index.refresh_interval", "-1");
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(newSettings)
+ .execute()
+ .actionGet();
+
+ // Index a doc:
+ client()
+ .prepareIndex("test", "type", "id")
+ .setSource("foo", "bar")
+ .setOpType(IndexRequest.OpType.INDEX)
+ .setVersion(10)
+ .setVersionType(VersionType.EXTERNAL)
+ .execute()
+ .actionGet();
+
+ if (randomBoolean()) {
+ // Force refresh so the add is sometimes visible in the searcher:
+ refresh();
+ }
+
+ // Delete it
+ client()
+ .prepareDelete("test", "type", "id")
+ .setVersion(11)
+ .setVersionType(VersionType.EXTERNAL)
+ .execute()
+ .actionGet();
+
+ // Real-time get should reflect delete:
+ assertThat("doc should have been deleted",
+ client()
+ .prepareGet("test", "type", "id")
+ .execute()
+ .actionGet()
+ .getVersion(),
+ equalTo(-1L));
+
+ // ThreadPool.estimatedTimeInMillis has default granularity of 200 msec, so we must sleep at least that long; sleep much longer in
+ // case system is busy:
+ Thread.sleep(1000);
+
+ // Delete an unrelated doc (provokes pruning deletes from versionMap)
+ client()
+ .prepareDelete("test", "type", "id2")
+ .setVersion(11)
+ .setVersionType(VersionType.EXTERNAL)
+ .execute()
+ .actionGet();
+
+ // Real-time get should still reflect delete:
+ assertThat("doc should have been deleted",
+ client()
+ .prepareGet("test", "type", "id")
+ .execute()
+ .actionGet()
+ .getVersion(),
+ equalTo(-1L));
+ }
+
+ @Test
+ public void testGCDeletesZero() throws Exception {
+
+ createIndex("test");
+ ensureGreen();
+
+ // We test deletes, but can't rely on wall-clock delete GC:
+ HashMap<String,Object> newSettings = new HashMap<>();
+ newSettings.put("index.gc_deletes", "0ms");
+ client()
+ .admin()
+ .indices()
+ .prepareUpdateSettings("test")
+ .setSettings(newSettings)
+ .execute()
+ .actionGet();
+
+ // Index a doc:
+ client()
+ .prepareIndex("test", "type", "id")
+ .setSource("foo", "bar")
+ .setOpType(IndexRequest.OpType.INDEX)
+ .setVersion(10)
+ .setVersionType(VersionType.EXTERNAL)
+ .execute()
+ .actionGet();
+
+ if (randomBoolean()) {
+ // Force refresh so the add is sometimes visible in the searcher:
+ refresh();
+ }
+
+ // Delete it
+ client()
+ .prepareDelete("test", "type", "id")
+ .setVersion(11)
+ .setVersionType(VersionType.EXTERNAL)
+ .execute()
+ .actionGet();
+
+ // Real-time get should reflect delete even though index.gc_deletes is 0:
+ assertThat("doc should have been deleted",
+ client()
+ .prepareGet("test", "type", "id")
+ .execute()
+ .actionGet()
+ .getVersion(),
+ equalTo(-1L));
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java b/core/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
new file mode 100644
index 0000000000..c5c2856f3e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/watcher/FileWatcherTest.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.watcher;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.List;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class FileWatcherTest extends ElasticsearchTestCase {
+
+ private class RecordingChangeListener extends FileChangesListener {
+
+ private Path rootDir;
+
+ private RecordingChangeListener(Path rootDir) {
+ this.rootDir = rootDir;
+ }
+
+ private String getRelativeFileName(Path file) {
+ return rootDir.toUri().relativize(file.toUri()).getPath();
+ }
+
+ private List<String> notifications = newArrayList();
+
+ @Override
+ public void onFileInit(Path file) {
+ notifications.add("onFileInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryInit(Path file) {
+ notifications.add("onDirectoryInit: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileCreated(Path file) {
+ notifications.add("onFileCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileDeleted(Path file) {
+ notifications.add("onFileDeleted: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onFileChanged(Path file) {
+ notifications.add("onFileChanged: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryCreated(Path file) {
+ notifications.add("onDirectoryCreated: " + getRelativeFileName(file));
+ }
+
+ @Override
+ public void onDirectoryDeleted(Path file) {
+ notifications.add("onDirectoryDeleted: " + getRelativeFileName(file));
+ }
+
+ public List<String> notifications() {
+ return notifications;
+ }
+ }
+
+ @Test
+ public void testSimpleFileOperations() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testFile = tempDir.resolve("test.txt");
+ touch(testFile);
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(equalTo("onFileInit: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ append("Test", testFile, Charset.defaultCharset());
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileChanged: test.txt")));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ Files.delete(testFile);
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(equalTo("onFileDeleted: test.txt")));
+
+ }
+
+ @Test
+ public void testSimpleDirectoryOperations() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testDir = tempDir.resolve("test-dir");
+ Files.createDirectories(testDir);
+ touch(testDir.resolve("test.txt"));
+ touch(testDir.resolve("test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onFileInit: test-dir/test.txt"),
+ equalTo("onFileInit: test-dir/test0.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ for (int i = 0; i < 4; i++) {
+ touch(testDir.resolve("test" + i + ".txt"));
+ }
+ // Make sure that first file is modified
+ append("Test", testDir.resolve("test0.txt"), Charset.defaultCharset());
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileChanged: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test3.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ Files.delete(testDir.resolve("test1.txt"));
+ Files.delete(testDir.resolve("test2.txt"));
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test1.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ Files.delete(testDir.resolve("test0.txt"));
+ touch(testDir.resolve("test2.txt"));
+ touch(testDir.resolve("test4.txt"));
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test2.txt"),
+ equalTo("onFileCreated: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+
+ Files.delete(testDir.resolve("test3.txt"));
+ Files.delete(testDir.resolve("test4.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test3.txt"),
+ equalTo("onFileDeleted: test-dir/test4.txt")
+ ));
+
+
+ changes.notifications().clear();
+ if (Files.exists(testDir)) {
+ IOUtils.rm(testDir);
+ }
+ fileWatcher.checkAndNotify();
+
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test.txt"),
+ equalTo("onFileDeleted: test-dir/test2.txt"),
+ equalTo("onDirectoryDeleted: test-dir")
+ ));
+
+ }
+
+ @Test
+ public void testNestedDirectoryOperations() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testDir = tempDir.resolve("test-dir");
+ Files.createDirectories(testDir);
+ touch(testDir.resolve("test.txt"));
+ Files.createDirectories(testDir.resolve("sub-dir"));
+ touch(testDir.resolve("sub-dir/test0.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/test.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create new file in subdirectory
+ touch(testDir.resolve("sub-dir/test1.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Create new subdirectory in subdirectory
+ Files.createDirectories(testDir.resolve("first-level"));
+ touch(testDir.resolve("first-level/file1.txt"));
+ Files.createDirectories(testDir.resolve("first-level/second-level"));
+ touch(testDir.resolve("first-level/second-level/file2.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/first-level/"),
+ equalTo("onFileCreated: test-dir/first-level/file1.txt"),
+ equalTo("onDirectoryCreated: test-dir/first-level/second-level/"),
+ equalTo("onFileCreated: test-dir/first-level/second-level/file2.txt")
+ ));
+
+ changes.notifications().clear();
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), hasSize(0));
+
+ // Delete a directory, check notifications for
+ Path path = testDir.resolve("first-level");
+ if (Files.exists(path)) {
+ IOUtils.rm(path);
+ }
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/first-level/file1.txt"),
+ equalTo("onFileDeleted: test-dir/first-level/second-level/file2.txt"),
+ equalTo("onDirectoryDeleted: test-dir/first-level/second-level"),
+ equalTo("onDirectoryDeleted: test-dir/first-level")
+ ));
+ }
+
+ @Test
+ public void testFileReplacingDirectory() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testDir = tempDir.resolve("test-dir");
+ Files.createDirectories(testDir);
+ Path subDir = testDir.resolve("sub-dir");
+ Files.createDirectories(subDir);
+ touch(subDir.resolve("test0.txt"));
+ touch(subDir.resolve("test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryInit: test-dir/"),
+ equalTo("onDirectoryInit: test-dir/sub-dir/"),
+ equalTo("onFileInit: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileInit: test-dir/sub-dir/test1.txt")
+ ));
+
+ changes.notifications().clear();
+
+ if (Files.exists(subDir)) {
+ IOUtils.rm(subDir);
+ }
+ touch(subDir);
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/sub-dir/test1.txt"),
+ equalTo("onDirectoryDeleted: test-dir/sub-dir"),
+ equalTo("onFileCreated: test-dir/sub-dir")
+ ));
+
+ changes.notifications().clear();
+
+ Files.delete(subDir);
+ Files.createDirectories(subDir);
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/sub-dir/"),
+ equalTo("onDirectoryCreated: test-dir/sub-dir/")
+ ));
+ }
+
+ @Test
+ public void testEmptyDirectory() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testDir = tempDir.resolve("test-dir");
+ Files.createDirectories(testDir);
+ touch(testDir.resolve("test0.txt"));
+ touch(testDir.resolve("test1.txt"));
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ changes.notifications().clear();
+
+ Files.delete(testDir.resolve("test0.txt"));
+ Files.delete(testDir.resolve("test1.txt"));
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileDeleted: test-dir/test0.txt"),
+ equalTo("onFileDeleted: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoDirectoryOnInit() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testDir = tempDir.resolve("test-dir");
+
+ FileWatcher fileWatcher = new FileWatcher(testDir);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ Files.createDirectories(testDir);
+ touch(testDir.resolve("test0.txt"));
+ touch(testDir.resolve("test1.txt"));
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onDirectoryCreated: test-dir/"),
+ equalTo("onFileCreated: test-dir/test0.txt"),
+ equalTo("onFileCreated: test-dir/test1.txt")
+ ));
+ }
+
+ @Test
+ public void testNoFileOnInit() throws IOException {
+ Path tempDir = createTempDir();
+ RecordingChangeListener changes = new RecordingChangeListener(tempDir);
+ Path testFile = tempDir.resolve("testfile.txt");
+
+ FileWatcher fileWatcher = new FileWatcher(testFile);
+ fileWatcher.addListener(changes);
+ fileWatcher.init();
+ assertThat(changes.notifications(), hasSize(0));
+ changes.notifications().clear();
+
+ touch(testFile);
+
+ fileWatcher.checkAndNotify();
+ assertThat(changes.notifications(), contains(
+ equalTo("onFileCreated: testfile.txt")
+ ));
+ }
+
+ static void touch(Path path) throws IOException {
+ Files.newOutputStream(path).close();
+ }
+
+ static void append(String string, Path path, Charset cs) throws IOException {
+ try (BufferedWriter writer = Files.newBufferedWriter(path, cs, StandardOpenOption.APPEND)) {
+ writer.append(string);
+ }
+ }
+} \ No newline at end of file
diff --git a/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java
new file mode 100644
index 0000000000..501289eadc
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/watcher/ResourceWatcherServiceTests.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.watcher;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.Test;
+
+import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
+import static org.hamcrest.Matchers.*;
+
+/**
+ *
+ */
+public class ResourceWatcherServiceTests extends ElasticsearchTestCase {
+
+ @Test
+ public void testSettings() throws Exception {
+ ThreadPool threadPool = new ThreadPool("test");
+
+ // checking the defaults
+ Settings settings = Settings.builder().build();
+ ResourceWatcherService service = new ResourceWatcherService(settings, threadPool);
+ assertThat(service.highMonitor.interval, is(ResourceWatcherService.Frequency.HIGH.interval));
+ assertThat(service.mediumMonitor.interval, is(ResourceWatcherService.Frequency.MEDIUM.interval));
+ assertThat(service.lowMonitor.interval, is(ResourceWatcherService.Frequency.LOW.interval));
+
+ // checking bwc
+ settings = Settings.builder()
+ .put("watcher.interval", "40s") // only applies to medium
+ .build();
+ service = new ResourceWatcherService(settings, threadPool);
+ assertThat(service.highMonitor.interval.millis(), is(timeValueSeconds(5).millis()));
+ assertThat(service.mediumMonitor.interval.millis(), is(timeValueSeconds(40).millis()));
+ assertThat(service.lowMonitor.interval.millis(), is(timeValueSeconds(60).millis()));
+
+ // checking custom
+ settings = Settings.builder()
+ .put("watcher.interval.high", "10s")
+ .put("watcher.interval.medium", "20s")
+ .put("watcher.interval.low", "30s")
+ .build();
+ service = new ResourceWatcherService(settings, threadPool);
+ assertThat(service.highMonitor.interval.millis(), is(timeValueSeconds(10).millis()));
+ assertThat(service.mediumMonitor.interval.millis(), is(timeValueSeconds(20).millis()));
+ assertThat(service.lowMonitor.interval.millis(), is(timeValueSeconds(30).millis()));
+ terminate(threadPool);
+ }
+
+
+ @Test
+ public void testHandle() throws Exception {
+ ThreadPool threadPool = new ThreadPool("test");
+ Settings settings = Settings.builder().build();
+ ResourceWatcherService service = new ResourceWatcherService(settings, threadPool);
+ ResourceWatcher watcher = new ResourceWatcher() {
+ @Override
+ public void init() {
+ }
+
+ @Override
+ public void checkAndNotify() {
+ }
+ };
+
+ // checking default freq
+ WatcherHandle handle = service.add(watcher);
+ assertThat(handle, notNullValue());
+ assertThat(handle.frequency(), equalTo(ResourceWatcherService.Frequency.MEDIUM));
+ assertThat(service.lowMonitor.watchers.size(), is(0));
+ assertThat(service.highMonitor.watchers.size(), is(0));
+ assertThat(service.mediumMonitor.watchers.size(), is(1));
+ handle.stop();
+ assertThat(service.mediumMonitor.watchers.size(), is(0));
+ handle.resume();
+ assertThat(service.mediumMonitor.watchers.size(), is(1));
+ handle.stop();
+
+ // checking custom freq
+ handle = service.add(watcher, ResourceWatcherService.Frequency.HIGH);
+ assertThat(handle, notNullValue());
+ assertThat(handle.frequency(), equalTo(ResourceWatcherService.Frequency.HIGH));
+ assertThat(service.lowMonitor.watchers.size(), is(0));
+ assertThat(service.mediumMonitor.watchers.size(), is(0));
+ assertThat(service.highMonitor.watchers.size(), is(1));
+ handle.stop();
+ assertThat(service.highMonitor.watchers.size(), is(0));
+ handle.resume();
+ assertThat(service.highMonitor.watchers.size(), is(1));
+ terminate(threadPool);
+ }
+}
diff --git a/core/src/test/resources/config/elasticsearch.json b/core/src/test/resources/config/elasticsearch.json
new file mode 100644
index 0000000000..16433a2c88
--- /dev/null
+++ b/core/src/test/resources/config/elasticsearch.json
@@ -0,0 +1,3 @@
+{
+ "json.config.exists" : "true"
+}
diff --git a/core/src/test/resources/config/elasticsearch.properties b/core/src/test/resources/config/elasticsearch.properties
new file mode 100644
index 0000000000..d3f822cafb
--- /dev/null
+++ b/core/src/test/resources/config/elasticsearch.properties
@@ -0,0 +1,2 @@
+
+properties.config.exists: true
diff --git a/core/src/test/resources/config/elasticsearch.yaml b/core/src/test/resources/config/elasticsearch.yaml
new file mode 100644
index 0000000000..b6ebc6bd10
--- /dev/null
+++ b/core/src/test/resources/config/elasticsearch.yaml
@@ -0,0 +1,3 @@
+
+yaml.config.exists: true
+
diff --git a/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff
new file mode 100755
index 0000000000..2ddd985437
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic
new file mode 100755
index 0000000000..4f69807a28
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abbé/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adiós
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+Ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astrix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnes
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croûton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Danaë
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorcée/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Doré/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elysée/M
+Elysees
+Elyse/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidescope
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mananas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+manège/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+oles
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Padrewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+passé/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portière/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protégées
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundris
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllabusss
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikstan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloníki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+tobaggon/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touché
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tuscon/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussuad/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwaivering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valéry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff
new file mode 100755
index 0000000000..2ddd985437
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic
new file mode 100755
index 0000000000..4f69807a28
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abbé/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adiós
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+Ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astrix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnes
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croûton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Danaë
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorcée/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Doré/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elysée/M
+Elysees
+Elyse/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidescope
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mananas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+manège/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+oles
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Padrewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+passé/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portière/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protégées
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundries
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllabuses
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikstan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloníki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+tobaggon/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touché
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tuscon/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussuad/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwaivering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valéry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml
new file mode 100644
index 0000000000..1a91653e56
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US_custom/settings.yml
@@ -0,0 +1,2 @@
+ignore_case: true
+strict_affix_parsing: true \ No newline at end of file
diff --git a/core/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic b/core/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic
new file mode 100755
index 0000000000..4f69807a28
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/no_aff_conf_dir/hunspell/en_US/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abbé/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adiós
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+Ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astrix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnes
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croûton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Danaë
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorcée/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Doré/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elysée/M
+Elysees
+Elyse/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidescope
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mananas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+manège/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+oles
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Padrewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+passé/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portière/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protégées
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundris
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllabuses
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikstan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloníki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+toboggan/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touché
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tuscon/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussuad/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwaivering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valéry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff
new file mode 100755
index 0000000000..2ddd985437
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_AU.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff
new file mode 100755
index 0000000000..2ddd985437
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.aff
@@ -0,0 +1,201 @@
+SET ISO8859-1
+TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ'
+NOSUGGEST !
+
+# ordinal numbers
+COMPOUNDMIN 1
+# only in compounds: 1th, 2th, 3th
+ONLYINCOMPOUND c
+# compound rules:
+# 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.)
+# 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.)
+COMPOUNDRULE 2
+COMPOUNDRULE n*1t
+COMPOUNDRULE n*mp
+WORDCHARS 0123456789
+
+PFX A Y 1
+PFX A 0 re .
+
+PFX I Y 1
+PFX I 0 in .
+
+PFX U Y 1
+PFX U 0 un .
+
+PFX C Y 1
+PFX C 0 de .
+
+PFX E Y 1
+PFX E 0 dis .
+
+PFX F Y 1
+PFX F 0 con .
+
+PFX K Y 1
+PFX K 0 pro .
+
+SFX V N 2
+SFX V e ive e
+SFX V 0 ive [^e]
+
+SFX N Y 3
+SFX N e ion e
+SFX N y ication y
+SFX N 0 en [^ey]
+
+SFX X Y 3
+SFX X e ions e
+SFX X y ications y
+SFX X 0 ens [^ey]
+
+SFX H N 2
+SFX H y ieth y
+SFX H 0 th [^y]
+
+SFX Y Y 1
+SFX Y 0 ly .
+
+SFX G Y 2
+SFX G e ing e
+SFX G 0 ing [^e]
+
+SFX J Y 2
+SFX J e ings e
+SFX J 0 ings [^e]
+
+SFX D Y 4
+SFX D 0 d e
+SFX D y ied [^aeiou]y
+SFX D 0 ed [^ey]
+SFX D 0 ed [aeiou]y
+
+SFX T N 4
+SFX T 0 st e
+SFX T y iest [^aeiou]y
+SFX T 0 est [aeiou]y
+SFX T 0 est [^ey]
+
+SFX R Y 4
+SFX R 0 r e
+SFX R y ier [^aeiou]y
+SFX R 0 er [aeiou]y
+SFX R 0 er [^ey]
+
+SFX Z Y 4
+SFX Z 0 rs e
+SFX Z y iers [^aeiou]y
+SFX Z 0 ers [aeiou]y
+SFX Z 0 ers [^ey]
+
+SFX S Y 4
+SFX S y ies [^aeiou]y
+SFX S 0 s [aeiou]y
+SFX S 0 es [sxzh]
+SFX S 0 s [^sxzhy]
+
+SFX P Y 3
+SFX P y iness [^aeiou]y
+SFX P 0 ness [aeiou]y
+SFX P 0 ness [^y]
+
+SFX M Y 1
+SFX M 0 's .
+
+SFX B Y 3
+SFX B 0 able [^aeiou]
+SFX B 0 able ee
+SFX B e able [^aeiou]e
+
+SFX L Y 1
+SFX L 0 ment .
+
+REP 88
+REP a ei
+REP ei a
+REP a ey
+REP ey a
+REP ai ie
+REP ie ai
+REP are air
+REP are ear
+REP are eir
+REP air are
+REP air ere
+REP ere air
+REP ere ear
+REP ere eir
+REP ear are
+REP ear air
+REP ear ere
+REP eir are
+REP eir ere
+REP ch te
+REP te ch
+REP ch ti
+REP ti ch
+REP ch tu
+REP tu ch
+REP ch s
+REP s ch
+REP ch k
+REP k ch
+REP f ph
+REP ph f
+REP gh f
+REP f gh
+REP i igh
+REP igh i
+REP i uy
+REP uy i
+REP i ee
+REP ee i
+REP j di
+REP di j
+REP j gg
+REP gg j
+REP j ge
+REP ge j
+REP s ti
+REP ti s
+REP s ci
+REP ci s
+REP k cc
+REP cc k
+REP k qu
+REP qu k
+REP kw qu
+REP o eau
+REP eau o
+REP o ew
+REP ew o
+REP oo ew
+REP ew oo
+REP ew ui
+REP ui ew
+REP oo ui
+REP ui oo
+REP ew u
+REP u ew
+REP oo u
+REP u oo
+REP u oe
+REP oe u
+REP u ieu
+REP ieu u
+REP ue ew
+REP ew ue
+REP uff ough
+REP oo ieu
+REP ieu oo
+REP ier ear
+REP ear ier
+REP ear air
+REP air ear
+REP w qu
+REP qu w
+REP z ss
+REP ss z
+REP shun tion
+REP shun sion
+REP shun cion
diff --git a/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic
new file mode 100755
index 0000000000..4f69807a28
--- /dev/null
+++ b/core/src/test/resources/indices/analyze/two_aff_conf_dir/hunspell/en_US/en_US.dic
@@ -0,0 +1,62120 @@
+62118
+0/nm
+1/n1
+2/nm
+3/nm
+4/nm
+5/nm
+6/nm
+7/nm
+8/nm
+9/nm
+0th/pt
+1st/p
+1th/tc
+2nd/p
+2th/tc
+3rd/p
+3th/tc
+4th/pt
+5th/pt
+6th/pt
+7th/pt
+8th/pt
+9th/pt
+a
+A
+AA
+AAA
+Aachen/M
+aardvark/SM
+Aaren/M
+Aarhus/M
+Aarika/M
+Aaron/M
+AB
+aback
+abacus/SM
+abaft
+Abagael/M
+Abagail/M
+abalone/SM
+abandoner/M
+abandon/LGDRS
+abandonment/SM
+abase/LGDSR
+abasement/S
+abaser/M
+abashed/UY
+abashment/MS
+abash/SDLG
+abate/DSRLG
+abated/U
+abatement/MS
+abater/M
+abattoir/SM
+Abba/M
+Abbe/M
+abbé/S
+abbess/SM
+Abbey/M
+abbey/MS
+Abbie/M
+Abbi/M
+Abbot/M
+abbot/MS
+Abbott/M
+abbr
+abbrev
+abbreviated/UA
+abbreviates/A
+abbreviate/XDSNG
+abbreviating/A
+abbreviation/M
+Abbye/M
+Abby/M
+ABC/M
+Abdel/M
+abdicate/NGDSX
+abdication/M
+abdomen/SM
+abdominal/YS
+abduct/DGS
+abduction/SM
+abductor/SM
+Abdul/M
+ab/DY
+abeam
+Abelard/M
+Abel/M
+Abelson/M
+Abe/M
+Aberdeen/M
+Abernathy/M
+aberrant/YS
+aberrational
+aberration/SM
+abet/S
+abetted
+abetting
+abettor/SM
+Abeu/M
+abeyance/MS
+abeyant
+Abey/M
+abhorred
+abhorrence/MS
+abhorrent/Y
+abhorrer/M
+abhorring
+abhor/S
+abidance/MS
+abide/JGSR
+abider/M
+abiding/Y
+Abidjan/M
+Abie/M
+Abigael/M
+Abigail/M
+Abigale/M
+Abilene/M
+ability/IMES
+abjection/MS
+abjectness/SM
+abject/SGPDY
+abjuration/SM
+abjuratory
+abjurer/M
+abjure/ZGSRD
+ablate/VGNSDX
+ablation/M
+ablative/SY
+ablaze
+abler/E
+ables/E
+ablest
+able/U
+abloom
+ablution/MS
+Ab/M
+ABM/S
+abnegate/NGSDX
+abnegation/M
+Abner/M
+abnormality/SM
+abnormal/SY
+aboard
+abode/GMDS
+abolisher/M
+abolish/LZRSDG
+abolishment/MS
+abolitionism/SM
+abolitionist/SM
+abolition/SM
+abominable
+abominably
+abominate/XSDGN
+abomination/M
+aboriginal/YS
+aborigine/SM
+Aborigine/SM
+aborning
+abortionist/MS
+abortion/MS
+abortiveness/M
+abortive/PY
+abort/SRDVG
+Abo/SM!
+abound/GDS
+about/S
+aboveboard
+aboveground
+above/S
+abracadabra/S
+abrader/M
+abrade/SRDG
+Abraham/M
+Abrahan/M
+Abra/M
+Abramo/M
+Abram/SM
+Abramson/M
+Abran/M
+abrasion/MS
+abrasiveness/S
+abrasive/SYMP
+abreaction/MS
+abreast
+abridge/DSRG
+abridged/U
+abridger/M
+abridgment/SM
+abroad
+abrogate/XDSNG
+abrogation/M
+abrogator/SM
+abruptness/SM
+abrupt/TRYP
+ABS
+abscess/GDSM
+abscissa/SM
+abscission/SM
+absconder/M
+abscond/SDRZG
+abseil/SGDR
+absence/SM
+absenteeism/SM
+absentee/MS
+absentia/M
+absentmindedness/S
+absentminded/PY
+absent/SGDRY
+absinthe/SM
+abs/M
+absoluteness/SM
+absolute/NPRSYTX
+absolution/M
+absolutism/MS
+absolutist/SM
+absolve/GDSR
+absolver/M
+absorb/ASGD
+absorbed/U
+absorbency/MS
+absorbent/MS
+absorber/SM
+absorbing/Y
+absorption/MS
+absorptive
+absorptivity/M
+abstainer/M
+abstain/GSDRZ
+abstemiousness/MS
+abstemious/YP
+abstention/SM
+abstinence/MS
+abstinent/Y
+abstractedness/SM
+abstracted/YP
+abstracter/M
+abstractionism/M
+abstractionist/SM
+abstraction/SM
+abstractness/SM
+abstractor/MS
+abstract/PTVGRDYS
+abstruseness/SM
+abstruse/PRYT
+absurdity/SM
+absurdness/SM
+absurd/PRYST
+Abuja
+abundance/SM
+abundant/Y
+abused/E
+abuse/GVZDSRB
+abuser/M
+abuses/E
+abusing/E
+abusiveness/SM
+abusive/YP
+abut/LS
+abutment/SM
+abutted
+abutter/MS
+abutting
+abuzz
+abysmal/Y
+abyssal
+Abyssinia/M
+Abyssinian
+abyss/SM
+AC
+acacia/SM
+academe/MS
+academia/SM
+academical/Y
+academicianship
+academician/SM
+academic/S
+academy/SM
+Acadia/M
+acanthus/MS
+Acapulco/M
+accede/SDG
+accelerated/U
+accelerate/NGSDXV
+accelerating/Y
+acceleration/M
+accelerator/SM
+accelerometer/SM
+accented/U
+accent/SGMD
+accentual/Y
+accentuate/XNGSD
+accentuation/M
+acceptability/SM
+acceptability's/U
+acceptableness/SM
+acceptable/P
+acceptably/U
+acceptance/SM
+acceptant
+acceptation/SM
+accepted/Y
+accepter/M
+accepting/PY
+acceptor/MS
+accept/RDBSZVG
+accessed/A
+accessibility/IMS
+accessible/IU
+accessibly/I
+accession/SMDG
+accessors
+accessory/SM
+access/SDMG
+accidence/M
+accidentalness/M
+accidental/SPY
+accident/MS
+acclaimer/M
+acclaim/SDRG
+acclamation/MS
+acclimate/XSDGN
+acclimation/M
+acclimatisation
+acclimatise/DG
+acclimatization/AMS
+acclimatized/U
+acclimatize/RSDGZ
+acclimatizes/A
+acclivity/SM
+accolade/GDSM
+accommodated/U
+accommodate/XVNGSD
+accommodating/Y
+accommodation/M
+accommodativeness/M
+accommodative/P
+accompanied/U
+accompanier/M
+accompaniment/MS
+accompanist/SM
+accompany/DRSG
+accomplice/MS
+accomplished/U
+accomplisher/M
+accomplishment/SM
+accomplish/SRDLZG
+accordance/SM
+accordant/Y
+accorder/M
+according/Y
+accordionist/SM
+accordion/MS
+accord/SZGMRD
+accost/SGD
+accountability/MS
+accountability's/U
+accountableness/M
+accountable/U
+accountably/U
+accountancy/SM
+accountant/MS
+account/BMDSGJ
+accounted/U
+accounting/M
+accouter/GSD
+accouterments
+accouterment's
+accoutrement/M
+Accra/M
+accreditation/SM
+accredited/U
+accredit/SGD
+accretion/SM
+accrual/MS
+accrue/SDG
+acct
+acculturate/XSDVNG
+acculturation/M
+accumulate/VNGSDX
+accumulation/M
+accumulativeness/M
+accumulative/YP
+accumulator/MS
+accuracy/IMS
+accurate/IY
+accurateness/SM
+accursedness/SM
+accursed/YP
+accusal/M
+accusation/SM
+accusative/S
+accusatory
+accused/M
+accuser/M
+accuse/SRDZG
+accusing/Y
+accustomedness/M
+accustomed/P
+accustom/SGD
+ac/DRG
+aced/M
+acerbate/DSG
+acerbic
+acerbically
+acerbity/MS
+ace/SM
+acetaminophen/S
+acetate/MS
+acetic
+acetone/SM
+acetonic
+acetylene/MS
+Acevedo/M
+Achaean/M
+Achebe/M
+ached/A
+ache/DSG
+achene/SM
+Achernar/M
+aches/A
+Acheson/M
+achievable/U
+achieved/UA
+achieve/LZGRSDB
+achievement/SM
+achiever/M
+Achilles
+aching/Y
+achoo
+achromatic
+achy/TR
+acidic
+acidification/M
+acidify/NSDG
+acidity/SM
+acidness/M
+acidoses
+acidosis/M
+acid/SMYP
+acidulous
+acing/M
+Ackerman/M
+acknowledgeable
+acknowledgedly
+acknowledged/U
+acknowledge/GZDRS
+acknowledger/M
+acknowledgment/SAM
+ACLU
+Ac/M
+ACM
+acme/SM
+acne/MDS
+acolyte/MS
+Aconcagua/M
+aconite/MS
+acorn/SM
+Acosta/M
+acoustical/Y
+acoustician/M
+acoustic/S
+acoustics/M
+acquaintance/MS
+acquaintanceship/S
+acquainted/U
+acquaint/GASD
+acquiesce/GSD
+acquiescence/SM
+acquiescent/Y
+acquirable
+acquire/ASDG
+acquirement/SM
+acquisition's/A
+acquisition/SM
+acquisitiveness/MS
+acquisitive/PY
+acquit/S
+acquittal/MS
+acquittance/M
+acquitted
+acquitter/M
+acquitting
+acreage/MS
+acre/MS
+acridity/MS
+acridness/SM
+acrid/TPRY
+acrimoniousness/MS
+acrimonious/YP
+acrimony/MS
+acrobatically
+acrobatic/S
+acrobatics/M
+acrobat/SM
+acronym/SM
+acrophobia/SM
+Acropolis/M
+acropolis/SM
+across
+acrostic/SM
+Acrux/M
+acrylate/M
+acrylic/S
+ACT
+Actaeon/M
+Acta/M
+ACTH
+acting/S
+actinic
+actinide/SM
+actinium/MS
+actinometer/MS
+action/DMSGB
+actions/AI
+action's/IA
+activate/AXCDSNGI
+activated/U
+activation/AMCI
+activator/SM
+active/APY
+actively/I
+activeness/MS
+actives
+activism/MS
+activist/MS
+activities/A
+activity/MSI
+Acton/M
+actor/MAS
+actress/SM
+act's
+Acts
+act/SADVG
+actuality/SM
+actualization/MAS
+actualize/GSD
+actualizes/A
+actual/SY
+actuarial/Y
+actuary/MS
+actuate/GNXSD
+actuation/M
+actuator/SM
+acuity/MS
+acumen/SM
+acupressure/S
+acupuncture/SM
+acupuncturist/S
+acuteness/MS
+acute/YTSRP
+acyclic
+acyclically
+acyclovir/S
+AD
+adage/MS
+adagio/S
+Adah/M
+Adair/M
+Adaline/M
+Ada/M
+adamant/SY
+Adamo/M
+Adam/SM
+Adamson/M
+Adana/M
+Adan/M
+adaptability/MS
+adaptable/U
+adaptation/MS
+adaptedness/M
+adapted/P
+adapter/M
+adapting/A
+adaption
+adaptively
+adaptiveness/M
+adaptive/U
+adaptivity
+adapt/SRDBZVG
+Adara/M
+ad/AS
+ADC
+Adda/M
+Addams
+addenda
+addend/SM
+addendum/M
+adder/M
+Addia/M
+addiction/MS
+addictive/P
+addict/SGVD
+Addie/M
+Addi/M
+Addison/M
+additional/Y
+addition/MS
+additive/YMS
+additivity
+addle/GDS
+addressability
+addressable/U
+addressed/A
+addressee/SM
+addresser/M
+addresses/A
+address/MDRSZGB
+Addressograph/M
+adduce/GRSD
+adducer/M
+adduct/DGVS
+adduction/M
+adductor/M
+Addy/M
+add/ZGBSDR
+Adelaida/M
+Adelaide/M
+Adela/M
+Adelbert/M
+Adele/M
+Adelheid/M
+Adelice/M
+Adelina/M
+Adelind/M
+Adeline/M
+Adella/M
+Adelle/M
+Adel/M
+Ade/M
+Adena/M
+Adenauer/M
+adenine/SM
+Aden/M
+adenoidal
+adenoid/S
+adeptness/MS
+adept/RYPTS
+adequacy/IMS
+adequate/IPY
+adequateness's/I
+adequateness/SM
+Adey/M
+Adham/M
+Adhara/M
+adherence/SM
+adherent/YMS
+adherer/M
+adhere/ZGRSD
+adhesion/MS
+adhesiveness/MS
+adhesive/PYMS
+adiabatic
+adiabatically
+Adiana/M
+Adidas/M
+adieu/S
+Adi/M
+Adina/M
+adiós
+adipose/S
+Adirondack/SM
+adj
+adjacency/MS
+adjacent/Y
+adjectival/Y
+adjective/MYS
+adjoin/SDG
+adjoint/M
+adjourn/DGLS
+adjournment/SM
+adjudge/DSG
+adjudicate/VNGXSD
+adjudication/M
+adjudicator/SM
+adjudicatory
+adjunct/VSYM
+adjuration/SM
+adjure/GSD
+adjustable/U
+adjustably
+adjust/DRALGSB
+adjusted/U
+adjuster's/A
+adjuster/SM
+adjustive
+adjustment/MAS
+adjustor's
+adjutant/SM
+Adkins/M
+Adlai/M
+Adler/M
+adman/M
+admen
+administer/GDJS
+administrable
+administrate/XSDVNG
+administration/M
+administrative/Y
+administrator/MS
+administratrix/M
+admirableness/M
+admirable/P
+admirably
+admiral/SM
+admiralty/MS
+Admiralty/S
+admiration/MS
+admirer/M
+admire/RSDZBG
+admiring/Y
+admissibility/ISM
+admissible/I
+admissibly
+admission/AMS
+admit/AS
+admittance/MS
+admitted/A
+admittedly
+admitting/A
+admix/SDG
+admixture/SM
+Adm/M
+Ad/MN
+admonisher/M
+admonish/GLSRD
+admonishing/Y
+admonishment/SM
+admonition/MS
+admonitory
+adobe/MS
+adolescence/MS
+adolescent/SYM
+Adolf/M
+Adolfo/M
+Adolphe/M
+Adolph/M
+Adolpho/M
+Adolphus/M
+Ado/M
+ado/MS
+Adonis/SM
+adopted/AU
+adopter/M
+adoption/MS
+adoptive/Y
+adopt/RDSBZVG
+adopts/A
+adorableness/SM
+adorable/P
+adorably
+Adora/M
+adoration/SM
+adore/DSRGZB
+Adoree/M
+Adore/M
+adorer/M
+adoring/Y
+adorned/U
+Adorne/M
+adornment/SM
+adorn/SGLD
+ADP
+Adrea/M
+adrenalin
+adrenaline/MS
+Adrenalin/MS
+adrenal/YS
+Adria/MX
+Adriana/M
+Adriane/M
+Adrian/M
+Adrianna/M
+Adrianne/M
+Adriano/M
+Adriatic
+Adriena/M
+Adrien/M
+Adrienne/M
+adrift
+adroitness/MS
+adroit/RTYP
+ads
+ad's
+adsorbate/M
+adsorbent/S
+adsorb/GSD
+adsorption/MS
+adsorptive/Y
+adulate/GNDSX
+adulation/M
+adulator/SM
+adulatory
+adulterant/SM
+adulterated/U
+adulterate/NGSDX
+adulteration/M
+adulterer/SM
+adulteress/MS
+adulterous/Y
+adultery/SM
+adulthood/MS
+adult/MYPS
+adultness/M
+adumbrate/XSDVGN
+adumbration/M
+adumbrative/Y
+adv
+advance/DSRLZG
+advancement/MS
+advancer/M
+advantage/GMEDS
+advantageous/EY
+advantageousness/M
+Adventist/M
+adventist/S
+adventitiousness/M
+adventitious/PY
+adventive/Y
+Advent/SM
+advent/SVM
+adventurer/M
+adventuresome
+adventure/SRDGMZ
+adventuress/SM
+adventurousness/SM
+adventurous/YP
+adverbial/MYS
+adverb/SM
+adversarial
+adversary/SM
+adverse/DSRPYTG
+adverseness/MS
+adversity/SM
+advert/GSD
+advertised/U
+advertise/JGZSRDL
+advertisement/SM
+advertiser/M
+advertising/M
+advertorial/S
+advice/SM
+Advil/M
+advisability/SIM
+advisable/I
+advisableness/M
+advisably
+advisedly/I
+advised/YU
+advisee/MS
+advisement/MS
+adviser/M
+advise/ZRSDGLB
+advisor/S
+advisor's
+advisory/S
+advocacy/SM
+advocate/NGVDS
+advocation/M
+advt
+adze's
+adz/MDSG
+Aegean
+aegis/SM
+Aelfric/M
+Aeneas
+Aeneid/M
+aeolian
+Aeolus/M
+aeon's
+aerate/XNGSD
+aeration/M
+aerator/MS
+aerialist/MS
+aerial/SMY
+Aeriela/M
+Aeriell/M
+Aeriel/M
+aerie/SRMT
+aeroacoustic
+aerobatic/S
+aerobically
+aerobic/S
+aerodrome/SM
+aerodynamically
+aerodynamic/S
+aerodynamics/M
+aeronautical/Y
+aeronautic/S
+aeronautics/M
+aerosolize/D
+aerosol/MS
+aerospace/SM
+Aeschylus/M
+Aesculapius/M
+Aesop/M
+aesthete/S
+aesthetically
+aestheticism/MS
+aesthetics/M
+aesthetic/U
+aether/M
+aetiology/M
+AF
+AFAIK
+afar/S
+AFB
+AFC
+AFDC
+affability/MS
+affable/TR
+affably
+affair/SM
+affectation/MS
+affectedness/EM
+affected/UEYP
+affect/EGSD
+affecter/M
+affecting/Y
+affectionate/UY
+affectioned
+affection/EMS
+affectioning
+affective/MY
+afferent/YS
+affiance/GDS
+affidavit/SM
+affiliated/U
+affiliate/EXSDNG
+affiliation/EM
+affine
+affinity/SM
+affirm/ASDG
+affirmation/SAM
+affirmative/SY
+affix/SDG
+afflatus/MS
+afflict/GVDS
+affliction/SM
+afflictive/Y
+affluence/SM
+affluent/YS
+afford/DSBG
+afforest/A
+afforestation/SM
+afforested
+afforesting
+afforests
+affray/MDSG
+affricate/VNMS
+affrication/M
+affricative/M
+affright
+affront/GSDM
+Afghani/SM
+Afghanistan/M
+afghan/MS
+Afghan/SM
+aficionado/MS
+afield
+afire
+aflame
+afloat
+aflutter
+afoot
+afore
+aforementioned
+aforesaid
+aforethought/S
+afoul
+Afr
+afraid/U
+afresh
+Africa/M
+African/MS
+Afrikaans/M
+Afrikaner/SM
+afro
+Afrocentric
+Afrocentrism/S
+Afro/MS
+afterbirth/M
+afterbirths
+afterburner/MS
+aftercare/SM
+aftereffect/MS
+afterglow/MS
+afterimage/MS
+afterlife/M
+afterlives
+aftermath/M
+aftermaths
+aftermost
+afternoon/SM
+aftershave/S
+aftershock/SM
+afters/M
+aftertaste/SM
+afterthought/MS
+afterward/S
+afterworld/MS
+Afton/M
+aft/ZR
+Agace/M
+again
+against
+Agamemnon/M
+agapae
+agape/S
+agar/MS
+Agassiz/M
+Agata/M
+agate/SM
+Agatha/M
+Agathe/M
+agave/SM
+agedness/M
+aged/PY
+age/GJDRSMZ
+ageism/S
+ageist/S
+agelessness/MS
+ageless/YP
+agency/SM
+agenda/MS
+agent/AMS
+agented
+agenting
+agentive
+ageratum/M
+Aggie/M
+Aggi/M
+agglomerate/XNGVDS
+agglomeration/M
+agglutinate/VNGXSD
+agglutination/M
+agglutinin/MS
+aggrandize/LDSG
+aggrandizement/SM
+aggravate/SDNGX
+aggravating/Y
+aggravation/M
+aggregated/U
+aggregate/EGNVD
+aggregately
+aggregateness/M
+aggregates
+aggregation/SM
+aggregative/Y
+aggression/SM
+aggressively
+aggressiveness/S
+aggressive/U
+aggressor/MS
+aggrieved/Y
+aggrieve/GDS
+Aggy/SM
+aghast
+agile/YTR
+agility/MS
+agitated/Y
+agitate/XVNGSD
+agitation/M
+agitator/SM
+agitprop/MS
+Aglaia/M
+agleam
+aglitter
+aglow
+Ag/M
+Agna/M
+Agnella/M
+Agnese/M
+Agnes/M
+Agnesse/M
+Agneta/M
+Agnew/M
+Agni/M
+Agnola/M
+agnosticism/MS
+agnostic/SM
+ago
+agog
+agonizedly/S
+agonized/Y
+agonize/ZGRSD
+agonizing/Y
+agony/SM
+agoraphobia/MS
+agoraphobic/S
+Agosto/M
+Agra/M
+agrarianism/MS
+agrarian/S
+agreeable/EP
+agreeableness/SME
+agreeably/E
+agreeing/E
+agree/LEBDS
+agreement/ESM
+agreer/S
+Agretha/M
+agribusiness/SM
+Agricola/M
+agriculturalist/S
+agricultural/Y
+agriculture/MS
+agriculturist/SM
+Agrippa/M
+Agrippina/M
+agrochemicals
+agronomic/S
+agronomist/SM
+agronomy/MS
+aground
+Aguascalientes/M
+ague/MS
+Aguie/M
+Aguilar/M
+Aguinaldo/M
+Aguirre/M
+Aguistin/M
+Aguste/M
+Agustin/M
+ah
+Ahab/M
+Aharon/M
+aha/S
+ahead
+ahem/S
+Ahmadabad
+Ahmad/M
+Ahmed/M
+ahoy/S
+Ahriman/M
+AI
+Aida/M
+Aidan/M
+aided/U
+aide/MS
+aider/M
+AIDS
+aid/ZGDRS
+Aigneis/M
+aigrette/SM
+Aiken/M
+Aila/M
+Ailbert/M
+Ailee/M
+Aileen/M
+Aile/M
+Ailene/M
+aileron/MS
+Ailey/M
+Ailina/M
+Aili/SM
+ail/LSDG
+ailment/SM
+Ailsun/M
+Ailyn/M
+Aimee/M
+Aime/M
+aimer/M
+Aimil/M
+aimlessness/MS
+aimless/YP
+aim/ZSGDR
+Aindrea/M
+Ainslee/M
+Ainsley/M
+Ainslie/M
+ain't
+Ainu/M
+airbag/MS
+airbase/S
+airborne
+airbrush/SDMG
+Airbus/M
+airbus/SM
+aircraft/MS
+aircrew/M
+airdrop/MS
+airdropped
+airdropping
+Airedale/SM
+Aires
+airfare/S
+airfield/MS
+airflow/SM
+airfoil/MS
+airframe/MS
+airfreight/SGD
+airhead/MS
+airily
+airiness/MS
+airing/M
+airlessness/S
+airless/P
+airlift/MDSG
+airliner/M
+airline/SRMZ
+airlock/MS
+airmail/DSG
+airman/M
+airmass
+air/MDRTZGJS
+airmen
+airpark
+airplane/SM
+airplay/S
+airport/MS
+airship/MS
+airsickness/SM
+airsick/P
+airspace/SM
+airspeed/SM
+airstrip/MS
+airtightness/M
+airtight/P
+airtime
+airwaves
+airway/SM
+airworthiness/SM
+airworthy/PTR
+airy/PRT
+Aisha/M
+aisle/DSGM
+aitch/MS
+ajar
+Ajax/M
+Ajay/M
+AK
+aka
+Akbar/M
+Akihito/M
+akimbo
+Akim/M
+akin
+Akita/M
+Akkad/M
+Akron/M
+Aksel/M
+AL
+Alabama/M
+Alabaman/S
+Alabamian/MS
+alabaster/MS
+alack/S
+alacrity/SM
+Aladdin/M
+Alaine/M
+Alain/M
+Alair/M
+Alameda/M
+Alamogordo/M
+Alamo/SM
+ala/MS
+Ala/MS
+Alanah/M
+Alana/M
+Aland/M
+Alane/M
+alanine/M
+Alan/M
+Alanna/M
+Alano/M
+Alanson/M
+Alard/M
+Alaric/M
+Alar/M
+alarming/Y
+alarmist/MS
+alarm/SDG
+Alasdair/M
+Alaska/M
+Alaskan/S
+alas/S
+Alastair/M
+Alasteir/M
+Alaster/M
+Alayne/M
+albacore/SM
+alba/M
+Alba/M
+Albania/M
+Albanian/SM
+Albany/M
+albatross/SM
+albedo/M
+Albee/M
+albeit
+Alberich/M
+Alberik/M
+Alberio/M
+Alberta/M
+Albertan/S
+Albertina/M
+Albertine/M
+Albert/M
+Alberto/M
+Albie/M
+Albigensian
+Albina/M
+albinism/SM
+albino/MS
+Albion/M
+Albireo/M
+alb/MS
+Albrecht/M
+albumen/M
+albumin/MS
+albuminous
+album/MNXS
+Albuquerque/M
+Alcatraz/M
+Alcestis/M
+alchemical
+alchemist/SM
+alchemy/MS
+Alcibiades/M
+Alcmena/M
+Alcoa/M
+alcoholically
+alcoholic/MS
+alcoholism/SM
+alcohol/MS
+Alcott/M
+alcove/MSD
+Alcuin/M
+Alcyone/M
+Aldan/M
+Aldebaran/M
+aldehyde/M
+Alden/M
+Alderamin/M
+alderman/M
+aldermen
+alder/SM
+alderwoman
+alderwomen
+Aldin/M
+Aldis/M
+Aldo/M
+Aldon/M
+Aldous/M
+Aldrich/M
+Aldric/M
+Aldridge/M
+Aldrin/M
+Aldus/M
+Aldwin/M
+aleatory
+Alecia/M
+Aleck/M
+Alec/M
+Aleda/M
+alee
+Aleece/M
+Aleen/M
+alehouse/MS
+Aleichem/M
+Alejandra/M
+Alejandrina/M
+Alejandro/M
+Alejoa/M
+Aleksandr/M
+Alembert/M
+alembic/SM
+ale/MVS
+Alena/M
+Alene/M
+aleph/M
+Aleppo/M
+Aler/M
+alerted/Y
+alertness/MS
+alert/STZGPRDY
+Alessandra/M
+Alessandro/M
+Aleta/M
+Alethea/M
+Aleutian/S
+Aleut/SM
+alewife/M
+alewives
+Alexa/M
+Alexander/SM
+Alexandra/M
+Alexandre/M
+Alexandria/M
+Alexandrian/S
+Alexandrina/M
+Alexandr/M
+Alexandro/MS
+Alexei/M
+Alexia/M
+Alexina/M
+Alexine/M
+Alexio/M
+Alexi/SM
+Alex/M
+alfalfa/MS
+Alfa/M
+Alfie/M
+Alfi/M
+Alf/M
+Alfonse/M
+Alfons/M
+Alfonso/M
+Alfonzo/M
+Alford/M
+Alfreda/M
+Alfred/M
+Alfredo/M
+alfresco
+Alfy/M
+algae
+algaecide
+algal
+alga/M
+algebraic
+algebraical/Y
+algebraist/M
+algebra/MS
+Algenib/M
+Algeria/M
+Algerian/MS
+Alger/M
+Algernon/M
+Algieba/M
+Algiers/M
+alginate/SM
+ALGOL
+Algol/M
+Algonquian/SM
+Algonquin/SM
+algorithmic
+algorithmically
+algorithm/MS
+Alhambra/M
+Alhena/M
+Alia/M
+alias/GSD
+alibi/MDSG
+Alica/M
+Alicea/M
+Alice/M
+Alicia/M
+Alick/M
+Alic/M
+Alida/M
+Alidia/M
+Alie/M
+alienable/IU
+alienate/SDNGX
+alienation/M
+alienist/MS
+alien/RDGMBS
+Alighieri/M
+alight/DSG
+aligned/U
+aligner/SM
+align/LASDG
+alignment/SAM
+Alika/M
+Alikee/M
+alikeness/M
+alike/U
+alimentary
+aliment/SDMG
+alimony/MS
+Ali/MS
+Alina/M
+Aline/M
+alinement's
+Alioth/M
+aliquot/S
+Alisa/M
+Alisander/M
+Alisha/M
+Alison/M
+Alissa/M
+Alistair/M
+Alister/M
+Alisun/M
+aliveness/MS
+alive/P
+Alix/M
+aliyah/M
+aliyahs
+Aliza/M
+Alkaid/M
+alkalies
+alkali/M
+alkaline
+alkalinity/MS
+alkalize/SDG
+alkaloid/MS
+alkyd/S
+alkyl/M
+Allahabad/M
+Allah/M
+Alla/M
+Allan/M
+Allard/M
+allay/GDS
+Allayne/M
+Alleen/M
+allegation/SM
+alleged/Y
+allege/SDG
+Allegheny/MS
+allegiance/SM
+allegiant
+allegoric
+allegoricalness/M
+allegorical/YP
+allegorist/MS
+allegory/SM
+Allegra/M
+allegretto/MS
+allegri
+allegro/MS
+allele/SM
+alleluia/S
+allemande/M
+Allendale/M
+Allende/M
+Allene/M
+Allen/M
+Allentown/M
+allergenic
+allergen/MS
+allergic
+allergically
+allergist/MS
+allergy/MS
+alleviate/SDVGNX
+alleviation/M
+alleviator/MS
+Alley/M
+alley/MS
+Alleyn/M
+alleyway/MS
+Allhallows
+alliance/MS
+Allianora/M
+Allie/M
+allier
+allies/M
+alligator/DMGS
+Alli/MS
+Allina/M
+Allin/M
+Allison/M
+Allissa/M
+Allister/M
+Allistir/M
+alliterate/XVNGSD
+alliteration/M
+alliterative/Y
+Allix/M
+allocable/U
+allocatable
+allocate/ACSDNGX
+allocated/U
+allocation/AMC
+allocative
+allocator/AMS
+allophone/MS
+allophonic
+allotment/MS
+allotments/A
+allotrope/M
+allotropic
+allots/A
+allot/SDL
+allotted/A
+allotter/M
+allotting/A
+allover/S
+allowableness/M
+allowable/P
+allowably
+allowance/GSDM
+allowed/Y
+allowing/E
+allow/SBGD
+allows/E
+alloyed/U
+alloy/SGMD
+all/S
+allspice/MS
+Allstate/M
+Allsun/M
+allude/GSD
+allure/GLSD
+allurement/SM
+alluring/Y
+allusion/MS
+allusiveness/MS
+allusive/PY
+alluvial/S
+alluvions
+alluvium/MS
+Allx/M
+ally/ASDG
+Allyce/M
+Ally/MS
+Allyn/M
+Allys
+Allyson/M
+alma
+Almach/M
+Almaden/M
+almagest
+Alma/M
+almanac/MS
+Almaty/M
+Almeda/M
+Almeria/M
+Almeta/M
+almightiness/M
+Almighty/M
+almighty/P
+Almira/M
+Almire/M
+almond/SM
+almoner/MS
+almost
+Al/MRY
+alms/A
+almshouse/SM
+almsman/M
+alnico
+Alnilam/M
+Alnitak/M
+aloe/MS
+aloft
+aloha/SM
+Aloin/M
+Aloise/M
+Aloisia/M
+aloneness/M
+alone/P
+along
+alongshore
+alongside
+Alon/M
+Alonso/M
+Alonzo/M
+aloofness/MS
+aloof/YP
+aloud
+Aloysia/M
+Aloysius/M
+alpaca/SM
+Alpert/M
+alphabetical/Y
+alphabetic/S
+alphabetization/SM
+alphabetizer/M
+alphabetize/SRDGZ
+alphabet/SGDM
+alpha/MS
+alphanumerical/Y
+alphanumeric/S
+Alphard/M
+Alphecca/M
+Alpheratz/M
+Alphonse/M
+Alphonso/M
+Alpine
+alpine/S
+alp/MS
+Alps
+already
+Alric/M
+alright
+Alsace/M
+Alsatian/MS
+also
+Alsop/M
+Alston/M
+Altaic/M
+Altai/M
+Altair/M
+Alta/M
+altar/MS
+altarpiece/SM
+alterable/UI
+alteration/MS
+altercate/NX
+altercation/M
+altered/U
+alternate/SDVGNYX
+alternation/M
+alternativeness/M
+alternative/YMSP
+alternator/MS
+alter/RDZBG
+Althea/M
+although
+altimeter/SM
+Altiplano/M
+altitude/SM
+altogether/S
+Alton/M
+alto/SM
+Altos/M
+altruism/SM
+altruistic
+altruistically
+altruist/SM
+alt/RZS
+ALU
+Aludra/M
+Aluin/M
+Aluino/M
+alumina/SM
+aluminum/MS
+alumnae
+alumna/M
+alumni
+alumnus/MS
+alum/SM
+alundum
+Alva/M
+Alvan/M
+Alvarado/M
+Alvarez/M
+Alvaro/M
+alveolar/Y
+alveoli
+alveolus/M
+Alvera/M
+Alverta/M
+Alvie/M
+Alvina/M
+Alvinia/M
+Alvin/M
+Alvira/M
+Alvis/M
+Alvy/M
+alway/S
+Alwin/M
+Alwyn/M
+Alyce/M
+Alyda/M
+Alyosha/M
+Alysa/M
+Alyse/M
+Alysia/M
+Alys/M
+Alyson/M
+Alyss
+Alyssa/M
+Alzheimer/M
+AM
+AMA
+Amabelle/M
+Amabel/M
+Amadeus/M
+Amado/M
+amain
+Amalea/M
+Amalee/M
+Amaleta/M
+amalgamate/VNGXSD
+amalgamation/M
+amalgam/MS
+Amalia/M
+Amalie/M
+Amalita/M
+Amalle/M
+Amanda/M
+Amandie/M
+Amandi/M
+Amandy/M
+amanuenses
+amanuensis/M
+Amara/M
+amaranth/M
+amaranths
+amaretto/S
+Amargo/M
+Amarillo/M
+amaryllis/MS
+am/AS
+amasser/M
+amass/GRSD
+Amata/M
+amateurishness/MS
+amateurish/YP
+amateurism/MS
+amateur/SM
+Amati/M
+amatory
+amazed/Y
+amaze/LDSRGZ
+amazement/MS
+amazing/Y
+amazonian
+Amazonian
+amazon/MS
+Amazon/SM
+ambassadorial
+ambassador/MS
+ambassadorship/MS
+ambassadress/SM
+ambergris/SM
+Amberly/M
+amber/MS
+Amber/YM
+ambiance/MS
+ambidexterity/MS
+ambidextrous/Y
+ambience's
+ambient/S
+ambiguity/MS
+ambiguously/U
+ambiguousness/M
+ambiguous/YP
+ambition/GMDS
+ambitiousness/MS
+ambitious/PY
+ambit/M
+ambivalence/SM
+ambivalent/Y
+amble/GZDSR
+Amble/M
+ambler/M
+ambrose
+Ambrose/M
+ambrosial/Y
+ambrosia/SM
+Ambrosi/M
+Ambrosio/M
+Ambrosius/M
+Ambros/M
+ambulance/MS
+ambulant/S
+ambulate/DSNGX
+ambulation/M
+ambulatory/S
+Ambur/M
+ambuscade/MGSRD
+ambuscader/M
+ambusher/M
+ambush/MZRSDG
+Amby/M
+Amdahl/M
+ameba's
+Amelia/M
+Amelie/M
+Amelina/M
+Ameline/M
+ameliorate/XVGNSD
+amelioration/M
+Amelita/M
+amenability/SM
+amenably
+amended/U
+amender/M
+amendment/SM
+amen/DRGTSB
+amend/SBRDGL
+amends/M
+Amenhotep/M
+amenity/MS
+amenorrhea/M
+Amerada/M
+Amerasian/S
+amercement/MS
+amerce/SDLG
+Americana/M
+Americanism/SM
+Americanization/SM
+americanized
+Americanize/SDG
+American/MS
+America/SM
+americium/MS
+Amerigo/M
+Amerindian/MS
+Amerind/MS
+Amer/M
+Amery/M
+Ameslan/M
+Ame/SM
+amethystine
+amethyst/MS
+Amharic/M
+Amherst/M
+amiability/MS
+amiableness/M
+amiable/RPT
+amiably
+amicability/SM
+amicableness/M
+amicable/P
+amicably
+amide/SM
+amid/S
+amidships
+amidst
+Amie/M
+Amiga/M
+amigo/MS
+Amii/M
+Amil/M
+Ami/M
+amines
+aminobenzoic
+amino/M
+amir's
+Amish
+amiss
+Amitie/M
+Amity/M
+amity/SM
+Ammamaria/M
+Amman/M
+Ammerman/M
+ammeter/MS
+ammo/MS
+ammoniac
+ammonia/MS
+ammonium/M
+Am/MR
+ammunition/MS
+amnesiac/MS
+amnesia/SM
+amnesic/S
+amnesty/GMSD
+amniocenteses
+amniocentesis/M
+amnion/SM
+amniotic
+Amoco/M
+amoeba/SM
+amoebic
+amoeboid
+amok/MS
+among
+amongst
+Amontillado/M
+amontillado/MS
+amorality/MS
+amoral/Y
+amorousness/SM
+amorous/PY
+amorphousness/MS
+amorphous/PY
+amortization/SUM
+amortized/U
+amortize/SDG
+Amory/M
+Amos
+amount/SMRDZG
+amour/MS
+Amparo/M
+amperage/SM
+Ampere/M
+ampere/MS
+ampersand/MS
+Ampex/M
+amphetamine/MS
+amphibian/SM
+amphibiousness/M
+amphibious/PY
+amphibology/M
+amphitheater/SM
+amphorae
+amphora/M
+ampleness/M
+ample/PTR
+amplification/M
+amplifier/M
+amplify/DRSXGNZ
+amplitude/MS
+ampoule's
+amp/SGMDY
+ampule/SM
+amputate/DSNGX
+amputation/M
+amputee/SM
+Amritsar/M
+ams
+Amsterdam/M
+amt
+Amtrak/M
+amuck's
+amulet/SM
+Amundsen/M
+Amur/M
+amused/Y
+amuse/LDSRGVZ
+amusement/SM
+amuser/M
+amusingness/M
+amusing/YP
+Amway/M
+Amye/M
+amylase/MS
+amyl/M
+Amy/M
+Anabal/M
+Anabaptist/SM
+Anabella/M
+Anabelle/M
+Anabel/M
+anabolic
+anabolism/MS
+anachronism/SM
+anachronistic
+anachronistically
+Anacin/M
+anaconda/MS
+Anacreon/M
+anaerobe/SM
+anaerobic
+anaerobically
+anaglyph/M
+anagrammatic
+anagrammatically
+anagrammed
+anagramming
+anagram/MS
+Anaheim/M
+Analects/M
+analgesia/MS
+analgesic/S
+Analiese/M
+Analise/M
+Anallese/M
+Anallise/M
+analogical/Y
+analogize/SDG
+analogousness/MS
+analogous/YP
+analog/SM
+analogue/SM
+analogy/MS
+anal/Y
+analysand/MS
+analyses
+analysis/AM
+analyst/SM
+analytical/Y
+analyticity/S
+analytic/S
+analytics/M
+analyzable/U
+analyze/DRSZGA
+analyzed/U
+analyzer/M
+Ana/M
+anamorphic
+Ananias/M
+anapaest's
+anapestic/S
+anapest/SM
+anaphora/M
+anaphoric
+anaphorically
+anaplasmosis/M
+anarchic
+anarchical/Y
+anarchism/MS
+anarchistic
+anarchist/MS
+anarchy/MS
+Anastasia/M
+Anastasie/M
+Anastassia/M
+anastigmatic
+anastomoses
+anastomosis/M
+anastomotic
+anathema/MS
+anathematize/GSD
+Anatola/M
+Anatole/M
+Anatolia/M
+Anatolian
+Anatollo/M
+Anatol/M
+anatomic
+anatomical/YS
+anatomist/MS
+anatomize/GSD
+anatomy/MS
+Anaxagoras/M
+Ancell/M
+ancestor/SMDG
+ancestral/Y
+ancestress/SM
+ancestry/SM
+Anchorage/M
+anchorage/SM
+anchored/U
+anchorite/MS
+anchoritism/M
+anchorman/M
+anchormen
+anchorpeople
+anchorperson/S
+anchor/SGDM
+anchorwoman
+anchorwomen
+anchovy/MS
+ancientness/MS
+ancient/SRYTP
+ancillary/S
+an/CS
+Andalusia/M
+Andalusian
+Andaman
+andante/S
+and/DZGS
+Andean/M
+Andeee/M
+Andee/M
+Anderea/M
+Andersen/M
+Anders/N
+Anderson/M
+Andes
+Andie/M
+Andi/M
+andiron/MS
+Andonis/M
+Andorra/M
+Andover/M
+Andra/SM
+Andrea/MS
+Andreana/M
+Andree/M
+Andrei/M
+Andrej/M
+Andre/SM
+Andrew/MS
+Andrey/M
+Andria/M
+Andriana/M
+Andriette/M
+Andris
+androgenic
+androgen/SM
+androgynous
+androgyny/SM
+android/MS
+Andromache/M
+Andromeda/M
+Andropov/M
+Andros/M
+Andrus/M
+Andy/M
+anecdotal/Y
+anecdote/SM
+anechoic
+anemia/SM
+anemically
+anemic/S
+anemometer/MS
+anemometry/M
+anemone/SM
+anent
+aneroid
+Anestassia/M
+anesthesia/MS
+anesthesiologist/MS
+anesthesiology/SM
+anesthetically
+anesthetic/SM
+anesthetist/MS
+anesthetization/SM
+anesthetizer/M
+anesthetize/ZSRDG
+Anet/M
+Anetta/M
+Anette/M
+Anett/M
+aneurysm/MS
+anew
+Angara/M
+Angela/M
+Angeleno/SM
+Angele/SM
+angelfish/SM
+Angelia/M
+angelic
+angelical/Y
+Angelica/M
+angelica/MS
+Angelico/M
+Angelika/M
+Angeli/M
+Angelina/M
+Angeline/M
+Angelique/M
+Angelita/M
+Angelle/M
+Angel/M
+angel/MDSG
+Angelo/M
+Angelou/M
+Ange/M
+anger/GDMS
+Angevin/M
+Angie/M
+Angil/M
+angina/MS
+angiography
+angioplasty/S
+angiosperm/MS
+Angkor/M
+angle/GMZDSRJ
+angler/M
+Angles
+angleworm/MS
+Anglia/M
+Anglicanism/MS
+Anglican/MS
+Anglicism/SM
+Anglicization/MS
+anglicize/SDG
+Anglicize/SDG
+angling/M
+Anglo/MS
+Anglophile/SM
+Anglophilia/M
+Anglophobe/MS
+Anglophobia/M
+Angola/M
+Angolan/S
+angora/MS
+Angora/MS
+angrily
+angriness/M
+angry/RTP
+angst/MS
+Ångström/M
+angstrom/MS
+Anguilla/M
+anguish/DSMG
+angularity/MS
+angular/Y
+Angus/M
+Angy/M
+Anheuser/M
+anhydride/M
+anhydrite/M
+anhydrous/Y
+Aniakchak/M
+Ania/M
+Anibal/M
+Anica/M
+aniline/SM
+animadversion/SM
+animadvert/DSG
+animalcule/MS
+animal/MYPS
+animated/A
+animatedly
+animately/I
+animateness/MI
+animates/A
+animate/YNGXDSP
+animating/A
+animation/AMS
+animator/SM
+animism/SM
+animistic
+animist/S
+animized
+animosity/MS
+animus/SM
+anionic/S
+anion/MS
+aniseed/MS
+aniseikonic
+anise/MS
+anisette/SM
+anisotropic
+anisotropy/MS
+Anissa/M
+Anita/M
+Anitra/M
+Anjanette/M
+Anjela/M
+Ankara/M
+ankh/M
+ankhs
+anklebone/SM
+ankle/GMDS
+anklet/MS
+Annabal/M
+Annabela/M
+Annabella/M
+Annabelle/M
+Annabell/M
+Annabel/M
+Annadiana/M
+Annadiane/M
+Annalee/M
+Annaliese/M
+Annalise/M
+annalist/MS
+annal/MNS
+Anna/M
+Annamaria/M
+Annamarie/M
+Annapolis/M
+Annapurna/M
+anneal/DRSZG
+annealer/M
+Annecorinne/M
+annelid/MS
+Anneliese/M
+Annelise/M
+Anne/M
+Annemarie/M
+Annetta/M
+Annette/M
+annexation/SM
+annexe/M
+annex/GSD
+Annice/M
+Annie/M
+annihilate/XSDVGN
+annihilation/M
+annihilator/MS
+Anni/MS
+Annissa/M
+anniversary/MS
+Ann/M
+Annmaria/M
+Annmarie/M
+Annnora/M
+Annora/M
+annotated/U
+annotate/VNGXSD
+annotation/M
+annotator/MS
+announced/U
+announcement/SM
+announcer/M
+announce/ZGLRSD
+annoyance/MS
+annoyer/M
+annoying/Y
+annoy/ZGSRD
+annualized
+annual/YS
+annuitant/MS
+annuity/MS
+annular/YS
+annuli
+annulled
+annulling
+annulment/MS
+annul/SL
+annulus/M
+annum
+annunciate/XNGSD
+annunciation/M
+Annunciation/S
+annunciator/SM
+Anny/M
+anode/SM
+anodic
+anodize/GDS
+anodyne/SM
+anoint/DRLGS
+anointer/M
+anointment/SM
+anomalousness/M
+anomalous/YP
+anomaly/MS
+anomic
+anomie/M
+anon/S
+anonymity/MS
+anonymousness/M
+anonymous/YP
+anopheles/M
+anorak/SM
+anorectic/S
+anorexia/SM
+anorexic/S
+another/M
+Anouilh/M
+Ansell/M
+Ansel/M
+Anselma/M
+Anselm/M
+Anselmo/M
+Anshan/M
+ANSI/M
+Ansley/M
+ans/M
+Anson/M
+Anstice/M
+answerable/U
+answered/U
+answerer/M
+answer/MZGBSDR
+antacid/MS
+Antaeus/M
+antagonism/MS
+antagonistic
+antagonistically
+antagonist/MS
+antagonized/U
+antagonize/GZRSD
+antagonizing/U
+Antananarivo/M
+antarctic
+Antarctica/M
+Antarctic/M
+Antares
+anteater/MS
+antebellum
+antecedence/MS
+antecedent/SMY
+antechamber/SM
+antedate/GDS
+antediluvian/S
+anteing
+antelope/MS
+ante/MS
+antenatal
+antennae
+antenna/MS
+anterior/SY
+anteroom/SM
+ant/GSMD
+Anthea/M
+Anthe/M
+anthem/MGDS
+anther/MS
+Anthia/M
+Anthiathia/M
+anthill/S
+anthologist/MS
+anthologize/GDS
+anthology/SM
+Anthony/M
+anthraces
+anthracite/MS
+anthrax/M
+anthropic
+anthropocentric
+anthropogenic
+anthropoid/S
+anthropological/Y
+anthropologist/MS
+anthropology/SM
+anthropometric/S
+anthropometry/M
+anthropomorphic
+anthropomorphically
+anthropomorphism/SM
+anthropomorphizing
+anthropomorphous
+antiabortion
+antiabortionist/S
+antiaircraft
+antibacterial/S
+antibiotic/SM
+antibody/MS
+anticancer
+Antichrist/MS
+anticipated/U
+anticipate/XVGNSD
+anticipation/M
+anticipative/Y
+anticipatory
+anticked
+anticking
+anticlerical/S
+anticlimactic
+anticlimactically
+anticlimax/SM
+anticline/SM
+anticlockwise
+antic/MS
+anticoagulant/S
+anticoagulation/M
+anticommunism/SM
+anticommunist/SM
+anticompetitive
+anticyclone/MS
+anticyclonic
+antidemocratic
+antidepressant/SM
+antidisestablishmentarianism/M
+antidote/DSMG
+Antietam/M
+antifascist/SM
+antiformant
+antifreeze/SM
+antifundamentalist/M
+antigenic
+antigenicity/SM
+antigen/MS
+antigone
+Antigone/M
+Antigua/M
+antiheroes
+antihero/M
+antihistamine/MS
+antihistorical
+antiknock/MS
+antilabor
+Antillean
+Antilles
+antilogarithm/SM
+antilogs
+antimacassar/SM
+antimalarial/S
+antimatter/SM
+antimicrobial/S
+antimissile/S
+antimony/SM
+anting/M
+Antin/M
+antinomian
+antinomy/M
+antinuclear
+Antioch/M
+antioxidant/MS
+antiparticle/SM
+Antipas/M
+antipasti
+antipasto/MS
+antipathetic
+antipathy/SM
+antipersonnel
+antiperspirant/MS
+antiphonal/SY
+antiphon/SM
+antipodal/S
+antipodean/S
+antipode/MS
+Antipodes
+antipollution/S
+antipoverty
+antiquarianism/MS
+antiquarian/MS
+antiquary/SM
+antiquate/NGSD
+antiquation/M
+antique/MGDS
+antiquity/SM
+antiredeposition
+antiresonance/M
+antiresonator
+anti/S
+antisemitic
+antisemitism/M
+antisepses
+antisepsis/M
+antiseptically
+antiseptic/S
+antiserum/SM
+antislavery/S
+antisocial/Y
+antispasmodic/S
+antisubmarine
+antisymmetric
+antisymmetry
+antitank
+antitheses
+antithesis/M
+antithetic
+antithetical/Y
+antithyroid
+antitoxin/MS
+antitrust/MR
+antivenin/MS
+antiviral/S
+antivivisectionist/S
+antiwar
+antler/SDM
+Antofagasta/M
+Antoine/M
+Antoinette/M
+Antonella/M
+Antone/M
+Antonetta/M
+Antonia/M
+Antonie/M
+Antonietta/M
+Antoni/M
+Antonina/M
+Antonin/M
+Antonino/M
+Antoninus/M
+Antonio/M
+Antonius/M
+Anton/MS
+Antonovics/M
+Antony/M
+antonymous
+antonym/SM
+antral
+antsy/RT
+Antwan/M
+Antwerp/M
+Anubis/M
+anus/SM
+anvil/MDSG
+anxiety/MS
+anxiousness/SM
+anxious/PY
+any
+Anya/M
+anybody/S
+anyhow
+Any/M
+anymore
+anyone/MS
+anyplace
+anything/S
+anytime
+anyway/S
+anywhere/S
+anywise
+AOL/M
+aorta/MS
+aortic
+AP
+apace
+apache/MS
+Apache/MS
+Apalachicola/M
+apartheid/SM
+apart/LP
+apartment/MS
+apartness/M
+apathetic
+apathetically
+apathy/SM
+apatite/MS
+APB
+aped/A
+apelike
+ape/MDRSG
+Apennines
+aper/A
+aperiodic
+aperiodically
+aperiodicity/M
+aperitif/S
+aperture/MDS
+apex/MS
+aphasia/SM
+aphasic/S
+aphelia
+aphelion/SM
+aphid/MS
+aphonic
+aphorism/MS
+aphoristic
+aphoristically
+aphrodisiac/SM
+Aphrodite/M
+Apia/M
+apiarist/SM
+apiary/SM
+apical/YS
+apices's
+apiece
+apishness/M
+apish/YP
+aplenty
+aplomb/SM
+APO
+Apocalypse/M
+apocalypse/MS
+apocalyptic
+apocryphalness/M
+apocryphal/YP
+apocrypha/M
+Apocrypha/M
+apogee/MS
+apolar
+apolitical/Y
+Apollinaire/M
+Apollonian
+Apollo/SM
+apologetically/U
+apologetic/S
+apologetics/M
+apologia/SM
+apologist/MS
+apologize/GZSRD
+apologizer/M
+apologizes/A
+apologizing/U
+apology/MS
+apoplectic
+apoplexy/SM
+apostasy/SM
+apostate/SM
+apostatize/DSG
+apostleship/SM
+apostle/SM
+apostolic
+apostrophe/SM
+apostrophized
+apothecary/MS
+apothegm/MS
+apotheoses
+apotheosis/M
+apotheosized
+apotheosizes
+apotheosizing
+Appalachia/M
+Appalachian/MS
+appalling/Y
+appall/SDG
+Appaloosa/MS
+appaloosa/S
+appanage/M
+apparatus/SM
+apparel/SGMD
+apparency
+apparently/I
+apparentness/M
+apparent/U
+apparition/SM
+appealer/M
+appealing/UY
+appeal/SGMDRZ
+appear/AEGDS
+appearance/AMES
+appearer/S
+appease/DSRGZL
+appeased/U
+appeasement/MS
+appeaser/M
+appellant/MS
+appellate/VNX
+appellation/M
+appellative/MY
+appendage/MS
+appendectomy/SM
+appendices
+appendicitis/SM
+appendix/SM
+append/SGZDR
+appertain/DSG
+appetite/MVS
+appetizer/SM
+appetizing/YU
+Appia/M
+Appian/M
+applauder/M
+applaud/ZGSDR
+applause/MS
+applecart/M
+applejack/MS
+Apple/M
+apple/MS
+applesauce/SM
+Appleseed/M
+Appleton/M
+applet/S
+appliance/SM
+applicabilities
+applicability/IM
+applicable/I
+applicably
+applicant/MS
+applicate/V
+application/MA
+applicative/Y
+applicator/MS
+applier/SM
+appliquéd
+appliqué/MSG
+apply/AGSDXN
+appointee/SM
+appoint/ELSADG
+appointer/MS
+appointive
+appointment/ASEM
+Appolonia/M
+Appomattox/M
+apportion/GADLS
+apportionment/SAM
+appose/SDG
+appositeness/MS
+apposite/XYNVP
+apposition/M
+appositive/SY
+appraisal/SAM
+appraised/A
+appraisees
+appraiser/M
+appraises/A
+appraise/ZGDRS
+appraising/Y
+appreciable/I
+appreciably/I
+appreciated/U
+appreciate/XDSNGV
+appreciation/M
+appreciativeness/MI
+appreciative/PIY
+appreciator/MS
+appreciatory
+apprehend/DRSG
+apprehender/M
+apprehensible
+apprehension/SM
+apprehensiveness/SM
+apprehensive/YP
+apprentice/DSGM
+apprenticeship/SM
+apprise/DSG
+apprizer/SM
+apprizingly
+apprizings
+approachability/UM
+approachable/UI
+approach/BRSDZG
+approacher/M
+approbate/NX
+approbation/EMS
+appropriable
+appropriated/U
+appropriately/I
+appropriateness/SMI
+appropriate/XDSGNVYTP
+appropriation/M
+appropriator/SM
+approval/ESM
+approve/DSREG
+approved/U
+approver's/E
+approver/SM
+approving/YE
+approx
+approximate/XGNVYDS
+approximation/M
+approximative/Y
+appurtenance/MS
+appurtenant/S
+APR
+apricot/MS
+Aprilette/M
+April/MS
+Apr/M
+apron/SDMG
+apropos
+apse/MS
+apsis/M
+apter
+aptest
+aptitude/SM
+aptness/SMI
+aptness's/U
+apt/UPYI
+Apuleius/M
+aquaculture/MS
+aqualung/SM
+aquamarine/SM
+aquanaut/SM
+aquaplane/GSDM
+aquarium/MS
+Aquarius/MS
+aqua/SM
+aquatically
+aquatic/S
+aquavit/SM
+aqueduct/MS
+aqueous/Y
+aquiculture's
+aquifer/SM
+Aquila/M
+aquiline
+Aquinas/M
+Aquino/M
+Aquitaine/M
+AR
+Arabela/M
+Arabele/M
+Arabella/M
+Arabelle/M
+Arabel/M
+arabesque/SM
+Arabia/M
+Arabian/MS
+Arabic/M
+arability/MS
+Arabist/MS
+arable/S
+Arab/MS
+Araby/M
+Araceli/M
+arachnid/MS
+arachnoid/M
+arachnophobia
+Arafat/M
+Araguaya/M
+Araldo/M
+Aral/M
+Ara/M
+Aramaic/M
+Aramco/M
+Arapahoes
+Arapahoe's
+Arapaho/MS
+Ararat/M
+Araucanian/M
+Arawakan/M
+Arawak/M
+arbiter/MS
+arbitrage/GMZRSD
+arbitrager/M
+arbitrageur/S
+arbitrament/MS
+arbitrarily
+arbitrariness/MS
+arbitrary/P
+arbitrate/SDXVNG
+arbitration/M
+arbitrator/SM
+arbor/DMS
+arboreal/Y
+arbores
+arboretum/MS
+arborvitae/MS
+arbutus/SM
+ARC
+arcade/SDMG
+Arcadia/M
+Arcadian
+arcana/M
+arcane/P
+arc/DSGM
+archaeological/Y
+archaeologist/SM
+archaically
+archaic/P
+Archaimbaud/M
+archaism/SM
+archaist/MS
+archaize/GDRSZ
+archaizer/M
+Archambault/M
+archangel/SM
+archbishopric/SM
+archbishop/SM
+archdeacon/MS
+archdiocesan
+archdiocese/SM
+archduchess/MS
+archduke/MS
+Archean
+archenemy/SM
+archeologist's
+archeology/MS
+archer/M
+Archer/M
+archery/MS
+archetypal
+archetype/SM
+archfiend/SM
+archfool
+Archibald/M
+Archibaldo/M
+Archibold/M
+Archie/M
+archiepiscopal
+Archimedes/M
+arching/M
+archipelago/SM
+architect/MS
+architectonic/S
+architectonics/M
+architectural/Y
+architecture/SM
+architrave/MS
+archival
+archive/DRSGMZ
+archived/U
+archivist/MS
+Arch/MR
+archness/MS
+arch/PGVZTMYDSR
+archway/SM
+Archy/M
+arclike
+ARCO/M
+arcsine
+arctangent
+Arctic/M
+arctic/S
+Arcturus/M
+Ardabil
+Arda/MH
+Ardath/M
+Ardeen/M
+Ardelia/M
+Ardelis/M
+Ardella/M
+Ardelle/M
+ardency/M
+Ardene/M
+Ardenia/M
+Arden/M
+ardent/Y
+Ardine/M
+Ardisj/M
+Ardis/M
+Ardith/M
+ardor/SM
+Ardra/M
+arduousness/SM
+arduous/YP
+Ardyce/M
+Ardys
+Ardyth/M
+areal
+area/SM
+areawide
+are/BS
+Arel/M
+arenaceous
+arena/SM
+aren't
+Arequipa/M
+Ares
+Aretha/M
+Argentina/M
+Argentinean/S
+Argentine/SM
+Argentinian/S
+argent/MS
+arginine/MS
+Argonaut/MS
+argonaut/S
+argon/MS
+Argonne/M
+Argo/SM
+argosy/SM
+argot/SM
+arguable/IU
+arguably/IU
+argue/DSRGZ
+arguer/M
+argumentation/SM
+argumentativeness/MS
+argumentative/YP
+argument/SM
+Argus/M
+argyle/S
+Ariadne/M
+Ariana/M
+Arianism/M
+Arianist/SM
+aria/SM
+Aridatha/M
+aridity/SM
+aridness/M
+arid/TYRP
+Ariela/M
+Ariella/M
+Arielle/M
+Ariel/M
+Arie/SM
+Aries/S
+aright
+Ari/M
+Arin/M
+Ario/M
+Ariosto/M
+arise/GJSR
+arisen
+Aristarchus/M
+Aristides
+aristocracy/SM
+aristocratic
+aristocratically
+aristocrat/MS
+Aristophanes/M
+Aristotelean
+Aristotelian/M
+Aristotle/M
+arithmetical/Y
+arithmetician/SM
+arithmetic/MS
+arithmetize/SD
+Arius/M
+Ariz/M
+Arizona/M
+Arizonan/S
+Arizonian/S
+Arjuna/M
+Arkansan/MS
+Arkansas/M
+Arkhangelsk/M
+Ark/M
+ark/MS
+Arkwright/M
+Arlana/M
+Arlan/M
+Arlee/M
+Arleen/M
+Arlena/M
+Arlene/M
+Arlen/M
+Arleta/M
+Arlette/M
+Arley/M
+Arleyne/M
+Arlie/M
+Arliene/M
+Arlina/M
+Arlinda/M
+Arline/M
+Arlington/M
+Arlin/M
+Arluene/M
+Arly/M
+Arlyne/M
+Arlyn/M
+Armada/M
+armada/SM
+armadillo/MS
+Armageddon/SM
+Armagnac/M
+armament/EAS
+armament's/E
+Armand/M
+Armando/M
+Arman/M
+arm/ASEDG
+Armata/M
+armature/MGSD
+armband/SM
+armchair/MS
+Armco/M
+armed/U
+Armenia/M
+Armenian/MS
+armer/MES
+armful/SM
+armhole/MS
+arming/M
+Arminius/M
+Armin/M
+armistice/MS
+armless
+armlet/SM
+armload/M
+Armonk/M
+armored/U
+armorer/M
+armorial/S
+armory/DSM
+armor/ZRDMGS
+Armour/M
+armpit/MS
+armrest/MS
+arm's
+Armstrong/M
+Ar/MY
+army/SM
+Arnaldo/M
+Arneb/M
+Arne/M
+Arney/M
+Arnhem/M
+Arnie/M
+Arni/M
+Arnold/M
+Arnoldo/M
+Arno/M
+Arnuad/M
+Arnulfo/M
+Arny/M
+aroma/SM
+aromatherapist/S
+aromatherapy/S
+aromatically
+aromaticity/M
+aromaticness/M
+aromatic/SP
+Aron/M
+arose
+around
+arousal/MS
+aroused/U
+arouse/GSD
+ARPA/M
+Arpanet/M
+ARPANET/M
+arpeggio/SM
+arrack/M
+Arragon/M
+arraignment/MS
+arraign/SDGL
+arrangeable/A
+arranged/EA
+arrangement/AMSE
+arranger/M
+arranges/EA
+arrange/ZDSRLG
+arranging/EA
+arrant/Y
+arras/SM
+arrayer
+array/ESGMD
+arrear/SM
+arrest/ADSG
+arrestee/MS
+arrester/MS
+arresting/Y
+arrestor/MS
+Arrhenius/M
+arrhythmia/SM
+arrhythmic
+arrhythmical
+Arri/M
+arrival/MS
+arriver/M
+arrive/SRDG
+arrogance/MS
+arrogant/Y
+arrogate/XNGDS
+arrogation/M
+Arron/M
+arrowhead/SM
+arrowroot/MS
+arrow/SDMG
+arroyo/MS
+arr/TV
+arsenal/MS
+arsenate/M
+arsenic/MS
+arsenide/M
+arsine/MS
+arsonist/MS
+arson/SM
+Artair/M
+Artaxerxes/M
+artefact's
+Arte/M
+Artemas
+Artemis/M
+Artemus/M
+arterial/SY
+arteriolar
+arteriole/SM
+arterioscleroses
+arteriosclerosis/M
+artery/SM
+artesian
+artfulness/SM
+artful/YP
+Arther/M
+arthritic/S
+arthritides
+arthritis/M
+arthrogram/MS
+arthropod/SM
+arthroscope/S
+arthroscopic
+Arthurian
+Arthur/M
+artichoke/SM
+article/GMDS
+articulable/I
+articular
+articulated/EU
+articulately/I
+articulateness/IMS
+articulates/I
+articulate/VGNYXPSD
+articulation/M
+articulator/SM
+articulatory
+Artie/M
+artifact/MS
+artificer/M
+artifice/ZRSM
+artificiality/MS
+artificialness/M
+artificial/PY
+artillerist
+artilleryman/M
+artillerymen
+artillery/SM
+artiness/MS
+artisan/SM
+artiste/SM
+artistically/I
+artistic/I
+artist/MS
+artistry/SM
+artlessness/MS
+artless/YP
+Art/M
+art/SM
+artsy/RT
+Artur/M
+Arturo/M
+Artus/M
+artwork/MS
+Arty/M
+arty/TPR
+Aruba/M
+arum/MS
+Arvie/M
+Arvin/M
+Arv/M
+Arvy/M
+Aryan/MS
+Aryn/M
+as
+As
+A's
+Asa/M
+Asama/M
+asap
+ASAP
+asbestos/MS
+Ascella/M
+ascend/ADGS
+ascendancy/MS
+ascendant/SY
+ascender/SM
+Ascension/M
+ascension/SM
+ascent/SM
+ascertain/DSBLG
+ascertainment/MS
+ascetically
+asceticism/MS
+ascetic/SM
+ASCII
+ascot/MS
+ascribe/GSDB
+ascription/MS
+ascriptive
+Ase/M
+aseptically
+aseptic/S
+asexuality/MS
+asexual/Y
+Asgard/M
+ashame/D
+ashamed/UY
+Ashanti/M
+Ashbey/M
+Ashby/M
+ashcan/SM
+Ashely/M
+Asher/M
+Asheville/M
+Ashia/M
+Ashien/M
+Ashil/M
+Ashkenazim
+Ashkhabad/M
+Ashla/M
+Ashland/M
+Ashlan/M
+ashlar/GSDM
+Ashlee/M
+Ashleigh/M
+Ashlen/M
+Ashley/M
+Ashlie/M
+Ashli/M
+Ashlin/M
+Ashly/M
+ashman/M
+ash/MNDRSG
+Ashmolean/M
+Ash/MRY
+ashore
+ashram/SM
+Ashton/M
+ashtray/MS
+Ashurbanipal/M
+ashy/RT
+Asia/M
+Asian/MS
+Asiatic/SM
+aside/S
+Asilomar/M
+Asimov
+asinine/Y
+asininity/MS
+askance
+ask/DRZGS
+asked/U
+asker/M
+askew/P
+ASL
+aslant
+asleep
+Asmara/M
+asocial/S
+Asoka/M
+asparagus/MS
+aspartame/S
+ASPCA
+aspect/SM
+Aspell/M
+aspen/M
+Aspen/M
+asperity/SM
+asper/M
+aspersion/SM
+asphalt/MDRSG
+asphodel/MS
+asphyxia/MS
+asphyxiate/GNXSD
+asphyxiation/M
+aspic/MS
+Aspidiske/M
+aspidistra/MS
+aspirant/MS
+aspirate/NGDSX
+aspirational
+aspiration/M
+aspirator/SM
+aspire/GSRD
+aspirer/M
+aspirin/SM
+asplenium
+asp/MNRXS
+Asquith/M
+Assad/M
+assailable/U
+assailant/SM
+assail/BGDS
+Assamese/M
+Assam/M
+assassinate/DSGNX
+assassination/M
+assassin/MS
+assaulter/M
+assaultive/YP
+assault/SGVMDR
+assayer/M
+assay/SZGRD
+assemblage/MS
+assemble/ADSREG
+assembled/U
+assembler/EMS
+assemblies/A
+assembly/EAM
+assemblyman/M
+assemblymen
+Assembly/MS
+assemblywoman
+assemblywomen
+assent/SGMRD
+assert/ADGS
+asserter/MS
+assertional
+assertion/AMS
+assertiveness/SM
+assertive/PY
+assess/BLSDG
+assessed/A
+assesses/A
+assessment/SAM
+assessor/MS
+asset/SM
+asseverate/XSDNG
+asseveration/M
+asshole/MS!
+assiduity/SM
+assiduousness/SM
+assiduous/PY
+assign/ALBSGD
+assignation/MS
+assigned/U
+assignee/MS
+assigner/MS
+assignment/MAS
+assignor/MS
+assigns/CU
+assimilate/VNGXSD
+assimilationist/M
+assimilation/M
+Assisi/M
+assistance/SM
+assistantship/SM
+assistant/SM
+assisted/U
+assister/M
+assist/RDGS
+assize/MGSD
+ass/MNS
+assn
+assoc
+associable
+associated/U
+associate/SDEXNG
+associateship
+associational
+association/ME
+associative/Y
+associativity/S
+associator/MS
+assonance/SM
+assonant/S
+assorter/M
+assort/LRDSG
+assortment/SM
+asst
+assuaged/U
+assuage/SDG
+assumability
+assumer/M
+assume/SRDBJG
+assuming/UA
+assumption/SM
+assumptive
+assurance/AMS
+assure/AGSD
+assuredness/M
+assured/PYS
+assurer/SM
+assuring/YA
+Assyria/M
+Assyrian/SM
+Assyriology/M
+Astaire/SM
+Astarte/M
+astatine/MS
+aster/ESM
+asteria
+asterisked/U
+asterisk/SGMD
+astern
+asteroidal
+asteroid/SM
+asthma/MS
+asthmatic/S
+astigmatic/S
+astigmatism/SM
+astir
+astonish/GSDL
+astonishing/Y
+astonishment/SM
+Aston/M
+Astoria/M
+Astor/M
+astounding/Y
+astound/SDG
+astraddle
+Astrakhan/M
+astrakhan/SM
+astral/SY
+Astra/M
+astray
+astride
+Astrid/M
+astringency/SM
+astringent/YS
+Astrix/M
+astrolabe/MS
+astrologer/MS
+astrological/Y
+astrologist/M
+astrology/SM
+astronautical
+astronautic/S
+astronautics/M
+astronaut/SM
+astronomer/MS
+astronomic
+astronomical/Y
+astronomy/SM
+astrophysical
+astrophysicist/SM
+astrophysics/M
+Astroturf/M
+AstroTurf/S
+Asturias/M
+astuteness/MS
+astute/RTYP
+Asunción/M
+asunder
+Aswan/M
+asylum/MS
+asymmetric
+asymmetrical/Y
+asymmetry/MS
+asymptomatic
+asymptomatically
+asymptote/MS
+asymptotically
+asymptotic/Y
+asynchronism/M
+asynchronous/Y
+asynchrony
+at
+Atacama/M
+Atahualpa/M
+Atalanta/M
+Atari/M
+Atatürk/M
+atavism/MS
+atavistic
+atavist/MS
+ataxia/MS
+ataxic/S
+atelier/SM
+atemporal
+ate/S
+Athabasca/M
+Athabascan's
+Athabaskan/MS
+Athabaska's
+atheism/SM
+atheistic
+atheist/SM
+Athena/M
+Athene/M
+Athenian/SM
+Athens/M
+atheroscleroses
+atherosclerosis/M
+athirst
+athlete/MS
+athletically
+athleticism/M
+athletic/S
+athletics/M
+athwart
+atilt
+Atkins/M
+Atkinson/M
+Atlanta/M
+Atlante/MS
+atlantes
+Atlantic/M
+Atlantis/M
+atlas/SM
+Atlas/SM
+At/M
+Atman
+ATM/M
+atmosphere/DSM
+atmospherically
+atmospheric/S
+atoll/MS
+atomically
+atomicity/M
+atomic/S
+atomics/M
+atomistic
+atomization/SM
+atomize/GZDRS
+atomizer/M
+atom/SM
+atonality/MS
+atonal/Y
+atone/LDSG
+atonement/SM
+atop
+ATP
+Atreus/M
+atria
+atrial
+Atria/M
+atrium/M
+atrociousness/SM
+atrocious/YP
+atrocity/SM
+atrophic
+atrophy/DSGM
+atropine/SM
+Atropos/M
+Ats
+attach/BLGZMDRS
+attached/UA
+attacher/M
+attaché/S
+attachment/ASM
+attacker/M
+attack/GBZSDR
+attainabilities
+attainability/UM
+attainableness/M
+attainable/U
+attainably/U
+attain/AGSD
+attainder/MS
+attained/U
+attainer/MS
+attainment/MS
+attar/MS
+attempt/ADSG
+attempter/MS
+attendance/MS
+attendant/SM
+attended/U
+attendee/SM
+attender/M
+attend/SGZDR
+attentional
+attentionality
+attention/IMS
+attentiveness/IMS
+attentive/YIP
+attenuated/U
+attenuate/SDXGN
+attenuation/M
+attenuator/MS
+attestation/SM
+attested/U
+attester/M
+attest/GSDR
+Attic
+Attica/M
+attic/MS
+Attila/M
+attire/SDG
+attitude/MS
+attitudinal/Y
+attitudinize/SDG
+Attlee/M
+attn
+Attn
+attorney/SM
+attractant/SM
+attract/BSDGV
+attraction/MS
+attractivenesses
+attractiveness/UM
+attractive/UYP
+attractor/MS
+attributable/U
+attribute/BVNGRSDX
+attributed/U
+attributer/M
+attributional
+attribution/M
+attributive/SY
+attrition/MS
+Attucks
+attune/SDG
+atty
+ATV/S
+atwitter
+Atwood/M
+atypical/Y
+Aube/M
+Auberge/M
+aubergine/MS
+Auberon/M
+Auberta/M
+Aubert/M
+Aubine/M
+Aubree/M
+Aubrette/M
+Aubrey/M
+Aubrie/M
+Aubry/M
+auburn/SM
+Auckland/M
+auctioneer/SDMG
+auction/MDSG
+audaciousness/SM
+audacious/PY
+audacity/MS
+Auden/M
+audibility/MSI
+audible/I
+audibles
+audibly/I
+Audie/M
+audience/MS
+Audi/M
+audiogram/SM
+audiological
+audiologist/MS
+audiology/SM
+audiometer/MS
+audiometric
+audiometry/M
+audiophile/SM
+audio/SM
+audiotape/S
+audiovisual/S
+audited/U
+audition/MDSG
+auditorium/MS
+auditor/MS
+auditory/S
+audit/SMDVG
+Audra/M
+Audre/M
+Audrey/M
+Audrie/M
+Audrye/M
+Audry/M
+Audubon/M
+Audy/M
+Auerbach/M
+Augean
+auger/SM
+aught/S
+Augie/M
+Aug/M
+augmentation/SM
+augmentative/S
+augment/DRZGS
+augmenter/M
+augur/GDMS
+augury/SM
+Augusta/M
+Augustan/S
+Auguste/M
+Augustina/M
+Augustine/M
+Augustinian/S
+Augustin/M
+augustness/SM
+Augusto/M
+August/SM
+august/STPYR
+Augustus/M
+Augy/M
+auk/MS
+Au/M
+Aundrea/M
+auntie/MS
+aunt/MYS
+aunty's
+aural/Y
+Aura/M
+aura/SM
+Aurea/M
+Aurelea/M
+Aurelia/M
+Aurelie/M
+Aurelio/M
+Aurelius/M
+Aurel/M
+aureole/GMSD
+aureomycin
+Aureomycin/M
+Auria/M
+auric
+auricle/SM
+auricular
+Aurie/M
+Auriga/M
+Aurilia/M
+Aurlie/M
+Auroora/M
+auroral
+Aurora/M
+aurora/SM
+Aurore/M
+Aurthur/M
+Auschwitz/M
+auscultate/XDSNG
+auscultation/M
+auspice/SM
+auspicious/IPY
+auspiciousnesses
+auspiciousness/IM
+Aussie/MS
+Austen/M
+austereness/M
+austere/TYRP
+austerity/SM
+Austina/M
+Austine/M
+Austin/SM
+austral
+Australasia/M
+Australasian/S
+australes
+Australia/M
+Australian/MS
+Australis/M
+australites
+Australoid
+Australopithecus/M
+Austria/M
+Austrian/SM
+Austronesian
+authentically
+authenticated/U
+authenticate/GNDSX
+authentication/M
+authenticator/MS
+authenticity/MS
+authentic/UI
+author/DMGS
+authoress/S
+authorial
+authoritarianism/MS
+authoritarian/S
+authoritativeness/SM
+authoritative/PY
+authority/SM
+authorization/MAS
+authorize/AGDS
+authorized/U
+authorizer/SM
+authorizes/U
+authorship/MS
+autism/MS
+autistic/S
+autobahn/MS
+autobiographer/MS
+autobiographic
+autobiographical/Y
+autobiography/MS
+autoclave/SDGM
+autocollimator/M
+autocorrelate/GNSDX
+autocorrelation/M
+autocracy/SM
+autocratic
+autocratically
+autocrat/SM
+autodial/R
+autodidact/MS
+autofluorescence
+autograph/MDG
+autographs
+autoignition/M
+autoimmune
+autoimmunity/S
+autoloader
+automaker/S
+automata's
+automate/NGDSX
+automatically
+automatic/S
+automation/M
+automatism/SM
+automatize/DSG
+automaton/SM
+automobile/GDSM
+automorphism/SM
+automotive
+autonavigator/SM
+autonomic/S
+autonomous/Y
+autonomy/MS
+autopilot/SM
+autopsy/MDSG
+autoregressive
+autorepeat/GS
+auto/SDMG
+autostart
+autosuggestibility/M
+autotransformer/M
+autoworker/S
+autumnal/Y
+Autumn/M
+autumn/MS
+aux
+auxiliary/S
+auxin/MS
+AV
+availability/USM
+availableness/M
+available/U
+availably
+avail/BSZGRD
+availing/U
+avalanche/MGSD
+Avalon/M
+Ava/M
+avant
+avarice/SM
+avariciousness/M
+avaricious/PY
+avast/S
+avatar/MS
+avaunt/S
+avdp
+Aveline/M
+Ave/MS
+avenged/U
+avenger/M
+avenge/ZGSRD
+Aventine/M
+Aventino/M
+avenue/MS
+average/DSPGYM
+Averell/M
+Averill/M
+Averil/M
+Avernus/M
+averred
+averrer
+averring
+Averroes/M
+averseness/M
+averse/YNXP
+aversion/M
+avers/V
+avert/GSD
+Averyl/M
+Avery/M
+ave/S
+aves/C
+Avesta/M
+avg
+avian/S
+aviary/SM
+aviate/NX
+aviation/M
+aviator/SM
+aviatrices
+aviatrix/SM
+Avicenna/M
+Avictor/M
+avidity/MS
+avid/TPYR
+Avie/M
+Avigdor/M
+Avignon/M
+Avila/M
+avionic/S
+avionics/M
+Avior/M
+Avis
+avitaminoses
+avitaminosis/M
+Avivah/M
+Aviva/M
+Aviv/M
+avocado/MS
+avocational
+avocation/SM
+Avogadro/M
+avoidable/U
+avoidably/U
+avoidance/SM
+avoider/M
+avoid/ZRDBGS
+avoirdupois/MS
+Avon/M
+avouch/GDS
+avowal/EMS
+avowed/Y
+avower/M
+avow/GEDS
+Avram/M
+Avril/M
+Avrit/M
+Avrom/M
+avuncular
+av/ZR
+AWACS
+await/SDG
+awake/GS
+awakened/U
+awakener/M
+awakening/S
+awaken/SADG
+awarder/M
+award/RDSZG
+awareness/MSU
+aware/TRP
+awash
+away/PS
+aweigh
+awe/SM
+awesomeness/SM
+awesome/PY
+awestruck
+awfuller
+awfullest
+awfulness/SM
+awful/YP
+aw/GD
+awhile/S
+awkwardness/MS
+awkward/PRYT
+awl/MS
+awning/DM
+awn/MDJGS
+awoke
+awoken
+AWOL
+awry/RT
+ax/DRSZGM
+axehead/S
+Axel/M
+Axe/M
+axeman
+axial/Y
+axillary
+axiological/Y
+axiology/M
+axiomatically
+axiomatic/S
+axiomatization/MS
+axiomatize/GDS
+axiom/SM
+axion/SM
+axis/SM
+axle/MS
+axletree/MS
+Ax/M
+axolotl/SM
+axon/SM
+ayah/M
+ayahs
+Ayala/M
+ayatollah
+ayatollahs
+aye/MZRS
+Ayers
+Aylmar/M
+Aylmer/M
+Aymara/M
+Aymer/M
+Ayn/M
+AZ
+azalea/SM
+Azania/M
+Azazel/M
+Azerbaijan/M
+azimuthal/Y
+azimuth/M
+azimuths
+Azores
+Azov/M
+AZT
+Aztecan
+Aztec/MS
+azure/MS
+BA
+Baal/SM
+baa/SDG
+Babara/M
+Babar's
+Babbage/M
+Babbette/M
+Babbie/M
+babbitt/GDS
+Babbitt/M
+babbler/M
+babble/RSDGZ
+Babb/M
+Babcock/M
+Babel/MS
+babel/S
+babe/SM
+Babette/M
+Babita/M
+Babka/M
+baboon/MS
+Bab/SM
+babushka/MS
+babyhood/MS
+babyish
+Babylonia/M
+Babylonian/SM
+Babylon/MS
+babysat
+babysit/S
+babysitter/S
+babysitting
+baby/TDSRMG
+Bacall/M
+Bacardi/M
+baccalaureate/MS
+baccarat/SM
+bacchanalia
+Bacchanalia/M
+bacchanalian/S
+bacchanal/SM
+Bacchic
+Bacchus/M
+bachelorhood/SM
+bachelor/SM
+Bach/M
+bacillary
+bacilli
+bacillus/MS
+backache/SM
+backarrow
+backbencher/M
+backbench/ZR
+backbiter/M
+backbite/S
+backbitten
+backbit/ZGJR
+backboard/SM
+backbone/SM
+backbreaking
+backchaining
+backcloth/M
+backdate/GDS
+backdrop/MS
+backdropped
+backdropping
+backed/U
+backer/M
+backfield/SM
+backfill/SDG
+backfire/GDS
+backgammon/MS
+background/SDRMZG
+back/GZDRMSJ
+backhanded/Y
+backhander/M
+backhand/RDMSZG
+backhoe/S
+backing/M
+backlash/GRSDM
+backless
+backlogged
+backlogging
+backlog/MS
+backorder
+backpacker/M
+backpack/ZGSMRD
+backpedal/DGS
+backplane/MS
+backplate/SM
+backrest/MS
+backscatter/SMDG
+backseat/S
+backside/SM
+backslapper/MS
+backslapping/M
+backslash/DSG
+backslider/M
+backslide/S
+backslid/RZG
+backspace/GSD
+backspin/SM
+backstabber/M
+backstabbing
+backstage
+backstair/S
+backstitch/GDSM
+backstop/MS
+backstopped
+backstopping
+backstreet/M
+backstretch/SM
+backstroke/GMDS
+backtalk/S
+backtrack/SDRGZ
+backup/SM
+Backus/M
+backwardness/MS
+backward/YSP
+backwash/SDMG
+backwater/SM
+backwood/S
+backwoodsman/M
+backwoodsmen
+backyard/MS
+baconer/M
+Bacon/M
+bacon/SRM
+bacterial/Y
+bacteria/MS
+bactericidal
+bactericide/SM
+bacteriologic
+bacteriological
+bacteriologist/MS
+bacteriology/SM
+bacterium/M
+Bactria/M
+badder
+baddest
+baddie/MS
+bade
+Baden/M
+badge/DSRGMZ
+badger/DMG
+badinage/DSMG
+badland/S
+Badlands/M
+badman/M
+badmen
+badminton/MS
+badmouth/DG
+badmouths
+badness/SM
+bad/PSNY
+Baedeker/SM
+Baez/M
+Baffin/M
+bafflement/MS
+baffler/M
+baffle/RSDGZL
+baffling/Y
+bagatelle/MS
+bagel/SM
+bagful/MS
+baggageman
+baggagemen
+baggage/SM
+bagged/M
+bagger/SM
+baggily
+bagginess/MS
+bagging/M
+baggy/PRST
+Baghdad/M
+bagpiper/M
+bagpipe/RSMZ
+Bagrodia/MS
+bag/SM
+baguette/SM
+Baguio/M
+bah
+Baha'i
+Bahama/MS
+Bahamanian/S
+Bahamian/MS
+Baha'ullah
+Bahia/M
+Bahrain/M
+bahs
+Baikal/M
+Bailey/SM
+bail/GSMYDRB
+Bailie/M
+bailiff/SM
+bailiwick/MS
+Baillie/M
+Bail/M
+bailout/MS
+bailsman/M
+bailsmen
+Baily/M
+Baird/M
+bairn/SM
+baiter/M
+bait/GSMDR
+baize/GMDS
+Baja/M
+baked/U
+bakehouse/M
+Bakelite/M
+baker/M
+Baker/M
+Bakersfield/M
+bakery/SM
+bakeshop/S
+bake/ZGJDRS
+baking/M
+baklava/M
+baksheesh/SM
+Baku/M
+Bakunin/M
+balaclava/MS
+balalaika/MS
+balanced/A
+balancedness
+balancer/MS
+balance's
+balance/USDG
+Balanchine/M
+Balboa/M
+balboa/SM
+balcony/MSD
+balderdash/MS
+Balder/M
+baldfaced
+Bald/MR
+baldness/MS
+bald/PYDRGST
+baldric/SM
+Balduin/M
+Baldwin/M
+baldy
+Balearic/M
+baleen/MS
+balefuller
+balefullest
+balefulness/MS
+baleful/YP
+Bale/M
+bale/MZGDRS
+baler/M
+Balfour/M
+Bali/M
+Balinese
+balkanization
+balkanize/DG
+Balkan/SM
+balker/M
+balk/GDRS
+Balkhash/M
+balkiness/M
+balky/PRT
+balladeer/MS
+ballade/MS
+balladry/MS
+ballad/SM
+Ballard/SM
+ballast/SGMD
+ballcock/S
+ballerina/MS
+baller/M
+balletic
+ballet/MS
+ballfields
+ballgame/S
+ball/GZMSDR
+ballistic/S
+ballistics/M
+Ball/M
+balloonist/S
+balloon/RDMZGS
+balloter/M
+ballot/MRDGS
+ballpark/SM
+ballplayer/SM
+ballpoint/SM
+ballroom/SM
+ballsy/TR
+ballyhoo/SGMD
+balminess/SM
+balm/MS
+balmy/PRT
+baloney/SM
+balsam/GMDS
+balsamic
+balsa/MS
+Balthazar/M
+Baltic/M
+Baltimore/M
+Baluchistan/M
+baluster/MS
+balustrade/SM
+Balzac/M
+Ba/M
+Bamako/M
+Bamberger/M
+Bambie/M
+Bambi/M
+bamboo/SM
+bamboozle/GSD
+Bamby/M
+Banach/M
+banality/MS
+banal/TYR
+banana/SM
+Bancroft/M
+bandager/M
+bandage/RSDMG
+bandanna/SM
+bandbox/MS
+bandeau/M
+bandeaux
+band/EDGS
+bander/M
+banding/M
+bandit/MS
+banditry/MS
+bandmaster/MS
+bandoleer/SM
+bandpass
+band's
+bandsman/M
+bandsmen
+bandstand/SM
+bandstop
+Bandung/M
+bandwagon/MS
+bandwidth/M
+bandwidths
+bandy/TGRSD
+banefuller
+banefullest
+baneful/Y
+bane/MS
+Bangalore/M
+banger/M
+bang/GDRZMS
+bangkok
+Bangkok/M
+Bangladeshi/S
+Bangladesh/M
+bangle/MS
+Bangor/M
+Bangui/M
+bani
+banisher/M
+banishment/MS
+banish/RSDGL
+banister/MS
+Banjarmasin/M
+banjoist/SM
+banjo/MS
+Banjul/M
+bankbook/SM
+bankcard/S
+banker/M
+bank/GZJDRMBS
+banking/M
+Bank/MS
+banknote/S
+bankroll/DMSG
+bankruptcy/MS
+bankrupt/DMGS
+Banky/M
+Ban/M
+banned/U
+Banneker/M
+banner/SDMG
+banning/U
+Bannister/M
+bannister's
+bannock/SM
+banns
+banqueter/M
+banquet/SZGJMRD
+banquette/MS
+ban/SGMD
+banshee/MS
+bans/U
+bantam/MS
+bantamweight/MS
+banterer/M
+bantering/Y
+banter/RDSG
+Banting/M
+Bantu/SM
+banyan/MS
+banzai/S
+baobab/SM
+Baotou/M
+baptismal/Y
+baptism/SM
+Baptiste/M
+baptistery/MS
+baptist/MS
+Baptist/MS
+baptistry's
+baptized/U
+baptizer/M
+baptize/SRDZG
+baptizes/U
+Barabbas/M
+Barbabas/M
+Barbabra/M
+Barbadian/S
+Barbados/M
+Barbaraanne/M
+Barbara/M
+Barbarella/M
+barbarianism/MS
+barbarian/MS
+barbaric
+barbarically
+barbarism/MS
+barbarity/SM
+barbarize/SDG
+Barbarossa/M
+barbarousness/M
+barbarous/PY
+Barbary/M
+barb/DRMSGZ
+barbecue/DRSMG
+barbed/P
+Barbee/M
+barbell/SM
+barbel/MS
+Barbe/M
+barbeque's
+barber/DMG
+barbered/U
+Barber/M
+barberry/MS
+barbershop/MS
+Barbette/M
+Barbey/M
+Barbie/M
+Barbi/M
+barbital/M
+barbiturate/MS
+Barbour/M
+Barbra/M
+Barb/RM
+Barbuda/M
+barbwire/SM
+Barby/M
+barcarole/SM
+Barcelona/M
+Barclay/M
+Bardeen/M
+Barde/M
+bardic
+Bard/M
+bard/MDSG
+bareback/D
+barefacedness/M
+barefaced/YP
+barefoot/D
+barehanded
+bareheaded
+barelegged
+bareness/MS
+Barents/M
+bare/YSP
+barfly/SM
+barf/YDSG
+bargainer/M
+bargain/ZGSDRM
+barge/DSGM
+bargeman/M
+bargemen
+bargepole/M
+barhopped
+barhopping
+barhop/S
+Bari/M
+baritone/MS
+barium/MS
+barked/C
+barkeeper/M
+barkeep/SRZ
+barker/M
+Barker/M
+bark/GZDRMS
+Barkley/M
+barks/C
+barleycorn/MS
+barley/MS
+Barlow/M
+barmaid/SM
+barman/M
+barmen
+Bar/MH
+Barnabas
+Barnabe/M
+Barnaby/M
+barnacle/MDS
+Barnard/M
+Barnaul/M
+Barnebas/M
+Barnes
+Barnett/M
+Barney/M
+barnful
+barn/GDSM
+Barnhard/M
+Barnie/M
+Barn/M
+barnsful
+barnstorm/DRGZS
+barnstormer/M
+Barnum/M
+barnyard/MS
+Barny/M
+Baroda/M
+barometer/MS
+barometric
+barometrically
+baronage/MS
+baroness/MS
+baronetcy/SM
+baronet/MS
+baronial
+Baron/M
+baron/SM
+barony/SM
+baroque/SPMY
+barque's
+Barquisimeto/M
+barracker/M
+barrack/SDRG
+barracuda/MS
+barrage/MGSD
+Barranquilla/M
+barred/ECU
+barre/GMDSJ
+barrel/SGMD
+barrenness/SM
+barren/SPRT
+Barrera/M
+Barret/M
+barrette/SM
+Barrett/M
+barricade/SDMG
+Barrie/M
+barrier/MS
+barring/R
+barrio/SM
+Barri/SM
+barrister/MS
+Barr/M
+Barron/M
+barroom/SM
+barrow/MS
+Barry/M
+Barrymore/MS
+bars/ECU
+barstool/SM
+Barstow/M
+Bartel/M
+bartender/M
+bartend/ZR
+barterer/M
+barter/SRDZG
+bar/TGMDRS
+Barthel/M
+Barth/M
+Bartholdi/M
+Bartholemy/M
+Bartholomeo/M
+Bartholomeus/M
+Bartholomew/M
+Bartie/M
+Bartlet/M
+Bartlett/M
+Bart/M
+Bartók/M
+Bartolemo/M
+Bartolomeo/M
+Barton/M
+Bartram/M
+Barty/M
+barycenter
+barycentre's
+barycentric
+Bary/M
+baryon/SM
+Baryram/M
+Baryshnikov/M
+basaltic
+basalt/SM
+basal/Y
+Bascom/M
+bas/DRSTG
+baseball/MS
+baseband
+baseboard/MS
+base/CGRSDL
+baseless
+baseline/SM
+Basel/M
+basely
+Base/M
+baseman/M
+basemen
+basement/CSM
+baseness/MS
+baseplate/M
+base's
+basetting
+bashfulness/MS
+bashful/PY
+bash/JGDSR
+Basho/M
+Basia/M
+BASIC
+basically
+basic/S
+Basie/M
+basilar
+Basile/M
+basilica/SM
+Basilio/M
+basilisk/SM
+Basilius/M
+Basil/M
+basil/MS
+basin/DMS
+basinful/S
+basis/M
+basketball/MS
+basketry/MS
+basket/SM
+basketwork/SM
+bask/GSD
+basophilic
+Basque/SM
+Basra/M
+Basseterre/M
+basset/GMDS
+Bassett/M
+bassinet/SM
+bassist/MS
+Bass/M
+basso/MS
+bassoonist/MS
+bassoon/MS
+bass/SM
+basswood/SM
+bastardization/MS
+bastardized/U
+bastardize/SDG
+bastard/MYS
+bastardy/MS
+baste/NXS
+baster/M
+Bastian/M
+Bastien/M
+Bastille/M
+basting/M
+bastion/DM
+bast/SGZMDR
+Basutoland/M
+Bataan/M
+Batavia/M
+batch/MRSDG
+bated/U
+bate/KGSADC
+bater/AC
+Bates
+bathe
+bather/M
+bathetic
+bathhouse/SM
+bath/JMDSRGZ
+bathmat/S
+Batholomew/M
+bathos/SM
+bathrobe/MS
+bathroom/SDM
+baths
+Bathsheba/M
+bathtub/MS
+bathwater
+bathyscaphe's
+bathysphere/MS
+batik/DMSG
+Batista/M
+batiste/SM
+Bat/M
+batman/M
+Batman/M
+batmen
+baton/SM
+Batsheva/M
+batsman/M
+bat/SMDRG
+batsmen
+battalion/MS
+batted
+batten/SDMG
+batter/SRDZG
+battery/MS
+batting/MS
+battledore/MS
+battledress
+battlefield/SM
+battlefront/SM
+battle/GMZRSDL
+battleground/SM
+Battle/M
+battlement/SMD
+battler/M
+battleship/MS
+batty/RT
+Batu/M
+batwings
+bauble/SM
+Baudelaire/M
+baud/M
+Baudoin/M
+Baudouin/M
+Bauer/M
+Bauhaus/M
+baulk/GSDM
+Bausch/M
+bauxite/SM
+Bavaria/M
+Bavarian/S
+bawdily
+bawdiness/MS
+bawd/SM
+bawdy/PRST
+bawler/M
+bawl/SGDR
+Baxie/M
+Bax/M
+Baxter/M
+Baxy/M
+Bayamon
+Bayard/M
+bayberry/MS
+Bayda/M
+Bayer/M
+Bayes
+Bayesian
+bay/GSMDY
+Baylor/M
+Bay/MR
+bayonet/SGMD
+Bayonne/M
+bayou/MS
+Bayreuth/M
+bazaar/MS
+bazillion/S
+bazooka/MS
+BB
+BBB
+BBC
+bbl
+BBQ
+BBS
+BC
+BCD
+bdrm
+beachcomber/SM
+beachhead/SM
+Beach/M
+beach/MSDG
+beachwear/M
+beacon/DMSG
+beading/M
+Beadle/M
+beadle/SM
+bead/SJGMD
+beadsman/M
+beadworker
+beady/TR
+beagle/SDGM
+beaker/M
+beak/ZSDRM
+Beale/M
+Bealle/M
+Bea/M
+beam/MDRSGZ
+beanbag/SM
+bean/DRMGZS
+beanie/SM
+Bean/M
+beanpole/MS
+beanstalk/SM
+bearable/U
+bearably/U
+beard/DSGM
+bearded/P
+beardless
+Beard/M
+Beardmore/M
+Beardsley/M
+bearer/M
+bearing/M
+bearishness/SM
+bearish/PY
+bearlike
+Bear/M
+Bearnaise/M
+Bearnard/M
+bearskin/MS
+bear/ZBRSJG
+Beasley/M
+beasties
+beastings/M
+beastliness/MS
+beastly/PTR
+beast/SJMY
+beatable/U
+beatably/U
+beaten/U
+beater/M
+beatific
+beatifically
+beatification/M
+beatify/GNXDS
+beating/M
+beatitude/MS
+Beatlemania/M
+Beatles/M
+beatnik/SM
+beat/NRGSBZJ
+Beatrice/M
+Beatrisa/M
+Beatrix/M
+Beatriz/M
+Beauchamps
+Beaufort/M
+Beaujolais/M
+Beau/M
+Beaumarchais/M
+Beaumont/M
+beau/MS
+Beauregard/M
+beauteousness/M
+beauteous/YP
+beautician/MS
+beautification/M
+beautifier/M
+beautifully/U
+beautifulness/M
+beautiful/PTYR
+beautify/SRDNGXZ
+beaut/SM
+beauty/SM
+Beauvoir/M
+beaux's
+beaver/DMSG
+Beaverton/M
+Bebe/M
+bebop/MS
+becalm/GDS
+became
+because
+Becca/M
+Bechtel/M
+Becka/M
+Becker/M
+Becket/M
+Beckett/M
+beck/GSDM
+Beckie/M
+Becki/M
+beckon/SDG
+Beck/RM
+Becky/M
+becloud/SGD
+become/GJS
+becoming/UY
+Becquerel/M
+bedaub/GDS
+bedazzle/GLDS
+bedazzlement/SM
+bedbug/SM
+bedchamber/M
+bedclothes
+bedded
+bedder/MS
+bedding/MS
+bedeck/DGS
+Bede/M
+bedevil/DGLS
+bedevilment/SM
+bedfast
+bedfellow/MS
+Bedford/M
+bedimmed
+bedimming
+bedim/S
+bedizen/DGS
+bedlam/MS
+bedlinen
+bedmaker/SM
+bedmate/MS
+bed/MS
+Bedouin/SM
+bedpan/SM
+bedpost/SM
+bedraggle/GSD
+bedridden
+bedrock/SM
+bedroll/SM
+bedroom/DMS
+bedsheets
+bedside/MS
+bedsit
+bedsitter/M
+bedsore/MS
+bedspread/SM
+bedspring/SM
+bedstead/SM
+bedstraw/M
+bedtime/SM
+Beebe/M
+beebread/MS
+Beecher/M
+beech/MRSN
+beechnut/MS
+beechwood
+beefburger/SM
+beefcake/MS
+beef/GZSDRM
+beefiness/MS
+beefsteak/MS
+beefy/TRP
+beehive/MS
+beekeeper/MS
+beekeeping/SM
+beeline/MGSD
+Beelzebub/M
+Bee/M
+bee/MZGJRS
+been/S
+beeper/M
+beep/GZSMDR
+Beerbohm/M
+beer/M
+beermat/S
+beery/TR
+beeswax/DSMG
+Beethoven/M
+beetle/GMRSD
+Beeton/M
+beetroot/M
+beet/SM
+beeves/M
+befall/SGN
+befell
+befit/SM
+befitted
+befitting/Y
+befogged
+befogging
+befog/S
+before
+beforehand
+befoul/GSD
+befriend/DGS
+befuddle/GLDS
+befuddlement/SM
+began
+beget/S
+begetting
+beggar/DYMSG
+beggarliness/M
+beggarly/P
+beggary/MS
+begged
+begging
+Begin/M
+beginner/MS
+beginning/MS
+begin/S
+begone/S
+begonia/SM
+begot
+begotten
+begrime/SDG
+begrudge/GDRS
+begrudging/Y
+beg/S
+beguilement/SM
+beguiler/M
+beguile/RSDLZG
+beguiling/Y
+beguine/SM
+begum/MS
+begun
+behalf/M
+behalves
+Behan/M
+behave/GRSD
+behavioral/Y
+behaviorism/MS
+behavioristic/S
+behaviorist/S
+behavior/SMD
+behead/GSD
+beheld
+behemoth/M
+behemoths
+behest/SM
+behindhand
+behind/S
+beholder/M
+behold/ZGRNS
+behoofs
+behoove/SDJMG
+behooving/YM
+Behring/M
+Beiderbecke/M
+beige/MS
+Beijing
+Beilul/M
+being/M
+Beirut/M
+Beitris/M
+bejewel/SDG
+Bekesy/M
+Bekki/M
+be/KS
+belabor/MDSG
+Bela/M
+Belarus
+belate/D
+belatedness/M
+belated/PY
+Belau/M
+belay/GSD
+belch/GSD
+beleaguer/GDS
+Belem/M
+Belfast/M
+belfry/SM
+Belgian/MS
+Belgium/M
+Belg/M
+Belgrade/M
+Belia/M
+Belicia/M
+belie
+belief/ESUM
+belier/M
+believability's
+believability/U
+believable/U
+believably/U
+believed/U
+believe/EZGDRS
+believer/MUSE
+believing/U
+Belinda/M
+Belita/M
+belittlement/MS
+belittler/M
+belittle/RSDGL
+Belize/M
+belladonna/MS
+Bella/M
+Bellamy/M
+Bellanca/M
+Bellatrix/M
+bellboy/MS
+belled/A
+Belle/M
+belle/MS
+belletristic
+belletrist/SM
+Belleville/M
+bellflower/M
+bell/GSMD
+bellhop/MS
+bellicoseness/M
+bellicose/YP
+bellicosity/MS
+belligerence/SM
+belligerency/MS
+belligerent/SMY
+Bellina/M
+belling/A
+Bellini/M
+Bell/M
+bellman/M
+bellmen
+Bellovin/M
+bellow/DGS
+Bellow/M
+bellows/M
+bells/A
+bellwether/MS
+Bellwood/M
+bellyacher/M
+bellyache/SRDGM
+bellybutton/MS
+bellyfull
+bellyful/MS
+belly/SDGM
+Bel/M
+Belmont/M
+Belmopan/M
+Beloit/M
+belong/DGJS
+belonging/MP
+Belorussian/S
+Belorussia's
+belove/D
+beloved/S
+below/S
+Belshazzar/M
+belted/U
+belt/GSMD
+belting/M
+Belton/M
+Beltran/M
+Beltsville/M
+beltway/SM
+beluga/SM
+Belushi/M
+Belva/M
+belvedere/M
+Belvia/M
+bely/DSRG
+beman
+Be/MH
+bemire/SDG
+bemoan/GDS
+bemused/Y
+bemuse/GSDL
+bemusement/SM
+Benacerraf/M
+Benares's
+bencher/M
+benchmark/GDMS
+bench/MRSDG
+bend/BUSG
+bended
+Bender/M
+bender/MS
+Bendick/M
+Bendicty/M
+Bendite/M
+Bendix/M
+beneath
+Benedetta/M
+Benedetto/M
+Benedick/M
+Benedicta/M
+Benedictine/MS
+benediction/MS
+Benedict/M
+Benedicto/M
+benedictory
+Benedikta/M
+Benedikt/M
+benefaction/MS
+benefactor/MS
+benefactress/S
+benefice/MGSD
+beneficence/SM
+beneficent/Y
+beneficialness/M
+beneficial/PY
+beneficiary/MS
+benefiter/M
+benefit/SRDMZG
+Benelux/M
+Benet/M
+Benetta/M
+Benetton/M
+benevolence/SM
+benevolentness/M
+benevolent/YP
+Bengali/M
+Bengal/SM
+Benghazi/M
+Bengt/M
+Beniamino/M
+benightedness/M
+benighted/YP
+benignant
+benignity/MS
+benign/Y
+Beninese
+Benin/M
+Benita/M
+Benito/M
+Benjamen/M
+Benjamin/M
+Benjie/M
+Benji/M
+Benjy/M
+Ben/M
+Bennett/M
+Bennie/M
+Benni/M
+Bennington/M
+Benn/M
+Benny/M
+Benoite/M
+Benoit/M
+Benson/M
+Bentham/M
+Bentlee/M
+Bentley/MS
+Bent/M
+Benton/M
+bents
+bent/U
+bentwood/SM
+benumb/SGD
+Benyamin/M
+Benzedrine/M
+benzene/MS
+benzine/SM
+Benz/M
+Beograd's
+Beowulf/M
+bequeath/GSD
+bequeaths
+bequest/MS
+berate/GSD
+Berber/MS
+bereave/GLSD
+bereavement/MS
+bereft
+Berenice/M
+Beret/M
+beret/SM
+Bergen/M
+Bergerac/M
+Berger/M
+Berget/M
+Berglund/M
+Bergman/M
+Berg/NRM
+berg/NRSM
+Bergson/M
+Bergsten/M
+Bergstrom/M
+beribbon/D
+beriberi/SM
+Beringer/M
+Bering/RM
+Berkeley/M
+berkelium/SM
+Berke/M
+Berkie/M
+Berkley/M
+Berkly/M
+Berkowitz/M
+Berkshire/SM
+Berky/M
+Berk/YM
+Berle/M
+Berliner/M
+Berlin/SZRM
+Berlioz/M
+Berlitz/M
+Berman/M
+Ber/MG
+berm/SM
+Bermuda/MS
+Bermudan/S
+Bermudian/S
+Bernadene/M
+Bernadette/M
+Bernadina/M
+Bernadine/M
+Berna/M
+Bernardina/M
+Bernardine/M
+Bernardino/M
+Bernard/M
+Bernardo/M
+Bernarr/M
+Bernays/M
+Bernbach/M
+Bernelle/M
+Berne's
+Bernese
+Bernete/M
+Bernetta/M
+Bernette/M
+Bernhard/M
+Bernhardt/M
+Bernice/M
+Berniece/M
+Bernie/M
+Berni/M
+Bernini/M
+Bernita/M
+Bern/M
+Bernoulli/M
+Bernstein/M
+Berny/M
+Berra/M
+Berrie/M
+Berri/M
+berrylike
+Berry/M
+berry/SDMG
+berserker/M
+berserk/SR
+Berta/M
+Berte/M
+Bertha/M
+Berthe/M
+berth/MDGJ
+berths
+Bertie/M
+Bertillon/M
+Berti/M
+Bertina/M
+Bertine/M
+Bert/M
+Berton/M
+Bertram/M
+Bertrand/M
+Bertrando/M
+Berty/M
+Beryle/M
+beryllium/MS
+Beryl/M
+beryl/SM
+Berzelius/M
+bes
+beseecher/M
+beseeching/Y
+beseech/RSJZG
+beseem/GDS
+beset/S
+besetting
+beside/S
+besieger/M
+besiege/SRDZG
+besmear/GSD
+besmirch/GSD
+besom/GMDS
+besot/S
+besotted
+besotting
+besought
+bespangle/GSD
+bespatter/SGD
+bespeak/SG
+bespectacled
+bespoke
+bespoken
+Bess
+Bessel/M
+Bessemer/M
+Bessie/M
+Bessy/M
+best/DRSG
+bestiality/MS
+bestial/Y
+bestiary/MS
+bestirred
+bestirring
+bestir/S
+Best/M
+bestowal/SM
+bestow/SGD
+bestrew/DGS
+bestrewn
+bestridden
+bestride/SG
+bestrode
+bestseller/MS
+bestselling
+bestubble/D
+betaken
+betake/SG
+beta/SM
+betatron/M
+betcha
+Betelgeuse/M
+betel/MS
+Bethanne/M
+Bethany/M
+bethel/M
+Bethe/M
+Bethena/M
+Bethesda/M
+Bethina/M
+bethink/GS
+Bethlehem/M
+beth/M
+Beth/M
+bethought
+Bethune
+betide/GSD
+betimes
+bet/MS
+betoken/GSD
+betook
+betrayal/SM
+betrayer/M
+betray/SRDZG
+betrothal/SM
+betrothed/U
+betroth/GD
+betroths
+Betsey/M
+Betsy/M
+Betta/M
+Betteanne/M
+Betteann/M
+Bette/M
+betterment/MS
+better/SDLG
+Bettie/M
+Betti/M
+Bettina/M
+Bettine/M
+betting
+bettor/SM
+Bettye/M
+Betty/SM
+betweenness/M
+between/SP
+betwixt
+Beulah/M
+Bevan/M
+bevel/SJGMRD
+beverage/MS
+Beverie/M
+Beverlee/M
+Beverley/M
+Beverlie/M
+Beverly/M
+Bevin/M
+Bevon/M
+Bev's
+Bevvy/M
+bevy/SM
+bewail/GDS
+beware/GSD
+bewhisker/D
+bewigged
+bewildered/PY
+bewildering/Y
+bewilder/LDSG
+bewilderment/SM
+bewitching/Y
+bewitch/LGDS
+bewitchment/SM
+bey/MS
+beyond/S
+bezel/MS
+bf
+B/GT
+Bhopal/M
+Bhutanese
+Bhutan/M
+Bhutto/M
+Bialystok/M
+Bianca/M
+Bianco/M
+Bianka/M
+biannual/Y
+bias/DSMPG
+biased/U
+biathlon/MS
+biaxial/Y
+bibbed
+Bibbie/M
+bibbing
+Bibbye/M
+Bibby/M
+Bibi/M
+bible/MS
+Bible/MS
+biblical/Y
+biblicists
+bibliographer/MS
+bibliographical/Y
+bibliographic/S
+bibliography/MS
+bibliophile/MS
+Bib/M
+bib/MS
+bibulous
+bicameral
+bicameralism/MS
+bicarb/MS
+bicarbonate/MS
+bicentenary/S
+bicentennial/S
+bicep/S
+biceps/M
+bichromate/DM
+bickerer/M
+bickering/M
+bicker/SRDZG
+biconcave
+biconnected
+biconvex
+bicuspid/S
+bicycler/M
+bicycle/RSDMZG
+bicyclist/SM
+biddable
+bidden/U
+bidder/MS
+Biddie/M
+bidding/MS
+Biddle/M
+Biddy/M
+biddy/SM
+bider/M
+bide/S
+bidet/SM
+Bidget/M
+bid/GMRS
+bidiagonal
+bidirectional/Y
+bids/A
+biennial/SY
+biennium/SM
+Bienville/M
+Bierce/M
+bier/M
+bifocal/S
+bifurcate/SDXGNY
+bifurcation/M
+bigamist/SM
+bigamous
+bigamy/SM
+Bigelow/M
+Bigfoot
+bigged
+bigger
+biggest
+biggie/SM
+bigging
+biggish
+bighead/MS
+bigheartedness/S
+bighearted/P
+bighorn/MS
+bight/SMDG
+bigmouth/M
+bigmouths
+bigness/SM
+bigoted/Y
+bigot/MDSG
+bigotry/MS
+big/PYS
+bigwig/MS
+biharmonic
+bijection/MS
+bijective/Y
+bijou/M
+bijoux
+bike/MZGDRS
+biker/M
+bikini/SMD
+Biko/M
+bilabial/S
+bilateralness/M
+bilateral/PY
+bilayer/S
+Bilbao/M
+bilberry/MS
+Bilbo/M
+bile/SM
+bilge/GMDS
+biliary
+Bili/M
+bilinear
+bilingualism/SM
+bilingual/SY
+biliousness/SM
+bilious/P
+bilker/M
+bilk/GZSDR
+billboard/MDGS
+biller/M
+billet/MDGS
+billfold/MS
+billiard/SM
+Billie/M
+Billi/M
+billing/M
+billingsgate/SM
+Billings/M
+billionaire/MS
+billion/SHM
+billionths
+bill/JGZSBMDR
+Bill/JM
+billow/DMGS
+billowy/RT
+billposters
+Billye/M
+Billy/M
+billy/SM
+Bil/MY
+bi/M
+Bi/M
+bimbo/MS
+bimetallic/S
+bimetallism/MS
+Bimini/M
+bimodal
+bimolecular/Y
+bimonthly/S
+binary/S
+binaural/Y
+binder/M
+bindery/MS
+binding/MPY
+bindingness/M
+bind/JDRGZS
+bindle/M
+binds/AU
+bindweed/MS
+binge/MS
+bing/GNDM
+Bingham/M
+Binghamton/M
+Bing/M
+bingo/MS
+Bini/M
+Bink/M
+Binky/M
+binnacle/MS
+binned
+Binnie/M
+Binni/M
+binning
+Binny/M
+binocular/SY
+binodal
+binomial/SYM
+bin/SM
+binuclear
+biochemical/SY
+biochemist/MS
+biochemistry/MS
+biodegradability/S
+biodegradable
+biodiversity/S
+bioengineering/M
+bioethics
+biofeedback/SM
+biographer/M
+biographic
+biographical/Y
+biograph/RZ
+biography/MS
+biog/S
+Bioko/M
+biol
+biological/SY
+biologic/S
+biologist/SM
+biology/MS
+biomass/SM
+biomedical
+biomedicine/M
+biometric/S
+biometrics/M
+biometry/M
+biomolecule/S
+biomorph
+bionically
+bionic/S
+bionics/M
+biophysical/Y
+biophysicist/SM
+biophysic/S
+biophysics/M
+biopic/S
+biopsy/SDGM
+biorhythm/S
+BIOS
+bioscience/S
+biosphere/MS
+biostatistic/S
+biosynthesized
+biotechnological
+biotechnologist
+biotechnology/SM
+biotic
+biotin/SM
+bipartisan
+bipartisanship/MS
+bipartite/YN
+bipartition/M
+bipedal
+biped/MS
+biplane/MS
+bipolar
+bipolarity/MS
+biracial
+Birch/M
+birch/MRSDNG
+birdbath/M
+birdbaths
+birdbrain/SDM
+birdcage/SM
+birder/M
+birdhouse/MS
+birdieing
+Birdie/M
+birdie/MSD
+birdlike
+birdlime/MGDS
+Bird/M
+birdseed/MS
+Birdseye/M
+bird/SMDRGZ
+birdsong
+birdtables
+birdwatch/GZR
+birefringence/M
+birefringent
+biretta/SM
+Birgit/M
+Birgitta/M
+Birkenstock/M
+Birk/M
+Birmingham/M
+Biro/M
+Biron/M
+birthday/SM
+birthmark/MS
+birth/MDG
+birthplace/SM
+birthrate/MS
+birthright/MS
+birth's/A
+births/A
+birthstone/SM
+bis
+Biscay/M
+Biscayne/M
+biscuit/MS
+bisect/DSG
+bisection/MS
+bisector/MS
+biserial
+bisexuality/MS
+bisexual/YMS
+Bishkek
+bishop/DGSM
+Bishop/M
+bishopric/SM
+Bismarck/M
+Bismark/M
+bismuth/M
+bismuths
+bison/M
+bisque/SM
+Bissau/M
+bistable
+bistate
+bistro/SM
+bisyllabic
+bitblt/S
+bitchily
+bitchiness/MS
+bitch/MSDG
+bitchy/PTR
+biter/M
+bite/S
+biting/Y
+bitmap/SM
+bit/MRJSZG
+BITNET/M
+bit's/C
+bits/C
+bitser/M
+bitted
+bitten
+bitterness/SM
+bittern/SM
+bitternut/M
+bitter/PSRDYTG
+bitterroot/M
+bittersweet/YMSP
+bitting
+bitty/PRT
+bitumen/MS
+bituminous
+bitwise
+bivalent/S
+bivalve/MSD
+bivariate
+bivouacked
+bivouacking
+bivouac/MS
+biweekly/S
+biyearly
+bizarreness/M
+bizarre/YSP
+Bizet/M
+biz/M
+bizzes
+Bjorn/M
+bk
+b/KGD
+Bk/M
+blabbed
+blabber/GMDS
+blabbermouth/M
+blabbermouths
+blabbing
+blab/S
+blackamoor/SM
+blackball/SDMG
+blackberry/GMS
+blackbirder/M
+blackbird/SGDRM
+blackboard/SM
+blackbody/S
+Blackburn/M
+blackcurrant/M
+blackener/M
+blacken/GDR
+Blackfeet
+Blackfoot/M
+blackguard/MDSG
+blackhead/SM
+blacking/M
+blackish
+blackjack/SGMD
+blackleg/M
+blacklist/DRMSG
+blackmail/DRMGZS
+blackmailer/M
+Blackman/M
+Blackmer/M
+blackness/MS
+blackout/SM
+Blackpool/M
+Black's
+black/SJTXPYRDNG
+blacksmith/MG
+blacksmiths
+blacksnake/MS
+blackspot
+Blackstone/M
+blackthorn/MS
+blacktop/MS
+blacktopped
+blacktopping
+Blackwell/MS
+bladder/MS
+bladdernut/M
+bladderwort/M
+blade/DSGM
+blah/MDG
+blahs
+Blaine/M
+Blaire/M
+Blair/M
+Blakelee/M
+Blakeley/M
+Blake/M
+Blakey/M
+blame/DSRBGMZ
+blamelessness/SM
+blameless/YP
+blamer/M
+blameworthiness/SM
+blameworthy/P
+Blanca/M
+Blancha/M
+Blanchard/M
+blanch/DRSG
+Blanche/M
+blancher/M
+Blanch/M
+blanc/M
+blancmange/SM
+blandishment/MS
+blandish/SDGL
+blandness/MS
+bland/PYRT
+Blane/M
+Blankenship/M
+blanketing/M
+blanket/SDRMZG
+blankness/MS
+blank/SPGTYRD
+Blanton/M
+Blantyre/M
+blare/DSG
+blarney/DMGS
+blasé
+blasphemer/M
+blaspheme/RSDZG
+blasphemousness/M
+blasphemous/PY
+blasphemy/SM
+blaster/M
+blasting/M
+blastoff/SM
+blast/SMRDGZ
+blatancy/SM
+blatant/YP
+blather/DRGS
+blatting
+Blatz/M
+Blavatsky/M
+Blayne/M
+blaze/DSRGMZ
+blazer/M
+blazing/Y
+blazoner/M
+blazon/SGDR
+bl/D
+bldg
+bleach/DRSZG
+bleached/U
+bleacher/M
+bleakness/MS
+bleak/TPYRS
+blear/GDS
+blearily
+bleariness/SM
+bleary/PRT
+bleater/M
+bleat/RDGS
+bleeder/M
+bleed/ZRJSG
+Bleeker/M
+bleep/GMRDZS
+blemish/DSMG
+blemished/U
+blench/DSG
+blender/M
+blend/GZRDS
+Blenheim/M
+blessedness/MS
+blessed/PRYT
+blessing/M
+bless/JGSD
+Blevins/M
+blew
+Bligh/M
+blighter/M
+blight/GSMDR
+blimey/S
+blimp/MS
+blinded/U
+blinder/M
+blindfold/SDG
+blinding/MY
+blind/JGTZPYRDS
+blindness/MS
+blindside/SDG
+blinker/MDG
+blinking/U
+blink/RDGSZ
+blinks/M
+Blinnie/M
+Blinni/M
+Blinny/M
+blintze/M
+blintz/SM
+blip/MS
+blipped
+blipping
+Blisse/M
+blissfulness/MS
+blissful/PY
+Bliss/M
+bliss/SDMG
+blistering/Y
+blister/SMDG
+blistery
+Blithe/M
+blitheness/SM
+blither/G
+blithesome
+blithe/TYPR
+blitz/GSDM
+blitzkrieg/SM
+blizzard/MS
+bloater/M
+bloat/SRDGZ
+blobbed
+blobbing
+blob/MS
+Bloch/M
+blockader/M
+blockade/ZMGRSD
+blockage/MS
+blockbuster/SM
+blockbusting/MS
+blocker/MS
+blockhead/MS
+blockhouse/SM
+block's
+block/USDG
+blocky/R
+bloc/MS
+Bloemfontein/M
+bloke/SM
+Blomberg/M
+Blomquist/M
+Blondelle/M
+Blondell/M
+blonde's
+Blondie/M
+blondish
+blondness/MS
+blond/SPMRT
+Blondy/M
+bloodbath
+bloodbaths
+bloodcurdling
+bloodhound/SM
+bloodied/U
+bloodiness/MS
+bloodlessness/SM
+bloodless/PY
+bloodletting/MS
+bloodline/SM
+bloodmobile/MS
+bloodroot/M
+bloodshed/SM
+bloodshot
+blood/SMDG
+bloodsport/S
+bloodstain/MDS
+bloodstock/SM
+bloodstone/M
+bloodstream/SM
+bloodsucker/SM
+bloodsucking/S
+bloodthirstily
+bloodthirstiness/MS
+bloodthirsty/RTP
+bloodworm/M
+bloodymindedness
+bloody/TPGDRS
+bloomer/M
+Bloomer/M
+Bloomfield/M
+Bloomington/M
+Bloom/MR
+bloom/SMRDGZ
+blooper/M
+bloop/GSZRD
+blossom/DMGS
+blossomy
+blotch/GMDS
+blotchy/RT
+blot/MS
+blotted
+blotter/MS
+blotting
+blotto
+blouse/GMSD
+blower/M
+blowfish/M
+blowfly/MS
+blowgun/SM
+blow/GZRS
+blowing/M
+blown/U
+blowout/MS
+blowpipe/SM
+blowtorch/SM
+blowup/MS
+blowy/RST
+blowzy/RT
+BLT
+blubber/GSDR
+blubbery
+Blucher/M
+bludgeon/GSMD
+blueback
+Bluebeard/M
+bluebell/MS
+blueberry/SM
+bluebill/M
+bluebird/MS
+bluebonnet/SM
+bluebook/M
+bluebottle/MS
+bluebush
+bluefish/SM
+bluegill/SM
+bluegrass/MS
+blueing's
+blueish
+bluejacket/MS
+bluejeans
+blue/JMYTGDRSP
+blueness/MS
+bluenose/MS
+bluepoint/SM
+blueprint/GDMS
+bluer/M
+bluest/M
+bluestocking/SM
+bluesy/TR
+bluet/MS
+bluffer/M
+bluffness/MS
+bluff/SPGTZYRD
+bluing/M
+bluishness/M
+bluish/P
+Blumenthal/M
+Blum/M
+blunderbuss/MS
+blunderer/M
+blunder/GSMDRJZ
+blundering/Y
+bluntness/MS
+blunt/PSGTYRD
+blurb/GSDM
+blur/MS
+blurred/Y
+blurriness/S
+blurring/Y
+blurry/RPT
+blurt/GSRD
+blusher/M
+blushing/UY
+blush/RSDGZ
+blusterer/M
+blustering/Y
+blusterous
+bluster/SDRZG
+blustery
+blvd
+Blvd
+Blythe/M
+BM
+BMW/M
+BO
+boarded
+boarder/SM
+boardgames
+boardinghouse/SM
+boarding/SM
+board/IS
+boardroom/MS
+board's
+boardwalk/SM
+boar/MS
+boa/SM
+boaster/M
+boastfulness/MS
+boastful/YP
+boast/SJRDGZ
+boatclubs
+boater/M
+boathouse/SM
+boating/M
+boatload/SM
+boatman/M
+boat/MDRGZJS
+boatmen
+boatswain/SM
+boatyard/SM
+bobbed
+Bobbee/M
+Bobbe/M
+Bobbette/M
+Bobbie/M
+Bobbi/M
+bobbing/M
+bobbin/MS
+Bobbitt/M
+bobble/SDGM
+Bobbsey/M
+Bobbye/M
+Bobby/M
+bobby/SM
+bobbysoxer's
+bobcat/MS
+Bobette/M
+Bobina/M
+Bobine/M
+Bobinette/M
+Bob/M
+bobolink/SM
+Bobrow/M
+bobsledded
+bobsledder/MS
+bobsledding/M
+bobsled/MS
+bobsleigh/M
+bobsleighs
+bobs/M
+bob/SM
+bobtail/SGDM
+bobwhite/SM
+Boca/M
+Boccaccio/M
+boccie/SM
+bock/GDS
+bockwurst
+bodega/MS
+Bodenheim/M
+bode/S
+Bodhidharma/M
+bodhisattva
+Bodhisattva/M
+bodice/SM
+bodied/M
+bodiless
+bodily
+boding/M
+bodkin/SM
+bod/SGMD
+bodybuilder/SM
+bodybuilding/S
+body/DSMG
+bodyguard/MS
+bodying/M
+bodysuit/S
+bodyweight
+bodywork/SM
+Boeing/M
+Boeotia/M
+Boeotian
+Boer/M
+Bogartian/M
+Bogart/M
+Bogey/M
+bogeyman/M
+bogeymen
+bogey/SGMD
+bogged
+bogging
+boggle/SDG
+boggling/Y
+boggy/RT
+bogie's
+bog/MS
+Bogotá/M
+bogus
+bogyman
+bogymen
+bogy's
+Boheme/M
+bohemianism/S
+bohemian/S
+Bohemian/SM
+Bohemia/SM
+Bohr/M
+Boigie/M
+boiled/AU
+boiler/M
+boilermaker/MS
+boilerplate/SM
+boil/JSGZDR
+boils/A
+Boise/M
+Bois/M
+boisterousness/MS
+boisterous/YP
+bola/SM
+boldface/SDMG
+boldness/MS
+bold/YRPST
+bole/MS
+bolero/MS
+Boleyn/M
+bolivares
+Bolivar/M
+bolivar/MS
+Bolivia/M
+Bolivian/S
+bollard/SM
+bollix/GSD
+boll/MDSG
+Bologna/M
+bologna/MS
+bolometer/MS
+bolo/MS
+boloney's
+Bolshevik/MS
+Bolshevism/MS
+Bolshevistic/M
+Bolshevist/MS
+Bolshoi/M
+bolsterer/M
+bolster/SRDG
+bolted/U
+bolter/M
+bolt/MDRGS
+Bolton/M
+bolts/U
+Boltzmann/M
+bolus/SM
+bombardier/MS
+bombard/LDSG
+bombardment/SM
+bombastic
+bombastically
+bombast/RMS
+Bombay/M
+bomber/M
+bombproof
+bomb/SGZDRJ
+bombshell/SM
+Bo/MRZ
+bona
+bonanza/MS
+Bonaparte/M
+Bonaventure/M
+bonbon/SM
+bondage/SM
+bonder/M
+bondholder/SM
+Bondie/M
+bond/JMDRSGZ
+Bond/M
+bondman/M
+bondmen
+Bondon/M
+bonds/A
+bondsman/M
+bondsmen
+bondwoman/M
+bondwomen
+Bondy/M
+boned/U
+bonehead/SDM
+boneless
+Bone/M
+bone/MZDRSG
+boner/M
+bonfire/MS
+bong/GDMS
+bongo/MS
+Bonham/M
+bonhomie/MS
+Boniface/M
+boniness/MS
+Bonita/M
+bonito/MS
+bonjour
+bonkers
+Bonnee/M
+Bonner/M
+bonneted/U
+bonnet/SGMD
+Bonneville/M
+Bonnibelle/M
+bonnie
+Bonnie/M
+Bonni/M
+Bonn/RM
+Bonny/M
+bonny/RT
+bonsai/SM
+Bontempo/M
+bonus/SM
+bony/RTP
+bonzes
+boob/DMSG
+booby/SM
+boodle/GMSD
+boogeyman's
+boogieing
+boogie/SD
+boo/GSDH
+boohoo/GDS
+bookbinder/M
+bookbindery/SM
+bookbinding/M
+bookbind/JRGZ
+bookcase/MS
+booked/U
+bookend/SGD
+Booker/M
+book/GZDRMJSB
+bookie/SM
+booking/M
+bookishness/M
+bookish/PY
+bookkeeper/M
+bookkeep/GZJR
+bookkeeping/M
+booklet/MS
+bookmaker/MS
+bookmaking/MS
+bookmark/MDGS
+bookmobile/MS
+bookplate/SM
+bookseller/SM
+bookshelf/M
+bookshelves
+bookshop/MS
+bookstall/MS
+bookstore/SM
+bookwork/M
+bookworm/MS
+Boolean
+boolean/S
+Boole/M
+boom/DRGJS
+boomerang/MDSG
+boomer/M
+boomtown/S
+boondocks
+boondoggle/DRSGZ
+boondoggler/M
+Boone/M
+Boonie/M
+boonies
+boon/MS
+Boony/M
+boorishness/SM
+boorish/PY
+boor/MS
+boosterism
+booster/M
+boost/SGZMRD
+boot/AGDS
+bootblack/MS
+bootee/MS
+Boote/M
+Boötes
+Boothe/M
+booth/M
+Booth/M
+booths
+bootie's
+bootlaces
+bootlegged/M
+bootlegger/SM
+bootlegging/M
+bootleg/S
+Bootle/M
+bootless
+Boot/M
+bootprints
+boot's
+bootstrapped
+bootstrapping
+bootstrap/SM
+booty/SM
+booze/DSRGMZ
+boozer/M
+boozy/TR
+bopped
+bopping
+bop/S
+borate/MSD
+borax/MS
+Bordeaux/M
+bordello/MS
+Borden/M
+borderer/M
+border/JRDMGS
+borderland/SM
+borderline/MS
+Bordie/M
+Bord/MN
+Bordon/M
+Bordy/M
+Borealis/M
+Boreas/M
+boredom/MS
+boreholes
+borer/M
+bore/ZGJDRS
+Borges
+Borgia/M
+Borg/M
+boric
+boring/YMP
+Boris
+Bork/M
+born/AIU
+Borneo/M
+borne/U
+Born/M
+Borodin/M
+boron/SM
+borosilicate/M
+borough/M
+boroughs
+Borroughs/M
+borrower/M
+borrowing/M
+borrow/JZRDGBS
+borscht/SM
+borstal/MS
+Boru/M
+borzoi/MS
+Bosch/M
+Bose/M
+bosh/MS
+Bosnia/M
+Bosnian/S
+bosom's
+bosom/SGUD
+bosomy/RT
+boson/SM
+Bosporus/M
+boss/DSRMG
+bossily
+bossiness/MS
+bossism/MS
+bossy/PTSR
+Bostitch/M
+Bostonian/SM
+Boston/MS
+bosun's
+Boswell/MS
+botanical/SY
+botanic/S
+botanist/SM
+botany/SM
+botcher/M
+botch/SRDGZ
+botfly/M
+bother/DG
+bothersome
+bothy/M
+both/ZR
+bot/S
+Botswana/M
+Botticelli/M
+bottle/GMZSRD
+bottleneck/GSDM
+bottler/M
+bottomlessness/M
+bottomless/YP
+bottommost
+bottom/SMRDG
+botulin/M
+botulinus/M
+botulism/SM
+Boucher/M
+boudoir/MS
+bouffant/S
+bougainvillea/SM
+bough/MD
+boughs
+bought/N
+bouillabaisse/MS
+bouillon/MS
+boulder/GMDS
+Boulder/M
+boulevard/MS
+bouncer/M
+bounce/SRDGZ
+bouncily
+bouncing/Y
+bouncy/TRP
+boundary/MS
+bound/AUDI
+boundedness/MU
+bounded/UP
+bounden
+bounder/AM
+bounders
+bounding
+boundlessness/SM
+boundless/YP
+bounds/IA
+bounteousness/MS
+bounteous/PY
+bountifulness/SM
+bountiful/PY
+bounty/SDM
+bouquet/SM
+Bourbaki/M
+bourbon/SM
+Bourbon/SM
+bourgeoisie/SM
+bourgeois/M
+Bourke/M
+Bourne/M
+Bournemouth/M
+boutique/MS
+bout/MS
+boutonnière/MS
+Bouvier
+Bovary/M
+bovine/YS
+Bowditch/M
+bowdlerization/MS
+bowdlerize/GRSD
+bowed/U
+bowel/GMDS
+Bowell/M
+Bowen/M
+bower/DMG
+Bowers
+Bowery/M
+Bowes
+bowie
+Bowie/M
+bowing/M
+bowlder's
+bowlegged
+bowleg/SM
+bowler/M
+bowlful/S
+bowl/GZSMDR
+bowline/MS
+bowling/M
+bowman/M
+Bowman/M
+bowmen
+bowser/M
+bowsprit/SM
+bows/R
+bowstring/GSMD
+bow/SZGNDR
+bowwow/DMGS
+boxcar/SM
+box/DRSJZGM
+boxer/M
+boxful/M
+boxing/M
+boxlike
+boxtops
+boxwood/SM
+boxy/TPR
+Boyce/M
+Boycey/M
+Boycie/M
+boycotter/M
+boycott/RDGS
+Boyd/M
+Boyer/M
+boyfriend/MS
+boyhood/SM
+boyishness/MS
+boyish/PY
+Boyle/M
+Boy/MR
+boy/MRS
+boyscout
+boysenberry/SM
+bozo/SM
+bpi
+bps
+BR
+brace/DSRJGM
+braced/U
+bracelet/MS
+bracer/M
+brachia
+brachium/M
+bracken/SM
+bracketed/U
+bracketing/M
+bracket/SGMD
+brackishness/SM
+brackish/P
+bract/SM
+Bradan/M
+bradawl/M
+Bradbury/M
+Bradburys
+bradded
+bradding
+Braddock/M
+Brade/M
+Braden/M
+Bradford/M
+Bradley/M
+Bradly/M
+Brad/MYN
+Bradney/M
+Bradshaw/M
+brad/SM
+Bradstreet/M
+Brady/M
+brae/SM
+braggadocio/SM
+braggart/SM
+bragged
+bragger/MS
+braggest
+bragging
+Bragg/M
+brag/S
+Brahe/M
+Brahma/MS
+Brahmanism/MS
+Brahman/SM
+Brahmaputra/M
+Brahmin's
+Brahms
+braider/M
+braiding/M
+braid/RDSJG
+braille/DSG
+Braille/GDSM
+Brainard/SM
+braincell/S
+brainchild/M
+brainchildren
+brain/GSDM
+braininess/MS
+brainlessness/M
+brainless/YP
+Brain/M
+brainpower/M
+brainstorm/DRMGJS
+brainstorming/M
+brainteaser/S
+brainteasing
+brainwasher/M
+brainwashing/M
+brainwash/JGRSD
+brainwave/S
+brainy/RPT
+braise/SDG
+brake/DSGM
+brakeman/M
+brakemen/M
+bramble/DSGM
+brambling/M
+brambly/RT
+Bram/M
+Brampton/M
+bra/MS
+Brana/M
+branched/U
+branching/M
+branchlike
+Branch/M
+branch/MDSJG
+Branchville/M
+Brandais/M
+Brandea/M
+branded/U
+Brandeis/M
+Brandel/M
+Brande/M
+Brandenburg/M
+Branden/M
+brander/GDM
+Brander/M
+Brandice/M
+Brandie/M
+Brandi/M
+Brandise/M
+brandish/GSD
+Brand/MRN
+Brando/M
+Brandon/M
+brand/SMRDGZ
+Brandt/M
+Brandtr/M
+brandy/GDSM
+Brandy/M
+Brandyn/M
+brandywine
+Braniff/M
+Bran/M
+branned
+branning
+Brannon/M
+bran/SM
+Brantley/M
+Brant/M
+Braque/M
+brashness/MS
+brash/PYSRT
+Brasilia
+brasserie/SM
+brass/GSDM
+brassiere/MS
+brassily
+brassiness/SM
+brassy/RSPT
+Bratislava/M
+brat/SM
+Brattain/M
+bratty/RT
+bratwurst/MS
+Braun/M
+bravadoes
+bravado/M
+brave/DSRGYTP
+braveness/MS
+bravery/MS
+bravest/M
+bravo/SDG
+bravura/SM
+brawler/M
+brawl/MRDSGZ
+brawniness/SM
+brawn/MS
+brawny/TRP
+brayer/M
+Bray/M
+bray/SDRG
+braze/GZDSR
+brazenness/MS
+brazen/PYDSG
+brazer/M
+brazier/SM
+Brazilian/MS
+Brazil/M
+Brazos/M
+Brazzaville/M
+breacher/M
+breach/MDRSGZ
+breadbasket/SM
+breadboard/SMDG
+breadbox/S
+breadcrumb/S
+breadfruit/MS
+breadline/MS
+bread/SMDHG
+breadth/M
+breadths
+breadwinner/MS
+breakables
+breakable/U
+breakage/MS
+breakaway/MS
+breakdown/MS
+breaker/M
+breakfaster/M
+breakfast/RDMGZS
+breakfront/S
+breaking/M
+breakneck
+breakout/MS
+breakpoint/SMDG
+break/SZRBG
+breakthroughs
+breakthrough/SM
+breakup/SM
+breakwater/SM
+bream/SDG
+Breanne/M
+Brear/M
+breastbone/MS
+breastfed
+breastfeed/G
+breasting/M
+breast/MDSG
+breastplate/SM
+breaststroke/SM
+breastwork/MS
+breathable/U
+breathalyser/S
+Breathalyzer/SM
+breathe
+breather/M
+breathing/M
+breathlessness/SM
+breathless/PY
+breaths
+breathtaking/Y
+breathy/TR
+breath/ZBJMDRSG
+Brecht/M
+Breckenridge/M
+bred/DG
+bredes
+breeching/M
+breech/MDSG
+breeder/I
+breeder's
+breeding/IM
+breeds/I
+breed/SZJRG
+Bree/M
+Breena/M
+breeze/GMSD
+breezeway/SM
+breezily
+breeziness/SM
+breezy/RPT
+Bremen/M
+bremsstrahlung/M
+Brena/M
+Brenda/M
+Brendan/M
+Brenden/M
+Brendin/M
+Brendis/M
+Brendon/M
+Bren/M
+Brenna/M
+Brennan/M
+Brennen/M
+Brenner/M
+Brenn/RNM
+Brent/M
+Brenton/M
+Bresenham/M
+Brest/M
+brethren
+Bret/M
+Breton
+Brett/M
+breve/SM
+brevet/MS
+brevetted
+brevetting
+breviary/SM
+brevity/MS
+brew/DRGZS
+brewer/M
+Brewer/M
+brewery/MS
+brewing/M
+brewpub/S
+Brew/RM
+Brewster/M
+Brezhnev/M
+Bria/M
+Briana/M
+Brian/M
+Brianna/M
+Brianne/M
+Briano/M
+Briant/M
+briar's
+bribe/GZDSR
+briber/M
+bribery/MS
+Brice/M
+brickbat/SM
+brick/GRDSM
+bricklayer/MS
+bricklaying/SM
+brickmason/S
+brickwork/SM
+brickyard/M
+bridal/S
+Bridalveil/M
+bridegroom/MS
+Bride/M
+bride/MS
+bridesmaid/MS
+Bridewell/M
+bridgeable/U
+bridged/U
+bridgehead/MS
+Bridgeport/M
+Bridger/M
+Bridges
+bridge/SDGM
+Bridget/M
+Bridgetown/M
+Bridgette/M
+Bridgett/M
+Bridgewater/M
+bridgework/MS
+bridging/M
+Bridgman/M
+Bridie/M
+bridled/U
+bridle/SDGM
+bridleway/S
+briefcase/SM
+briefed/C
+briefing/M
+briefness/MS
+briefs/C
+brief/YRDJPGTS
+Brien/M
+Brier/M
+brier/MS
+Brie/RSM
+Brietta/M
+brigade/GDSM
+brigadier/MS
+Brigadoon
+brigandage/MS
+brigand/MS
+brigantine/MS
+Brigg/MS
+Brigham/M
+brightener/M
+brighten/RDZG
+bright/GXTPSYNR
+Bright/M
+brightness/SM
+Brighton/M
+Brigida/M
+Brigid/M
+Brigit/M
+Brigitta/M
+Brigitte/M
+Brig/M
+brig/SM
+brilliance/MS
+brilliancy/MS
+brilliantine/MS
+brilliantness/M
+brilliant/PSY
+Brillo
+Brillouin/M
+brimful
+brimless
+brimmed
+brimming
+brim/SM
+brimstone/MS
+Brina/M
+Brindisi/M
+brindle/DSM
+brine/GMDSR
+briner/M
+Briney/M
+bringer/M
+bring/RGZS
+brininess/MS
+Brinkley/M
+brinkmanship/SM
+brink/MS
+Brinna/M
+Brinn/M
+Briny/M
+briny/PTSR
+brioche/SM
+Brion/M
+briquet's
+briquette/MGSD
+Brisbane/M
+brisket/SM
+briskness/MS
+brisk/YRDPGTS
+bristle/DSGM
+bristly/TR
+Bristol/M
+bristol/S
+Britain/M
+Brita/M
+Britannia/M
+Britannic
+Britannica/M
+britches
+Briticism/MS
+Britisher/M
+Britishly/M
+British/RYZ
+Brit/MS
+Britney/M
+Britni/M
+Briton/MS
+Britta/M
+Brittaney/M
+Brittani/M
+Brittan/M
+Brittany/MS
+Britte/M
+Britten/M
+Britteny/M
+brittleness/MS
+brittle/YTPDRSG
+Britt/MN
+Brittne/M
+Brittney/M
+Brittni/M
+Brnaba/M
+Brnaby/M
+Brno/M
+broach/DRSG
+broacher/M
+broadband
+broadcaster/M
+broadcast/RSGZJ
+broadcasts/A
+broadcloth/M
+broadcloths
+broaden/JGRDZ
+broadleaved
+broadloom/SM
+broadminded/P
+broadness/S
+broadsheet/MS
+broadside/SDGM
+broadsword/MS
+broad/TXSYRNP
+Broadway/SM
+Brobdingnagian
+Brobdingnag/M
+brocade/DSGM
+broccoli/MS
+brochette/SM
+brochure/SM
+Brockie/M
+Brock/M
+Brocky/M
+Broddie/M
+Broddy/M
+Broderick/M
+Broderic/M
+Brodie/M
+Brod/M
+Brody/M
+brogan/MS
+Broglie/M
+brogue/MS
+broiler/M
+broil/RDSGZ
+brokenhearted/Y
+brokenness/MS
+broken/YP
+brokerage/MS
+broker/DMG
+broke/RGZ
+Brok/M
+bromide/MS
+bromidic
+bromine/MS
+bronchial
+bronchi/M
+bronchiolar
+bronchiole/MS
+bronchiolitis
+bronchitic/S
+bronchitis/MS
+broncho's
+bronchus/M
+broncobuster/SM
+bronco/SM
+bronc/S
+Bron/M
+Bronnie/M
+Bronny/M
+Bronson/M
+Bronte
+brontosaur/SM
+brontosaurus/SM
+Bronx/M
+bronzed/M
+bronze/SRDGM
+bronzing/M
+brooch/MS
+brooder/M
+broodiness/M
+brooding/Y
+broodmare/SM
+brood/SMRDGZ
+broody/PTR
+Brookdale/M
+Brooke/M
+Brookfield/M
+Brookhaven/M
+brooklet/MS
+Brooklyn/M
+Brookmont/M
+brook/SGDM
+brookside
+Brook/SM
+broom/SMDG
+broomstick/MS
+Bros
+Brose/M
+bro/SH
+bros/S
+brothel/MS
+brother/DYMG
+brotherhood/SM
+brotherliness/MS
+brotherly/P
+broths
+broth/ZMR
+brougham/MS
+brought
+brouhaha/MS
+browbeat/NSG
+brow/MS
+Brownell/M
+Browne/M
+Brownian/M
+Brownie/MS
+brownie/MTRS
+browning/M
+Browning/M
+brownish
+Brown/MG
+brownness/MS
+brownout/MS
+brownstone/MS
+Brownsville/M
+brown/YRDMSJGTP
+browse
+browser/M
+brows/SRDGZ
+brr
+Br/TMN
+Brubeck/M
+brucellosis/M
+Bruce/M
+Brucie/M
+Bruckner/M
+Bruegel/M
+Brueghel's
+bruin/MS
+bruised/U
+bruise/JGSRDZ
+bruiser/M
+Bruis/M
+bruit/DSG
+Brumidi/M
+Brummel/M
+brunch/MDSG
+Brunei/M
+Brunelleschi/M
+brunet/S
+brunette/SM
+Brunhilda/M
+Brunhilde/M
+Bruno/M
+Brunswick/M
+brunt/GSMD
+brusher/M
+brushfire/MS
+brushlike
+brush/MSRDG
+brushoff/S
+brushwood/SM
+brushwork/MS
+brushy/R
+brusqueness/MS
+brusque/PYTR
+Brussels
+brutality/SM
+brutalization/SM
+brutalized/U
+brutalizes/AU
+brutalize/SDG
+brutal/Y
+brute/DSRGM
+brutishness/SM
+brutish/YP
+Brutus/M
+Bruxelles/M
+Bryana/M
+Bryan/M
+Bryant/M
+Bryanty/M
+Bryce/M
+Bryna/M
+Bryn/M
+Brynna/M
+Brynne/M
+Brynner/M
+Brynn/RM
+Bryon/M
+Brzezinski/M
+B's
+BS
+BSA
+BSD
+Btu
+BTU
+BTW
+bu
+bubblegum/S
+bubbler/M
+bubble/RSDGM
+bubbly/TRS
+Buber/M
+bub/MS
+buboes
+bubo/M
+bubonic
+buccaneer/GMDS
+Buchanan/M
+Bucharest/M
+Buchenwald/M
+Buchwald/M
+buckaroo/SM
+buckboard/SM
+bucker/M
+bucketful/MS
+bucket/SGMD
+buckeye/SM
+buck/GSDRM
+buckhorn/M
+Buckie/M
+Buckingham/M
+buckled/U
+buckler/MDG
+buckle/RSDGMZ
+buckles/U
+Buckley/M
+buckling's
+buckling/U
+Buck/M
+Buckner/M
+buckram/GSDM
+bucksaw/SM
+buckshot/MS
+buckskin/SM
+buckteeth
+bucktooth/DM
+buckwheat/SM
+Bucky/M
+bucolically
+bucolic/S
+Budapest/M
+budded
+Buddha/MS
+Buddhism/SM
+Buddhist/SM
+Buddie/M
+budding/S
+Budd/M
+buddy/GSDM
+Buddy/M
+budge/GDS
+budgerigar/MS
+budgetary
+budgeter/M
+budget/GMRDZS
+budgie/MS
+budging/U
+Bud/M
+bud/MS
+Budweiser/MS
+Buehring/M
+Buena/M
+buffaloes
+Buffalo/M
+buffalo/MDG
+buff/ASGD
+buffered/U
+bufferer/M
+buffer/RDMSGZ
+buffet/GMDJS
+bufflehead/M
+buffoonery/MS
+buffoonish
+buffoon/SM
+buff's
+Buffy/M
+Buford/M
+bugaboo/SM
+Bugatti/M
+bugbear/SM
+bug/CS
+bugeyed
+bugged/C
+buggered
+buggering
+bugger/SCM!
+buggery/M
+bugging/C
+buggy/RSMT
+bugle/GMDSRZ
+bugler/M
+bug's
+Buick/M
+builder/SM
+building/SM
+build/SAG
+buildup/MS
+built/AUI
+Buiron/M
+Bujumbura/M
+Bukhara/M
+Bukharin/M
+Bulawayo/M
+Bulba/M
+bulb/DMGS
+bulblet
+bulbous
+Bulfinch/M
+Bulganin/M
+Bulgaria/M
+Bulgarian/S
+bulge/DSGM
+bulgy/RT
+bulimarexia/S
+bulimia/MS
+bulimic/S
+bulk/GDRMS
+bulkhead/SDM
+bulkiness/SM
+bulky/RPT
+bulldogged
+bulldogger
+bulldogging
+bulldog/SM
+bulldoze/GRSDZ
+bulldozer/M
+bullet/GMDS
+bulletin/SGMD
+bulletproof/SGD
+bullfighter/M
+bullfighting/M
+bullfight/SJGZMR
+bullfinch/MS
+bullfrog/SM
+bullhead/DMS
+bullheadedness/SM
+bullheaded/YP
+bullhide
+bullhorn/SM
+bullied/M
+bullion/SM
+bullishness/SM
+bullish/PY
+bull/MDGS
+Bullock/M
+bullock/MS
+bullpen/MS
+bullring/SM
+bullseye
+bullshit/MS!
+bullshitted/!
+bullshitter/S!
+bullshitting/!
+bullwhackers
+Bullwinkle/M
+bullyboy/MS
+bullying/M
+bully/TRSDGM
+bulrush/SM
+Bultmann/M
+bulwark/GMDS
+bumblebee/MS
+bumble/JGZRSD
+bumbler/M
+bumbling/Y
+Bumbry/M
+bummed/M
+bummer/MS
+bummest
+bumming/M
+bumper/DMG
+bump/GZDRS
+bumpiness/MS
+bumpkin/MS
+Bumppo/M
+bumptiousness/SM
+bumptious/PY
+bumpy/PRT
+bum/SM
+Bunche/M
+bunch/MSDG
+bunchy/RT
+buncombe's
+bunco's
+Bundestag/M
+bundled/U
+bundle/GMRSD
+bundler/M
+Bundy/M
+bungalow/MS
+bungee/SM
+bung/GDMS
+bunghole/MS
+bungle/GZRSD
+bungler/M
+bungling/Y
+Bunin/M
+bunion/SM
+bunk/CSGDR
+Bunker/M
+bunker's/C
+bunker/SDMG
+bunkhouse/SM
+bunkmate/MS
+bunko's
+bunk's
+bunkum/SM
+Bunnie/M
+Bunni/M
+Bunny/M
+bunny/SM
+Bunsen/SM
+bun/SM
+bunt/GJZDRS
+bunting/M
+Buñuel/M
+Bunyan/M
+buoyancy/MS
+buoyant/Y
+buoy/SMDG
+Burbank/M
+burbler/M
+burble/RSDG
+burbs
+Burch/M
+burden's
+burdensomeness/M
+burdensome/PY
+burden/UGDS
+burdock/SM
+bureaucracy/MS
+bureaucratically
+bureaucratic/U
+bureaucratization/MS
+bureaucratize/SDG
+bureaucrat/MS
+bureau/MS
+burgeon/GDS
+burger/M
+Burger/M
+Burgess/M
+burgess/MS
+burgher/M
+burgh/MRZ
+burghs
+burglarize/GDS
+burglarproof/DGS
+burglar/SM
+burglary/MS
+burgle/SDG
+burgomaster/SM
+Burgoyne/M
+Burg/RM
+burg/SZRM
+Burgundian/S
+Burgundy/MS
+burgundy/S
+burial/ASM
+buried/U
+burier/M
+Burke/M
+Burk/SM
+burlap/MS
+burler/M
+burlesquer/M
+burlesque/SRDMYG
+burley/M
+Burlie/M
+burliness/SM
+Burlingame/M
+Burlington/M
+Burl/M
+burl/SMDRG
+burly/PRT
+Burma/M
+Burmese
+bur/MYS
+burnable/S
+Burnaby/M
+Burnard/M
+burned/U
+Burne/MS
+burner/M
+Burnett/M
+burn/GZSDRBJ
+burning/Y
+burnisher/M
+burnish/GDRSZ
+burnoose/MS
+burnout/MS
+Burns
+Burnside/MS
+burnt/YP
+burp/SGMD
+burr/GSDRM
+Burris/M
+burrito/S
+Burr/M
+burro/SM
+Burroughs/M
+burrower/M
+burrow/GRDMZS
+bursae
+bursa/M
+Bursa/M
+bursar/MS
+bursary/MS
+bursitis/MS
+burster/M
+burst/SRG
+Burtie/M
+Burt/M
+Burton/M
+Burty/M
+Burundian/S
+Burundi/M
+bury/ASDG
+busboy/MS
+busby/SM
+Busch/M
+buses/A
+busgirl/S
+bus/GMDSJ
+bushel/MDJSG
+Bushido/M
+bushiness/MS
+bushing/M
+bush/JMDSRG
+bushland
+Bush/M
+bushman/M
+bushmaster/SM
+bushmen
+Bushnell/M
+bushwhacker/M
+bushwhacking/M
+bushwhack/RDGSZ
+bushy/PTR
+busily
+businesslike
+businessman/M
+businessmen
+business/MS
+businesspeople
+businessperson/S
+businesswoman/M
+businesswomen
+busker/M
+busk/GRM
+buskin/SM
+bus's/A
+buss/D
+bustard/MS
+buster/M
+bustle/GSD
+bustling/Y
+bust/MSDRGZ
+busty/RT
+busybody/MS
+busy/DSRPTG
+busyness/MS
+busywork/SM
+but/ACS
+butane/MS
+butcherer/M
+butcher/MDRYG
+butchery/MS
+Butch/M
+butch/RSZ
+butene/M
+Butler/M
+butler/SDMG
+butted/A
+butte/MS
+butterball/MS
+buttercup/SM
+buttered/U
+butterfat/MS
+Butterfield/M
+butterfingered
+butterfingers/M
+butterfly/MGSD
+buttermilk/MS
+butternut/MS
+butter/RDMGZ
+butterscotch/SM
+buttery/TRS
+butting/M
+buttock/SGMD
+buttoner/M
+buttonhole/GMRSD
+buttonholer/M
+button's
+button/SUDG
+buttonweed
+buttonwood/SM
+buttress/MSDG
+butt/SGZMDR
+butyl/M
+butyrate/M
+buxomness/M
+buxom/TPYR
+Buxtehude/M
+buyback/S
+buyer/M
+buyout/S
+buy/ZGRS
+buzzard/MS
+buzz/DSRMGZ
+buzzer/M
+buzzword/SM
+buzzy
+bx
+bxs
+byelaw's
+Byelorussia's
+bye/MZS
+Byers/M
+bygone/S
+bylaw/SM
+byliner/M
+byline/RSDGM
+BYOB
+bypass/GSDM
+bypath/M
+bypaths
+byplay/S
+byproduct/SM
+Byram/M
+Byran/M
+Byrann/M
+Byrd/M
+byre/SM
+Byrle/M
+Byrne/M
+byroad/MS
+Byrom/M
+Byronic
+Byronism/M
+Byron/M
+bystander/SM
+byte/SM
+byway/SM
+byword/SM
+byzantine
+Byzantine/S
+Byzantium/M
+by/ZR
+C
+ca
+CA
+cabala/MS
+caballed
+caballero/SM
+caballing
+cabal/SM
+cabana/MS
+cabaret/SM
+cabbage/MGSD
+cabbed
+cabbing
+cabby's
+cabdriver/SM
+caber/M
+Cabernet/M
+cabinetmaker/SM
+cabinetmaking/MS
+cabinet/MS
+cabinetry/SM
+cabinetwork/MS
+cabin/GDMS
+cablecast/SG
+cable/GMDS
+cablegram/SM
+cabochon/MS
+caboodle/SM
+caboose/MS
+Cabot/M
+Cabrera/M
+Cabrini/M
+cabriolet/MS
+cab/SMR
+cabstand/MS
+cacao/SM
+cacciatore
+cache/DSRGM
+cachepot/MS
+cachet/MDGS
+Cacilia/M
+Cacilie/M
+cackler/M
+cackle/RSDGZ
+cackly
+CACM
+cacophonist
+cacophonous
+cacophony/SM
+cacti
+cactus/M
+CAD
+cadaverous/Y
+cadaver/SM
+caddishness/SM
+caddish/PY
+Caddric/M
+caddy/GSDM
+cadence/CSM
+cadenced
+cadencing
+cadent/C
+cadenza/MS
+cadet/SM
+Cadette/S
+cadge/DSRGZ
+cadger/M
+Cadillac/MS
+Cadiz/M
+Cad/M
+cadmium/MS
+cadre/SM
+cad/SM
+caducei
+caduceus/M
+Caedmon/M
+Caesar/MS
+caesura/SM
+café/MS
+cafeteria/SM
+caffeine/SM
+caftan/SM
+caged/U
+Cage/M
+cage/MZGDRS
+cager/M
+cagey/P
+cagier
+cagiest
+cagily
+caginess/MS
+Cagney/M
+Cahokia/M
+cahoot/MS
+Cahra/M
+CAI
+Caiaphas/M
+caiman's
+Caine/M
+Cain/MS
+Cairistiona/M
+cairn/SDM
+Cairo/M
+caisson/SM
+caitiff/MS
+Caitlin/M
+Caitrin/M
+cajole/LGZRSD
+cajolement/MS
+cajoler/M
+cajolery/SM
+Cajun/MS
+cake/MGDS
+cakewalk/SMDG
+calabash/SM
+calaboose/MS
+Calais/M
+calamari/S
+calamine/GSDM
+calamitousness/M
+calamitous/YP
+calamity/MS
+cal/C
+calcareousness/M
+calcareous/PY
+calciferous
+calcification/M
+calcify/XGNSD
+calcimine/GMSD
+calcine/SDG
+calcite/SM
+calcium/SM
+Calcomp/M
+CalComp/M
+CALCOMP/M
+calculability/IM
+calculable/IP
+calculate/AXNGDS
+calculated/PY
+calculatingly
+calculating/U
+calculation/AM
+calculative
+calculator/SM
+calculi
+calculus/M
+Calcutta/M
+caldera/SM
+Calder/M
+Calderon/M
+caldron's
+Caldwell/M
+Caleb/M
+Caledonia/M
+Cale/M
+calendar/MDGS
+calender/MDGS
+calf/M
+calfskin/SM
+Calgary/M
+Calhoun/M
+Caliban/M
+caliber/SM
+calibrated/U
+calibrater's
+calibrate/XNGSD
+calibrating/A
+calibration/M
+calibrator/MS
+calicoes
+calico/M
+Calida/M
+Calif/M
+California/M
+Californian/MS
+californium/SM
+calif's
+Caligula/M
+Cali/M
+caliper/SDMG
+caliphate/SM
+caliph/M
+caliphs
+calisthenic/S
+calisthenics/M
+Callaghan/M
+call/AGRDBS
+Callahan/M
+calla/MS
+Calla/MS
+Callao/M
+callback/S
+Callean/M
+called/U
+callee/M
+caller/MS
+Calley/M
+Callida/M
+Callie/M
+calligrapher/M
+calligraphic
+calligraphist/MS
+calligraph/RZ
+calligraphy/MS
+Calli/M
+calling/SM
+Calliope/M
+calliope/SM
+callisthenics's
+Callisto/M
+callosity/MS
+callousness/SM
+callous/PGSDY
+callowness/MS
+callow/RTSP
+callus/SDMG
+Cally/M
+calming/Y
+calmness/MS
+calm/PGTYDRS
+Cal/MY
+Caloocan/M
+caloric/S
+calorie/SM
+calorific
+calorimeter/MS
+calorimetric
+calorimetry/M
+Caltech/M
+Calumet/M
+calumet/MS
+calumniate/NGSDX
+calumniation/M
+calumniator/SM
+calumnious
+calumny/MS
+calvary/M
+Calvary/M
+calve/GDS
+Calvert/M
+calves/M
+Calvinism/MS
+Calvinistic
+Calvinist/MS
+Calvin/M
+Calv/M
+calyces's
+Calypso/M
+calypso/SM
+calyx/MS
+Ca/M
+CAM
+Camacho/M
+Camala/M
+camaraderie/SM
+camber/DMSG
+cambial
+cambium/SM
+Cambodia/M
+Cambodian/S
+Cambrian/S
+cambric/MS
+Cambridge/M
+camcorder/S
+Camden/M
+camelhair's
+Camella/M
+Camellia/M
+camellia/MS
+Camel/M
+Camelopardalis/M
+Camelot/M
+camel/SM
+Camembert/MS
+cameo/GSDM
+camerae
+cameraman/M
+cameramen
+camera/MS
+camerawoman
+camerawomen
+Cameron/M
+Cameroonian/S
+Cameroon/SM
+came/N
+Camey/M
+Camila/M
+Camile/M
+Camilla/M
+Camille/M
+Cami/M
+Camino/M
+camion/M
+camisole/MS
+Cam/M
+cammed
+Cammie/M
+Cammi/M
+cam/MS
+Cammy/M
+Camoens/M
+camomile's
+camouflage/DRSGZM
+camouflager/M
+campaigner/M
+campaign/ZMRDSG
+campanile/SM
+campanological
+campanologist/SM
+campanology/MS
+Campbell/M
+Campbellsport/M
+camper/SM
+campesinos
+campest
+campfire/SM
+campground/MS
+camphor/MS
+Campinas/M
+camping/S
+Campos
+camp's
+camp/SCGD
+campsite/MS
+campus/GSDM
+campy/RT
+Camry/M
+camshaft/SM
+Camus/M
+Canaanite/SM
+Canaan/M
+Canada/M
+Canadianism/SM
+Canadian/S
+Canad/M
+Canaletto/M
+canalization/MS
+canalize/GSD
+canal/SGMD
+canapé/S
+canard/MS
+Canaries
+canary/SM
+canasta/SM
+Canaveral/M
+Canberra/M
+cancan/SM
+cancelate/D
+canceled/U
+canceler/M
+cancellation/MS
+cancel/RDZGS
+cancer/MS
+Cancer/MS
+cancerous/Y
+Cancun/M
+Candace/M
+candelabra/S
+candelabrum/M
+Candice/M
+candidacy/MS
+Candida/M
+candidate/SM
+candidature/S
+Candide/M
+candidly/U
+candidness/SM
+candid/TRYPS
+Candie/M
+Candi/SM
+candle/GMZRSD
+candlelight/SMR
+candlelit
+candlepower/SM
+candler/M
+candlestick/SM
+Candlewick/M
+candlewick/MS
+candor/MS
+Candra/M
+candy/GSDM
+Candy/M
+canebrake/SM
+caner/M
+cane/SM
+canine/S
+caning/M
+Canis/M
+canister/SGMD
+cankerous
+canker/SDMG
+Can/M
+can/MDRSZGJ
+cannabis/MS
+canned
+cannelloni
+canner/SM
+cannery/MS
+Cannes
+cannibalism/MS
+cannibalistic
+cannibalization/SM
+cannibalize/GSD
+cannibal/SM
+cannily/U
+canninesses
+canniness/UM
+canning/M
+cannister/SM
+cannonade/SDGM
+cannonball/SGDM
+Cannon/M
+cannon/SDMG
+cannot
+canny/RPUT
+canoe/DSGM
+canoeist/SM
+Canoga/M
+canonic
+canonicalization
+canonicalize/GSD
+canonical/SY
+canonist/M
+canonization/MS
+canonized/U
+canonize/SDG
+canon/SM
+Canopus/M
+canopy/GSDM
+canst
+can't
+cantabile/S
+Cantabrigian
+cantaloupe/MS
+cantankerousness/SM
+cantankerous/PY
+cantata/SM
+cant/CZGSRD
+canted/IA
+canteen/MS
+Canterbury/M
+canter/CM
+cantered
+cantering
+canticle/SM
+cantilever/SDMG
+canto/MS
+cantonal
+Cantonese/M
+Canton/M
+cantonment/SM
+canton/MGSLD
+Cantor/M
+cantor/MS
+Cantrell/M
+cant's
+cants/A
+Cantu/M
+Canute/M
+canvasback/MS
+canvas/RSDMG
+canvasser/M
+canvass/RSDZG
+canyon/MS
+CAP
+capability/ISM
+capableness/IM
+capable/PI
+capabler
+capablest
+capably/I
+capaciousness/MS
+capacious/PY
+capacitance/SM
+capacitate/V
+capacitive/Y
+capacitor/MS
+capacity/IMS
+caparison/SDMG
+Capek/M
+Capella/M
+caper/GDM
+capeskin/SM
+cape/SM
+Capet/M
+Capetown/M
+Caph/M
+capillarity/MS
+capillary/S
+Capistrano/M
+capitalism/SM
+capitalistic
+capitalistically
+capitalist/SM
+capitalization/SMA
+capitalized/AU
+capitalizer/M
+capitalize/RSDGZ
+capitalizes/A
+capital/SMY
+capita/M
+Capitan/M
+capitation/CSM
+Capitoline/M
+Capitol/MS
+capitol/SM
+capitulate/AXNGSD
+capitulation/MA
+caplet/S
+cap/MDRSZB
+Capone/M
+capon/SM
+capo/SM
+Capote/M
+capped/UA
+capping/M
+cappuccino/MS
+Cappy/M
+Capra/M
+Caprice/M
+caprice/MS
+capriciousness/MS
+capricious/PY
+Capricorn/MS
+Capri/M
+caps/AU
+capsicum/MS
+capsize/SDG
+capstan/MS
+capstone/MS
+capsular
+capsule/MGSD
+capsulize/GSD
+captaincy/MS
+captain/SGDM
+caption/GSDRM
+captiousness/SM
+captious/PY
+captivate/XGNSD
+captivation/M
+captivator/SM
+captive/MS
+captivity/SM
+Capt/M
+captor/SM
+capture/AGSD
+capturer/MS
+capt/V
+Capulet/M
+Caputo/M
+Caracalla/M
+Caracas/M
+caracul's
+carafe/SM
+Caralie/M
+Cara/M
+caramelize/SDG
+caramel/MS
+carapace/SM
+carapaxes
+carat/SM
+Caravaggio/M
+caravan/DRMGS
+caravaner/M
+caravansary/MS
+caravanserai's
+caravel/MS
+caraway/MS
+carbide/MS
+carbine/MS
+carbohydrate/MS
+carbolic
+Carboloy/M
+carbonaceous
+carbonate/SDXMNG
+carbonation/M
+Carbondale/M
+Carbone/MS
+carbonic
+carboniferous
+Carboniferous
+carbonization/SAM
+carbonizer/AS
+carbonizer's
+carbonizes/A
+carbonize/ZGRSD
+carbon/MS
+carbonyl/M
+carborundum
+Carborundum/MS
+carboy/MS
+carbuncle/SDM
+carbuncular
+carburetor/MS
+carburetter/S
+carburettor/SM
+carcase/MS
+carcass/SM
+Carce/M
+carcinogenic
+carcinogenicity/MS
+carcinogen/SM
+carcinoma/SM
+cardamom/MS
+cardboard/MS
+card/EDRSG
+Cardenas/M
+carder/MS
+carder's/E
+cardholders
+cardiac/S
+Cardiff/M
+cardigan/SM
+cardinality/SM
+cardinal/SYM
+carding/M
+Cardin/M
+Cardiod/M
+cardiogram/MS
+cardiograph/M
+cardiographs
+cardioid/M
+cardiologist/SM
+cardiology/MS
+cardiomegaly/M
+cardiopulmonary
+cardiovascular
+card's
+cardsharp/ZSMR
+CARE
+cared/U
+careen/DSG
+careerism/M
+careerist/MS
+career/SGRDM
+carefree
+carefuller
+carefullest
+carefulness/MS
+careful/PY
+caregiver/S
+carelessness/MS
+careless/YP
+Care/M
+Carena/M
+Caren/M
+carer/M
+care/S
+Caresa/M
+Caressa/M
+Caresse/M
+caresser/M
+caressing/Y
+caressive/Y
+caress/SRDMVG
+caretaker/SM
+caret/SM
+careworn
+Carey/M
+carfare/MS
+cargoes
+cargo/M
+carhopped
+carhopping
+carhop/SM
+Caria/M
+Caribbean/S
+Carib/M
+caribou/MS
+caricature/GMSD
+caricaturisation
+caricaturist/MS
+caricaturization
+Carie/M
+caries/M
+carillonned
+carillonning
+carillon/SM
+Caril/M
+Carilyn/M
+Cari/M
+Carina/M
+Carine/M
+caring/U
+Carin/M
+Cariotta/M
+carious
+Carissa/M
+Carita/M
+Caritta/M
+carjack/GSJDRZ
+Carla/M
+Carlee/M
+Carleen/M
+Carlene/M
+Carlen/M
+Carletonian/M
+Carleton/M
+Carley/M
+Carlie/M
+Carlina/M
+Carline/M
+Carling/M
+Carlin/M
+Carlita/M
+Carl/MNG
+carload/MSG
+Carlo/SM
+Carlota/M
+Carlotta/M
+Carlsbad/M
+Carlson/M
+Carlton/M
+Carlye/M
+Carlyle/M
+Carly/M
+Carlyn/M
+Carlynne/M
+Carlynn/M
+Carma/M
+Carmela/M
+Carmelia/M
+Carmelina/M
+Carmelita/M
+Carmella/M
+Carmelle/M
+Carmel/M
+Carmelo/M
+Carmencita/M
+Carmen/M
+Carmichael/M
+Carmina/M
+Carmine/M
+carmine/MS
+Carmita/M
+Car/MNY
+Carmon/M
+carnage/MS
+carnality/SM
+carnal/Y
+Carnap/M
+carnation/IMS
+Carnegie/M
+carnelian/SM
+Carney/M
+carney's
+carnival/MS
+carnivore/SM
+carnivorousness/MS
+carnivorous/YP
+Carnot/M
+Carny/M
+carny/SDG
+carob/SM
+Carola/M
+Carolan/M
+Carolann/M
+Carolee/M
+Carole/M
+caroler/M
+Carolina/MS
+Caroline/M
+Carolingian
+Carolinian/S
+Carolin/M
+Caroljean/M
+Carol/M
+carol/SGZMRD
+Carolus/M
+Carolyne/M
+Carolyn/M
+Carolynn/M
+Caro/M
+carom/GSMD
+Caron/M
+carotene/MS
+carotid/MS
+carousal/MS
+carousel/MS
+carouser/M
+carouse/SRDZG
+carpal/SM
+Carpathian/MS
+carpel/SM
+carpenter/DSMG
+carpentering/M
+Carpenter/M
+carpentry/MS
+carper/M
+carpetbagged
+carpetbagger/MS
+carpetbagging
+carpetbag/MS
+carpeting/M
+carpet/MDJGS
+carpi/M
+carping/Y
+carp/MDRSGZ
+carpool/DGS
+carport/MS
+carpus/M
+carrageen/M
+Carree/M
+carrel/SM
+carriage/SM
+carriageway/SM
+Carrie/M
+carrier/M
+Carrier/M
+Carrillo/M
+Carri/M
+carrion/SM
+Carrissa/M
+Carr/M
+Carroll/M
+Carrol/M
+carrot/MS
+carroty/RT
+carrousel's
+carryall/MS
+Carry/MR
+carryout/S
+carryover/S
+carry/RSDZG
+carsickness/SM
+carsick/P
+Carson/M
+cartage/MS
+cartel/SM
+carte/M
+carter/M
+Carter/M
+Cartesian
+Carthage/M
+Carthaginian/S
+carthorse/MS
+Cartier/M
+cartilage/MS
+cartilaginous
+cartload/MS
+cart/MDRGSZ
+Cart/MR
+cartographer/MS
+cartographic
+cartography/MS
+carton/GSDM
+cartoon/GSDM
+cartoonist/MS
+cartridge/SM
+cartwheel/MRDGS
+Cartwright/M
+Carty/RM
+Caruso/M
+carve/DSRJGZ
+carven
+carver/M
+Carver/M
+carving/M
+caryatid/MS
+Caryl/M
+Cary/M
+Caryn/M
+car/ZGSMDR
+casaba/SM
+Casablanca/M
+Casals/M
+Casandra/M
+Casanova/SM
+Casar/M
+casbah/M
+cascade/MSDG
+Cascades/M
+cascara/MS
+casebook/SM
+case/DSJMGL
+cased/U
+caseharden/SGD
+casein/SM
+caseload/MS
+Case/M
+casement/SM
+caseworker/M
+casework/ZMRS
+Casey/M
+cashbook/SM
+cashew/MS
+cash/GZMDSR
+cashier/SDMG
+cashless
+Cash/M
+cashmere/MS
+Casie/M
+Casi/M
+casing/M
+casino/MS
+casket/SGMD
+cask/GSDM
+Caspar/M
+Casper/M
+Caspian
+Cass
+Cassandra/SM
+Cassandre/M
+Cassandry/M
+Cassatt/M
+Cassaundra/M
+cassava/MS
+casserole/MGSD
+cassette/SM
+Cassey/M
+cassia/MS
+Cassie/M
+Cassi/M
+cassino's
+Cassiopeia/M
+Cassite/M
+Cassius/M
+cassock/SDM
+Cassondra/M
+cassowary/SM
+Cassy/M
+Castaneda/M
+castanet/SM
+castaway/SM
+castellated
+caste/MHS
+caster/M
+cast/GZSJMDR
+castigate/XGNSD
+castigation/M
+castigator/SM
+Castile's
+Castillo/M
+casting/M
+castle/GMSD
+castoff/S
+Castor/M
+castor's
+castrate/DSNGX
+castration/M
+Castries/M
+Castro/M
+casts/A
+casualness/SM
+casual/SYP
+casualty/SM
+casuistic
+casuist/MS
+casuistry/SM
+cataclysmal
+cataclysmic
+cataclysm/MS
+catacomb/MS
+catafalque/SM
+Catalan/MS
+catalepsy/MS
+cataleptic/S
+Catalina/M
+cataloger/M
+catalog/SDRMZG
+Catalonia/M
+catalpa/SM
+catalysis/M
+catalyst/SM
+catalytic
+catalytically
+catalyze/DSG
+catamaran/MS
+catapult/MGSD
+cataract/MS
+Catarina/M
+catarrh/M
+catarrhs
+catastrophe/SM
+catastrophic
+catastrophically
+catatonia/MS
+catatonic/S
+Catawba/M
+catbird/MS
+catboat/SM
+catcall/SMDG
+catchable/U
+catchall/MS
+catch/BRSJLGZ
+catcher/M
+catchment/SM
+catchpenny/S
+catchphrase/S
+catchup/MS
+catchword/MS
+catchy/TR
+catechism/MS
+catechist/SM
+catechize/SDG
+catecholamine/MS
+categoric
+categorical/Y
+categorization/MS
+categorized/AU
+categorize/RSDGZ
+category/MS
+Cate/M
+catenate/NF
+catenation/MF
+catercorner
+caterer/M
+cater/GRDZ
+Caterina/M
+catering/M
+Caterpillar
+caterpillar/SM
+caterwaul/DSG
+catfish/MS
+catgut/SM
+Catha/M
+Catharina/M
+Catharine/M
+catharses
+catharsis/M
+cathartic/S
+Cathay/M
+cathedral/SM
+Cathee/M
+Catherina/M
+Catherine/M
+Catherin/M
+Cather/M
+Cathe/RM
+catheterize/GSD
+catheter/SM
+Cathie/M
+Cathi/M
+Cathleen/M
+Cathlene/M
+cathode/MS
+cathodic
+catholicism
+Catholicism/SM
+catholicity/MS
+catholic/MS
+Catholic/S
+Cathrine/M
+Cathrin/M
+Cathryn/M
+Cathyleen/M
+Cathy/M
+Catie/M
+Catiline/M
+Cati/M
+Catina/M
+cationic
+cation/MS
+catkin/SM
+Catlaina/M
+Catlee/M
+catlike
+Catlin/M
+catnapped
+catnapping
+catnap/SM
+catnip/MS
+Cato/M
+Catrina/M
+Catriona/M
+Catskill/SM
+cat/SMRZ
+catsup's
+cattail/SM
+catted
+cattery/M
+cattily
+cattiness/SM
+catting
+cattle/M
+cattleman/M
+cattlemen
+Catt/M
+catty/PRST
+Catullus/M
+CATV
+catwalk/MS
+Caty/M
+Caucasian/S
+Caucasoid/S
+Caucasus/M
+Cauchy/M
+caucus/SDMG
+caudal/Y
+caught/U
+cauldron/MS
+cauliflower/MS
+caulker/M
+caulk/JSGZRD
+causality/SM
+causal/YS
+causate/XVN
+causation/M
+causative/SY
+cause/DSRGMZ
+caused/U
+causeless
+causerie/MS
+causer/M
+causeway/SGDM
+caustically
+causticity/MS
+caustic/YS
+cauterization/SM
+cauterized/U
+cauterize/GSD
+cautionary
+cautioner/M
+caution/GJDRMSZ
+cautiousness's/I
+cautiousness/SM
+cautious/PIY
+cavalcade/MS
+cavalierness/M
+cavalier/SGYDP
+cavalryman/M
+cavalrymen
+cavalry/MS
+caveat/SM
+caveatted
+caveatting
+cave/GFRSD
+caveman/M
+cavemen
+Cavendish/M
+caver/M
+cavern/GSDM
+cavernous/Y
+cave's
+caviar/MS
+caviler/M
+cavil/SJRDGZ
+caving/MS
+cavity/MFS
+cavort/SDG
+Cavour/M
+caw/SMDG
+Caxton/M
+Caye/M
+Cayenne/M
+cayenne/SM
+Cayla/M
+Cayman/M
+cayman/SM
+cay's
+cay/SC
+Cayuga/M
+cayuse/SM
+Caz/M
+Cazzie/M
+c/B
+CB
+CBC
+Cb/M
+CBS
+cc
+Cchaddie/M
+CCTV
+CCU
+CD
+CDC/M
+Cd/M
+CDT
+Ce
+cease/DSCG
+ceasefire/S
+ceaselessness/SM
+ceaseless/YP
+ceasing/U
+Ceausescu/M
+Cebuano/M
+Cebu/M
+ceca
+cecal
+Cecelia/M
+Cece/M
+Cecile/M
+Ceciley/M
+Cecilia/M
+Cecilio/M
+Cecilius/M
+Cecilla/M
+Cecil/M
+Cecily/M
+cecum/M
+cedar/SM
+ceded/A
+cede/FRSDG
+ceder's/F
+ceder/SM
+cedes/A
+cedilla/SM
+ceding/A
+Ced/M
+Cedric/M
+ceilidh/M
+ceiling/MDS
+Ceil/M
+celandine/MS
+Celanese/M
+Celebes's
+celebrant/MS
+celebratedness/M
+celebrated/P
+celebrate/XSDGN
+celebration/M
+celebrator/MS
+celebratory
+celebrity/MS
+Cele/M
+Celene/M
+celerity/SM
+celery/SM
+Celesta/M
+celesta/SM
+Celeste/M
+celestial/YS
+Celestia/M
+Celestina/M
+Celestine/M
+Celestyna/M
+Celestyn/M
+Celia/M
+celibacy/MS
+celibate/SM
+Celie/M
+Celina/M
+Celinda/M
+Celine/M
+Celinka/M
+Celisse/M
+Celka/M
+cellarer/M
+cellar/RDMGS
+Celle/M
+cell/GMDS
+Cellini/M
+cellist/SM
+Cello/M
+cello/MS
+cellophane/SM
+cellphone/S
+cellular/SY
+cellulite/S
+celluloid/SM
+cellulose/SM
+Celsius/S
+Celtic/SM
+Celt/MS
+cementa
+cementer/M
+cementum/SM
+cement/ZGMRDS
+cemetery/MS
+cenobite/MS
+cenobitic
+cenotaph/M
+cenotaphs
+Cenozoic
+censer/MS
+censored/U
+censor/GDMS
+censorial
+censoriousness/MS
+censorious/YP
+censorship/MS
+censure/BRSDZMG
+censurer/M
+census/SDMG
+centaur/SM
+Centaurus/M
+centavo/SM
+centenarian/MS
+centenary/S
+centennial/YS
+center/AC
+centerboard/SM
+centered
+centerer/S
+centerfold/S
+centering/SM
+centerline/SM
+centerpiece/SM
+center's
+Centigrade
+centigrade/S
+centigram/SM
+centiliter/MS
+centime/SM
+centimeter/SM
+centipede/MS
+Centralia/M
+centralism/M
+centralist/M
+centrality/MS
+centralization/CAMS
+centralize/CGSD
+centralizer/SM
+centralizes/A
+central/STRY
+centrefold's
+Centrex
+CENTREX/M
+centric/F
+centrifugal/SY
+centrifugate/NM
+centrifugation/M
+centrifuge/GMSD
+centripetal/Y
+centrist/MS
+centroid/MS
+cent/SZMR
+centurion/MS
+century/MS
+CEO
+cephalic/S
+Cepheid
+Cepheus/M
+ceramicist/S
+ceramic/MS
+ceramist/MS
+cerate/MD
+Cerberus/M
+cereal/MS
+cerebellar
+cerebellum/MS
+cerebra
+cerebral/SY
+cerebrate/XSDGN
+cerebration/M
+cerebrum/MS
+cerement/SM
+ceremonial/YSP
+ceremoniousness/MS
+ceremoniousness's/U
+ceremonious/YUP
+ceremony/MS
+Cerenkov/M
+Ceres/M
+Cerf/M
+cerise/SM
+cerium/MS
+cermet/SM
+CERN/M
+certainer
+certainest
+certainty/UMS
+certain/UY
+cert/FS
+certifiable
+certifiably
+certificate/SDGM
+certification/AMC
+certified/U
+certifier/M
+certify/DRSZGNX
+certiorari/M
+certitude/ISM
+cerulean/MS
+Cervantes/M
+cervical
+cervices/M
+cervix/M
+Cesarean
+cesarean/S
+Cesare/M
+Cesar/M
+Cesaro/M
+cesium/MS
+cessation/SM
+cession/FAMSK
+Cessna/M
+cesspit/M
+cesspool/SM
+Cesya/M
+cetacean/S
+cetera/S
+Cetus/M
+Ceylonese
+Ceylon/M
+Cezanne/S
+cf
+CF
+CFC
+Cf/M
+CFO
+cg
+Chablis/SM
+Chaddie/M
+Chadd/M
+Chaddy/M
+Chadian/S
+Chad/M
+Chadwick/M
+chafe/GDSR
+chafer/M
+chaffer/DRG
+chafferer/M
+Chaffey/M
+chaff/GRDMS
+chaffinch/SM
+Chagall/M
+chagrin/DGMS
+Chaim/M
+chainlike
+chain's
+chainsaw/SGD
+chain/SGUD
+chairlady/M
+chairlift/MS
+chairman/MDGS
+chairmanship/MS
+chairmen
+chairperson/MS
+chair/SGDM
+chairwoman/M
+chairwomen
+chaise/SM
+chalcedony/MS
+Chaldea/M
+Chaldean/M
+chalet/SM
+chalice/DSM
+chalkboard/SM
+chalk/DSMG
+chalkiness/S
+chalkline
+chalky/RPT
+challenged/U
+challenger/M
+challenge/ZGSRD
+challenging/Y
+challis/SM
+Chalmers
+chamberer/M
+Chamberlain/M
+chamberlain/MS
+chambermaid/MS
+chamberpot/S
+Chambers/M
+chamber/SZGDRM
+chambray/MS
+chameleon/SM
+chamfer/DMGS
+chammy's
+chamois/DSMG
+chamomile/MS
+champagne/MS
+champaign/M
+champ/DGSZ
+champion/MDGS
+championship/MS
+Champlain/M
+chanced/M
+chance/GMRSD
+chancellery/SM
+chancellorship/SM
+chancellor/SM
+Chancellorsville/M
+chancel/SM
+Chance/M
+chancery/SM
+Chancey/M
+chanciness/S
+chancing/M
+chancre/SM
+chancy/RPT
+Chandal/M
+Chanda/M
+chandelier/SM
+Chandigarh/M
+Chandler/M
+chandler/MS
+Chandragupta/M
+Chandra/M
+Chandrasekhar/M
+Chandy/M
+Chanel/M
+Chane/M
+Chaney/M
+Changchun/M
+changeabilities
+changeability/UM
+changeableness/SM
+changeable/U
+changeably/U
+changed/U
+change/GZRSD
+changeless
+changeling/M
+changeover/SM
+changer/M
+changing/U
+Chang/M
+Changsha/M
+Chan/M
+Channa/M
+channeler/M
+channeling/M
+channelization/SM
+channelize/GDS
+channellings
+channel/MDRZSG
+Channing/M
+chanson/SM
+Chantalle/M
+Chantal/M
+chanter/M
+chanteuse/MS
+chantey/SM
+chanticleer/SM
+Chantilly/M
+chantry/MS
+chant/SJGZMRD
+chanty's
+Chanukah's
+Chao/M
+chaos/SM
+chaotic
+chaotically
+chaparral/MS
+chapbook/SM
+chapeau/MS
+chapel/MS
+chaperonage/MS
+chaperoned/U
+chaperone's
+chaperon/GMDS
+chaplaincy/MS
+chaplain/MS
+chaplet/SM
+Chaplin/M
+Chapman/M
+chap/MS
+Chappaquiddick/M
+chapped
+chapping
+chapter/SGDM
+Chara
+charabanc/MS
+characterful
+characteristically/U
+characteristic/SM
+characterizable/MS
+characterization/MS
+characterize/DRSBZG
+characterized/U
+characterizer/M
+characterless
+character/MDSG
+charade/SM
+charbroil/SDG
+charcoal/MGSD
+Chardonnay
+chardonnay/S
+chard/SM
+chargeableness/M
+chargeable/P
+charged/U
+charge/EGRSDA
+charger/AME
+chargers
+char/GS
+Charil/M
+charily
+chariness/MS
+Charin/M
+charioteer/GSDM
+Chariot/M
+chariot/SMDG
+Charis
+charisma/M
+charismata
+charismatically
+charismatic/S
+Charissa/M
+Charisse/M
+charitablenesses
+charitableness/UM
+charitable/UP
+charitably/U
+Charita/M
+Charity/M
+charity/MS
+charlady/M
+Charla/M
+charlatanism/MS
+charlatanry/SM
+charlatan/SM
+Charlean/M
+Charleen/M
+Charlemagne/M
+Charlena/M
+Charlene/M
+Charles/M
+Charleston/SM
+Charley/M
+Charlie/M
+Charline/M
+Charlot/M
+Charlotta/M
+Charlotte/M
+Charlottesville/M
+Charlottetown/M
+Charlton/M
+Charmaine/M
+Charmain/M
+Charmane/M
+charmer/M
+Charmian/M
+Charmine/M
+charming/RYT
+Charmin/M
+Charmion/M
+charmless
+charm/SGMZRD
+Charolais
+Charo/M
+Charon/M
+charred
+charring
+charted/U
+charter/AGDS
+chartered/U
+charterer/SM
+charter's
+chartist/SM
+Chartres/M
+chartreuse/MS
+chartroom/S
+chart/SJMRDGBZ
+charwoman/M
+charwomen
+Charybdis/M
+Charyl/M
+chary/PTR
+Chas
+chase/DSRGZ
+Chase/M
+chaser/M
+chasing/M
+Chasity/M
+chasm/SM
+chassis/M
+chastely
+chasteness/SM
+chasten/GSD
+chaste/UTR
+chastisement/SM
+chastiser/M
+chastise/ZGLDRS
+Chastity/M
+chastity/SM
+chastity's/U
+chasuble/SM
+Chateaubriand
+château/M
+chateaus
+châteaux
+châtelaine/SM
+chat/MS
+Chattahoochee/M
+Chattanooga/M
+chatted
+chattel/MS
+chatterbox/MS
+chatterer/M
+Chatterley/M
+chatter/SZGDRY
+Chatterton/M
+chattily
+chattiness/SM
+chatting
+chatty/RTP
+Chaucer/M
+chauffeur/GSMD
+Chaunce/M
+Chauncey/M
+Chautauqua/M
+chauvinism/MS
+chauvinistic
+chauvinistically
+chauvinist/MS
+Chavez/M
+chaw
+Chayefsky/M
+cheapen/DG
+cheapish
+cheapness/MS
+cheapskate/MS
+cheap/YRNTXSP
+cheater/M
+cheat/RDSGZ
+Chechen/M
+Chechnya/M
+checkable/U
+checkbook/MS
+checked/UA
+checkerboard/MS
+checker/DMG
+check/GZBSRDM
+checklist/S
+checkmate/MSDG
+checkoff/SM
+checkout/S
+checkpoint/MS
+checkroom/MS
+check's/A
+checks/A
+checksummed
+checksumming
+checksum/SM
+checkup/MS
+Cheddar/MS
+cheddar/S
+cheekbone/SM
+cheek/DMGS
+cheekily
+cheekiness/SM
+cheeky/PRT
+cheep/GMDS
+cheerer/M
+cheerfuller
+cheerfullest
+cheerfulness/MS
+cheerful/YP
+cheerily
+cheeriness/SM
+cheerio/S
+Cheerios/M
+cheerleader/SM
+cheerlessness/SM
+cheerless/PY
+cheers/S
+cheery/PTR
+cheer/YRDGZS
+cheeseburger/SM
+cheesecake/SM
+cheesecloth/M
+cheesecloths
+cheeseparing/S
+cheese/SDGM
+cheesiness/SM
+cheesy/PRT
+cheetah/M
+cheetahs
+Cheeto/M
+Cheever/M
+cheffed
+cheffing
+chef/SM
+Chekhov/M
+chelate/XDMNG
+chelation/M
+Chelsae/M
+Chelsea/M
+Chelsey/M
+Chelsie/M
+Chelsy/M
+Chelyabinsk/M
+chem
+Che/M
+chemic
+chemical/SYM
+chemiluminescence/M
+chemiluminescent
+chemise/SM
+chemistry/SM
+chemist/SM
+chemotherapeutic/S
+chemotherapy/SM
+chemurgy/SM
+Chengdu
+Cheng/M
+chenille/SM
+Chen/M
+Cheops/M
+Chere/M
+Cherey/M
+Cherianne/M
+Cherice/M
+Cherida/M
+Cherie/M
+Cherilyn/M
+Cherilynn/M
+Cheri/M
+Cherin/M
+Cherise/M
+cherisher/M
+cherish/GDRS
+Cherish/M
+Cheriton/M
+Cherlyn/M
+Cher/M
+Chernenko/M
+Chernobyl/M
+Cherokee/MS
+cheroot/MS
+Cherri/M
+Cherrita/M
+Cherry/M
+cherry/SM
+chert/MS
+cherubic
+cherubim/S
+cherub/SM
+chervil/MS
+Cherye/M
+Cheryl/M
+Chery/M
+Chesapeake/M
+Cheshire/M
+Cheslie/M
+chessboard/SM
+chessman/M
+chessmen
+chess/SM
+Chesterfield/M
+chesterfield/MS
+Chester/M
+Chesterton/M
+chestful/S
+chest/MRDS
+chestnut/SM
+Cheston/M
+chesty/TR
+Chet/M
+Chevalier/M
+chevalier/SM
+Cheviot/M
+cheviot/S
+Chev/M
+Chevrolet/M
+chevron/DMS
+Chevy/M
+chewer/M
+chew/GZSDR
+chewiness/S
+chewy/RTP
+Cheyenne/SM
+chg
+chge
+Chiang/M
+chianti/M
+Chianti/S
+chiaroscuro/SM
+Chiarra/M
+Chiba/M
+Chicagoan/SM
+Chicago/M
+Chicana/MS
+chicane/MGDS
+chicanery/MS
+Chicano/MS
+chichi/RTS
+chickadee/SM
+Chickasaw/SM
+chickenfeed
+chicken/GDM
+chickenhearted
+chickenpox/MS
+Chickie/M
+Chick/M
+chickpea/MS
+chickweed/MS
+chick/XSNM
+Chicky/M
+chicle/MS
+Chic/M
+chicness/S
+Chico/M
+chicory/MS
+chic/SYRPT
+chide/GDS
+chiding/Y
+chiefdom/MS
+chieftain/SM
+chief/YRMST
+chiffonier/MS
+chiffon/MS
+chigger/MS
+chignon/MS
+Chihuahua/MS
+chihuahua/S
+chilblain/MS
+childbearing/MS
+childbirth/M
+childbirths
+childcare/S
+childes
+child/GMYD
+childhood/MS
+childishness/SM
+childish/YP
+childlessness/SM
+childless/P
+childlikeness/M
+childlike/P
+childminders
+childproof/GSD
+childrearing
+children/M
+Chilean/S
+Chile/MS
+chile's
+chilies
+chili/M
+chiller/M
+chilliness/MS
+chilling/Y
+chilli's
+chill/MRDJGTZPS
+chillness/MS
+chilly/TPRS
+Chilton/M
+Chi/M
+chimaera's
+chimaerical
+Chimborazo/M
+chime/DSRGMZ
+Chimera/S
+chimera/SM
+chimeric
+chimerical
+chimer/M
+Chimiques
+chimney/SMD
+chimpanzee/SM
+chimp/MS
+chi/MS
+Chimu/M
+Ch'in
+China/M
+Chinaman/M
+Chinamen
+china/MS
+Chinatown/SM
+chinchilla/SM
+chine/MS
+Chinese/M
+Ching/M
+chink/DMSG
+chinless
+Chin/M
+chinned
+chinner/S
+chinning
+chino/MS
+Chinook/MS
+chin/SGDM
+chinstrap/S
+chintz/SM
+chintzy/TR
+chipboard/M
+Chipewyan/M
+Chip/M
+chipmunk/SM
+chipped
+Chippendale/M
+chipper/DGS
+Chippewa/MS
+chipping/MS
+chip/SM
+Chiquia/M
+Chiquita/M
+chiral
+Chirico/M
+chirography/SM
+chiropodist/SM
+chiropody/MS
+chiropractic/MS
+chiropractor/SM
+chirp/GDS
+chirpy/RT
+chirrup/DGS
+chiseler/M
+chisel/ZGSJMDR
+Chisholm/M
+Chisinau/M
+chitchat/SM
+chitchatted
+chitchatting
+chitinous
+chitin/SM
+chit/SM
+Chittagong/M
+chitterlings
+chivalric
+chivalrously/U
+chivalrousness/MS
+chivalrous/YP
+chivalry/SM
+chive/GMDS
+chivvy/D
+chivying
+chlamydiae
+chlamydia/S
+Chloe/M
+Chloette/M
+Chlo/M
+chloral/MS
+chlorate/M
+chlordane/MS
+chloride/MS
+chlorinated/C
+chlorinates/C
+chlorinate/XDSGN
+chlorination/M
+chlorine/MS
+Chloris
+chlorofluorocarbon/S
+chloroform/DMSG
+chlorophyll/SM
+chloroplast/MS
+chloroquine/M
+chm
+Ch/MGNRS
+chockablock
+chock/SGRDM
+chocoholic/S
+chocolate/MS
+chocolaty
+Choctaw/MS
+choiceness/M
+choice/RSMTYP
+choirboy/MS
+choirmaster/SM
+choir/SDMG
+chokeberry/M
+chokecherry/SM
+choke/DSRGZ
+choker/M
+chokes/M
+choking/Y
+cholera/SM
+choleric
+choler/SM
+cholesterol/SM
+choline/M
+cholinesterase/M
+chomp/DSG
+Chomsky/M
+Chongqing
+choose/GZRS
+chooser/M
+choosiness/S
+choosy/RPT
+chophouse/SM
+Chopin/M
+chopped
+chopper/SDMG
+choppily
+choppiness/MS
+chopping
+choppy/RPT
+chop/S
+chopstick/SM
+chorale/MS
+choral/SY
+chordal
+chordata
+chordate/MS
+chording/M
+chord/SGMD
+chorea/MS
+chore/DSGNM
+choreographer/M
+choreographic
+choreographically
+choreographs
+choreography/MS
+choreograph/ZGDR
+chorines
+chorion/M
+chorister/SM
+choroid/S
+chortler/M
+chortle/ZGDRS
+chorus/GDSM
+chosen/U
+chose/S
+Chou/M
+chowder/SGDM
+chow/DGMS
+Chretien/M
+Chris/M
+chrism/SM
+chrissake
+Chrisse/M
+Chrissie/M
+Chrissy/M
+Christabella/M
+Christabel/M
+Christalle/M
+Christal/M
+Christa/M
+Christan/M
+Christchurch/M
+Christean/M
+Christel/M
+Christendom/MS
+christened/U
+christening/SM
+Christen/M
+christen/SAGD
+Christensen/M
+Christenson/M
+Christiana/M
+Christiane/M
+Christianity/SM
+Christianize/GSD
+Christian/MS
+Christiano/M
+Christiansen/M
+Christians/N
+Christie/SM
+Christi/M
+Christina/M
+Christine/M
+Christin/M
+Christlike
+Christmas/SM
+Christmastide/SM
+Christmastime/S
+Christoffel/M
+Christoffer/M
+Christoforo/M
+Christoper/M
+Christophe/M
+Christopher/M
+Christoph/MR
+Christophorus/M
+Christos/M
+Christ/SMN
+Christye/M
+Christyna/M
+Christy's
+Chrisy/M
+chroma/M
+chromate/M
+chromatically
+chromaticism/M
+chromaticness/M
+chromatic/PS
+chromatics/M
+chromatin/MS
+chromatogram/MS
+chromatograph
+chromatographic
+chromatography/M
+chrome/GMSD
+chromic
+chromite/M
+chromium/SM
+chromosomal
+chromosome/MS
+chromosphere/M
+chronically
+chronicled/U
+chronicler/M
+chronicle/SRDMZG
+chronic/S
+chronograph/M
+chronographs
+chronography
+chronological/Y
+chronologist/MS
+chronology/MS
+chronometer/MS
+chronometric
+Chrotoem/M
+chrysalids
+chrysalis/SM
+Chrysa/M
+chrysanthemum/MS
+Chrysler/M
+Chrysostom/M
+Chrystal/M
+Chrystel/M
+Chryste/M
+chubbiness/SM
+chubby/RTP
+chub/MS
+Chucho/M
+chuck/GSDM
+chuckhole/SM
+chuckle/DSG
+chuckling/Y
+Chuck/M
+chuff/DM
+chugged
+chugging
+chug/MS
+Chukchi/M
+chukka/S
+Chumash/M
+chummed
+chummily
+chumminess/MS
+chumming
+chum/MS
+chummy/SRTP
+chumping/M
+chump/MDGS
+Chungking's
+Chung/M
+chunkiness/MS
+chunk/SGDM
+chunky/RPT
+chuntering
+churchgoer/SM
+churchgoing/SM
+Churchillian
+Churchill/M
+churchliness/M
+churchly/P
+churchman/M
+church/MDSYG
+churchmen
+Church/MS
+churchwarden/SM
+churchwoman/M
+churchwomen
+churchyard/SM
+churlishness/SM
+churlish/YP
+churl/SM
+churner/M
+churning/M
+churn/SGZRDM
+chute/DSGM
+chutney/MS
+chutzpah/M
+chutzpahs
+chutzpa/SM
+Chuvash/M
+ch/VT
+chyme/SM
+Ci
+CIA
+ciao/S
+cicada/MS
+cicatrice/S
+cicatrix's
+Cicely/M
+Cicero/M
+cicerone/MS
+ciceroni
+Ciceronian
+Cicily/M
+CID
+cider's/C
+cider/SM
+Cid/M
+Ciel/M
+cigarette/MS
+cigarillo/MS
+cigar/SM
+cilantro/S
+cilia/M
+ciliate/FDS
+ciliately
+cilium/M
+Cilka/M
+cinch/MSDG
+cinchona/SM
+Cincinnati/M
+cincture/MGSD
+Cinda/M
+Cindee/M
+Cindelyn/M
+cinder/DMGS
+Cinderella/MS
+Cindie/M
+Cindi/M
+Cindra/M
+Cindy/M
+cine/M
+cinema/SM
+cinematic
+cinematographer/MS
+cinematographic
+cinematography/MS
+Cinerama/M
+cinnabar/MS
+Cinnamon/M
+cinnamon/MS
+ciphered/C
+cipher/MSGD
+ciphers/C
+cir
+circa
+circadian
+Circe/M
+circler/M
+circle/RSDGM
+circlet/MS
+circuital
+circuit/GSMD
+circuitousness/MS
+circuitous/YP
+circuitry/SM
+circuity/MS
+circulant
+circularity/SM
+circularize/GSD
+circularness/M
+circular/PSMY
+circulate/ASDNG
+circulation/MA
+circulations
+circulative
+circulatory
+circumcise/DRSXNG
+circumcised/U
+circumciser/M
+circumcision/M
+circumference/SM
+circumferential/Y
+circumflex/MSDG
+circumlocution/MS
+circumlocutory
+circumnavigate/DSNGX
+circumnavigational
+circumnavigation/M
+circumpolar
+circumscribe/GSD
+circumscription/SM
+circumspection/SM
+circumspect/Y
+circumsphere
+circumstance/SDMG
+circumstantial/YS
+circumvention/MS
+circumvent/SBGD
+circus/SM
+Cirillo/M
+Cirilo/M
+Ciro/M
+cirque/SM
+cirrhoses
+cirrhosis/M
+cirrhotic/S
+cirri/M
+cirrus/M
+Cissiee/M
+Cissy/M
+cistern/SM
+citadel/SM
+citations/I
+citation/SMA
+cit/DSG
+cite/ISDAG
+Citibank/M
+citified
+citizenry/SM
+citizenship/MS
+citizen/SYM
+citrate/DM
+citric
+Citroen/M
+citronella/MS
+citron/MS
+citrus/SM
+city/DSM
+cityscape/MS
+citywide
+civet/SM
+civic/S
+civics/M
+civilian/SM
+civility/IMS
+civilizational/MS
+civilization/AMS
+civilizedness/M
+civilized/PU
+civilize/DRSZG
+civilizer/M
+civilizes/AU
+civil/UY
+civvies
+ck/C
+clack/SDG
+cladding/SM
+clads
+clad/U
+Claiborne/M
+Claiborn/M
+claimable
+claimant/MS
+claim/CDRSKAEGZ
+claimed/U
+claimer/KMACE
+Claire/M
+Clair/M
+Clairol/M
+clairvoyance/MS
+clairvoyant/YS
+clambake/MS
+clamberer/M
+clamber/SDRZG
+clammed
+clammily
+clamminess/MS
+clamming
+clam/MS
+clammy/TPR
+clamorer/M
+clamor/GDRMSZ
+clamorousness/UM
+clamorous/PUY
+clampdown/SM
+clamper/M
+clamp/MRDGS
+clamshell/MS
+Clancy/M
+clandestineness/M
+clandestine/YP
+clanger/M
+clangor/MDSG
+clangorous/Y
+clang/SGZRD
+clanking/Y
+clank/SGDM
+clan/MS
+clannishness/SM
+clannish/PY
+clansman/M
+clansmen
+clapboard/SDGM
+Clapeyron/M
+clapped
+clapper/GMDS
+clapping
+clap/S
+Clapton/M
+claptrap/SM
+claque/MS
+Clarabelle/M
+Clara/M
+Clarance/M
+Clare/M
+Claremont/M
+Clarence/M
+Clarendon/M
+Claresta/M
+Clareta/M
+claret/MDGS
+Claretta/M
+Clarette/M
+Clarey/M
+Claribel/M
+Clarice/M
+Clarie/M
+clarification/M
+clarifier/M
+clarify/NGXDRS
+Clari/M
+Clarinda/M
+Clarine/M
+clarinetist/SM
+clarinet/SM
+clarinettist's
+clarion/GSMD
+Clarissa/M
+Clarisse/M
+Clarita/M
+clarities
+clarity/UM
+Clarke/M
+Clark/M
+Clarridge/M
+Clary/M
+clasher/M
+clash/RSDG
+clasped/M
+clasper/M
+clasp's
+clasp/UGSD
+classer/M
+class/GRSDM
+classical/Y
+classicism/SM
+classicist/SM
+classic/S
+classics/M
+classifiable/U
+classification/AMC
+classificatory
+classified/S
+classifier/SM
+classify/CNXASDG
+classiness/SM
+classless/P
+classmate/MS
+classroom/MS
+classwork/M
+classy/PRT
+clatterer/M
+clattering/Y
+clatter/SGDR
+clattery
+Claudelle/M
+Claudell/M
+Claude/M
+Claudetta/M
+Claudette/M
+Claudia/M
+Claudian/M
+Claudianus/M
+Claudie/M
+Claudina/M
+Claudine/M
+Claudio/M
+Claudius/M
+clausal
+clause/MS
+Clausen/M
+Clausewitz/M
+Clausius/M
+Claus/NM
+claustrophobia/SM
+claustrophobic
+clave/RM
+clave's/F
+clavichord/SM
+clavicle/MS
+clavier/MS
+clawer/M
+claw/GDRMS
+Clayborne/M
+Clayborn/M
+Claybourne/M
+clayey
+clayier
+clayiest
+Clay/M
+clay/MDGS
+claymore/MS
+Clayson/M
+Clayton/M
+Clea/M
+cleanable
+cleaner/MS
+cleaning/SM
+cleanliness/UMS
+cleanly/PRTU
+cleanness/MSU
+cleanse
+cleanser/M
+cleans/GDRSZ
+cleanup/MS
+clean/UYRDPT
+clearance/MS
+clearcut
+clearer/M
+clearheadedness/M
+clearheaded/PY
+clearinghouse/S
+clearing/MS
+clearly
+clearness/MS
+clears
+clear/UTRD
+Clearwater/M
+clearway/M
+cleat/MDSG
+cleavage/MS
+cleaver/M
+cleave/RSDGZ
+Cleavland/M
+clef/SM
+cleft/MDGS
+clematis/MS
+clemence
+Clemenceau/M
+Clemence/M
+clemency/ISM
+Clemente/M
+Clementia/M
+Clementina/M
+Clementine/M
+Clementius/M
+clement/IY
+Clement/MS
+clements
+Clemmie/M
+Clemmy/M
+Clemons
+Clemson/M
+Clem/XM
+clenches
+clenching
+clench/UD
+Cleo/M
+Cleon/M
+Cleopatra/M
+Clerc/M
+clerestory/MS
+clergyman/M
+clergymen
+clergy/MS
+clergywoman
+clergywomen
+clericalism/SM
+clerical/YS
+cleric/SM
+Clerissa/M
+clerk/SGYDM
+clerkship/MS
+Cletis
+Cletus/M
+Cleveland/M
+Cleve/M
+cleverness/SM
+clever/RYPT
+Clevey/M
+Clevie/M
+clevis/SM
+clew/DMGS
+cl/GJ
+Cliburn/M
+clichéd
+cliché/SM
+clicker/M
+click/GZSRDM
+clientèle/SM
+client/SM
+cliffhanger/MS
+cliffhanging
+Cliff/M
+Clifford/M
+cliff/SM
+Clifton/M
+climacteric/SM
+climactic
+climate/MS
+climatic
+climatically
+climatological/Y
+climatologist/SM
+climatology/MS
+climax/MDSG
+climbable/U
+climb/BGZSJRD
+climbdown
+climbed/U
+climber/M
+clime/SM
+Clim/M
+clinch/DRSZG
+clincher/M
+clinching/Y
+Cline/M
+clinger/MS
+clinging
+cling/U
+clingy/TR
+clinical/Y
+clinician/MS
+clinic/MS
+clinker/GMD
+clink/RDGSZ
+clinometer/MIS
+Clint/M
+Clinton/M
+Clio/M
+cliometrician/S
+cliometric/S
+clipboard/SM
+clipped/U
+clipper/MS
+clipping/SM
+clip/SM
+clique/SDGM
+cliquey
+cliquier
+cliquiest
+cliquishness/SM
+cliquish/YP
+clitoral
+clitorides
+clitoris/MS
+Clive/M
+cloacae
+cloaca/M
+cloakroom/MS
+cloak's
+cloak/USDG
+clobber/DGS
+cloche/MS
+clocker/M
+clockmaker/M
+clock/SGZRDMJ
+clockwatcher
+clockwise
+clockwork/MS
+clodded
+clodding
+cloddishness/M
+cloddish/P
+clodhopper/SM
+clod/MS
+Cloe/M
+clogged/U
+clogging/U
+clog's
+clog/US
+cloisonné
+cloisonnes
+cloister/MDGS
+cloistral
+Clo/M
+clomp/MDSG
+clonal
+clone/DSRGMZ
+clonk/SGD
+clopped
+clopping
+clop/S
+Cloris/M
+closed/U
+close/EDSRG
+closefisted
+closely
+closemouthed
+closeness/MS
+closeout/MS
+closer/EM
+closers
+closest
+closet/MDSG
+closeup/S
+closing/S
+closured
+closure/EMS
+closure's/I
+closuring
+clothbound
+clothesbrush
+clotheshorse/MS
+clothesline/SDGM
+clothesman
+clothesmen
+clothespin/MS
+clothe/UDSG
+cloth/GJMSD
+clothier/MS
+clothing/M
+Clotho/M
+cloths
+Clotilda/M
+clot/MS
+clotted
+clotting
+cloture/MDSG
+cloudburst/MS
+clouded/U
+cloudiness/SM
+cloudlessness/M
+cloudless/YP
+cloudscape/SM
+cloud/SGMD
+cloudy/TPR
+clout/GSMD
+cloven
+cloverleaf/MS
+clover/M
+clove/SRMZ
+Clovis/M
+clown/DMSG
+clownishness/SM
+clownish/PY
+cloy/DSG
+cloying/Y
+clubbed/M
+clubbing/M
+clubfeet
+clubfoot/DM
+clubhouse/SM
+club/MS
+clubroom/SM
+cluck/GSDM
+clueless
+clue/MGDS
+Cluj/M
+clump/MDGS
+clumpy/RT
+clumsily
+clumsiness/MS
+clumsy/PRT
+clung
+clunk/SGZRDM
+clunky/PRYT
+clustered/AU
+clusters/A
+cluster/SGJMD
+clutch/DSG
+cluttered/U
+clutter/GSD
+Cl/VM
+Clyde/M
+Clydesdale/M
+Cly/M
+Clytemnestra/M
+Clyve/M
+Clywd/M
+cm
+Cm/M
+CMOS
+cnidarian/MS
+CNN
+CNS
+CO
+coacher/M
+coachman/M
+coachmen
+coach/MSRDG
+coachwork/M
+coadjutor/MS
+coagulable
+coagulant/SM
+coagulate/GNXSD
+coagulation/M
+coagulator/S
+coaler/M
+coalesce/GDS
+coalescence/SM
+coalescent
+coalface/SM
+coalfield/MS
+coalitionist/SM
+coalition/MS
+coal/MDRGS
+coalminers
+coarseness/SM
+coarsen/SGD
+coarse/TYRP
+coastal
+coaster/M
+coastguard/MS
+coastline/SM
+coast/SMRDGZ
+coated/U
+Coates/M
+coating/M
+coat/MDRGZJS
+coattail/S
+coattest
+coauthor/MDGS
+coaxer/M
+coax/GZDSR
+coaxial/Y
+coaxing/Y
+Cobain/M
+cobalt/MS
+cobbed
+Cobbie/M
+cobbing
+cobbler/M
+cobble/SRDGMZ
+cobblestone/MSD
+Cobb/M
+Cobby/M
+coble/M
+Cob/M
+COBOL
+Cobol/M
+cobra/MS
+cob/SM
+cobwebbed
+cobwebbing
+cobwebby/RT
+cobweb/SM
+cocaine/MS
+coca/MS
+cocci/MS
+coccus/M
+coccyges
+coccyx/M
+Cochabamba/M
+cochineal/SM
+Cochin/M
+Cochise/M
+cochleae
+cochlear
+cochlea/SM
+Cochran/M
+cockade/SM
+cockamamie
+cockatoo/SM
+cockatrice/MS
+cockcrow/MS
+cockerel/MS
+cocker/M
+cockeye/DM
+cockeyed/PY
+cockfighting/M
+cockfight/MJSG
+cock/GDRMS
+cockily
+cockiness/MS
+cocklebur/M
+cockle/SDGM
+cockleshell/SM
+Cockney
+cockney/MS
+cockpit/MS
+cockroach/SM
+cockscomb/SM
+cockshies
+cocksucker/S!
+cocksure
+cocktail/GDMS
+cocky/RPT
+cocoa/SM
+coco/MS
+coconut/SM
+cocoon/GDMS
+Cocteau/M
+COD
+coda/SM
+codded
+codding
+coddle/GSRD
+coddler/M
+codebook/S
+codebreak/R
+coded/UA
+Codee/M
+codeine/MS
+codename/D
+codependency/S
+codependent/S
+coder/CM
+code's
+co/DES
+codes/A
+code/SCZGJRD
+codetermine/S
+codeword/SM
+codex/M
+codfish/SM
+codger/MS
+codices/M
+codicil/SM
+Codie/M
+codification/M
+codifier/M
+codify/NZXGRSD
+Codi/M
+coding/M
+codling/M
+Cod/M
+cod/MDRSZGJ
+codpiece/MS
+Cody/M
+coedited
+coediting
+coeditor/MS
+coedits
+coed/SM
+coeducational
+coeducation/SM
+coefficient/SYM
+coelenterate/MS
+coequal/SY
+coercer/M
+coerce/SRDXVGNZ
+coercible/I
+coercion/M
+coerciveness/M
+coercive/PY
+coeval/YS
+coexistence/MS
+coexistent
+coexist/GDS
+coextensive/Y
+cofactor/MS
+coffeecake/SM
+coffeecup
+coffeehouse/SM
+coffeemaker/S
+coffeepot/MS
+coffee/SM
+cofferdam/SM
+coffer/DMSG
+Coffey/M
+coffin/DMGS
+Coffman/M
+cogency/MS
+cogent/Y
+cogged
+cogging
+cogitate/DSXNGV
+cogitation/M
+cogitator/MS
+cog/MS
+Cognac/M
+cognac/SM
+cognate/SXYN
+cognation/M
+cognitional
+cognition/SAM
+cognitive/SY
+cognizable
+cognizance/MAI
+cognizances/A
+cognizant/I
+cognomen/SM
+cognoscente
+cognoscenti
+cogwheel/SM
+cohabitant/MS
+cohabitational
+cohabitation/SM
+cohabit/SDG
+Cohan/M
+coheir/MS
+Cohen/M
+cohere/GSRD
+coherence/SIM
+coherencies
+coherency/I
+coherent/IY
+coherer/M
+cohesion/MS
+cohesiveness/SM
+cohesive/PY
+Cohn/M
+cohoes
+coho/MS
+cohort/SM
+coiffed
+coiffing
+coiffure/MGSD
+coif/SM
+coil/UGSAD
+Coimbatore/M
+coinage's/A
+coinage/SM
+coincide/GSD
+coincidence/MS
+coincidental/Y
+coincident/Y
+coined/U
+coiner/M
+coin/GZSDRM
+coinsurance/SM
+Cointon/M
+cointreau
+coital/Y
+coitus/SM
+coke/MGDS
+Coke/MS
+COL
+COLA
+colander/SM
+Colan/M
+Colas
+cola/SM
+colatitude/MS
+Colbert/M
+Colby/M
+coldblooded
+coldish
+coldness/MS
+cold/YRPST
+Coleen/M
+Cole/M
+Coleman/M
+Colene/M
+Coleridge/M
+coleslaw/SM
+Colet/M
+Coletta/M
+Colette/M
+coleus/SM
+Colfax/M
+Colgate/M
+colicky
+colic/SM
+coliform
+Colin/M
+coliseum/SM
+colitis/MS
+collaborate/VGNXSD
+collaboration/M
+collaborative/SY
+collaborator/SM
+collage/MGSD
+collagen/M
+collapse/SDG
+collapsibility/M
+collapsible
+collarbone/MS
+collar/DMGS
+collard/SM
+collarless
+collated/U
+collateral/SYM
+collate/SDVNGX
+collation/M
+collator/MS
+colleague/SDGM
+collectedness/M
+collected/PY
+collectible/S
+collection/AMS
+collective/SY
+collectivism/SM
+collectivist/MS
+collectivity/MS
+collectivization/MS
+collectivize/DSG
+collector/MS
+collect/SAGD
+Colleen/M
+colleen/SM
+college/SM
+collegiality/S
+collegian/SM
+collegiate/Y
+Collen/M
+Collete/M
+Collette/M
+coll/G
+collide/SDG
+Collie/M
+collie/MZSRD
+collier/M
+Collier/M
+colliery/MS
+collimate/C
+collimated/U
+collimates
+collimating
+collimation/M
+collimator/M
+collinear
+collinearity/M
+Colline/M
+Collin/MS
+collisional
+collision/SM
+collocate/XSDGN
+collocation/M
+colloidal/Y
+colloid/MS
+colloq
+colloquialism/MS
+colloquial/SY
+colloquies
+colloquium/SM
+colloquy/M
+collude/SDG
+collusion/SM
+collusive
+collying
+Colly/RM
+Colman/M
+Col/MY
+Cologne/M
+cologne/MSD
+Colo/M
+Colombia/M
+Colombian/S
+Colombo/M
+colonelcy/MS
+colonel/MS
+colonialism/MS
+colonialist/MS
+colonial/SPY
+colonist/SM
+colonization/ACSM
+colonize/ACSDG
+colonized/U
+colonizer/MS
+colonizes/U
+Colon/M
+colonnade/MSD
+colon/SM
+colony/SM
+colophon/SM
+Coloradan/S
+Coloradoan/S
+Colorado/M
+colorant/SM
+coloration/EMS
+coloratura/SM
+colorblindness/S
+colorblind/P
+colored/USE
+colorer/M
+colorfastness/SM
+colorfast/P
+colorfulness/MS
+colorful/PY
+colorimeter/SM
+colorimetry
+coloring/M
+colorization/S
+colorize/GSD
+colorizing/C
+colorlessness/SM
+colorless/PY
+colors/EA
+color/SRDMGZJ
+colossal/Y
+Colosseum/M
+colossi
+colossus/M
+colostomy/SM
+colostrum/SM
+col/SD
+colter/M
+coltishness/M
+coltish/PY
+Colt/M
+colt/MRS
+Coltrane/M
+Columbia/M
+Columbian
+Columbine/M
+columbine/SM
+Columbus/M
+columnar
+columnist/MS
+columnize/GSD
+column/SDM
+Colver/M
+Co/M
+comae
+comaker/SM
+Comanche/MS
+coma/SM
+comatose
+combatant/SM
+combativeness/MS
+combative/PY
+combat/SVGMD
+combed/U
+comber/M
+combinational/A
+combination/ASM
+combinatorial/Y
+combinatoric/S
+combinator/SM
+combined/AU
+combiner/M
+combines/A
+combine/ZGBRSD
+combining/A
+combo/MS
+comb/SGZDRMJ
+Combs/M
+combusted
+combustibility/SM
+combustible/SI
+combustion/MS
+combustive
+Comdex/M
+Comdr/M
+comeback/SM
+comedian/SM
+comedic
+comedienne/SM
+comedown/MS
+comedy/SM
+come/IZSRGJ
+comeliness/SM
+comely/TPR
+comer/IM
+comes/M
+comestible/MS
+cometary
+cometh
+comet/SM
+comeuppance/SM
+comfit's
+comfit/SE
+comfortability/S
+comfortableness/MS
+comfortable/U
+comfortably/U
+comforted/U
+comforter/MS
+comfort/ESMDG
+comforting/YE
+comfy/RT
+comicality/MS
+comical/Y
+comic/MS
+Cominform/M
+comity/SM
+com/LJRTZG
+comm
+Com/M
+comma/MS
+commandant/MS
+commandeer/SDG
+commander/M
+commanding/Y
+commandment/SM
+commando/SM
+command/SZRDMGL
+commemorate/SDVNGX
+commemoration/M
+commemorative/YS
+commemorator/S
+commence/ALDSG
+commencement/AMS
+commencer/M
+commendably
+commendation/ASM
+commendatory/A
+commender/AM
+commend/GSADRB
+commensurable/I
+commensurate/IY
+commensurates
+commensuration/SM
+commentary/MS
+commentate/GSD
+commentator/SM
+commenter/M
+comment's
+comment/SUGD
+commerce/MGSD
+commercialism/MS
+commercialization/SM
+commercialize/GSD
+commercial/PYS
+Commie
+commie/SM
+commingle/GSD
+commiserate/VGNXSD
+commiseration/M
+commissariat/MS
+commissar/MS
+commissary/MS
+commission/ASCGD
+commissioner/SM
+commission's/A
+commitment/SM
+commit/SA
+committable
+committal/MA
+committals
+committed/UA
+committeeman/M
+committeemen
+committee/MS
+committeewoman/M
+committeewomen
+committing/A
+commode/MS
+commodes/IE
+commodiousness/MI
+commodious/YIP
+commodity/MS
+commodore/SM
+commonality/MS
+commonalty/MS
+commoner/MS
+commonness/MSU
+commonplaceness/M
+commonplace/SP
+common/RYUPT
+commonsense
+commons/M
+Commons/M
+commonweal/SHM
+commonwealth/M
+Commonwealth/M
+commonwealths
+Commonwealths
+commotion/MS
+communality/M
+communal/Y
+commune/XSDNG
+communicability/MS
+communicable/IU
+communicably
+communicant/MS
+communicate/VNGXSD
+communicational
+communication/M
+communicativeness/M
+communicative/PY
+communicator/SM
+communion/M
+Communion/SM
+communique/S
+communism/MS
+Communism/S
+communistic
+communist/MS
+Communist/S
+communitarian/M
+community/MS
+communize/SDG
+commutable/I
+commutate/XVGNSD
+commutation/M
+commutative/Y
+commutativity
+commutator/MS
+commute/BZGRSD
+commuter/M
+Comoros
+compaction/M
+compactness/MS
+compactor/MS
+compact/TZGSPRDY
+companionableness/M
+companionable/P
+companionably
+companion/GBSMD
+companionship/MS
+companionway/MS
+company/MSDG
+Compaq/M
+comparabilities
+comparability/IM
+comparableness/M
+comparable/P
+comparably/I
+comparativeness/M
+comparative/PYS
+comparator/SM
+compare/GRSDB
+comparer/M
+comparison/MS
+compartmental
+compartmentalization/SM
+compartmentalize/DSG
+compartment/SDMG
+compassionateness/M
+compassionate/PSDGY
+compassion/MS
+compass/MSDG
+compatibility/IMS
+compatibleness/M
+compatible/SI
+compatibly/I
+compatriot/SM
+compeer/DSGM
+compellable
+compelled
+compelling/YM
+compel/S
+compendious
+compendium/MS
+compensable
+compensated/U
+compensate/XVNGSD
+compensation/M
+compensator/M
+compensatory
+compete/GSD
+competence/ISM
+competency/IS
+competency's
+competent/IY
+competition/SM
+competitiveness/SM
+competitive/YP
+competitor/MS
+comp/GSYD
+compilable/U
+compilation/SAM
+compile/ASDCG
+compiler/CS
+compiler's
+complacence/S
+complacency/SM
+complacent/Y
+complainant/MS
+complainer/M
+complain/GZRDS
+complaining/YU
+complaint/MS
+complaisance/SM
+complaisant/Y
+complected
+complementariness/M
+complementarity
+complementary/SP
+complementation/M
+complementer/M
+complement/ZSMRDG
+complete/BTYVNGPRSDX
+completed/U
+completely/I
+completeness/ISM
+completer/M
+completion/MI
+complexional
+complexion/DMS
+complexity/MS
+complexness/M
+complex/TGPRSDY
+compliance/SM
+compliant/Y
+complicatedness/M
+complicated/YP
+complicate/SDG
+complication/M
+complicator/SM
+complicit
+complicity/MS
+complier/M
+complimentary/U
+complimenter/M
+compliment/ZSMRDG
+comply/ZXRSDNG
+component/SM
+comport/GLSD
+comportment/SM
+compose/CGASDE
+composedness/M
+composed/PY
+composer/CM
+composers
+composite/YSDXNG
+compositional/Y
+composition/CMA
+compositions/C
+compositor/MS
+compost/DMGS
+composure/ESM
+compote/MS
+compounded/U
+compounder/M
+compound/RDMBGS
+comprehend/DGS
+comprehending/U
+comprehensibility/SIM
+comprehensibleness/IM
+comprehensible/PI
+comprehensibly/I
+comprehension/IMS
+comprehensiveness/SM
+comprehensive/YPS
+compressed/Y
+compressibility/IM
+compressible/I
+compressional
+compression/CSM
+compressive/Y
+compressor/MS
+compress/SDUGC
+comprise/GSD
+compromiser/M
+compromise/SRDGMZ
+compromising/UY
+Compton/M
+comptroller/SM
+compulsion/SM
+compulsiveness/MS
+compulsive/PYS
+compulsivity
+compulsorily
+compulsory/S
+compunction/MS
+Compuserve/M
+CompuServe/M
+computability/M
+computable/UI
+computably
+computational/Y
+computation/SM
+computed/A
+computerese
+computerization/MS
+computerize/SDG
+computer/M
+compute/RSDZBG
+computes/A
+computing/A
+comradely/P
+comradeship/MS
+comrade/YMS
+Comte/M
+Conakry/M
+Conan/M
+Conant/M
+concatenate/XSDG
+concaveness/MS
+concave/YP
+conceal/BSZGRDL
+concealed/U
+concealer/M
+concealing/Y
+concealment/MS
+conceded/Y
+conceitedness/SM
+conceited/YP
+conceit/SGDM
+conceivable/IU
+conceivably/I
+conceive/BGRSD
+conceiver/M
+concentrate/VNGSDX
+concentration/M
+concentrator/MS
+concentrically
+Concepción/M
+conceptional
+conception/MS
+concept/SVM
+conceptuality/M
+conceptualization/A
+conceptualizations
+conceptualization's
+conceptualize/DRSG
+conceptualizing/A
+conceptual/Y
+concerned/YU
+concern/USGD
+concerted/PY
+concert/EDSG
+concertina/MDGS
+concertize/GDS
+concertmaster/MS
+concerto/SM
+concert's
+concessionaire/SM
+concessional
+concessionary
+concession/R
+Concetta/M
+Concettina/M
+Conchita/M
+conch/MDG
+conchs
+concierge/SM
+conciliar
+conciliate/GNVX
+conciliation/ASM
+conciliator/MS
+conciliatory/A
+conciseness/SM
+concise/TYRNPX
+concision/M
+conclave/S
+concluder/M
+conclude/RSDG
+conclusion/SM
+conclusive/IPY
+conclusiveness/ISM
+concocter/M
+concoction/SM
+concoct/RDVGS
+concomitant/YS
+concordance/MS
+concordant/Y
+concordat/SM
+Concorde/M
+Concordia/M
+Concord/MS
+concourse
+concreteness/MS
+concrete/NGXRSDPYM
+concretion/M
+concubinage/SM
+concubine/SM
+concupiscence/SM
+concupiscent
+concurrence/MS
+concur/S
+concussion/MS
+concuss/VD
+condemnate/XN
+condemnation/M
+condemnatory
+condemner/M
+condemn/ZSGRDB
+condensate/NMXS
+condensation/M
+condenser/M
+condense/ZGSD
+condensible
+condescend
+condescending/Y
+condescension/MS
+condign
+condiment/SM
+condition/AGSJD
+conditionals
+conditional/UY
+conditioned/U
+conditioner/MS
+conditioning/M
+condition's
+condole
+condolence/MS
+condominium/MS
+condom/SM
+condone/GRSD
+condoner/M
+Condorcet/M
+condor/MS
+condo/SM
+conduce/VGSD
+conduciveness/M
+conducive/P
+conductance/SM
+conductibility/SM
+conductible
+conduction/MS
+conductive/Y
+conductivity/MS
+conductor/MS
+conductress/MS
+conduct/V
+conduit/MS
+coneflower/M
+Conestoga
+coney's
+confabbed
+confabbing
+confab/MS
+confabulate/XSDGN
+confabulation/M
+confectioner/M
+confectionery/SM
+confectionist
+confection/RDMGZS
+confect/S
+Confederacy/M
+confederacy/MS
+confederate/M
+Confederate/S
+conferee/MS
+conference/DSGM
+conferrable
+conferral/SM
+conferred
+conferrer/SM
+conferring
+confer/SB
+confessed/Y
+confessional/SY
+confession/MS
+confessor/SM
+confetti/M
+confidante/SM
+confidant/SM
+confidence/SM
+confidentiality/MS
+confidentialness/M
+confidential/PY
+confident/Y
+confider/M
+confide/ZGRSD
+confiding/PY
+configuration/ASM
+configure/AGSDB
+confined/U
+confine/L
+confinement/MS
+confiner/M
+confirm/AGDS
+confirmation/ASM
+confirmatory
+confirmedness/M
+confirmed/YP
+confiscate/DSGNX
+confiscation/M
+confiscator/MS
+confiscatory
+conflagration/MS
+conflate/NGSDX
+conflation/M
+conflicting/Y
+conflict/SVGDM
+confluence/MS
+conformable/U
+conformal
+conformance/SM
+conformational/Y
+conform/B
+conformer/M
+conformism/SM
+conformist/SM
+conformities
+conformity/MUI
+confounded/Y
+confound/R
+confrère/MS
+confrontational
+confrontation/SM
+confronter/M
+confront/Z
+Confucianism/SM
+Confucian/S
+Confucius/M
+confusedness/M
+confused/PY
+confuse/RBZ
+confusing/Y
+confutation/MS
+confute/GRSD
+confuter/M
+conga/MDG
+congeal/GSDL
+congealment/MS
+congeniality/UM
+congenial/U
+congeries/M
+conger/SM
+congestion/MS
+congest/VGSD
+conglomerate/XDSNGVM
+conglomeration/M
+Cong/M
+Congolese
+Congo/M
+congrats
+congratulate/NGXSD
+congratulation/M
+congratulatory
+congregate/DSXGN
+congregational
+Congregational
+congregationalism/MS
+congregationalist/MS
+Congregationalist/S
+congregation/M
+congressional/Y
+congressman/M
+congressmen
+Congress/MS
+congress/MSDG
+congresspeople
+congressperson/S
+congresswoman/M
+congresswomen
+Congreve/M
+congruence/IM
+congruences
+congruency/M
+congruential
+congruent/YI
+congruity/MSI
+congruousness/IM
+congruous/YIP
+conicalness/M
+conical/PSY
+conic/S
+conics/M
+conifer/MS
+coniferous
+conjectural/Y
+conjecture/GMDRS
+conjecturer/M
+conjoint
+conjugacy
+conjugal/Y
+conjugate/XVNGYSDP
+conjugation/M
+conjunct/DSV
+conjunctiva/MS
+conjunctive/YS
+conjunctivitis/SM
+conjuration/MS
+conjurer/M
+conjure/RSDZG
+conjuring/M
+conker/M
+conk/ZDR
+Conley/M
+Con/M
+conman
+connect/ADGES
+connectedly/E
+connectedness/ME
+connected/U
+connectible
+Connecticut/M
+connection/AME
+connectionless
+connections/E
+connective/SYM
+connectivity/MS
+connector/MS
+Connelly/M
+Conner/M
+Connery/M
+connexion/MS
+Conney/M
+conn/GVDR
+Connie/M
+Conni/M
+conniption/MS
+connivance/MS
+conniver/M
+connive/ZGRSD
+connoisseur/MS
+Connor/SM
+connotative/Y
+Conn/RM
+connubial/Y
+Conny/M
+conquerable/U
+conquered/AU
+conqueror/MS
+conquer/RDSBZG
+conquers/A
+conquest/ASM
+conquistador/MS
+Conrade/M
+Conrad/M
+Conrado/M
+Conrail/M
+Conroy/M
+Consalve/M
+consanguineous/Y
+consanguinity/SM
+conscienceless
+conscientiousness/MS
+conscientious/YP
+conscionable/U
+consciousness/MUS
+conscious/UYSP
+conscription/SM
+consecrated/AU
+consecrates/A
+consecrate/XDSNGV
+consecrating/A
+consecration/AMS
+consecutiveness/M
+consecutive/YP
+consensus/SM
+consenter/M
+consenting/Y
+consent/SZGRD
+consequence
+consequentiality/S
+consequential/IY
+consequentialness/M
+consequently/I
+consequent/PSY
+conservancy/SM
+conservationism
+conservationist/SM
+conservation/SM
+conservatism/SM
+conservativeness/M
+Conservative/S
+conservative/SYP
+conservator/MS
+conservatory/MS
+con/SGM
+considerable/I
+considerables
+considerably/I
+considerateness/MSI
+considerate/XIPNY
+consideration/ASMI
+considered/U
+considerer/M
+consider/GASD
+considering/S
+consign/ASGD
+consignee/SM
+consignment/SM
+consist/DSG
+consistence/S
+consistency/IMS
+consistent/IY
+consistory/MS
+consolable/I
+Consolata/M
+consolation/MS
+consolation's/E
+consolatory
+consoled/U
+consoler/M
+console/ZBG
+consolidated/AU
+consolidate/NGDSX
+consolidates/A
+consolidation/M
+consolidator/SM
+consoling/Y
+consommé/S
+consonance/IM
+consonances
+consonantal
+consonant/MYS
+consortia
+consortium/M
+conspectus/MS
+conspicuousness/IMS
+conspicuous/YIP
+conspiracy/MS
+conspiratorial/Y
+conspirator/SM
+constable
+Constable/M
+constabulary/MS
+constance
+Constance/M
+Constancia/M
+constancy/IMS
+Constancy/M
+Constanta/M
+Constantia/M
+Constantina/M
+Constantine/M
+Constantin/M
+Constantino/M
+Constantinople/M
+constant/IY
+constants
+constellation/SM
+consternate/XNGSD
+consternation/M
+constipate/XDSNG
+constipation/M
+constituency/MS
+constituent/SYM
+constituted/A
+constitute/NGVXDS
+constitutes/A
+constituting/A
+Constitution
+constitutionality's
+constitutionality/US
+constitutionally/U
+constitutional/SY
+constitution/AMS
+constitutive/Y
+constrain
+constrainedly
+constrained/U
+constraint/MS
+constriction/MS
+constrictor/MS
+constrict/SDGV
+construable
+construct/ASDGV
+constructibility
+constructible/A
+constructional/Y
+constructionist/MS
+construction/MAS
+constructions/C
+constructiveness/SM
+constructive/YP
+constructor/MS
+construe/GSD
+Consuela/M
+Consuelo/M
+consular/S
+consulate/MS
+consul/KMS
+consulship/MS
+consultancy/S
+consultant/MS
+consultation/SM
+consultative
+consulted/A
+consulter/M
+consult/RDVGS
+consumable/S
+consumed/Y
+consume/JZGSDB
+consumerism/MS
+consumerist/S
+consumer/M
+consuming/Y
+consummate/DSGVY
+consummated/U
+consumption/SM
+consumptive/YS
+cont
+contact/BGD
+contacted/A
+contact's/A
+contacts/A
+contagion/SM
+contagiousness/MS
+contagious/YP
+containerization/SM
+containerize/GSD
+container/M
+containment/SM
+contain/SLZGBRD
+contaminant/SM
+contaminated/AU
+contaminates/A
+contaminate/SDCXNG
+contaminating/A
+contamination/CM
+contaminative
+contaminator/MS
+contd
+cont'd
+contemn/SGD
+contemplate/DVNGX
+contemplation/M
+contemplativeness/M
+contemplative/PSY
+contemporaneity/MS
+contemporaneousness/M
+contemporaneous/PY
+contemptibleness/M
+contemptible/P
+contemptibly
+contempt/M
+contemptuousness/SM
+contemptuous/PY
+contentedly/E
+contentedness/SM
+contented/YP
+content/EMDLSG
+contention/MS
+contentiousness/SM
+contentious/PY
+contently
+contentment/ES
+contentment's
+conterminous/Y
+contestable/I
+contestant/SM
+contested/U
+contextualize/GDS
+contiguity/MS
+contiguousness/M
+contiguous/YP
+continence/ISM
+Continental/S
+continental/SY
+continent/IY
+Continent/M
+continents
+continent's
+contingency/SM
+contingent/SMY
+continua
+continuable
+continual/Y
+continuance/ESM
+continuant/M
+continuation/ESM
+continue/ESDG
+continuer/M
+continuity/SEM
+continuousness/M
+continuous/YE
+continuum/M
+contortionist/SM
+contortion/MS
+contort/VGD
+contour
+contraband/SM
+contrabass/M
+contraception/SM
+contraceptive/S
+contract/DG
+contractible
+contractile
+contractual/Y
+contradict/GDS
+contradiction/MS
+contradictorily
+contradictoriness/M
+contradictory/PS
+contradistinction/MS
+contraflow/S
+contrail/M
+contraindicate/SDVNGX
+contraindication/M
+contralto/SM
+contrapositive/S
+contraption/MS
+contrapuntal/Y
+contrariety/MS
+contrarily
+contrariness/MS
+contrariwise
+contrary/PS
+contra/S
+contrasting/Y
+contrastive/Y
+contrast/SRDVGZ
+contravene/GSRD
+contravener/M
+contravention/MS
+Contreras/M
+contretemps/M
+contribute/XVNZRD
+contribution/M
+contributive/Y
+contributorily
+contributor/SM
+contributory/S
+contriteness/M
+contrite/NXP
+contrition/M
+contrivance/SM
+contriver/M
+contrive/ZGRSD
+control/CS
+controllability/M
+controllable/IU
+controllably/U
+controlled/CU
+controller/SM
+controlling/C
+control's
+controversialists
+controversial/UY
+controversy/MS
+controvert/DGS
+controvertible/I
+contumacious/Y
+contumacy/MS
+contumelious
+contumely/MS
+contuse/NGXSD
+contusion/M
+conundrum/SM
+conurbation/MS
+convalesce/GDS
+convalescence/SM
+convalescent/S
+convect/DSVG
+convectional
+convection/MS
+convector
+convene/ASDG
+convener/MS
+convenience/ISM
+convenient/IY
+conventicle/SM
+conventionalism/M
+conventionalist/M
+conventionality/SUM
+conventionalize/GDS
+conventional/UY
+convention/MA
+conventions
+convergence/MS
+convergent
+conversant/Y
+conversationalist/SM
+conversational/Y
+conversation/SM
+conversazione/M
+converse/Y
+conversion/AM
+conversioning
+converted/U
+converter/MS
+convert/GADS
+convertibility's/I
+convertibility/SM
+convertibleness/M
+convertible/PS
+convexity/MS
+convex/Y
+conveyance/DRSGMZ
+conveyancer/M
+conveyancing/M
+convey/BDGS
+conveyor/MS
+conviction/MS
+convict/SVGD
+convinced/U
+convincer/M
+convince/RSDZG
+convincingness/M
+convincing/PUY
+conviviality/MS
+convivial/Y
+convoke/GSD
+convolute/XDNY
+convolution/M
+convolve/C
+convolved
+convolves
+convolving
+convoy/GMDS
+convulse/SDXVNG
+convulsion/M
+convulsiveness/M
+convulsive/YP
+Conway/M
+cony/SM
+coo/GSD
+cookbook/SM
+cooked/AU
+Cooke/M
+cooker/M
+cookery/MS
+cook/GZDRMJS
+Cookie/M
+cookie/SM
+cooking/M
+Cook/M
+cookout/SM
+cooks/A
+cookware/SM
+cooky's
+coolant/SM
+cooled/U
+cooler/M
+Cooley/M
+coolheaded
+Coolidge/M
+coolie/MS
+coolness/MS
+cool/YDRPJGZTS
+coon/MS!
+coonskin/MS
+cooperage/MS
+cooperate/VNGXSD
+cooperation/M
+cooperativeness/SM
+cooperative/PSY
+cooperator/MS
+cooper/GDM
+Cooper/M
+coop/MDRGZS
+Coop/MR
+coordinated/U
+coordinateness/M
+coordinate/XNGVYPDS
+coordination/M
+coordinator/MS
+Coors/M
+cootie/SM
+coot/MS
+copay/S
+Copeland/M
+Copenhagen/M
+coper/M
+Copernican
+Copernicus/M
+cope/S
+copied/A
+copier/M
+copies/A
+copilot/SM
+coping/M
+copiousness/SM
+copious/YP
+coplanar
+Copland/M
+Copley/M
+copolymer/MS
+copora
+copped
+Copperfield/M
+copperhead/MS
+copper/MSGD
+copperplate/MS
+coppersmith/M
+coppersmiths
+coppery
+coppice's
+copping
+Coppola/M
+copra/MS
+coprolite/M
+coprophagous
+copse/M
+cops/GDS
+cop/SJMDRG
+copter/SM
+Coptic/M
+copula/MS
+copulate/XDSNGV
+copulation/M
+copulative/S
+copybook/MS
+copycat/SM
+copycatted
+copycatting
+copyist/SM
+copy/MZBDSRG
+copyrighter/M
+copyright/MSRDGZ
+copywriter/MS
+coquetry/MS
+coquette/DSMG
+coquettish/Y
+Corabella/M
+Corabelle/M
+Corabel/M
+coracle/SM
+Coralie/M
+Coraline/M
+coralline
+Coral/M
+coral/SM
+Coralyn/M
+Cora/M
+corbel/GMDJS
+Corbet/M
+Corbett/M
+Corbie/M
+Corbin/M
+Corby/M
+cordage/MS
+corded/AE
+Cordelia/M
+Cordelie/M
+Cordell/M
+corder/AM
+Cordey/M
+cord/FSAEM
+cordiality/MS
+cordialness/M
+cordial/PYS
+Cordie/M
+cordillera/MS
+Cordilleras
+Cordi/M
+cording/MA
+cordite/MS
+cordless
+Cord/M
+Cordoba
+cordon/DMSG
+cordovan/SM
+Cordula/M
+corduroy/GDMS
+Cordy/M
+cored/A
+Coreen/M
+Corella/M
+core/MZGDRS
+Corenda/M
+Corene/M
+corer/M
+corespondent/MS
+Coretta/M
+Corette/M
+Corey/M
+Corfu/M
+corgi/MS
+coriander/SM
+Corie/M
+Corilla/M
+Cori/M
+Corina/M
+Corine/M
+coring/M
+Corinna/M
+Corinne/M
+Corinthian/S
+Corinthians/M
+Corinth/M
+Coriolanus/M
+Coriolis/M
+Corissa/M
+Coriss/M
+corked/U
+corker/M
+cork/GZDRMS
+Cork/M
+corkscrew/DMGS
+corks/U
+Corliss/M
+Corly/M
+Cormack/M
+corm/MS
+cormorant/MS
+Cornall/M
+cornball/SM
+cornbread/S
+corncob/SM
+corncrake/M
+corneal
+cornea/SM
+Corneille/M
+Cornela/M
+Cornelia/M
+Cornelius/M
+Cornelle/M
+Cornell/M
+corner/GDM
+cornerstone/MS
+cornet/SM
+Corney/M
+cornfield/SM
+cornflake/S
+cornflour/M
+cornflower/SM
+corn/GZDRMS
+cornice/GSDM
+Cornie/M
+cornily
+corniness/S
+Cornish/S
+cornmeal/S
+cornrow/GDS
+cornstalk/MS
+cornstarch/SM
+cornucopia/MS
+Cornwallis/M
+Cornwall/M
+Corny/M
+corny/RPT
+corolla/MS
+corollary/SM
+Coronado/M
+coronal/MS
+coronary/S
+corona/SM
+coronate/NX
+coronation/M
+coroner/MS
+coronet/DMS
+Corot/M
+coroutine/SM
+Corp
+corporal/SYM
+corpora/MS
+corporate/INVXS
+corporately
+corporation/MI
+corporatism/M
+corporatist
+corporeality/MS
+corporeal/IY
+corporealness/M
+corp/S
+corpse/M
+corpsman/M
+corpsmen
+corps/SM
+corpulence/MS
+corpulentness/S
+corpulent/YP
+corpuscle/SM
+corpuscular
+corpus/M
+corr
+corralled
+corralling
+corral/MS
+correctable/U
+correct/BPSDRYTGV
+corrected/U
+correctional
+correction/MS
+corrective/YPS
+correctly/I
+correctness/MSI
+corrector/MS
+Correggio/M
+correlated/U
+correlate/SDXVNG
+correlation/M
+correlative/YS
+Correna/M
+correspond/DSG
+correspondence/MS
+correspondent/SM
+corresponding/Y
+Correy/M
+Corrianne/M
+corridor/SM
+Corrie/M
+corrigenda
+corrigendum/M
+corrigible/I
+Corri/M
+Corrina/M
+Corrine/M
+Corrinne/M
+corroborated/U
+corroborate/GNVXDS
+corroboration/M
+corroborative/Y
+corroborator/MS
+corroboratory
+corrode/SDG
+corrodible
+corrosion/SM
+corrosiveness/M
+corrosive/YPS
+corrugate/NGXSD
+corrugation/M
+corrupt/DRYPTSGV
+corrupted/U
+corrupter/M
+corruptibility/SMI
+corruptible/I
+corruption/IM
+corruptions
+corruptive/Y
+corruptness/MS
+Corry/M
+corsage/MS
+corsair/SM
+corset/GMDS
+Corsica/M
+Corsican/S
+cortège/MS
+Cortes/S
+cortex/M
+Cortez's
+cortical/Y
+cortices
+corticosteroid/SM
+Cortie/M
+cortisone/SM
+Cortland/M
+Cort/M
+Cortney/M
+Corty/M
+corundum/MS
+coruscate/XSDGN
+coruscation/M
+Corvallis/M
+corvette/MS
+Corvus/M
+Cory/M
+Cos
+Cosby/M
+Cosetta/M
+Cosette/M
+cos/GDS
+cosignatory/MS
+cosign/SRDZG
+cosily
+Cosimo/M
+cosine/MS
+cosiness/MS
+Cosme/M
+cosmetically
+cosmetician/MS
+cosmetic/SM
+cosmetologist/MS
+cosmetology/MS
+cosmic
+cosmical/Y
+cosmogonist/MS
+cosmogony/SM
+cosmological/Y
+cosmologist/MS
+cosmology/SM
+Cosmo/M
+cosmonaut/MS
+cosmopolitanism/MS
+cosmopolitan/SM
+cosmos/SM
+cosponsor/DSG
+cossack/S
+Cossack/SM
+cosset/GDS
+Costa/M
+Costanza/M
+costarred
+costarring
+costar/S
+Costello/M
+costiveness/M
+costive/PY
+costless
+costliness/SM
+costly/RTP
+cost/MYGVJS
+Costner/M
+costumer/M
+costume/ZMGSRD
+cotangent/SM
+Cote/M
+cote/MS
+coterie/MS
+coterminous/Y
+cotillion/SM
+Cotonou/M
+Cotopaxi/M
+cot/SGMD
+cottager/M
+cottage/ZMGSRD
+cottar's
+cotted
+cotter/SDM
+cotton/GSDM
+Cotton/M
+cottonmouth/M
+cottonmouths
+cottonseed/MS
+cottontail/SM
+cottonwood/SM
+cottony
+cotyledon/MS
+couching/M
+couch/MSDG
+cougar/MS
+cougher/M
+cough/RDG
+coughs
+couldn't
+could/T
+could've
+coulée/MS
+Coulomb/M
+coulomb/SM
+councilman/M
+councilmen
+councilor/MS
+councilperson/S
+council/SM
+councilwoman/M
+councilwomen
+counsel/GSDM
+counsellings
+counselor/MS
+countability/E
+countable/U
+countably/U
+countdown/SM
+counted/U
+count/EGARDS
+countenance/EGDS
+countenancer/M
+countenance's
+counteract/DSVG
+counteraction/SM
+counterargument/SM
+counterattack/DRMGS
+counterbalance/MSDG
+counterclaim/GSDM
+counterclockwise
+counterculture/MS
+countercyclical
+counterespionage/MS
+counterexample/S
+counterfeiter/M
+counterfeit/ZSGRD
+counterflow
+counterfoil/MS
+counterforce/M
+counter/GSMD
+counterinsurgency/MS
+counterintelligence/MS
+counterintuitive
+countermand/DSG
+counterman/M
+countermeasure/SM
+countermen
+counteroffensive/SM
+counteroffer/SM
+counterpane/SM
+counterpart/SM
+counterpoint/GSDM
+counterpoise/GMSD
+counterproductive
+counterproposal/M
+counterrevolutionary/MS
+counterrevolution/MS
+counter's/E
+counters/E
+countersignature/MS
+countersign/SDG
+countersink/SG
+counterspy/MS
+counterstrike
+countersunk
+countertenor/SM
+countervail/DSG
+counterweight/GMDS
+countess/MS
+countless/Y
+countrify/D
+countryman/M
+countrymen
+country/MS
+countryside/MS
+countrywide
+countrywoman/M
+countrywomen
+county/SM
+coup/ASDG
+coupe/MS
+Couperin/M
+couple/ACU
+coupled/CU
+coupler/C
+couplers
+coupler's
+couple's
+couples/CU
+couplet/SM
+coupling's/C
+coupling/SM
+coupon/SM
+coup's
+courage/MS
+courageously
+courageousness/MS
+courageous/U
+courages/E
+Courbet/M
+courgette/MS
+courier/GMDS
+course/EGSRDM
+courser's/E
+courser/SM
+course's/AF
+courses/FA
+coursework
+coursing/M
+Courtenay/M
+courteousness/EM
+courteousnesses
+courteous/PEY
+courtesan/MS
+courtesied
+courtesy/ESM
+courtesying
+court/GZMYRDS
+courthouse/MS
+courtier/SM
+courtliness/MS
+courtly/RTP
+Court/M
+Courtnay/M
+Courtney/M
+courtroom/MS
+courtship/SM
+courtyard/SM
+couscous/MS
+cousinly/U
+cousin/YMS
+Cousteau/M
+couture/SM
+couturier/SM
+covalent/Y
+covariance/SM
+covariant/S
+covariate/SN
+covary
+cove/DRSMZG
+covenanted/U
+covenanter/M
+covenant/SGRDM
+coven/SM
+Covent/M
+Coventry/MS
+coverable/E
+cover/AEGUDS
+coverage/MS
+coverall/DMS
+coverer/AME
+covering/MS
+coverlet/MS
+coversheet
+covers/M
+covertness/SM
+covert/YPS
+coveter/M
+coveting/Y
+covetousness/SM
+covetous/PY
+covet/SGRD
+covey/SM
+covington
+cowardice/MS
+cowardliness/MS
+cowardly/P
+Coward/M
+coward/MYS
+cowbell/MS
+cowbird/MS
+cowboy/MS
+cowcatcher/SM
+cowed/Y
+cowering/Y
+cower/RDGZ
+cowgirl/MS
+cowhand/S
+cowherd/SM
+cowhide/MGSD
+Cowley/M
+cowlick/MS
+cowling/M
+cowl/SGMD
+cowman/M
+cow/MDRSZG
+cowmen
+coworker/MS
+Cowper/M
+cowpoke/MS
+cowpony
+cowpox/MS
+cowpuncher/M
+cowpunch/RZ
+cowrie/SM
+cowshed/SM
+cowslip/MS
+coxcomb/MS
+Cox/M
+cox/MDSG
+coxswain/GSMD
+coy/CDSG
+coyer
+coyest
+coyly
+Coy/M
+coyness/MS
+coyote/SM
+coypu/SM
+cozenage/MS
+cozen/SGD
+cozily
+coziness/MS
+Cozmo/M
+Cozumel/M
+cozy/DSRTPG
+CPA
+cpd
+CPI
+cpl
+Cpl
+CPO
+CPR
+cps
+CPU/SM
+crabapple
+crabbedness/M
+crabbed/YP
+Crabbe/M
+crabber/MS
+crabbily
+crabbiness/S
+crabbing/M
+crabby/PRT
+crabgrass/S
+crablike
+crab/MS
+crackable/U
+crackdown/MS
+crackerjack/S
+cracker/M
+crackle/GJDS
+crackling/M
+crackly/RT
+crackpot/SM
+crackup/S
+crack/ZSBYRDG
+cradler/M
+cradle/SRDGM
+cradling/M
+craftily
+craftiness/SM
+Craft/M
+craft/MRDSG
+craftsman/M
+craftsmanship/SM
+craftsmen
+craftspeople
+craftspersons
+craftswoman
+craftswomen
+crafty/TRP
+Craggie/M
+cragginess/SM
+Craggy/M
+craggy/RTP
+crag/SM
+Craig/M
+Cramer/M
+crammed
+crammer/M
+cramming
+cramper/M
+cramp/MRDGS
+crampon/SM
+cram/S
+Cranach/M
+cranberry/SM
+Crandall/M
+crane/DSGM
+cranelike
+Crane/M
+Cranford/M
+cranial
+cranium/MS
+crankcase/MS
+crankily
+crankiness/MS
+crank/SGTRDM
+crankshaft/MS
+cranky/TRP
+Cranmer/M
+cranny/DSGM
+Cranston/M
+crape/SM
+crapped
+crappie/M
+crapping
+crappy/RST
+crapshooter/SM
+crap/SMDG!
+crasher/M
+crashing/Y
+crash/SRDGZ
+crassness/MS
+crass/TYRP
+crate/DSRGMZ
+crater/DMG
+Crater/M
+cravat/SM
+cravatted
+cravatting
+crave/DSRGJ
+cravenness/SM
+craven/SPYDG
+craver/M
+craving/M
+crawdad/S
+crawfish's
+Crawford/M
+crawler/M
+crawl/RDSGZ
+crawlspace/S
+crawlway
+crawly/TRS
+craw/SYM
+crayfish/GSDM
+Crayola/M
+crayon/GSDM
+Cray/SM
+craze/GMDS
+crazily
+craziness/MS
+crazy/SRTP
+creakily
+creakiness/SM
+creak/SDG
+creaky/PTR
+creamer/M
+creamery/MS
+creamily
+creaminess/SM
+cream/SMRDGZ
+creamy/TRP
+creased/CU
+crease/IDRSG
+crease's
+creases/C
+creasing/C
+created/U
+create/XKVNGADS
+creationism/MS
+creationist/MS
+Creation/M
+creation/MAK
+creativeness/SM
+creative/YP
+creativities
+creativity/K
+creativity's
+Creator/M
+creator/MS
+creatureliness/M
+creaturely/P
+creature/YMS
+crèche/SM
+credence/MS
+credent
+credential/SGMD
+credenza/SM
+credibility/IMS
+credible/I
+credibly/I
+creditability/M
+creditableness/M
+creditable/P
+creditably/E
+credited/U
+credit/EGBSD
+creditor/MS
+credit's
+creditworthiness
+credo/SM
+credulity/ISM
+credulous/IY
+credulousness/SM
+creedal
+creed/C
+creeds
+creed's
+creekside
+creek/SM
+Creek/SM
+creel/SMDG
+Cree/MDS
+creeper/M
+creepily
+creepiness/SM
+creep/SGZR
+creepy/PRST
+Creigh/M
+Creight/M
+Creighton/M
+cremate/XDSNG
+cremation/M
+crematoria
+crematorium/MS
+crematory/S
+creme/S
+crenelate/XGNSD
+crenelation/M
+Creole/MS
+creole/SM
+Creon/M
+creosote/MGDS
+crepe/DSGM
+crept
+crescendoed
+crescendoing
+crescendo/SCM
+crescent/MS
+cress/S
+crestfallenness/M
+crestfallen/PY
+cresting/M
+crestless
+crest/SGMD
+Crestview/M
+cretaceous
+Cretaceously/M
+Cretaceous/Y
+Cretan/S
+Crete/M
+cretinism/MS
+cretin/MS
+cretinous
+cretonne/SM
+crevasse/DSMG
+crevice/SM
+crew/DMGS
+crewel/SM
+crewelwork/SM
+crewman/M
+crewmen
+cribbage/SM
+cribbed
+cribber/SM
+cribbing/M
+crib/SM
+Crichton/M
+cricketer/M
+cricket/SMZRDG
+crick/GDSM
+Crick/M
+cried/C
+crier/CM
+cries/C
+Crimea/M
+Crimean
+crime/GMDS
+criminality/MS
+criminalization/C
+criminalize/GC
+criminal/SYM
+criminologist/SM
+criminology/MS
+crimper/M
+crimp/RDGS
+crimson/DMSG
+cringer/M
+cringe/SRDG
+crinkle/DSG
+crinkly/TRS
+Crin/M
+crinoline/SM
+cripple/GMZDRS
+crippler/M
+crippling/Y
+Crisco/M
+crises
+crisis/M
+Cris/M
+crisper/M
+crispiness/SM
+crispness/MS
+crisp/PGTYRDS
+crispy/RPT
+criss
+crisscross/GDS
+Crissie/M
+Crissy/M
+Cristabel/M
+Cristal/M
+Crista/M
+Cristen/M
+Cristian/M
+Cristiano/M
+Cristie/M
+Cristi/M
+Cristina/M
+Cristine/M
+Cristin/M
+Cristionna/M
+Cristobal/M
+Cristy/M
+criteria
+criterion/M
+criticality
+critically/U
+criticalness/M
+critical/YP
+criticism/MS
+criticized/U
+criticize/GSRDZ
+criticizer/M
+criticizes/A
+criticizingly/S
+criticizing/UY
+critic/MS
+critique/MGSD
+critter/SM
+Cr/M
+croaker/M
+croak/SRDGZ
+croaky/RT
+Croatia/M
+Croatian/S
+Croat/SM
+Croce/M
+crocheter/M
+crochet/RDSZJG
+crockery/SM
+Crockett/M
+Crockpot/M
+crock/SGRDM
+crocodile/MS
+crocus/SM
+Croesus/SM
+crofter/M
+croft/MRGZS
+croissant/MS
+Croix/M
+Cromwellian
+Cromwell/M
+crone/SM
+Cronin/M
+Cronkite/M
+Cronus/M
+crony/SM
+crookedness/SM
+crooked/TPRY
+Crookes/M
+crookneck/MS
+crook/SGDM
+crooner/M
+croon/SRDGZ
+cropland/MS
+crop/MS
+cropped
+cropper/SM
+cropping
+croquet/MDSG
+croquette/SM
+Crosby/M
+crosier/SM
+crossarm
+crossbarred
+crossbarring
+crossbar/SM
+crossbeam/MS
+crossbones
+crossbowman/M
+crossbowmen
+crossbow/SM
+crossbred/S
+crossbreed/SG
+crosscheck/SGD
+crosscurrent/SM
+crosscut/SM
+crosscutting
+crossed/UA
+crosses/UA
+crossfire/SM
+crosshatch/GDS
+crossing/M
+Cross/M
+crossness/MS
+crossover/MS
+crosspatch/MS
+crosspiece/SM
+crosspoint
+crossproduct/S
+crossroad/GSM
+crossroads/M
+crosstalk/M
+crosstown
+crosswalk/MS
+crossway/M
+crosswind/SM
+crosswise
+crossword/MS
+cross/ZTYSRDMPBJG
+crotchetiness/M
+crotchet/MS
+crotchety/P
+crotchless
+crotch/MDS
+crouch/DSG
+croupier/M
+croup/SMDG
+croupy/TZR
+croûton/MS
+crowbait
+crowbarred
+crowbarring
+crowbar/SM
+crowdedness/M
+crowded/P
+crowd/MRDSG
+crowfeet
+crowfoot/M
+crow/GDMS
+Crowley/M
+crowned/U
+crowner/M
+crown/RDMSJG
+crozier's
+CRT/S
+crucial/Y
+crucible/MS
+crucifiable
+crucifixion/MS
+Crucifixion/MS
+crucifix/SM
+cruciform/S
+crucify/NGDS
+crudded
+crudding
+cruddy/TR
+crudeness/MS
+crude/YSP
+crudités
+crudity/MS
+crud/STMR
+cruelness/MS
+cruelty/SM
+cruel/YRTSP
+cruet/MS
+cruft
+crufty
+Cruikshank/M
+cruise/GZSRD
+cruiser/M
+cruller/SM
+crumb/GSYDM
+crumble/DSJG
+crumbliness/MS
+crumbly/PTRS
+crumby/RT
+crumminess/S
+crummy/SRTP
+crump
+crumpet/SM
+crumple/DSG
+crunch/DSRGZ
+crunchiness/MS
+crunchy/TRP
+crupper/MS
+crusade/GDSRMZ
+crusader/M
+cruse/MS
+crushable/U
+crusher/M
+crushing/Y
+crushproof
+crush/SRDBGZ
+Crusoe/M
+crustacean/MS
+crustal
+crust/GMDS
+crustily
+crustiness/SM
+crusty/SRTP
+crutch/MDSG
+Crux/M
+crux/MS
+Cruz/M
+crybaby/MS
+cry/JGDRSZ
+cryogenic/S
+cryogenics/M
+cryostat/M
+cryosurgery/SM
+cryptanalysis/M
+cryptanalyst/M
+cryptanalytic
+crypt/CS
+cryptic
+cryptically
+cryptogram/MS
+cryptographer/MS
+cryptographic
+cryptographically
+cryptography/MS
+cryptologic
+cryptological
+cryptologist/M
+cryptology/M
+Cryptozoic/M
+crypt's
+crystalline/S
+crystallite/SM
+crystallization/AMS
+crystallized/UA
+crystallizes/A
+crystallize/SRDZG
+crystallizing/A
+crystallographer/MS
+crystallographic
+crystallography/M
+Crystal/M
+crystal/SM
+Crysta/M
+Crystie/M
+Cs
+C's
+cs/EA
+cs's
+CST
+ct
+CT
+Cthrine/M
+Ct/M
+ctn
+ctr
+Cuba/M
+Cuban/S
+cubbed
+cubbing
+cubbyhole/MS
+cuber/M
+cube/SM
+cubical/Y
+cubicle/SM
+cubic/YS
+cubism/SM
+cubist/MS
+cubit/MS
+cub/MDRSZG
+cuboid
+Cuchulain/M
+cuckold/GSDM
+cuckoldry/MS
+cuckoo/SGDM
+cucumber/MS
+cuddle/GSD
+cuddly/TRP
+cu/DG
+cudgel/GSJMD
+cud/MS
+cue/MS
+cuff/GSDM
+Cuisinart/M
+cuisine/MS
+Culbertson/M
+culinary
+Cullan/M
+cull/DRGS
+cullender's
+Cullen/M
+culler/M
+Culley/M
+Cullie/M
+Cullin/M
+Cull/MN
+Cully/M
+culminate/XSDGN
+culmination/M
+culotte/S
+culpability/MS
+culpable/I
+culpableness/M
+culpably
+culpa/SM
+culprit/SM
+cultism/SM
+cultist/SM
+cultivable
+cultivated/U
+cultivate/XBSDGN
+cultivation/M
+cultivator/SM
+cult/MS
+cultural/Y
+cultured/U
+culture/SDGM
+Culver/MS
+culvert/SM
+Cu/M
+cumber/DSG
+Cumberland/M
+cumbersomeness/MS
+cumbersome/YP
+cumbrous
+cumin/MS
+cummerbund/MS
+Cummings
+cumquat's
+cum/S
+cumulate/XVNGSD
+cumulation/M
+cumulative/Y
+cumuli
+cumulonimbi
+cumulonimbus/M
+cumulus/M
+Cunard/M
+cuneiform/S
+cunnilingus/SM
+Cunningham/M
+cunningness/M
+cunning/RYSPT
+cunt/SM!
+cupboard/SM
+cupcake/SM
+Cupertino/M
+cupful/SM
+cupidinously
+cupidity/MS
+Cupid/M
+cupid/S
+cup/MS
+cupola/MDGS
+cupped
+cupping/M
+cupric
+cuprous
+curability/MS
+curable/IP
+curableness/MI
+curably/I
+Curacao/M
+curacy/SM
+curare/MS
+curate/VGMSD
+curative/YS
+curatorial
+curator/KMS
+curbing/M
+curbside
+curb/SJDMG
+curbstone/MS
+Curcio/M
+curdle/SDG
+curd/SMDG
+cured/U
+cure/KBDRSGZ
+curer/MK
+curettage/SM
+curfew/SM
+curfs
+curiae
+curia/M
+cur/IBS
+Curie/M
+curie/SM
+curiosity/SM
+curio/SM
+curiousness/SM
+curious/TPRY
+Curitiba/M
+curium/MS
+curler/SM
+curlew/MS
+curlicue/MGDS
+curliness/SM
+curling/M
+curl/UDSG
+curlycue's
+curly/PRT
+curmudgeon/MYS
+Curran/M
+currant/SM
+curred/AFI
+currency's
+currency/SF
+current/FSY
+currently/A
+currentness/M
+Currey/M
+curricle/M
+curricula
+curricular
+curriculum/M
+Currie/M
+currier/M
+Currier/M
+curring/FAI
+Curr/M
+currycomb/DMGS
+Curry/MR
+curry/RSDMG
+cur's
+curs/ASDVG
+curse/A
+cursedness/M
+cursed/YRPT
+curse's
+cursive/EPYA
+cursiveness/EM
+cursives
+cursor/DMSG
+cursorily
+cursoriness/SM
+cursory/P
+curtailer/M
+curtail/LSGDR
+curtailment/SM
+curtain/GSMD
+Curtice/M
+Curtis/M
+Curt/M
+curtness/MS
+curtsey's
+curtsy/SDMG
+curt/TYRP
+curvaceousness/S
+curvaceous/YP
+curvature/MS
+curved/A
+curved's
+curve/DSGM
+curvilinearity/M
+curvilinear/Y
+curving/M
+curvy/RT
+cushion/SMDG
+Cushman/M
+cushy/TR
+cuspid/MS
+cuspidor/MS
+cusp/MS
+cussedness/M
+cussed/YP
+cuss/EGDSR
+cusses/F
+cussing/F
+cuss's
+custard/MS
+Custer/M
+custodial
+custodianship/MS
+custodian/SM
+custody/MS
+customarily
+customariness/M
+customary/PS
+customer/M
+customhouse/S
+customization/SM
+customize/ZGBSRD
+custom/SMRZ
+cutaneous/Y
+cutaway/SM
+cutback/SM
+cuteness/MS
+cute/SPY
+cutesy/RT
+cuticle/SM
+cutlass/MS
+cutler/SM
+cutlery/MS
+cutlet/SM
+cut/MRST
+cutoff/MS
+cutout/SM
+cutter/SM
+cutthroat/SM
+cutting/MYS
+cuttlebone/SM
+cuttlefish/MS
+cuttle/M
+cutup/MS
+cutworm/MS
+Cuvier/M
+Cuzco/M
+CV
+cw
+cwt
+Cyanamid/M
+cyanate/M
+cyanic
+cyanide/GMSD
+cyan/MS
+cyanogen/M
+Cybele/M
+cybernetic/S
+cybernetics/M
+cyberpunk/S
+cyberspace/S
+Cybill/M
+Cybil/M
+Cyb/M
+cyborg/S
+Cyclades
+cyclamen/MS
+cycle/ASDG
+cycler
+cycle's
+cycleway/S
+cyclic
+cyclical/SY
+cycling/M
+cyclist/MS
+cyclohexanol
+cycloidal
+cycloid/SM
+cyclometer/MS
+cyclone/SM
+cyclonic
+cyclopean
+cyclopedia/MS
+cyclopes
+Cyclopes
+cyclops
+Cyclops/M
+cyclotron/MS
+cyder/SM
+cygnet/MS
+Cygnus/M
+cylinder/GMDS
+cylindric
+cylindrical/Y
+Cy/M
+cymbalist/MS
+cymbal/SM
+Cymbre/M
+Cynde/M
+Cyndia/M
+Cyndie/M
+Cyndi/M
+Cyndy/M
+cynical/UY
+cynicism/MS
+cynic/MS
+cynosure/SM
+Cynthea/M
+Cynthia/M
+Cynthie/M
+Cynthy/M
+cypher/MGSD
+cypreses
+cypress/SM
+Cyprian
+Cypriot/SM
+Cyprus/M
+Cyrano/M
+Cyrille/M
+Cyrillic
+Cyrill/M
+Cyrillus/M
+Cyril/M
+Cyrus/M
+cystic
+cyst/MS
+cytochemistry/M
+cytochrome/M
+cytologist/MS
+cytology/MS
+cytolysis/M
+cytoplasmic
+cytoplasm/SM
+cytosine/MS
+cytotoxic
+CZ
+czarevitch/M
+czarina/SM
+czarism/M
+czarist/S
+czarship
+czar/SM
+Czech
+Czechoslovakia/M
+Czechoslovakian/S
+Czechoslovak/S
+Czechs
+Czerniak/M
+Czerny/M
+D
+DA
+dabbed
+dabber/MS
+dabbing
+dabbler/M
+dabble/RSDZG
+dab/S
+Dacca's
+dace/MS
+Dacey/M
+dacha/SM
+Dachau/M
+dachshund/SM
+Dacia/M
+Dacie/M
+Dacron/MS
+dactylic/S
+dactyl/MS
+Dacy/M
+Dadaism/M
+dadaism/S
+Dadaist/M
+dadaist/S
+Dada/M
+daddy/SM
+Dade/M
+dado/DMG
+dadoes
+dad/SM
+Daedalus/M
+Dael/M
+daemonic
+daemon/SM
+Daffie/M
+Daffi/M
+daffiness/S
+daffodil/MS
+Daffy/M
+daffy/PTR
+daftness/MS
+daft/TYRP
+DAG
+dagger/DMSG
+Dag/M
+Dagmar/M
+Dagny/M
+Daguerre/M
+daguerreotype/MGDS
+Dagwood/M
+Dahlia/M
+dahlia/MS
+Dahl/M
+Dahomey/M
+Daile/M
+dailiness/MS
+daily/PS
+Daimler/M
+daintily
+daintiness/MS
+dainty/TPRS
+daiquiri/SM
+dairying/M
+dairyland
+dairymaid/SM
+dairyman/M
+dairymen
+dairy/MJGS
+dairywoman/M
+dairywomen
+Daisey/M
+Daisie/M
+Daisi/M
+dais/SM
+Daisy/M
+daisy/SM
+Dakar/M
+Dakotan
+Dakota/SM
+Dale/M
+Dalenna/M
+dale/SMH
+daleth/M
+Daley/M
+Dalhousie/M
+Dalia/M
+Dalian/M
+Dalila/M
+Dali/SM
+Dallas/M
+dalliance/SM
+dallier/M
+Dalli/MS
+Dall/M
+Dallon/M
+dally/ZRSDG
+Dal/M
+Dalmatia/M
+dalmatian/S
+Dalmatian/SM
+Daloris/M
+Dalston/M
+Dalt/M
+Dalton/M
+Daly/M
+damageable
+damaged/U
+damage/MZGRSD
+damager/M
+damaging/Y
+Damara/M
+Damaris/M
+Damascus/M
+damask/DMGS
+dame/SM
+Dame/SMN
+Damian/M
+Damiano/M
+Damien/M
+Damion/M
+Damita/M
+dam/MDS
+dammed
+damming
+dammit/S
+damnably
+damnation/MS
+damnedest/MS
+damned/TR
+damn/GSBRD
+damning/Y
+Damocles/M
+Damon/M
+damped/U
+dampener/M
+dampen/RDZG
+damper/M
+dampness/MS
+damp/SGZTXYRDNP
+damselfly/MS
+damsel/MS
+damson/MS
+Danaë
+Dana/M
+Danbury/M
+dancelike
+dancer/M
+dance/SRDJGZ
+dandelion/MS
+dander/DMGS
+dandify/SDG
+dandily
+dandle/GSD
+dandruff/MS
+dandy/TRSM
+Danelaw/M
+Danella/M
+Danell/M
+Dane/SM
+Danette/M
+danger/DMG
+Dangerfield/M
+dangerousness/M
+dangerous/YP
+dangler/M
+dangle/ZGRSD
+dangling/Y
+dang/SGZRD
+Danial/M
+Dania/M
+Danica/M
+Danice/M
+Daniela/M
+Daniele/M
+Daniella/M
+Danielle/M
+Daniel/SM
+Danielson/M
+Danie/M
+Danika/M
+Danila/M
+Dani/M
+Danish
+danish/S
+Danita/M
+Danit/M
+dankness/MS
+dank/TPYR
+Danna/M
+Dannel/M
+Dannie/M
+Danni/M
+Dannye/M
+Danny/M
+danseuse/SM
+Dan/SM
+Dante/M
+Danton/M
+Danube/M
+Danubian
+Danville/M
+Danya/M
+Danyelle/M
+Danyette/M
+Danzig/M
+Daphene/M
+Daphna/M
+Daphne/M
+dapperness/M
+dapper/PSTRY
+dapple/SDG
+Dara/M
+Darbee/M
+Darbie/M
+Darb/M
+Darby/M
+Darcee/M
+Darcey/M
+Darcie/M
+Darci/M
+D'Arcy
+Darcy/M
+Darda/M
+Dardanelles
+daredevil/MS
+daredevilry/S
+Dareen/M
+Darelle/M
+Darell/M
+Dare/M
+Daren/M
+darer/M
+daresay
+dare/ZGDRSJ
+d'Arezzo
+Daria/M
+Darice/M
+Darill/M
+Dari/M
+daringness/M
+daring/PY
+Darin/M
+Dario/M
+Darius/M
+Darjeeling/M
+darkener/M
+darken/RDZG
+dark/GTXYRDNSP
+darkish
+darkly/TR
+darkness/MS
+darkroom/SM
+Darla/M
+Darleen/M
+Darlene/M
+Darline/M
+Darling/M
+darlingness/M
+Darlington/M
+darling/YMSP
+Darlleen/M
+Dar/MNH
+Darnall/M
+darned/TR
+Darnell/M
+darner/M
+darn/GRDZS
+darning/M
+Darn/M
+Daron/M
+DARPA/M
+Darrelle/M
+Darrell/M
+Darrel/M
+Darren/M
+Darrick/M
+Darrin/M
+Darrow/M
+Darryl/M
+Darsey/M
+Darsie/M
+d'art
+dartboard/SM
+darter/M
+Darth/M
+Dartmouth/M
+dart/MRDGZS
+Darvon/M
+Darwinian/S
+Darwinism/MS
+Darwinist/MS
+Darwin/M
+Darya/M
+Daryle/M
+Daryl/M
+Daryn/M
+Dasha/M
+dashboard/SM
+dasher/M
+dash/GZSRD
+dashiki/SM
+dashing/Y
+Dasie/M
+Dasi/M
+dastardliness/SM
+dastardly/P
+dastard/MYS
+Dasya/M
+DAT
+database/DSMG
+datafile
+datagram/MS
+data/M
+Datamation/M
+Datamedia/M
+dataset/S
+datedly
+datedness
+date/DRSMZGV
+dated/U
+dateless
+dateline/DSMG
+dater/M
+Datha/M
+dative/S
+Datsun/M
+datum/MS
+dauber/M
+daub/RDSGZ
+Daugherty/M
+daughter/MYS
+Daumier/M
+Daune/M
+daunt/DSG
+daunted/U
+daunting/Y
+dauntlessness/SM
+dauntless/PY
+dauphin/SM
+Davao/M
+Daveen/M
+Dave/M
+Daven/M
+Davenport/M
+davenport/MS
+Daveta/M
+Davey/M
+Davida/M
+Davidde/M
+Davide/M
+David/SM
+Davidson/M
+Davie/M
+Davina/M
+Davine/M
+Davinich/M
+Davin/M
+Davis/M
+Davita/M
+davit/SM
+Dav/MN
+Davon/M
+Davy/SM
+dawdler/M
+dawdle/ZGRSD
+Dawes/M
+Dawna/M
+dawn/GSDM
+Dawn/M
+Dawson/M
+daybed/S
+daybreak/SM
+daycare/S
+daydreamer/M
+daydream/RDMSZG
+Dayle/M
+daylight/GSDM
+Day/M
+Dayna/M
+daysack
+day/SM
+daytime/SM
+Dayton/M
+dazed/PY
+daze/DSG
+dazzler/M
+dazzle/ZGJRSD
+dazzling/Y
+db
+DB
+dbl
+dB/M
+DBMS
+DC
+DD
+Ddene/M
+DDS
+DDT
+DE
+deacon/DSMG
+deaconess/MS
+deadbeat/SM
+deadbolt/S
+deadener/M
+deadening/MY
+deaden/RDG
+deadhead/MS
+deadline/MGDS
+deadliness/SM
+deadlock/MGDS
+deadly/RPT
+deadness/M
+deadpanned
+deadpanner
+deadpanning
+deadpan/S
+dead/PTXYRN
+deadwood/SM
+deafening/MY
+deafen/JGD
+deafness/MS
+deaf/TXPYRN
+dealer/M
+dealership/MS
+dealing/M
+deallocator
+deal/RSGZJ
+dealt
+Deana/M
+dean/DMG
+Deandre/M
+Deane/M
+deanery/MS
+Dean/M
+Deanna/M
+Deanne/M
+Deann/M
+deanship/SM
+Dearborn/M
+dearness/MS
+dearth/M
+dearths
+dear/TYRHPS
+deary/MS
+deassign
+deathbed/MS
+deathblow/SM
+deathless/Y
+deathlike
+deathly/TR
+death/MY
+deaths
+deathtrap/SM
+deathward
+deathwatch/MS
+debacle/SM
+debarkation/SM
+debark/G
+debar/L
+debarment/SM
+debarring
+debaser/M
+debatable/U
+debate/BMZ
+debater/M
+debauchedness/M
+debauched/PY
+debauchee/SM
+debaucher/M
+debauchery/SM
+debauch/GDRS
+Debbie/M
+Debbi/M
+Debby/M
+Debee/M
+debenture/MS
+Debera/M
+debilitate/NGXSD
+debilitation/M
+debility/MS
+Debi/M
+debit/DG
+deb/MS
+Deb/MS
+debonairness/SM
+debonair/PY
+Deborah/M
+Debora/M
+Debor/M
+debouch/DSG
+Debra/M
+debrief/GJ
+debris/M
+debtor/SM
+debt/SM
+Debussy/M
+débutante/SM
+debut/MDG
+decade/MS
+decadency/S
+decadent/YS
+decaffeinate/DSG
+decaf/S
+decagon/MS
+Decalogue/M
+decal/SM
+decamp/L
+decampment/MS
+decapitate/GSD
+decapitator/SM
+decathlon/SM
+Decatur/M
+decay/GRD
+Decca/M
+Deccan/M
+decease/M
+decedent/MS
+deceitfulness/SM
+deceitful/PY
+deceit/SM
+deceived/U
+deceiver/M
+deceives/U
+deceive/ZGRSD
+deceivingly
+deceiving/U
+decelerate/XNGSD
+deceleration/M
+decelerator/SM
+December/SM
+decency/ISM
+decennial/SY
+decent/TIYR
+deception/SM
+deceptiveness/SM
+deceptive/YP
+decertify/N
+dechlorinate/N
+decibel/MS
+decidability/U
+decidable/U
+decidedness/M
+decided/PY
+decide/GRSDB
+deciduousness/M
+deciduous/YP
+decile/SM
+deciliter/SM
+decimal/SYM
+decimate/XNGDS
+decimation/M
+decimeter/MS
+decipherable/IU
+decipher/BRZG
+decipherer/M
+decisional
+decisioned
+decisioning
+decision/ISM
+decisive/IPY
+decisiveness/MSI
+deckchair
+decker/M
+Decker/M
+deck/GRDMSJ
+deckhand/S
+decking/M
+Deck/RM
+declamation/SM
+declamatory
+declarable
+declaration/MS
+declaration's/A
+declarative/SY
+declarator/MS
+declaratory
+declare/AGSD
+declared/U
+declarer/MS
+declension/SM
+declination/MS
+decliner/M
+decline/ZGRSD
+declivity/SM
+Dec/M
+DEC/M
+DECNET
+DECnet/M
+deco
+décolletage/S
+décolleté
+decolletes
+decolorising
+decomposability/M
+decomposable/IU
+decompose/B
+decompress/R
+decongestant/S
+deconstruction
+deconvolution
+decorated/AU
+decorate/NGVDSX
+decorates/A
+decorating/A
+decoration/ASM
+decorativeness/M
+decorative/YP
+decorator/SM
+decorousness/MS
+decorousness's/I
+decorous/PIY
+decor/S
+decorticate/GNDS
+decortication/M
+decorum/MS
+decoupage/MGSD
+decouple/G
+decoy/M
+decrease
+decreasing/Y
+decreeing
+decree/RSM
+decremental
+decrement/DMGS
+decrepit
+decrepitude/SM
+decriminalization/S
+decriminalize/DS
+decry/G
+decrypt/GD
+decryption
+DECstation/M
+DECsystem/M
+DECtape/M
+decustomised
+Dedekind/M
+Dede/M
+dedicate/AGDS
+dedicated/Y
+dedication/MS
+dedicative
+dedicator/MS
+dedicatory
+Dedie/M
+Dedra/M
+deduce/RSDG
+deducible
+deductibility/M
+deductible/S
+deduction/SM
+deductive/Y
+deduct/VG
+Deeanne/M
+Deeann/M
+deeded
+Deedee/M
+deeding
+deed/IS
+deed's
+deejay/MDSG
+Dee/M
+deem/ADGS
+deemphasis
+Deena/M
+deepen/DG
+deepish
+deepness/MS
+deep/PTXSYRN
+Deerdre/M
+Deere/M
+deerskin/MS
+deer/SM
+deerstalker/SM
+deerstalking/M
+Deeyn/M
+deface/LZ
+defacement/SM
+defaecate
+defalcate/NGXSD
+defalcation/M
+defamation/SM
+defamatory
+defamer/M
+defame/ZR
+defaulter/M
+default/ZR
+defeated/U
+defeater/M
+defeatism/SM
+defeatist/SM
+defeat/ZGD
+defecate/DSNGX
+defecation/M
+defection/SM
+defectiveness/MS
+defective/PYS
+defect/MDSVG
+defector/MS
+defendant/SM
+defended/U
+defenestrate/GSD
+defenselessness/MS
+defenseless/PY
+defenses/U
+defense/VGSDM
+defensibility/M
+defensible/I
+defensibly/I
+defensiveness/MS
+defensive/PSY
+deference/MS
+deferential/Y
+deferent/S
+deferrable
+deferral/SM
+deferred
+deferrer/MS
+deferring
+deffer
+defiance/MS
+defiant/Y
+defibrillator/M
+deficiency/MS
+deficient/SY
+deficit/MS
+defier/M
+defile/L
+defilement/MS
+definable/UI
+definably/I
+define/AGDRS
+defined/U
+definer/SM
+definite/IPY
+definiteness/IMS
+definitional
+definition/ASM
+definitiveness/M
+definitive/SYP
+defis
+deflate/XNGRSDB
+deflationary
+deflation/M
+deflect/DSGV
+deflected/U
+deflection/MS
+deflector/MS
+defocus
+defocussing
+Defoe/M
+defog
+defogger/S
+defoliant/SM
+defoliator/SM
+deformational
+deform/B
+deformed/U
+deformity/SM
+defrauder/M
+defraud/ZGDR
+defrayal/SM
+defroster/M
+defrost/RZ
+deftness/MS
+deft/TYRP
+defunct/S
+defying/Y
+defy/RDG
+def/Z
+deg
+Degas/M
+degassing
+degauss/GD
+degeneracy/MS
+degenerateness/M
+degenerate/PY
+degrade/B
+degradedness/M
+degraded/YP
+degrading/Y
+degrease
+degree/SM
+degum
+Dehlia/M
+dehumanize
+dehydrator/MS
+deicer/M
+deice/ZR
+deictic
+Deidre/M
+deification/M
+deify/SDXGN
+deign/DGS
+Deimos/M
+Deina/M
+Deirdre/MS
+deistic
+deist/SM
+Deity/M
+deity/SM
+deja
+deject/DSG
+dejectedness/M
+dejected/PY
+dejection/SM
+Dejesus/M
+DeKalb/M
+DeKastere/M
+Delacroix/M
+Delacruz/M
+Delainey/M
+Dela/M
+Delaney/M
+Delano/M
+Delawarean/SM
+Delaware/MS
+delay/D
+delayer/G
+Delbert/M
+Delcina/M
+Delcine/M
+delectableness/M
+delectable/SP
+delectably
+delectation/MS
+delegable
+Deleon/M
+deleted/U
+deleteriousness/M
+deleterious/PY
+delete/XBRSDNG
+deletion/M
+delfs
+Delft/M
+delft/MS
+delftware/S
+Delgado/M
+Delhi/M
+Delia/M
+deliberateness/SM
+deliberate/PVY
+deliberativeness/M
+deliberative/PY
+Delibes/M
+delicacy/IMS
+delicate/IYP
+delicatenesses
+delicateness/IM
+delicates
+delicatessen/MS
+deliciousness/MS
+delicious/YSP
+delicti
+delightedness/M
+delighted/YP
+delightfulness/M
+delightful/YP
+Delilah/M
+Delilahs
+Delila/M
+Delinda/M
+delineate/SDXVNG
+delineation/M
+delinquency/MS
+delinquent/SYM
+deliquesce/GSD
+deliquescent
+deliriousness/MS
+delirious/PY
+delirium/SM
+deli/SM
+Delius/M
+deliverables
+deliverable/U
+deliver/AGSD
+deliverance/SM
+delivered/U
+deliverer/SM
+delivery/AM
+deliverymen/M
+Della/M
+Dell/M
+dell/SM
+Dellwood/M
+Delly/M
+Delmar/M
+Delmarva/M
+Delmer/M
+Delmonico
+Delmore/M
+Delmor/M
+Del/MY
+Delora/M
+Delores/M
+Deloria/M
+Deloris/M
+Delphic
+Delphi/M
+Delphine/M
+Delphinia/M
+delphinium/SM
+Delphinus/M
+Delta/M
+delta/MS
+deltoid/SM
+deluder/M
+delude/RSDG
+deluding/Y
+deluge/SDG
+delusional
+delusion/SM
+delusiveness/M
+delusive/PY
+deluxe
+delve/GZSRD
+delver/M
+demagnify/N
+demagogic
+demagogue/GSDM
+demagoguery/SM
+demagogy/MS
+demander/M
+demand/GSRD
+demandingly
+demanding/U
+demarcate/SDNGX
+demarcation/M
+Demavend/M
+demean/GDS
+demeanor/SM
+dementedness/M
+demented/YP
+dementia/MS
+Demerol/M
+demesne/SM
+Demeter/M
+Demetra/M
+Demetre/M
+Demetria/M
+Demetri/MS
+Demetrius/M
+demigod/MS
+demijohn/MS
+demimondaine/SM
+demimonde/SM
+demineralization/SM
+Deming/M
+demise/DMG
+demit
+demitasse/MS
+demitted
+demitting
+Dem/MG
+democracy/MS
+Democratic
+democratically/U
+democratic/U
+democratization/MS
+democratize/DRSG
+democratizes/U
+Democrat/MS
+democrat/SM
+Democritus/M
+démodé
+demo/DMPG
+demographer/MS
+demographical/Y
+demographic/S
+demography/MS
+demolisher/M
+demolish/GSRD
+demolition/MS
+demonetization/S
+demoniacal/Y
+demoniac/S
+demonic
+demonology/M
+demon/SM
+demonstrable/I
+demonstrableness/M
+demonstrably/I
+demonstrate/XDSNGV
+demonstration/M
+demonstrativenesses
+demonstrativeness/UM
+demonstratives
+demonstrative/YUP
+demonstrator/MS
+demoralization/M
+demoralizer/M
+demoralizing/Y
+DeMorgan/M
+Demosthenes/M
+demote/DGX
+demotic/S
+Demott/M
+demount/B
+Dempsey/M
+demulcent/S
+demultiplex
+demureness/SM
+demure/YP
+demurral/MS
+demurred
+demurrer/MS
+demurring
+demur/RTS
+demythologization/M
+demythologize/R
+den
+Dena/M
+dendrite/MS
+Deneb/M
+Denebola/M
+Deneen/M
+Dene/M
+Deng/M
+dengue/MS
+deniable/U
+denial/SM
+Denice/M
+denier/M
+denigrate/VNGXSD
+denigration/M
+denim/SM
+Denise/M
+Deni/SM
+denizen/SMDG
+Den/M
+De/NM
+Denmark/M
+Denna/M
+denned
+Dennet/M
+Denney/M
+Dennie/M
+Denni/MS
+denning
+Dennison/M
+Denny/M
+denominate/V
+denominational/Y
+denote/B
+denouement/MS
+denounce/LZRSDG
+denouncement/SM
+denouncer/M
+dense/FR
+densely
+denseness/SM
+densitometer/MS
+densitometric
+densitometry/M
+density/MS
+dens/RT
+dental/YS
+dentifrice/SM
+dentine's
+dentin/SM
+dent/ISGD
+dentistry/MS
+dentist/SM
+dentition/MS
+dent's
+denture/IMS
+denuclearize/GSD
+denudation/SM
+denude/DG
+denuder/M
+denunciate/VNGSDX
+denunciation/M
+Denver/M
+denying/Y
+Deny/M
+Denys
+Denyse/M
+deny/SRDZG
+deodorant/SM
+deodorization/SM
+deodorize/GZSRD
+deodorizer/M
+Deon/M
+Deonne/M
+deoxyribonucleic
+depart/L
+departmentalization/SM
+departmentalize/DSG
+departmental/Y
+department/MS
+departure/MS
+dependability/MS
+dependableness/M
+dependable/P
+dependably
+Dependant/MS
+depend/B
+dependence/ISM
+dependency/MS
+dependent/IYS
+dependent's
+depicted/U
+depicter/M
+depiction/SM
+depict/RDSG
+depilatory/S
+deplete/VGNSDX
+depletion/M
+deplorableness/M
+deplorable/P
+deplorably
+deplorer/M
+deplore/SRDBG
+deploring/Y
+deployable
+deploy/AGDLS
+deployment/SAM
+depolarize
+deponent/S
+deportation/MS
+deportee/SM
+deport/LG
+deportment/MS
+depose
+deposit/ADGS
+depositary/M
+deposition/A
+depositor/SAM
+depository/MS
+depravedness/M
+depraved/PY
+deprave/GSRD
+depraver/M
+depravity/SM
+deprecate/XSDNG
+deprecating/Y
+deprecation/M
+deprecatory
+depreciable
+depreciate/XDSNGV
+depreciating/Y
+depreciation/M
+depreciative/Y
+depressant/S
+depressible
+depression/MS
+depressive/YS
+depressor/MS
+depress/V
+deprive/GSD
+depth/M
+depths
+Dept/M
+deputation/SM
+depute/SDG
+deputize/DSG
+deputy/MS
+dequeue
+derail/L
+dérailleur/MS
+derailment/MS
+derange/L
+derangement/MS
+Derbyshire/M
+derby/SM
+Derby/SM
+dereference/Z
+Derek/M
+dereliction/SM
+derelict/S
+Derick/M
+deride/D
+deriding/Y
+derision/SM
+derisiveness/MS
+derisive/PY
+derisory
+derivable/U
+derivate/XNV
+derivation/M
+derivativeness/M
+derivative/SPYM
+derive/B
+derived/U
+Derk/M
+Der/M
+dermal
+dermatitides
+dermatitis/MS
+dermatological
+dermatologist/MS
+dermatology/MS
+dermis/SM
+Dermot/M
+derogate/XDSNGV
+derogation/M
+derogatorily
+derogatory
+Derrek/M
+Derrick/M
+derrick/SMDG
+Derrida/M
+derrière/S
+Derrik/M
+Derril/M
+derringer/SM
+Derron/M
+Derry/M
+dervish/SM
+Derward/M
+Derwin/M
+Des
+desalinate/NGSDX
+desalination/M
+desalinization/MS
+desalinize/GSD
+desalt/G
+descant/M
+Descartes/M
+descendant/SM
+descended/FU
+descendent's
+descender/M
+descending/F
+descends/F
+descend/ZGSDR
+descent
+describable/I
+describe/ZB
+description/MS
+descriptiveness/MS
+descriptive/SYP
+descriptor/SM
+descry/SDG
+Desdemona/M
+desecrater/M
+desecrate/SRDGNX
+desecration/M
+deserter/M
+desertification
+desertion/MS
+desert/ZGMRDS
+deservedness/M
+deserved/YU
+deserve/J
+deserving/Y
+déshabillé's
+desiccant/S
+desiccate/XNGSD
+desiccation/M
+desiccator/SM
+desiderata
+desideratum/M
+designable
+design/ADGS
+designate/VNGSDX
+designational
+designation/M
+designator/SM
+designed/Y
+designer/M
+designing/U
+Desi/M
+desirabilia
+desirability's
+desirability/US
+desirableness/SM
+desirableness's/U
+desirable/UPS
+desirably/U
+Desirae/M
+desire/BR
+desired/U
+Desiree/M
+desirer/M
+Desiri/M
+desirousness/M
+desirous/PY
+desist/DSG
+desk/SM
+desktop/S
+Desmond/M
+Desmund/M
+desolateness/SM
+desolate/PXDRSYNG
+desolater/M
+desolating/Y
+desolation/M
+desorption/M
+despairer/M
+despairing/Y
+despair/SGDR
+desperadoes
+desperado/M
+desperateness/SM
+desperate/YNXP
+desperation/M
+despicable
+despicably
+despiser/M
+despise/SRDG
+despoil/L
+despoilment/MS
+despond
+despondence/S
+despondency/MS
+despondent/Y
+despotic
+despotically
+despotism/SM
+dessert/SM
+dessicate/DN
+d'Estaing
+destinate/NX
+destination/M
+destine/GSD
+destiny/MS
+destituteness/M
+destitute/NXP
+destitution/M
+destroy/BZGDRS
+destroyer/M
+destructibility/SMI
+destructible/I
+destruction/SM
+destructiveness/MS
+destructive/YP
+destructor/M
+destruct/VGSD
+desuetude/MS
+desultorily
+desultoriness/M
+desultory/P
+detachedness/M
+detached/YP
+detacher/M
+detach/LSRDBG
+detachment/SM
+detailedness/M
+detailed/YP
+detainee/S
+detainer/M
+detain/LGRDS
+detainment/MS
+d'etat
+detectability/U
+detectable/U
+detectably/U
+detect/DBSVG
+detected/U
+detection/SM
+detective/MS
+detector/MS
+détente
+detentes
+detention/SM
+detergency/M
+detergent/SM
+deteriorate/XDSNGV
+deterioration/M
+determent/SM
+determinability/M
+determinable/IP
+determinableness/IM
+determinacy/I
+determinant/MS
+determinateness/IM
+determinate/PYIN
+determination/IM
+determinativeness/M
+determinative/P
+determinedly
+determinedness/M
+determined/U
+determine/GASD
+determiner/SM
+determinism/MS
+determinism's/I
+deterministically
+deterministic/I
+deterred/U
+deterrence/SM
+deterrent/SMY
+deterring
+detersive/S
+deter/SL
+deters/V
+detestableness/M
+detestable/P
+detestably
+detestation/SM
+dethrone/L
+dethronement/SM
+detonable
+detonated/U
+detonate/XDSNGV
+detonation/M
+detonator/MS
+detour/G
+detoxification/M
+detoxify/NXGSD
+detox/SDG
+detract/GVD
+detractive/Y
+d'etre
+detribalize/GSD
+detrimental/SY
+detriment/SM
+detritus/M
+Detroit/M
+deuced/Y
+deuce/SDGM
+deus
+deuterium/MS
+deuteron/M
+Deuteronomy/M
+Deutsch/M
+Deva/M
+Devanagari/M
+Devan/M
+devastate/XVNGSD
+devastating/Y
+devastation/M
+devastator/SM
+develop/ALZSGDR
+developed/U
+developer/MA
+developmental/Y
+development/ASM
+deviance/MS
+deviancy/S
+deviant/YMS
+deviated/U
+deviate/XSDGN
+deviating/U
+deviation/M
+devilishness/MS
+devilish/PY
+devilment/SM
+devilry/MS
+devil/SLMDG
+deviltry/MS
+Devi/M
+Devina/M
+Devin/M
+Devinne/M
+deviousness/SM
+devious/YP
+devise/JR
+deviser/M
+Devland/M
+Devlen/M
+Devlin/M
+Dev/M
+devoice
+devolution/MS
+devolve/GSD
+Devondra/M
+Devonian
+Devon/M
+Devonna/M
+Devonne/M
+Devonshire/M
+Devora/M
+devoted/Y
+devotee/MS
+devote/XN
+devotional/YS
+devotion/M
+devourer/M
+devour/SRDZG
+devoutness/MS
+devout/PRYT
+Devy/M
+Dewain/M
+dewar
+Dewar/M
+Dewayne/M
+dewberry/MS
+dewclaw/SM
+dewdrop/MS
+Dewey/M
+Dewie/M
+dewiness/MS
+Dewitt/M
+dewlap/MS
+Dew/M
+dew/MDGS
+dewy/TPR
+Dexedrine/M
+dexes/I
+Dex/M
+dexter
+dexterity/MS
+Dexter/M
+dexterousness/MS
+dexterous/PY
+dextrose/SM
+DH
+Dhaka
+Dhaulagiri/M
+dhoti/SM
+dhow/MS
+DI
+diabase/M
+diabetes/M
+diabetic/S
+diabolic
+diabolicalness/M
+diabolical/YP
+diabolism/M
+diachronic/P
+diacritical/YS
+diacritic/MS
+diadem/GMDS
+diaereses
+diaeresis/M
+Diaghilev/M
+diagnometer/SM
+diagnosable/U
+diagnose/BGDS
+diagnosed/U
+diagnosis/M
+diagnostically
+diagnostician/SM
+diagnostic/MS
+diagnostics/M
+diagonalize/GDSB
+diagonal/YS
+diagrammable
+diagrammatic
+diagrammaticality
+diagrammatically
+diagrammed
+diagrammer/SM
+diagramming
+diagram/MS
+Diahann/M
+dialectal/Y
+dialectical/Y
+dialectic/MS
+dialect/MS
+dialed/A
+dialer/M
+dialing/M
+dial/MRDSGZJ
+dialogged
+dialogging
+dialog/MS
+dials/A
+dialysis/M
+dialyzed/U
+dialyzes
+diam
+diamagnetic
+diameter/MS
+diametric
+diametrical/Y
+diamondback/SM
+diamond/GSMD
+Diana/M
+Diandra/M
+Diane/M
+Dianemarie/M
+Dian/M
+Dianna/M
+Dianne/M
+Diann/M
+Diannne/M
+diapason/MS
+diaper/SGDM
+diaphanousness/M
+diaphanous/YP
+diaphragmatic
+diaphragm/SM
+diarist/SM
+Diarmid/M
+diarrheal
+diarrhea/MS
+diary/MS
+diaspora
+Diaspora/SM
+diastase/SM
+diastole/MS
+diastolic
+diathermy/SM
+diathesis/M
+diatomic
+diatom/SM
+diatonic
+diatribe/MS
+Diaz's
+dibble/SDMG
+dibs
+DiCaprio/M
+dice/GDRS
+dicer/M
+dicey
+dichloride/M
+dichotomization/M
+dichotomize/DSG
+dichotomous/PY
+dichotomy/SM
+dicier
+diciest
+dicing/M
+Dickensian/S
+dickens/M
+Dickens/M
+dicker/DG
+Dickerson/M
+dickey/SM
+dick/GZXRDMS!
+Dickie/M
+dickier
+dickiest
+Dickinson/M
+Dickson/M
+Dick/XM
+Dicky/M
+dicky's
+dicotyledonous
+dicotyledon/SM
+dicta/M
+Dictaphone/SM
+dictate/SDNGX
+dictation/M
+dictatorialness/M
+dictatorial/YP
+dictator/MS
+dictatorship/SM
+dictionary/SM
+diction/MS
+dictum/M
+didactically
+didactic/S
+didactics/M
+did/AU
+diddler/M
+diddle/ZGRSD
+Diderot/M
+Didi/M
+didn't
+didoes
+dido/M
+Dido/M
+didst
+die/DS
+Diefenbaker/M
+Diego/M
+dieing
+dielectric/MS
+diem
+Diem/M
+Diena/M
+Dierdre/M
+diereses
+dieresis/M
+diesel/GMDS
+Diesel's
+dies's
+dies/U
+dietary/S
+dieter/M
+Dieter/M
+dietetic/S
+dietetics/M
+diethylaminoethyl
+diethylstilbestrol/M
+dietitian/MS
+diet/RDGZSM
+Dietrich/M
+Dietz/M
+difference/DSGM
+difference's/I
+differences/I
+differentiability
+differentiable
+differential/SMY
+differentiated/U
+differentiate/XSDNG
+differentiation/M
+differentiator/SM
+differentness
+different/YI
+differ/SZGRD
+difficile
+difficult/Y
+difficulty/SM
+diffidence/MS
+diffident/Y
+diffract/GSD
+diffraction/SM
+diffractometer/SM
+diffuseness/MS
+diffuse/PRSDZYVXNG
+diffuser/M
+diffusible
+diffusional
+diffusion/M
+diffusiveness/M
+diffusive/YP
+diffusivity/M
+digerati
+digested/IU
+digester/M
+digestibility/MS
+digestible/I
+digestifs
+digestion/ISM
+digestive/YSP
+digest/RDVGS
+digger/MS
+digging/S
+digitalis/M
+digitalization/MS
+digitalized
+digitalizes
+digitalizing
+digital/SY
+digitization/M
+digitizer/M
+digitize/ZGDRS
+digit/SM
+dignified/U
+dignify/DSG
+dignitary/SM
+dignity/ISM
+digram
+digraph/M
+digraphs
+digress/GVDS
+digression/SM
+digressiveness/M
+digressive/PY
+dig/TS
+dihedral
+Dijkstra/M
+Dijon/M
+dike/DRSMG
+diker/M
+diktat/SM
+Dilan/M
+dilapidate/XGNSD
+dilapidation/M
+dilatation/SM
+dilated/YP
+dilate/XVNGSD
+dilation/M
+dilatoriness/M
+dilator/SM
+dilatory/P
+Dilbert/M
+dilemma/MS
+dilettante/MS
+dilettantish
+dilettantism/MS
+diligence/SM
+diligentness/M
+diligent/YP
+dilithium
+Dillard/M
+Dillie/M
+Dillinger/M
+dilling/R
+dillis
+Dill/M
+Dillon/M
+dill/SGMD
+dillydally/GSD
+Dilly/M
+dilly/SM
+dilogarithm
+diluent
+diluted/U
+diluteness/M
+dilute/RSDPXYVNG
+dilution/M
+Di/M
+DiMaggio/M
+dimensionality/M
+dimensional/Y
+dimensionless
+dimension/MDGS
+dimer/M
+dime/SM
+dimethylglyoxime
+dimethyl/M
+diminished/U
+diminish/SDGBJ
+diminuendo/SM
+diminution/SM
+diminutiveness/M
+diminutive/SYP
+Dimitri/M
+Dimitry/M
+dimity/MS
+dimmed/U
+dimmer/MS
+dimmest
+dimming
+dimness/SM
+dimorphism/M
+dimple/MGSD
+dimply/RT
+dim/RYPZS
+dimwit/MS
+dimwitted
+Dinah/M
+Dina/M
+dinar/SM
+diner/M
+dine/S
+dinette/MS
+dingbat/MS
+ding/GD
+dinghy/SM
+dingily
+dinginess/SM
+dingle/MS
+dingoes
+dingo/MS
+dingus/SM
+dingy/PRST
+dinky/RST
+din/MDRZGS
+dinned
+dinner/SM
+dinnertime/S
+dinnerware/MS
+Dinnie/M
+dinning
+Dinny/M
+Dino/M
+dinosaur/MS
+dint/SGMD
+diocesan/S
+diocese/SM
+Diocletian/M
+diode/SM
+Diogenes/M
+Dione/M
+Dionisio/M
+Dionis/M
+Dion/M
+Dionne/M
+Dionysian
+Dionysus/M
+Diophantine/M
+diopter/MS
+diorama/SM
+Dior/M
+dioxalate
+dioxide/MS
+dioxin/S
+diphtheria/SM
+diphthong/SM
+diplexers
+diploid/S
+diplomacy/SM
+diploma/SMDG
+diplomata
+diplomatically
+diplomatic/S
+diplomatics/M
+diplomatist/SM
+diplomat/MS
+dipodic
+dipody/M
+dipole/MS
+dipped
+Dipper/M
+dipper/SM
+dipping/S
+dippy/TR
+dip/S
+dipsomaniac/MS
+dipsomania/SM
+dipstick/MS
+dipterous
+diptych/M
+diptychs
+Dir
+Dirac/M
+directed/IUA
+directionality
+directional/SY
+direction/MIS
+directions/A
+directive/SM
+directivity/M
+directly/I
+directness/ISM
+director/AMS
+directorate/SM
+directorial
+directorship/SM
+directory/SM
+direct/RDYPTSVG
+directrix/MS
+directs/IA
+direful/Y
+direness/M
+dire/YTRP
+dirge/GSDM
+Dirichlet/M
+dirigible/S
+dirk/GDMS
+Dirk/M
+dirndl/MS
+dirtily
+dirtiness/SM
+dirt/MS
+dirty/GPRSDT
+Dis
+disable/LZGD
+disablement/MS
+disabler/M
+disabuse
+disadvantaged/P
+disagreeable/S
+disallow/D
+disambiguate/DSGNX
+disappointed/Y
+disappointing/Y
+disarming/Y
+disarrange/L
+disastrous/Y
+disband/L
+disbandment/SM
+disbar/L
+disbarment/MS
+disbarring
+disbelieving/Y
+disbursal/S
+disburse/GDRSL
+disbursement/MS
+disburser/M
+discerner/M
+discernibility
+discernible/I
+discernibly
+discerning/Y
+discernment/MS
+discern/SDRGL
+disc/GDM
+discharged/U
+disciple/DSMG
+discipleship/SM
+disciplinarian/SM
+disciplinary
+disciplined/U
+discipline/IDM
+discipliner/M
+disciplines
+disciplining
+disclosed/U
+discography/MS
+discolored/MP
+discoloreds/U
+discolor/G
+discombobulate/SDGNX
+discomfit/DG
+discomfiture/MS
+disco/MG
+discommode/DG
+disconcerting/Y
+disconnectedness/S
+disconnected/P
+disconnecter/M
+disconnect/R
+disconsolate/YN
+discordance/SM
+discordant/Y
+discord/G
+discorporate/D
+discotheque/MS
+discount/B
+discourage/LGDR
+discouragement/MS
+discouraging/Y
+discoverable/I
+discover/ADGS
+discovered/U
+discoverer/S
+discovery/SAM
+discreetly/I
+discreetness's/I
+discreetness/SM
+discreet/TRYP
+discrepancy/SM
+discrepant/Y
+discreteness/SM
+discrete/YPNX
+discretionary
+discretion/IMS
+discretization
+discretized
+discriminable
+discriminant/MS
+discriminated/U
+discriminate/SDVNGX
+discriminating/YI
+discrimination/MI
+discriminator/MS
+discriminatory
+discursiveness/S
+discussant/MS
+discussed/UA
+discusser/M
+discussion/SM
+discus/SM
+disdainfulness/M
+disdainful/YP
+disdain/MGSD
+disease/G
+disembowelment/SM
+disembowel/SLGD
+disengage/L
+disfigure/L
+disfigurement/MS
+disfranchise/L
+disfranchisement/MS
+disgorge
+disgrace/R
+disgracer/M
+disgruntle/DSLG
+disgruntlement/MS
+disguised/UY
+disguise/R
+disguiser/M
+disgust
+disgusted/Y
+disgustful/Y
+disgusting/Y
+dishabille/SM
+disharmonious
+dishcloth/M
+dishcloths
+dishevel/LDGS
+dishevelment/MS
+dish/GD
+dishonest
+dishonored/U
+dishpan/MS
+dishrag/SM
+dishtowel/SM
+dishwasher/MS
+dishwater/SM
+disillusion/LGD
+disillusionment/SM
+disinfectant/MS
+disinherit
+disinterestedness/SM
+disinterested/P
+disinvest/L
+disjoin
+disjointedness/S
+disjunctive/YS
+disjunct/VS
+disk/D
+diskette/S
+dislike/G
+dislodge/LG
+dislodgement/M
+dismalness/M
+dismal/PSTRY
+dismantle/L
+dismantlement/SM
+dismay/D
+dismayed/U
+dismaying/Y
+dis/MB
+dismember/LG
+dismemberment/MS
+dismissive/Y
+dismiss/RZ
+Disneyland/M
+Disney/M
+disoblige/G
+disorderedness/M
+disordered/YP
+disorderliness/M
+disorderly/P
+disorder/Y
+disorganize
+disorganized/U
+disparagement/MS
+disparager/M
+disparage/RSDLG
+disparaging/Y
+disparateness/M
+disparate/PSY
+dispatch/Z
+dispelled
+dispelling
+dispel/S
+dispensable/I
+dispensary/MS
+dispensate/NX
+dispensation/M
+dispenser/M
+dispense/ZGDRSB
+dispersal/MS
+dispersant/M
+dispersed/Y
+disperser/M
+disperse/XDRSZLNGV
+dispersible
+dispersion/M
+dispersiveness/M
+dispersive/PY
+dispirit/DSG
+displace/L
+display/AGDS
+displayed/U
+displeased/Y
+displease/G
+displeasure
+disport
+disposable/S
+disposal/SM
+dispose/IGSD
+dispositional
+disposition/ISM
+disproportional
+disproportionate/N
+disproportionation/M
+disprove/B
+disputable/I
+disputably/I
+disputant/SM
+disputation/SM
+disputatious/Y
+disputed/U
+disputer/M
+dispute/ZBGSRD
+disquieting/Y
+disquiet/M
+disquisition/SM
+Disraeli/M
+disregardful
+disrepair/M
+disreputableness/M
+disreputable/P
+disrepute/M
+disrespect
+disrupted/U
+disrupter/M
+disrupt/GVDRS
+disruption/MS
+disruptive/YP
+disruptor/M
+dissatisfy
+dissect/DG
+dissed
+dissembler/M
+dissemble/ZGRSD
+disseminate/XGNSD
+dissemination/M
+dissension/SM
+dissenter/M
+dissent/ZGSDR
+dissertation/SM
+disservice
+disses
+dissever
+dissidence/SM
+dissident/MS
+dissimilar/S
+dissing
+dissipatedly
+dissipatedness/M
+dissipated/U
+dissipater/M
+dissipate/XRSDVNG
+dissipation/M
+dissociable/I
+dissociate/DSXNGV
+dissociated/U
+dissociation/M
+dissociative/Y
+dissoluble/I
+dissoluteness/SM
+dissolute/PY
+dissolve/ASDG
+dissolved/U
+dissonance/SM
+dissonant/Y
+dissuade/GDRS
+dissuader/M
+dissuasive
+dist
+distaff/SM
+distal/Y
+distance/DSMG
+distantness/M
+distant/YP
+distaste
+distemper
+distend
+distension
+distention/SM
+distillate/XNMS
+distillation/M
+distillery/MS
+distincter
+distinctest
+distinction/MS
+distinctiveness/MS
+distinctive/YP
+distinct/IYVP
+distinctness/MSI
+distinguishable/I
+distinguishably/I
+distinguish/BDRSG
+distinguished/U
+distinguisher/M
+distort/BGDR
+distorted/U
+distorter/M
+distortion/MS
+distract/DG
+distractedness/M
+distracted/YP
+distracting/Y
+distrait
+distraught/Y
+distress
+distressful
+distressing/Y
+distribute/ADXSVNGB
+distributed/U
+distributer
+distributional
+distribution/AM
+distributiveness/M
+distributive/SPY
+distributivity
+distributorship/M
+distributor/SM
+district/GSAD
+district's
+distrust/G
+disturbance/SM
+disturbed/U
+disturber/M
+disturbing/Y
+disturb/ZGDRS
+disulfide/M
+disuse/M
+disyllable/M
+Dita/M
+ditcher/M
+ditch/MRSDG
+dither/RDZSG
+ditsy/TR
+ditto/DMGS
+ditty/SDGM
+Ditzel/M
+ditz/S
+diuresis/M
+diuretic/S
+diurnal/SY
+divalent/S
+diva/MS
+divan/SM
+dived/M
+divergence/SM
+divergent/Y
+diverge/SDG
+diver/M
+diverseness/MS
+diverse/XYNP
+diversification/M
+diversifier/M
+diversify/GSRDNX
+diversionary
+diversion/M
+diversity/SM
+divert/GSD
+diverticulitis/SM
+divertimento/M
+dive/S
+divestiture/MS
+divest/LDGS
+divestment/S
+dividable
+divide/AGDS
+divided/U
+dividend/MS
+divider/MS
+divination/SM
+diviner/M
+divine/RSDTZYG
+divinity/MS
+divisibility/IMS
+divisible/I
+divisional
+division/SM
+divisiveness/MS
+divisive/PY
+divisor/SM
+divorcée/MS
+divorce/GSDLM
+divorcement/MS
+divot/MS
+div/TZGJDRS
+divulge/GSD
+divvy/GSDM
+Dixiecrat/MS
+dixieland
+Dixieland/MS
+Dixie/M
+Dix/M
+Dixon/M
+dizzily
+dizziness/SM
+dizzying/Y
+dizzy/PGRSDT
+DJ
+Djakarta's
+djellabah's
+djellaba/S
+d/JGVX
+Djibouti/M
+DMD
+Dmitri/M
+DMZ
+DNA
+Dnepropetrovsk/M
+Dnepr's
+Dnieper's
+Dniester/M
+Dniren/M
+DOA
+doable
+DOB
+Dobbin/M
+dobbin/MS
+Doberman
+Dobro/M
+docent/SM
+docile/Y
+docility/MS
+docker/M
+docket/GSMD
+dock/GZSRDM
+dockland/MS
+dockside/M
+dockworker/S
+dockyard/SM
+doc/MS
+Doctor
+doctoral
+doctorate/SM
+doctor/GSDM
+Doctorow/M
+doctrinaire/S
+doctrinal/Y
+doctrine/SM
+docudrama/S
+documentary/MS
+documentation/MS
+documented/U
+document/RDMZGS
+DOD
+dodder/DGS
+dodecahedra
+dodecahedral
+dodecahedron/M
+Dode/M
+dodge/GZSRD
+Dodge/M
+dodgem/S
+dodger/M
+Dodgson/M
+Dodie/M
+Dodi/M
+Dodington/M
+Dodoma/M
+dodo/SM
+Dodson/M
+Dody/M
+DOE
+Doe/M
+doe/MS
+doer/MU
+does/AU
+doeskin/MS
+doesn't
+d'oeuvre
+doff/SGD
+dogcart/SM
+dogcatcher/MS
+dogeared
+Doge/M
+doge/SM
+dogfight/GMS
+dogfish/SM
+dogfought
+doggedness/SM
+dogged/PY
+doggerel/SM
+dogging
+doggone/RSDTG
+doggy/SRMT
+doghouse/SM
+dogie/SM
+doglegged
+doglegging
+dogleg/SM
+dogma/MS
+dogmatically/U
+dogmatic/S
+dogmatics/M
+dogmatism/SM
+dogmatist/SM
+dogsbody/M
+dog/SM
+dogtooth/M
+Dogtown/M
+dogtrot/MS
+dogtrotted
+dogtrotting
+dogwood/SM
+dogy's
+Doha/M
+doh's
+doily/SM
+doing/MU
+Dolby/SM
+doldrum/S
+doldrums/M
+doled/F
+dolefuller
+dolefullest
+dolefulness/MS
+doleful/PY
+Dole/M
+dole/MGDS
+doles/F
+Dolf/M
+doling/F
+dollar/SM
+Dolley/M
+Dollie/M
+Dolli/M
+Doll/M
+doll/MDGS
+dollop/GSMD
+Dolly/M
+dolly/SDMG
+dolmen/MS
+dolomite/SM
+dolomitic
+Dolores/M
+Dolorita/SM
+dolorous/Y
+dolor/SM
+dolphin/SM
+Dolph/M
+doltishness/SM
+doltish/YP
+dolt/MS
+domain/MS
+dome/DSMG
+Domenic/M
+Domenico/M
+Domeniga/M
+Domesday/M
+domestically
+domesticate/DSXGN
+domesticated/U
+domestication/M
+domesticity/MS
+domestic/S
+domicile/SDMG
+domiciliary
+dominance/MS
+dominant/YS
+dominate/VNGXSD
+domination/M
+dominator/M
+dominatrices
+dominatrix
+domineer/DSG
+domineeringness/M
+domineering/YP
+Dominga/M
+Domingo/M
+Dominguez/M
+Dominica/M
+Dominican/MS
+Dominick/M
+Dominic/M
+Dominik/M
+Domini/M
+dominion/MS
+Dominique/M
+dominoes
+domino/M
+Domitian/M
+Dom/M
+Donahue/M
+Donald/M
+Donaldson/M
+Donall/M
+Donal/M
+Donalt/M
+Dona/M
+dona/MS
+Donatello/M
+donate/XVGNSD
+donation/M
+donative/M
+Donaugh/M
+Donavon/M
+done/AUF
+Donella/M
+Donelle/M
+Donetsk/M
+Donetta/M
+dong/GDMS
+dongle/S
+Donia/M
+Donica/M
+Donielle/M
+Donizetti/M
+donkey/MS
+Donna/M
+Donnamarie/M
+donned
+Donnell/M
+Donnelly/M
+Donne/M
+Donner/M
+Donnie/M
+Donni/M
+donning
+donnishness/M
+donnish/YP
+Donn/RM
+donnybrook/MS
+Donny/M
+donor/MS
+Donovan/M
+don/S
+Don/SM
+don't
+donut/MS
+donutted
+donutting
+doodad/MS
+doodlebug/MS
+doodler/M
+doodle/SRDZG
+doohickey/MS
+Dooley/M
+Doolittle/M
+doom/MDGS
+doomsday/SM
+Doonesbury/M
+doorbell/SM
+door/GDMS
+doorhandles
+doorkeeper/M
+doorkeep/RZ
+doorknob/SM
+doorman/M
+doormat/SM
+doormen
+doornail/M
+doorplate/SM
+doors/I
+doorstep/MS
+doorstepped
+doorstepping
+doorstop/MS
+doorway/MS
+dooryard/SM
+dopamine
+dopant/M
+dopa/SM
+dope/DRSMZG
+doper/M
+dopey
+dopier
+dopiest
+dopiness/S
+Doppler/M
+Dorado/M
+Doralia/M
+Doralin/M
+Doralyn/M
+Doralynne/M
+Doralynn/M
+Dora/M
+Dorcas
+Dorchester/M
+Doreen/M
+Dorelia/M
+Dorella/M
+Dorelle/M
+Doré/M
+Dorena/M
+Dorene/M
+Doretta/M
+Dorette/M
+Dorey/M
+Doria/M
+Dorian/M
+Doric
+Dorice/M
+Dorie/M
+Dori/MS
+Dorine/M
+Dorisa/M
+Dorise/M
+Dorita/M
+dork/S
+dorky/RT
+dormancy/MS
+dormant/S
+dormer/M
+dormice
+dormitory/SM
+dorm/MRZS
+dormouse/M
+Dorolice/M
+Dorolisa/M
+Doro/M
+Dorotea/M
+Doroteya/M
+Dorothea/M
+Dorothee/M
+Dorothy/M
+Dorree/M
+Dorrie/M
+Dorri/SM
+Dorry/M
+dorsal/YS
+Dorsey/M
+Dorthea/M
+Dorthy/M
+Dortmund/M
+Dory/M
+dory/SM
+DOS
+dosage/SM
+dose/M
+dos/GDS
+Dosi/M
+dosimeter/MS
+dosimetry/M
+dossier/MS
+dost
+Dostoevsky/M
+DOT
+dotage/SM
+dotard/MS
+doter/M
+dote/S
+Doti/M
+doting/Y
+Dot/M
+dot/MDRSJZG
+Dotson/M
+dotted
+Dottie/M
+Dotti/M
+dottiness/M
+dotting
+Dotty/M
+dotty/PRT
+do/TZRHGJ
+Douala/M
+Douay/M
+Doubleday/M
+doubled/UA
+double/GPSRDZ
+doubleheader/MS
+doubleness/M
+doubler/M
+doubles/M
+doublespeak/S
+doublethink/M
+doublet/MS
+doubleton/M
+doubling/A
+doubloon/MS
+doubly
+doubt/AGSDMB
+doubted/U
+doubter/SM
+doubtfulness/SM
+doubtful/YP
+doubting/Y
+doubtlessness/M
+doubtless/YP
+douche/GSDM
+Dougherty/M
+dough/M
+doughs
+doughty/RT
+doughy/RT
+Dougie/M
+Douglas/M
+Douglass
+Doug/M
+Dougy/M
+dourness/MS
+Douro/M
+dour/TYRP
+douser/M
+douse/SRDG
+dovecote/MS
+Dover/M
+dove/RSM
+dovetail/GSDM
+dovish
+Dov/MR
+dowager/SM
+dowdily
+dowdiness/MS
+dowdy/TPSR
+dowel/GMDS
+dower/GDMS
+Dow/M
+downbeat/SM
+downcast/S
+downdraft/M
+downer/M
+Downey/M
+downfall/NMS
+downgrade/GSD
+down/GZSRD
+downheartedness/MS
+downhearted/PY
+downhill/RS
+downland
+download/DGS
+downpipes
+downplay/GDS
+downpour/MS
+downrange
+downrightness/M
+downright/YP
+downriver
+Downs
+downscale/GSD
+downside/S
+downsize/DSG
+downslope
+downspout/SM
+downstage/S
+downstairs
+downstate/SR
+downstream
+downswing/MS
+downtime/SM
+downtowner/M
+downtown/MRS
+downtrend/M
+downtrodden
+downturn/MS
+downwardness/M
+downward/YPS
+downwind
+downy/RT
+dowry/SM
+dowse/GZSRD
+dowser/M
+doxology/MS
+doyenne/SM
+doyen/SM
+Doyle/M
+Doy/M
+doze
+dozen/GHD
+dozenths
+dozer/M
+doz/XGNDRS
+dozy
+DP
+DPs
+dpt
+DPT
+drabbed
+drabber
+drabbest
+drabbing
+drabness/MS
+drab/YSP
+drachma/MS
+Draco/M
+draconian
+Draconian
+Dracula/M
+draft/AMDGS
+draftee/SM
+drafter/MS
+draftily
+draftiness/SM
+drafting/S
+draftsman/M
+draftsmanship/SM
+draftsmen
+draftsperson
+draftswoman
+draftswomen
+drafty/PTR
+dragged
+dragger/M
+dragging/Y
+draggy/RT
+drag/MS
+dragnet/MS
+dragonfly/SM
+dragonhead/M
+dragon/SM
+dragoon/DMGS
+drainage/MS
+drainboard/SM
+drained/U
+drainer/M
+drainpipe/MS
+drain/SZGRDM
+Drake/M
+drake/SM
+Dramamine/MS
+drama/SM
+dramatically/U
+dramatical/Y
+dramatic/S
+dramatics/M
+dramatist/MS
+dramatization/MS
+dramatized/U
+dramatizer/M
+dramatize/SRDZG
+dramaturgy/M
+Drambuie/M
+drammed
+dramming
+dram/MS
+drank
+Drano/M
+draper/M
+drapery/MS
+drape/SRDGZ
+drastic
+drastically
+drat/S
+dratted
+dratting
+Dravidian/M
+drawable
+draw/ASG
+drawback/MS
+drawbridge/SM
+drawer/SM
+drawing/SM
+drawler/M
+drawling/Y
+drawl/RDSG
+drawly
+drawn/AI
+drawnly
+drawnness
+drawstring/MS
+dray/SMDG
+dreadfulness/SM
+dreadful/YPS
+dreadlocks
+dreadnought/SM
+dread/SRDG
+dreamboat/SM
+dreamed/U
+dreamer/M
+dreamily
+dreaminess/SM
+dreaming/Y
+dreamland/SM
+dreamlessness/M
+dreamless/PY
+dreamlike
+dream/SMRDZG
+dreamworld/S
+dreamy/PTR
+drearily
+dreariness/SM
+drear/S
+dreary/TRSP
+Dreddy/M
+dredge/MZGSRD
+dredger/M
+Dredi/M
+dreg/MS
+Dreiser/M
+Dre/M
+drencher/M
+drench/GDRS
+Dresden/M
+dress/ADRSG
+dressage/MS
+dressed/U
+dresser/MS
+dresser's/A
+dresses/U
+dressiness/SM
+dressing/MS
+dressmaker/MS
+dressmaking/SM
+dressy/PTR
+drew/A
+Drew/M
+Drexel/M
+Dreyfus/M
+Dreyfuss
+dribble/DRSGZ
+dribbler/M
+driblet/SM
+drib/SM
+dried/U
+drier/M
+drifter/M
+drifting/Y
+drift/RDZSG
+driftwood/SM
+driller/M
+drilling/M
+drillmaster/SM
+drill/MRDZGS
+drinkable/S
+drink/BRSZG
+drinker/M
+dripped
+dripping/MS
+drippy/RT
+drip/SM
+driveler/M
+drivel/GZDRS
+driven/P
+driver/M
+drive/SRBGZJ
+driveway/MS
+drizzle/DSGM
+drizzling/Y
+drizzly/TR
+Dr/M
+drogue/MS
+drollery/SM
+drollness/MS
+droll/RDSPTG
+drolly
+dromedary/MS
+Drona/M
+drone/SRDGM
+droning/Y
+drool/GSRD
+droopiness/MS
+drooping/Y
+droop/SGD
+droopy/PRT
+drophead
+dropkick/S
+droplet/SM
+dropout/MS
+dropped
+dropper/SM
+dropping/MS
+dropsical
+drop/SM
+dropsy/MS
+drosophila/M
+dross/SM
+drought/SM
+drover/M
+drove/SRDGZ
+drowner/M
+drown/RDSJG
+drowse/SDG
+drowsily
+drowsiness/SM
+drowsy/PTR
+drubbed
+drubber/MS
+drubbing/SM
+drub/S
+Drucie/M
+Drucill/M
+Druci/M
+Drucy/M
+drudge/MGSRD
+drudger/M
+drudgery/SM
+drudging/Y
+Drud/M
+drugged
+druggie/SRT
+drugging
+druggist/SM
+Drugi/M
+drugless
+drug/SM
+drugstore/SM
+druidism/MS
+druid/MS
+Druid's
+Dru/M
+drumbeat/SGM
+drumhead/M
+drumlin/MS
+drummed
+drummer/SM
+drumming
+Drummond/M
+drum/SM
+drumstick/SM
+drunkard/SM
+drunkenness/SM
+drunken/YP
+drunk/SRNYMT
+drupe/SM
+Drury/M
+Drusie/M
+Drusilla/M
+Drusi/M
+Drusy/M
+druthers
+dryad/MS
+Dryden/M
+dryer/MS
+dry/GYDRSTZ
+dryish
+dryness/SM
+drys
+drystone
+drywall/GSD
+D's
+d's/A
+Dshubba/M
+DST
+DTP
+dualism/MS
+dualistic
+dualist/M
+duality/MS
+dual/YS
+Duane/M
+Dubai/M
+dubbed
+dubber/S
+dubbing/M
+dubbin/MS
+Dubcek/M
+Dubhe/M
+dubiety/MS
+dubiousness/SM
+dubious/YP
+Dublin/M
+Dubrovnik/M
+dub/S
+Dubuque/M
+ducal
+ducat/SM
+duce/CAIKF
+duce's
+Duchamp/M
+duchess/MS
+duchy/SM
+duckbill/SM
+ducker/M
+duck/GSRDM
+duckling/SM
+duckpins
+duckpond
+duckweed/MS
+ducky/RSMT
+ducted/CFI
+ductile/I
+ductility/SM
+ducting/F
+duct/KMSF
+ductless
+duct's/A
+ducts/CI
+ductwork/M
+dudder
+dude/MS
+dudgeon/SM
+dud/GMDS
+Dudley/M
+Dud/M
+duelist/MS
+duel/MRDGZSJ
+dueness/M
+duenna/MS
+due/PMS
+duet/MS
+duetted
+duetting
+duffel/M
+duffer/M
+duff/GZSRDM
+Duffie/M
+Duff/M
+Duffy/M
+Dugald/M
+dugout/SM
+dug/S
+duh
+DUI
+Duisburg/M
+dukedom/SM
+duke/DSMG
+Duke/M
+Dukey/M
+Dukie/M
+Duky/M
+Dulcea/M
+Dulce/M
+dulcet/SY
+Dulcia/M
+Dulciana/M
+Dulcie/M
+dulcify
+Dulci/M
+dulcimer/MS
+Dulcinea/M
+Dulcine/M
+Dulcy/M
+dullard/MS
+Dulles/M
+dullness/MS
+dull/SRDPGT
+dully
+dulness's
+Dulsea/M
+Duluth/M
+duly/U
+Du/M
+Dumas
+dumbbell/MS
+dumbfound/GSDR
+dumbness/MS
+Dumbo/M
+dumb/PSGTYRD
+dumbstruck
+dumbwaiter/SM
+dumdum/MS
+dummy/SDMG
+Dumont/M
+dumper/UM
+dumpiness/MS
+dumpling/MS
+dump/SGZRD
+dumpster/S
+Dumpster/S
+Dumpty/M
+dumpy/PRST
+Dunant/M
+Dunbar/M
+Duncan/M
+dunce/MS
+Dunc/M
+Dundee/M
+dunderhead/MS
+Dunedin/M
+dune/SM
+dungaree/SM
+dungeon/GSMD
+dunghill/MS
+dung/SGDM
+Dunham/M
+dunker/M
+dunk/GSRD
+Dunkirk/M
+Dunlap/M
+Dun/M
+dunned
+Dunne/M
+dunner
+dunnest
+dunning
+Dunn/M
+dunno/M
+dun/S
+Dunstan/M
+duodecimal/S
+duodena
+duodenal
+duodenum/M
+duologue/M
+duo/MS
+duopolist
+duopoly/M
+dupe/NGDRSMZ
+duper/M
+dupion/M
+duple
+duplexer/M
+duplex/MSRDG
+duplicability/M
+duplicable
+duplicate/ADSGNX
+duplication/AM
+duplicative
+duplicator/MS
+duplicitous
+duplicity/SM
+Dupont/MS
+DuPont/MS
+durability/MS
+durableness/M
+durable/PS
+durably
+Duracell/M
+durance/SM
+Durand/M
+Duran/M
+Durante/M
+Durant/M
+durational
+duration/MS
+Durban/M
+Dürer/M
+duress/SM
+Durex/M
+Durham/MS
+during
+Durkee/M
+Durkheim/M
+Dur/M
+Durocher/M
+durst
+durum/MS
+Durward/M
+Duse/M
+Dusenberg/M
+Dusenbury/M
+Dushanbe/M
+dusk/GDMS
+duskiness/MS
+dusky/RPT
+Düsseldorf
+dustbin/MS
+dustcart/M
+dustcover
+duster/M
+dustily
+dustiness/MS
+dusting/M
+Dustin/M
+dustless
+dustman/M
+dustmen
+dust/MRDGZS
+dustpan/SM
+Dusty/M
+dusty/RPT
+Dutch/M
+Dutchman/M
+Dutchmen
+dutch/MS
+Dutchwoman
+Dutchwomen
+duteous/Y
+dutiable
+dutifulness/S
+dutiful/UPY
+duty/SM
+Duvalier/M
+duvet/SM
+duxes
+Dvina/M
+Dvorák/M
+Dwain/M
+dwarfish
+dwarfism/MS
+dwarf/MTGSPRD
+Dwayne/M
+dweeb/S
+dweller/SM
+dwell/IGS
+dwelling/MS
+dwelt/I
+DWI
+Dwight/M
+dwindle/GSD
+dyadic
+dyad/MS
+Dyana/M
+Dyane/M
+Dyan/M
+Dyanna/M
+Dyanne/M
+Dyann/M
+dybbukim
+dybbuk/SM
+dyed/A
+dyeing/M
+dye/JDRSMZG
+dyer/M
+Dyer/M
+dyes/A
+dyestuff/SM
+dying/UA
+Dyke/M
+dyke's
+Dylan/M
+Dy/M
+Dynah/M
+Dyna/M
+dynamical/Y
+dynamic/S
+dynamics/M
+dynamism/SM
+dynamiter/M
+dynamite/RSDZMG
+dynamized
+dynamo/MS
+dynastic
+dynasty/MS
+dyne/M
+dysentery/SM
+dysfunctional
+dysfunction/MS
+dyslectic/S
+dyslexia/MS
+dyslexically
+dyslexic/S
+dyspepsia/MS
+dyspeptic/S
+dysprosium/MS
+dystopia/M
+dystrophy/M
+dz
+Dzerzhinsky/M
+E
+ea
+each
+Eachelle/M
+Eada/M
+Eadie/M
+Eadith/M
+Eadmund/M
+eagerness/MS
+eager/TSPRYM
+eagle/SDGM
+eaglet/SM
+Eakins/M
+Ealasaid/M
+Eal/M
+Eamon/M
+earache/SM
+eardrum/SM
+earful/MS
+ear/GSMDYH
+Earhart/M
+earing/M
+earldom/MS
+Earle/M
+Earlene/M
+Earlie/M
+Earline/M
+earliness/SM
+Earl/M
+earl/MS
+earlobe/S
+Early/M
+early/PRST
+earmark/DGSJ
+earmuff/SM
+earned/U
+earner/M
+Earnestine/M
+Earnest/M
+earnestness/MS
+earnest/PYS
+earn/GRDZTSJ
+earning/M
+earphone/MS
+earpieces
+earplug/MS
+Earp/M
+earring/MS
+earshot/MS
+earsplitting
+Eartha/M
+earthbound
+earthed/U
+earthenware/MS
+earthiness/SM
+earthliness/M
+earthling/MS
+earthly/TPR
+earth/MDNYG
+earthmen
+earthmover/M
+earthmoving
+earthquake/SDGM
+earthshaking
+earths/U
+earthward/S
+earthwork/MS
+earthworm/MS
+earthy/PTR
+Earvin/M
+earwax/MS
+earwigged
+earwigging
+earwig/MS
+eased/E
+ease/LDRSMG
+easel/MS
+easement/MS
+easer/M
+ease's/EU
+eases/UE
+easies
+easily/U
+easiness/MSU
+easing/M
+eastbound
+easterly/S
+Easter/M
+easterner/M
+Easterner/M
+easternmost
+Eastern/RZ
+eastern/ZR
+easter/Y
+east/GSMR
+Easthampton/M
+easting/M
+Eastland/M
+Eastman/M
+eastward/S
+Eastwick/M
+Eastwood/M
+East/ZSMR
+easygoingness/M
+easygoing/P
+easy/PUTR
+eatables
+eatable/U
+eaten/U
+eater/M
+eatery/MS
+eating/M
+Eaton/M
+eat/SJZGNRB
+eavesdropped
+eavesdropper/MS
+eavesdropping
+eavesdrop/S
+eave/SM
+Eba/M
+Ebba/M
+ebb/DSG
+EBCDIC
+Ebeneezer/M
+Ebeneser/M
+Ebenezer/M
+Eben/M
+Eberhard/M
+Eberto/M
+Eb/MN
+Ebola
+Ebonee/M
+Ebonics
+Ebony/M
+ebony/SM
+Ebro/M
+ebullience/SM
+ebullient/Y
+ebullition/SM
+EC
+eccentrically
+eccentricity/SM
+eccentric/MS
+eccl
+Eccles
+Ecclesiastes/M
+ecclesiastical/Y
+ecclesiastic/MS
+ECG
+echelon/SGDM
+echinoderm/SM
+echo/DMG
+echoed/A
+echoes/A
+echoic
+echolocation/SM
+éclair/MS
+éclat/MS
+eclectically
+eclecticism/MS
+eclectic/S
+eclipse/MGSD
+ecliptic/MS
+eclogue/MS
+ecocide/SM
+ecol
+Ecole/M
+ecologic
+ecological/Y
+ecologist/MS
+ecology/MS
+Eco/M
+econ
+Econometrica/M
+econometricians
+econometric/S
+econometrics/M
+economical/YU
+economic/S
+economics/M
+economist/MS
+economization
+economize/GZSRD
+economizer/M
+economizing/U
+economy/MS
+ecosystem/MS
+ecru/SM
+ecstasy/MS
+Ecstasy/S
+ecstatically
+ecstatic/S
+ectoplasm/M
+Ecuadoran/S
+Ecuadorean/S
+Ecuadorian/S
+Ecuador/M
+ecumenical/Y
+ecumenicism/SM
+ecumenicist/MS
+ecumenic/MS
+ecumenics/M
+ecumenism/SM
+ecumenist/MS
+eczema/MS
+Eda/M
+Edam/SM
+Edan/M
+ed/ASC
+Edda/M
+Eddie/M
+Eddi/M
+Edd/M
+Eddy/M
+eddy/SDMG
+Edee/M
+Edeline/M
+edelweiss/MS
+Ede/M
+edema/SM
+edematous
+eden
+Eden/M
+Edgard/M
+Edgardo/M
+Edgar/M
+edge/DRSMZGJ
+edgeless
+edger/M
+Edgerton/M
+Edgewater/M
+edgewise
+Edgewood/M
+edgily
+edginess/MS
+edging/M
+edgy/TRP
+edibility/MS
+edibleness/SM
+edible/SP
+edict/SM
+Edie/M
+edification/M
+edifice/SM
+edifier/M
+edifying/U
+edify/ZNXGRSD
+Edik/M
+Edi/MH
+Edinburgh/M
+Edin/M
+Edison/M
+editable
+Edita/M
+edited/IU
+Editha/M
+Edithe/M
+Edith/M
+edition/SM
+editorialist/M
+editorialize/DRSG
+editorializer/M
+editorial/YS
+editor/MS
+editorship/MS
+edit/SADG
+Ediva/M
+Edlin/M
+Edmond/M
+Edmon/M
+Edmonton/M
+Edmund/M
+Edna/M
+Edouard/M
+EDP
+eds
+Edsel/M
+Edsger/M
+EDT
+Eduard/M
+Eduardo/M
+educability/SM
+educable/S
+educated/YP
+educate/XASDGN
+educationalists
+educational/Y
+education/AM
+educationists
+educative
+educator/MS
+educ/DBG
+educe/S
+eduction/M
+Eduino/M
+edutainment/S
+Edvard/M
+Edwardian
+Edwardo/M
+Edward/SM
+Edwina/M
+Edwin/M
+Ed/XMN
+Edy/M
+Edythe/M
+Edyth/M
+EEC
+EEG
+eek/S
+eelgrass/M
+eel/MS
+e'en
+EEO
+EEOC
+e'er
+eerie/RT
+eerily
+eeriness/MS
+Eeyore/M
+effaceable/I
+effacement/MS
+effacer/M
+efface/SRDLG
+effectiveness/ISM
+effectives
+effective/YIP
+effector/MS
+effect/SMDGV
+effectual/IYP
+effectualness/MI
+effectuate/SDGN
+effectuation/M
+effeminacy/MS
+effeminate/SY
+effendi/MS
+efferent/SY
+effervesce/GSD
+effervescence/SM
+effervescent/Y
+effeteness/SM
+effete/YP
+efficacious/IPY
+efficaciousness/MI
+efficacy/IMS
+efficiency/MIS
+efficient/ISY
+Effie/M
+effigy/SM
+effloresce
+efflorescence/SM
+efflorescent
+effluence/SM
+effluent/MS
+effluvia
+effluvium/M
+effluxion
+efflux/M
+effortlessness/SM
+effortless/PY
+effort/MS
+effrontery/MS
+effulgence/SM
+effulgent
+effuse/XSDVGN
+effusion/M
+effusiveness/MS
+effusive/YP
+EFL
+e/FMDS
+Efrain/M
+Efrem/M
+Efren/M
+EFT
+egad
+egalitarian/I
+egalitarianism/MS
+egalitarians
+EGA/M
+Egan/M
+Egbert/M
+Egerton/M
+eggbeater/SM
+eggcup/MS
+egger/M
+egg/GMDRS
+eggheaded/P
+egghead/SDM
+eggnog/SM
+eggplant/MS
+eggshell/SM
+egis's
+eglantine/MS
+egocentrically
+egocentricity/SM
+egocentric/S
+egoism/SM
+egoistic
+egoistical/Y
+egoist/SM
+egomaniac/MS
+egomania/MS
+Egon/M
+Egor/M
+ego/SM
+egotism/SM
+egotistic
+egotistical/Y
+egotist/MS
+egregiousness/MS
+egregious/PY
+egress/SDMG
+egret/SM
+Egyptian/S
+Egypt/M
+Egyptology/M
+eh
+Ehrlich/M
+Eichmann/M
+eiderdown/SM
+eider/SM
+eidetic
+Eiffel/M
+eigenfunction/MS
+eigenstate/S
+eigenvalue/SM
+eigenvector/MS
+eighteen/MHS
+eighteenths
+eightfold
+eighth/MS
+eighths
+eightieths
+eightpence
+eight/SM
+eighty/SHM
+Eileen/M
+Eilis/M
+Eimile/M
+Einsteinian
+einsteinium/MS
+Einstein/SM
+Eire/M
+Eirena/M
+Eisenhower/M
+Eisenstein/M
+Eisner/M
+eisteddfod/M
+either
+ejaculate/SDXNG
+ejaculation/M
+ejaculatory
+ejecta
+ejection/SM
+ejector/SM
+eject/VGSD
+Ekaterina/M
+Ekberg/M
+eked/A
+eke/DSG
+EKG
+Ekstrom/M
+Ektachrome/M
+elaborateness/SM
+elaborate/SDYPVNGX
+elaboration/M
+elaborators
+Elaina/M
+Elaine/M
+Elana/M
+eland/SM
+Elane/M
+élan/M
+Elanor/M
+elans
+elapse/SDG
+el/AS
+elastically/I
+elasticated
+elasticity/SM
+elasticize/GDS
+elastic/S
+elastodynamics
+elastomer/M
+elatedness/M
+elated/PY
+elater/M
+elate/SRDXGN
+elation/M
+Elayne/M
+Elba/MS
+Elbe/M
+Elberta/M
+Elbertina/M
+Elbertine/M
+Elbert/M
+elbow/GDMS
+elbowroom/SM
+Elbrus/M
+Elden/M
+elderberry/MS
+elderflower
+elderliness/M
+elderly/PS
+elder/SY
+eldest
+Eldin/M
+Eldon/M
+Eldorado's
+Eldredge/M
+Eldridge/M
+Eleanora/M
+Eleanore/M
+Eleanor/M
+Eleazar/M
+electable/U
+elect/ASGD
+elected/U
+electioneer/GSD
+election/SAM
+electiveness/M
+elective/SPY
+electoral/Y
+electorate/SM
+elector/SM
+Electra/M
+electress/M
+electricalness/M
+electrical/PY
+electrician/SM
+electricity/SM
+electric/S
+electrification/M
+electrifier/M
+electrify/ZXGNDRS
+electrocardiogram/MS
+electrocardiograph/M
+electrocardiographs
+electrocardiography/MS
+electrochemical/Y
+electrocute/GNXSD
+electrocution/M
+electrode/SM
+electrodynamics/M
+electrodynamic/YS
+electroencephalogram/SM
+electroencephalographic
+electroencephalograph/M
+electroencephalographs
+electroencephalography/MS
+electrologist/MS
+electroluminescent
+electrolysis/M
+electrolyte/SM
+electrolytic
+electrolytically
+electrolyze/SDG
+electro/M
+electromagnetic
+electromagnetically
+electromagnetism/SM
+electromagnet/SM
+electromechanical
+electromechanics
+electromotive
+electromyograph
+electromyographic
+electromyographically
+electromyography/M
+electronegative
+electronically
+electronic/S
+electronics/M
+electron/MS
+electrophoresis/M
+electrophorus/M
+electroplate/DSG
+electroscope/MS
+electroscopic
+electroshock/GDMS
+electrostatic/S
+electrostatics/M
+electrotherapist/M
+electrotype/GSDZM
+electroweak
+eleemosynary
+Eleen/M
+elegance/ISM
+elegant/YI
+elegiacal
+elegiac/S
+elegy/SM
+elem
+elemental/YS
+elementarily
+elementariness/M
+elementary/P
+element/MS
+Elena/M
+Elene/M
+Eleni/M
+Elenore/M
+Eleonora/M
+Eleonore/M
+elephantiases
+elephantiasis/M
+elephantine
+elephant/SM
+elevated/S
+elevate/XDSNG
+elevation/M
+elevator/SM
+eleven/HM
+elevens/S
+elevenths
+elev/NX
+Elfie/M
+elfin/S
+elfish
+elf/M
+Elfreda/M
+Elfrida/M
+Elfrieda/M
+Elga/M
+Elgar/M
+Elianora/M
+Elianore/M
+Elia/SM
+Elicia/M
+elicitation/MS
+elicit/GSD
+elide/GSD
+Elie/M
+eligibility/ISM
+eligible/SI
+Elihu/M
+Elijah/M
+Eli/M
+eliminate/XSDYVGN
+elimination/M
+eliminator/SM
+Elinore/M
+Elinor/M
+Eliot/M
+Elisabeth/M
+Elisabet/M
+Elisabetta/M
+Elisa/M
+Elise/M
+Eliseo/M
+Elisha/M
+elision/SM
+Elissa/M
+Elita/M
+elite/MPS
+elitism/SM
+elitist/SM
+elixir/MS
+Elizabethan/S
+Elizabeth/M
+Elizabet/M
+Eliza/M
+Elka/M
+Elke/M
+Elkhart/M
+elk/MS
+Elladine/M
+Ella/M
+Ellary/M
+Elle/M
+Ellene/M
+Ellen/M
+Ellerey/M
+Ellery/M
+Ellesmere/M
+Ellette/M
+Ellie/M
+Ellington/M
+Elliot/M
+Elliott/M
+ellipse/MS
+ellipsis/M
+ellipsoidal
+ellipsoid/MS
+ellipsometer/MS
+ellipsometry
+elliptic
+elliptical/YS
+ellipticity/M
+Elli/SM
+Ellison/M
+Ellissa/M
+ell/MS
+Ellswerth/M
+Ellsworth/M
+Ellwood/M
+Elly/M
+Ellyn/M
+Ellynn/M
+Elma/M
+Elmer/M
+Elmhurst/M
+Elmira/M
+elm/MRS
+Elmo/M
+Elmore/M
+Elmsford/M
+El/MY
+Elna/MH
+Elnar/M
+Elnath/M
+Elnora/M
+Elnore/M
+elocutionary
+elocutionist/MS
+elocution/SM
+elodea/S
+Elohim/M
+Eloisa/M
+Eloise/M
+elongate/NGXSD
+elongation/M
+Elonore/M
+elopement/MS
+eloper/M
+elope/SRDLG
+eloquence/SM
+eloquent/IY
+Elora/M
+Eloy/M
+Elroy/M
+els
+Elsa/M
+Elsbeth/M
+else/M
+Else/M
+Elset/M
+elsewhere
+Elsey/M
+Elsie/M
+Elsi/M
+Elsinore/M
+Elspeth/M
+Elston/M
+Elsworth/M
+Elsy/M
+Eltanin/M
+Elton/M
+eluate/SM
+elucidate/SDVNGX
+elucidation/M
+elude/GSD
+elusiveness/SM
+elusive/YP
+elute/DGN
+elution/M
+Elva/M
+elven
+Elvera/M
+elver/SM
+elves/M
+Elvia/M
+Elvina/M
+Elvin/M
+Elvira/M
+elvish
+Elvis/M
+Elvyn/M
+Elwin/M
+Elwira/M
+Elwood/M
+Elwyn/M
+Ely/M
+Elyn/M
+Elysée/M
+Elysees
+Elyse/M
+Elysha/M
+Elysia/M
+elysian
+Elysian
+Elysium/SM
+Elyssa/M
+EM
+emaciate/NGXDS
+emaciation/M
+emacs/M
+Emacs/M
+email/SMDG
+Emalee/M
+Emalia/M
+Ema/M
+emanate/XSDVNG
+emanation/M
+emancipate/DSXGN
+emancipation/M
+emancipator/MS
+Emanuele/M
+Emanuel/M
+emasculate/GNDSX
+emasculation/M
+embalmer/M
+embalm/ZGRDS
+embank/GLDS
+embankment/MS
+embarcadero
+embargoes
+embargo/GMD
+embark/ADESG
+embarkation/EMS
+embarrassedly
+embarrassed/U
+embarrassing/Y
+embarrassment/MS
+embarrass/SDLG
+embassy/MS
+embattle/DSG
+embeddable
+embedded
+embedder
+embedding/MS
+embed/S
+embellished/U
+embellisher/M
+embellish/LGRSD
+embellishment/MS
+ember/MS
+embezzle/LZGDRS
+embezzlement/MS
+embezzler/M
+embitter/LGDS
+embitterment/SM
+emblazon/DLGS
+emblazonment/SM
+emblematic
+emblem/GSMD
+embodier/M
+embodiment/ESM
+embody/ESDGA
+embolden/DSG
+embolism/SM
+embosom
+embosser/M
+emboss/ZGRSD
+embouchure/SM
+embower/GSD
+embraceable
+embracer/M
+embrace/RSDVG
+embracing/Y
+embrasure/MS
+embrittle
+embrocation/SM
+embroiderer/M
+embroider/SGZDR
+embroidery/MS
+embroilment/MS
+embroil/SLDG
+embryologist/SM
+embryology/MS
+embryonic
+embryo/SM
+emceeing
+emcee/SDM
+Emelda/M
+Emelen/M
+Emelia/M
+Emelina/M
+Emeline/M
+Emelita/M
+Emelyne/M
+emendation/MS
+emend/SRDGB
+emerald/SM
+Emera/M
+emerge/ADSG
+emergence/MAS
+emergency/SM
+emergent/S
+emerita
+emeritae
+emeriti
+emeritus
+Emerson/M
+Emery/M
+emery/MGSD
+emetic/S
+emf/S
+emigrant/MS
+emigrate/SDXNG
+emigration/M
+émigré/S
+Emilee/M
+Emile/M
+Emilia/M
+Emilie/M
+Emili/M
+Emiline/M
+Emilio/M
+Emil/M
+Emily/M
+eminence/MS
+Eminence/MS
+eminent/Y
+emirate/SM
+emir/SM
+emissary/SM
+emission/AMS
+emissivity/MS
+emit/S
+emittance/M
+emitted
+emitter/SM
+emitting
+Emlen/M
+Emlyn/M
+Emlynne/M
+Emlynn/M
+em/M
+Em/M
+Emmalee/M
+Emmaline/M
+Emmalyn/M
+Emmalynne/M
+Emmalynn/M
+Emma/M
+Emmanuel/M
+Emmeline/M
+Emmerich/M
+Emmery/M
+Emmet/M
+Emmett/M
+Emmey/M
+Emmie/M
+Emmi/M
+Emmit/M
+Emmott/M
+Emmye/M
+Emmy/SM
+Emogene/M
+emollient/S
+emolument/SM
+Emory/M
+emote/SDVGNX
+emotionalism/MS
+emotionality/M
+emotionalize/GDS
+emotional/UY
+emotionless
+emotion/M
+emotive/Y
+empaneled
+empaneling
+empath
+empathetic
+empathetical/Y
+empathic
+empathize/SDG
+empathy/MS
+emperor/MS
+emphases
+emphasis/M
+emphasize/ZGCRSDA
+emphatically/U
+emphatic/U
+emphysema/SM
+emphysematous
+empire/MS
+empirical/Y
+empiricism/SM
+empiricist/SM
+empiric/SM
+emplace/L
+emplacement/MS
+employability/UM
+employable/US
+employed/U
+employee/SM
+employer/SM
+employ/LAGDS
+employment/UMAS
+emporium/MS
+empower/GLSD
+empowerment/MS
+empress/MS
+emptier/M
+emptily
+emptiness/SM
+empty/GRSDPT
+empyrean/SM
+ems/C
+EMT
+emulate/SDVGNX
+emulation/M
+emulative/Y
+emulator/MS
+emulsification/M
+emulsifier/M
+emulsify/NZSRDXG
+emulsion/SM
+emu/SM
+Emylee/M
+Emyle/M
+enabler/M
+enable/SRDZG
+enactment/ASM
+enact/SGALD
+enameler/M
+enamelware/SM
+enamel/ZGJMDRS
+enamor/DSG
+en/BM
+enc
+encamp/LSDG
+encampment/MS
+encapsulate/SDGNX
+encapsulation/M
+encase/GSDL
+encasement/SM
+encephalitic
+encephalitides
+encephalitis/M
+encephalographic
+encephalopathy/M
+enchain/SGD
+enchanter/MS
+enchant/ESLDG
+enchanting/Y
+enchantment/MSE
+enchantress/MS
+enchilada/SM
+encipherer/M
+encipher/SRDG
+encircle/GLDS
+encirclement/SM
+encl
+enclave/MGDS
+enclosed/U
+enclose/GDS
+enclosure/SM
+encoder/M
+encode/ZJGSRD
+encomium/SM
+encompass/GDS
+encore/GSD
+encounter/GSD
+encouragement/SM
+encourager/M
+encourage/SRDGL
+encouraging/Y
+encroacher/M
+encroach/LGRSD
+encroachment/MS
+encrustation/MS
+encrust/DSG
+encrypt/DGS
+encrypted/U
+encryption/SM
+encumbered/U
+encumber/SEDG
+encumbrancer/M
+encumbrance/SRM
+ency
+encyclical/SM
+encyclopaedia's
+encyclopedia/SM
+encyclopedic
+encyst/GSLD
+encystment/MS
+endanger/DGSL
+endangerment/SM
+endear/GSLD
+endearing/Y
+endearment/MS
+endeavored/U
+endeavorer/M
+endeavor/GZSMRD
+endemically
+endemicity
+endemic/S
+ender/M
+endgame/M
+Endicott/M
+ending/M
+endive/SM
+endlessness/MS
+endless/PY
+endmost
+endnote/MS
+endocrine/S
+endocrinologist/SM
+endocrinology/SM
+endogamous
+endogamy/M
+endogenous/Y
+endomorphism/SM
+endorse/DRSZGL
+endorsement/MS
+endorser/M
+endoscope/MS
+endoscopic
+endoscopy/SM
+endosperm/M
+endothelial
+endothermic
+endow/GSDL
+endowment/SM
+endpoint/MS
+endue/SDG
+endungeoned
+endurable/U
+endurably/U
+endurance/SM
+endure/BSDG
+enduringness/M
+enduring/YP
+endways
+Endymion/M
+end/ZGVMDRSJ
+ENE
+enema/SM
+enemy/SM
+energetically
+energetic/S
+energetics/M
+energized/U
+energizer/M
+energize/ZGDRS
+energy/MS
+enervate/XNGVDS
+enervation/M
+enfeeble/GLDS
+enfeeblement/SM
+enfilade/MGDS
+enfold/SGD
+enforceability/M
+enforceable/U
+enforced/Y
+enforce/LDRSZG
+enforcement/SM
+enforcer/M
+enforcible/U
+enfranchise/ELDRSG
+enfranchisement/EMS
+enfranchiser/M
+engage/ADSGE
+engagement/SEM
+engaging/Y
+Engelbert/M
+Engel/MS
+engender/DGS
+engineer/GSMDJ
+engineering/MY
+engine/MGSD
+England/M
+england/ZR
+Englebert/M
+Englewood/M
+English/GDRSM
+Englishman/M
+Englishmen
+Englishwoman/M
+Englishwomen
+Eng/M
+engorge/LGDS
+engorgement/MS
+Engracia/M
+engram/MS
+engraver/M
+engrave/ZGDRSJ
+engraving/M
+engrossed/Y
+engrosser/M
+engross/GLDRS
+engrossing/Y
+engrossment/SM
+engulf/GDSL
+engulfment/SM
+enhanceable
+enhance/LZGDRS
+enhancement/MS
+enhancer/M
+enharmonic
+Enid/M
+Enif/M
+enigma/MS
+enigmatic
+enigmatically
+Eniwetok/M
+enjambement's
+enjambment/MS
+enjoinder
+enjoin/GSD
+enjoyability
+enjoyableness/M
+enjoyable/P
+enjoyably
+enjoy/GBDSL
+enjoyment/SM
+Enkidu/M
+enlargeable
+enlarge/LDRSZG
+enlargement/MS
+enlarger/M
+enlightened/U
+enlighten/GDSL
+enlightening/U
+enlightenment/SM
+enlistee/MS
+enlister/M
+enlistment/SAM
+enlist/SAGDL
+enliven/LDGS
+enlivenment/SM
+enmesh/DSLG
+enmeshment/SM
+enmity/MS
+Ennis/M
+ennoble/LDRSG
+ennoblement/SM
+ennobler/M
+ennui/SM
+Enoch/M
+enormity/SM
+enormousness/MS
+enormous/YP
+Enos
+enough
+enoughs
+enplane/DSG
+enqueue/DS
+enquirer/S
+enquiringly
+enrage/SDG
+enrapture/GSD
+Enrica/M
+enricher/M
+Enrichetta/M
+enrich/LDSRG
+enrichment/SM
+Enrico/M
+Enrika/M
+Enrique/M
+Enriqueta/M
+enrobed
+enrollee/SM
+enroll/LGSD
+enrollment/SM
+ens
+ensconce/DSG
+ensemble/MS
+enshrine/DSLG
+enshrinement/SM
+enshroud/DGS
+ensign/SM
+ensilage/DSMG
+enslavement/MS
+enslaver/M
+enslave/ZGLDSR
+ensnare/GLDS
+ensnarement/SM
+Ensolite/M
+ensue/SDG
+ensurer/M
+ensure/SRDZG
+entailer/M
+entailment/MS
+entail/SDRLG
+entangle/EGDRSL
+entanglement/ESM
+entangler/EM
+entente/MS
+enter/ASDG
+entered/U
+enterer/M
+enteritides
+enteritis/SM
+enterprise/GMSR
+Enterprise/M
+enterpriser/M
+enterprising/Y
+entertainer/M
+entertaining/Y
+entertainment/SM
+entertain/SGZRDL
+enthalpy/SM
+enthrall/GDSL
+enthrallment/SM
+enthrone/GDSL
+enthronement/MS
+enthuse/DSG
+enthusiasm/SM
+enthusiastically/U
+enthusiastic/U
+enthusiast/MS
+enticement/SM
+entice/SRDJLZG
+enticing/Y
+entire/SY
+entirety/SM
+entitle/GLDS
+entitlement/MS
+entity/SM
+entomb/GDSL
+entombment/MS
+entomological
+entomologist/S
+entomology/MS
+entourage/SM
+entr'acte/S
+entrails
+entrainer/M
+entrain/GSLDR
+entrancement/MS
+entrance/MGDSL
+entranceway/M
+entrancing/Y
+entrant/MS
+entrapment/SM
+entrapped
+entrapping
+entrap/SL
+entreating/Y
+entreat/SGD
+entreaty/SM
+entrée/S
+entrench/LSDG
+entrenchment/MS
+entrepreneurial
+entrepreneur/MS
+entrepreneurship/M
+entropic
+entropy/MS
+entrust/DSG
+entry/ASM
+entryway/SM
+entwine/DSG
+enumerable
+enumerate/AN
+enumerated/U
+enumerates
+enumerating
+enumeration's/A
+enumeration/SM
+enumerative
+enumerator/SM
+enunciable
+enunciated/U
+enunciate/XGNSD
+enunciation/M
+enureses
+enuresis/M
+envelope/MS
+enveloper/M
+envelopment/MS
+envelop/ZGLSDR
+envenom/SDG
+enviableness/M
+enviable/U
+enviably
+envied/U
+envier/M
+enviousness/SM
+envious/PY
+environ/LGSD
+environmentalism/SM
+environmentalist/SM
+environmental/Y
+environment/MS
+envisage/DSG
+envision/GSD
+envoy/SM
+envying/Y
+envy/SRDMG
+enzymatic
+enzymatically
+enzyme/SM
+enzymology/M
+Eocene
+EOE
+eohippus/M
+Eolanda/M
+Eolande/M
+eolian
+eon/SM
+EPA
+epaulet/SM
+épée/S
+ephedrine/MS
+ephemeral/SY
+ephemera/MS
+ephemerids
+ephemeris/M
+Ephesian/S
+Ephesians/M
+Ephesus/M
+Ephraim/M
+Ephrayim/M
+Ephrem/M
+epically
+epicenter/SM
+epic/SM
+Epictetus/M
+Epicurean
+epicurean/S
+epicure/SM
+Epicurus/M
+epicycle/MS
+epicyclic
+epicyclical/Y
+epicycloid/M
+epidemically
+epidemic/MS
+epidemiological/Y
+epidemiologist/MS
+epidemiology/MS
+epidermal
+epidermic
+epidermis/MS
+epidural
+epigenetic
+epiglottis/SM
+epigrammatic
+epigram/MS
+epigrapher/M
+epigraph/RM
+epigraphs
+epigraphy/MS
+epilepsy/SM
+epileptic/S
+epilogue/SDMG
+Epimethius/M
+epinephrine/SM
+epiphany/SM
+Epiphany/SM
+epiphenomena
+episcopacy/MS
+episcopalian
+Episcopalian/S
+Episcopal/S
+episcopal/Y
+episcopate/MS
+episode/SM
+episodic
+episodically
+epistemic
+epistemological/Y
+epistemology/M
+epistle/MRS
+Epistle/SM
+epistolary/S
+epistolatory
+epitaph/GMD
+epitaphs
+epitaxial/Y
+epitaxy/M
+epithelial
+epithelium/MS
+epithet/MS
+epitome/MS
+epitomized/U
+epitomizer/M
+epitomize/SRDZG
+epochal/Y
+epoch/M
+epochs
+eponymous
+epoxy/GSD
+epsilon/SM
+Epsom/M
+Epstein/M
+equability/MS
+equableness/M
+equable/P
+equably
+equaling
+equality/ISM
+equalization/MS
+equalize/DRSGJZ
+equalized/U
+equalizer/M
+equalizes/U
+equal/USDY
+equanimity/MS
+equate/NGXBSD
+equation/M
+equatorial/S
+equator/SM
+equerry/MS
+equestrianism/SM
+equestrian/S
+equestrienne/SM
+equiangular
+equidistant/Y
+equilateral/S
+equilibrate/GNSD
+equilibration/M
+equilibrium/MSE
+equine/S
+equinoctial/S
+equinox/MS
+equipage/SM
+equipartition/M
+equip/AS
+equipment/SM
+equipoise/GMSD
+equipotent
+equipped/AU
+equipping/A
+equiproportional
+equiproportionality
+equiproportionate
+equitable/I
+equitableness/M
+equitably/I
+equitation/SM
+equity/IMS
+equiv
+equivalence/DSMG
+equivalent/SY
+equivocalness/MS
+equivocal/UY
+equivocate/NGSDX
+equivocation/M
+equivocator/SM
+Equuleus/M
+ER
+ERA
+eradicable/I
+eradicate/SDXVGN
+eradication/M
+eradicator/SM
+era/MS
+Eran/M
+erase/N
+eraser/M
+erasion/M
+Erasmus/M
+eras/SRDBGZ
+Erastus/M
+erasure/MS
+Erato/M
+Eratosthenes/M
+erbium/SM
+Erda/M
+ere
+Erebus/M
+erect/GPSRDY
+erectile
+erection/SM
+erectness/MS
+erector/SM
+Erek/M
+erelong
+eremite/MS
+Erena/M
+ergo
+ergodic
+ergodicity/M
+ergonomically
+ergonomics/M
+ergonomic/U
+ergophobia
+ergosterol/SM
+ergot/SM
+erg/SM
+Erhard/M
+Erhart/M
+Erica/M
+Ericha/M
+Erich/M
+Ericka/M
+Erick/M
+Erickson/M
+Eric/M
+Ericson's
+Ericsson's
+Eridanus/M
+Erie/SM
+Erika/M
+Erik/M
+Erikson/M
+Erina/M
+Erin/M
+Erinna/M
+Erinn/M
+eris
+Eris
+Eritrea/M
+Erlang/M
+Erlenmeyer/M
+Erl/M
+Er/M
+Erma/M
+Ermanno/M
+Ermengarde/M
+Ermentrude/M
+Ermina/M
+ermine/MSD
+Erminia/M
+Erminie/M
+Ermin/M
+Ernaline/M
+Erna/M
+Ernesta/M
+Ernestine/M
+Ernest/M
+Ernesto/M
+Ernestus/M
+Ernie/M
+Ernst/M
+Erny/M
+erode/SDG
+erodible
+erogenous
+erosible
+erosional
+erosion/SM
+erosiveness/M
+erosive/P
+Eros/SM
+erotically
+erotica/M
+eroticism/MS
+erotic/S
+errancy/MS
+errand/MS
+errantry/M
+errant/YS
+errata/SM
+erratically
+erratic/S
+erratum/MS
+err/DGS
+Errick/M
+erring/UY
+Erroll/M
+Errol/M
+erroneousness/M
+erroneous/YP
+error/SM
+ersatz/S
+Erse/M
+Erskine/M
+erst
+erstwhile
+Ertha/M
+eructation/MS
+eruct/DGS
+erudite/NYX
+erudition/M
+erupt/DSVG
+eruption/SM
+eruptive/SY
+Ervin/M
+ErvIn/M
+Erv/M
+Erwin/M
+Eryn/M
+erysipelas/SM
+erythrocyte/SM
+es
+e's
+Es
+E's
+Esau/M
+escadrille/M
+escalate/CDSXGN
+escalation/MC
+escalator/SM
+escallop/SGDM
+escapable/I
+escapade/SM
+escapee/MS
+escape/LGSRDB
+escapement/MS
+escaper/M
+escapism/SM
+escapist/S
+escapology
+escarole/MS
+escarpment/MS
+eschatology/M
+Escherichia/M
+Escher/M
+eschew/SGD
+Escondido/M
+escort/SGMD
+escritoire/SM
+escrow/DMGS
+escudo/MS
+escutcheon/SM
+Esdras/M
+ESE
+Eskimo/SM
+ESL
+Esma/M
+Esmaria/M
+Esmark/M
+Esme/M
+Esmeralda/M
+esophageal
+esophagi
+esophagus/M
+esoteric
+esoterica
+esoterically
+esp
+ESP
+espadrille/MS
+Espagnol/M
+espalier/SMDG
+especial/Y
+Esperanto/M
+Esperanza/M
+Espinoza/M
+espionage/SM
+esplanade/SM
+Esp/M
+Esposito/M
+espousal/MS
+espouser/M
+espouse/SRDG
+espresso/SM
+esprit/SM
+espy/GSD
+Esq/M
+esquire/GMSD
+Esquire/S
+Esra/M
+Essa/M
+essayer/M
+essayist/SM
+essay/SZMGRD
+essence/MS
+Essene/SM
+Essen/M
+essentialist/M
+essentially
+essentialness/M
+essential/USI
+Essequibo/M
+Essex/M
+Essie/M
+Essy/M
+EST
+established/U
+establisher/M
+establish/LAEGSD
+establishment/EMAS
+Establishment/MS
+Esta/M
+estate/GSDM
+Esteban/M
+esteem/EGDS
+Estela/M
+Estele/M
+Estella/M
+Estelle/M
+Estell/M
+Estel/M
+Esterházy/M
+ester/M
+Ester/M
+Estes
+Estevan/M
+Esther/M
+esthete's
+esthetically
+esthetic's
+esthetics's
+estimable/I
+estimableness/M
+estimate/XDSNGV
+estimating/A
+estimation/M
+estimator/SM
+Estonia/M
+Estonian/S
+estoppal
+Estrada/M
+estrange/DRSLG
+estrangement/SM
+estranger/M
+Estrella/M
+Estrellita/M
+estrogen/SM
+estrous
+estrus/SM
+est/RZ
+estuarine
+estuary/SM
+et
+ET
+ETA
+Etan/M
+eta/SM
+etc
+etcetera/SM
+etcher/M
+etch/GZJSRD
+etching/M
+ETD
+eternalness/SM
+eternal/PSY
+eternity/SM
+ethane/SM
+Ethan/M
+ethanol/MS
+Ethelbert/M
+Ethelda/M
+Ethelind/M
+Etheline/M
+Ethelin/M
+Ethel/M
+Ethelred/M
+Ethelyn/M
+Ethe/M
+etherealness/M
+ethereal/PY
+etherized
+Ethernet/MS
+ether/SM
+ethically/U
+ethicalness/M
+ethical/PYS
+ethicist/S
+ethic/MS
+Ethiopia/M
+Ethiopian/S
+ethnically
+ethnicity/MS
+ethnic/S
+ethnocentric
+ethnocentrism/MS
+ethnographers
+ethnographic
+ethnography/M
+ethnological
+ethnologist/SM
+ethnology/SM
+ethnomethodology
+ethological
+ethologist/MS
+ethology/SM
+ethos/SM
+ethylene/MS
+Ethyl/M
+ethyl/SM
+Etienne/M
+etiologic
+etiological
+etiology/SM
+etiquette/SM
+Etna/M
+Etruria/M
+Etruscan/MS
+Etta/M
+Ettie/M
+Etti/M
+Ettore/M
+Etty/M
+étude/MS
+etymological/Y
+etymologist/SM
+etymology/MS
+EU
+eucalypti
+eucalyptus/SM
+Eucharistic
+Eucharist/SM
+euchre/MGSD
+euclidean
+Euclid/M
+Eudora/M
+Euell/M
+Eugene/M
+Eugenia/M
+eugenically
+eugenicist/SM
+eugenic/S
+eugenics/M
+Eugenie/M
+Eugenio/M
+Eugenius/M
+Eugen/M
+Eugine/M
+Eulalie/M
+Eula/M
+Eulerian/M
+Euler/M
+eulogistic
+eulogist/MS
+eulogized/U
+eulogize/GRSDZ
+eulogizer/M
+eulogy/MS
+Eu/M
+Eumenides
+Eunice/M
+eunuch/M
+eunuchs
+Euphemia/M
+euphemism/MS
+euphemistic
+euphemistically
+euphemist/M
+euphonious/Y
+euphonium/M
+euphony/SM
+euphoria/SM
+euphoric
+euphorically
+Euphrates/M
+Eurasia/M
+Eurasian/S
+eureka/S
+Euripides/M
+Eur/M
+Eurodollar/SM
+Europa/M
+Europeanization/SM
+Europeanized
+European/MS
+Europe/M
+europium/MS
+Eurydice/M
+Eustace/M
+Eustachian/M
+Eustacia/M
+eutectic
+Euterpe/M
+euthanasia/SM
+euthenics/M
+evacuate/DSXNGV
+evacuation/M
+evacuee/MS
+evader/M
+evade/SRDBGZ
+Evaleen/M
+evaluable
+evaluate/ADSGNX
+evaluated/U
+evaluational
+evaluation/MA
+evaluative
+evaluator/MS
+Eva/M
+evanescence/MS
+evanescent
+Evangelia/M
+evangelic
+evangelicalism/SM
+Evangelical/S
+evangelical/YS
+Evangelina/M
+Evangeline/M
+Evangelin/M
+evangelism/SM
+evangelistic
+evangelist/MS
+Evangelist/MS
+evangelize/GDS
+Evania/M
+Evan/MS
+Evanne/M
+Evanston/M
+Evansville/M
+evaporate/VNGSDX
+evaporation/M
+evaporative/Y
+evaporator/MS
+evasion/SM
+evasiveness/SM
+evasive/PY
+Eveleen/M
+Evelina/M
+Eveline/M
+Evelin/M
+Evelyn/M
+Eve/M
+evened
+evener/M
+evenhanded/YP
+evening/SM
+Evenki/M
+Even/M
+evenness/MSU
+even/PUYRT
+evens
+evensong/MS
+eventfulness/SM
+eventful/YU
+eventide/SM
+event/SGM
+eventuality/MS
+eventual/Y
+eventuate/GSD
+Everard/M
+Eveready/M
+Evered/M
+Everest/M
+Everette/M
+Everett/M
+everglade/MS
+Everglades
+evergreen/S
+Everhart/M
+everlastingness/M
+everlasting/PYS
+everliving
+evermore
+EverReady/M
+eve/RSM
+ever/T
+every
+everybody/M
+everydayness/M
+everyday/P
+everyman
+everyone/MS
+everyplace
+everything
+everywhere
+eve's/A
+eves/A
+Evey/M
+evict/DGS
+eviction/SM
+evidence/MGSD
+evidential/Y
+evident/YS
+Evie/M
+evildoer/SM
+evildoing/MS
+evilness/MS
+evil/YRPTS
+evince/SDG
+Evin/M
+eviscerate/GNXDS
+evisceration/M
+Evita/M
+Ev/MN
+evocable
+evocate/NVX
+evocation/M
+evocativeness/M
+evocative/YP
+evoke/SDG
+evolute/NMXS
+evolutionarily
+evolutionary
+evolutionist/MS
+evolution/M
+evolve/SDG
+Evonne/M
+Evvie/M
+Evvy/M
+Evy/M
+Evyn/M
+Ewan/M
+Eward/M
+Ewart/M
+Ewell/M
+ewe/MZRS
+Ewen/M
+ewer/M
+Ewing/M
+exacerbate/NGXDS
+exacerbation/M
+exacter/M
+exactingness/M
+exacting/YP
+exaction/SM
+exactitude/ISM
+exactly/I
+exactness/MSI
+exact/TGSPRDY
+exaggerate/DSXNGV
+exaggerated/YP
+exaggeration/M
+exaggerative/Y
+exaggerator/MS
+exaltation/SM
+exalted/Y
+exalter/M
+exalt/ZRDGS
+examen/M
+examination/AS
+examination's
+examine/BGZDRS
+examined/AU
+examinees
+examiner/M
+examines/A
+examining/A
+exam/MNS
+example/DSGM
+exampled/U
+exasperate/DSXGN
+exasperated/Y
+exasperating/Y
+exasperation/M
+Excalibur/M
+excavate/NGDSX
+excavation/M
+excavator/SM
+Excedrin/M
+exceeder/M
+exceeding/Y
+exceed/SGDR
+excelled
+excellence/SM
+excellency/MS
+Excellency/MS
+excellent/Y
+excelling
+excel/S
+excelsior/S
+except/DSGV
+exceptionable/U
+exceptionalness/M
+exceptional/YU
+exception/BMS
+excerpter/M
+excerpt/GMDRS
+excess/GVDSM
+excessiveness/M
+excessive/PY
+exchangeable
+exchange/GDRSZ
+exchanger/M
+exchequer/SM
+Exchequer/SM
+excise/XMSDNGB
+excision/M
+excitability/MS
+excitableness/M
+excitable/P
+excitably
+excitation/SM
+excitatory
+excited/Y
+excitement/MS
+exciter/M
+excite/RSDLBZG
+excitingly
+exciting/U
+exciton/M
+exclaimer/M
+exclaim/SZDRG
+exclamation/MS
+exclamatory
+exclude/DRSG
+excluder/M
+exclusionary
+exclusioner/M
+exclusion/SZMR
+exclusiveness/SM
+exclusive/SPY
+exclusivity/MS
+excommunicate/XVNGSD
+excommunication/M
+excoriate/GNXSD
+excoriation/M
+excremental
+excrement/SM
+excrescence/MS
+excrescent
+excreta
+excrete/NGDRSX
+excreter/M
+excretion/M
+excretory/S
+excruciate/NGDS
+excruciating/Y
+excruciation/M
+exculpate/XSDGN
+exculpation/M
+exculpatory
+excursionist/SM
+excursion/MS
+excursiveness/SM
+excursive/PY
+excursus/MS
+excusable/IP
+excusableness/IM
+excusably/I
+excuse/BGRSD
+excused/U
+excuser/M
+exec/MS
+execrableness/M
+execrable/P
+execrably
+execrate/DSXNGV
+execration/M
+executable/MS
+execute/NGVZBXDRS
+executer/M
+executional
+executioner/M
+execution/ZMR
+executive/SM
+executor/SM
+executrices
+executrix/M
+exegeses
+exegesis/M
+exegete/M
+exegetical
+exegetic/S
+exemplariness/M
+exemplar/MS
+exemplary/P
+exemplification/M
+exemplifier/M
+exemplify/ZXNSRDG
+exemption/MS
+exempt/SDG
+exerciser/M
+exercise/ZDRSGB
+exertion/MS
+exert/SGD
+Exeter/M
+exeunt
+exhalation/SM
+exhale/GSD
+exhausted/Y
+exhauster/M
+exhaustible/I
+exhausting/Y
+exhaustion/SM
+exhaustiveness/MS
+exhaustive/YP
+exhaust/VGRDS
+exhibitioner/M
+exhibitionism/MS
+exhibitionist/MS
+exhibition/ZMRS
+exhibitor/SM
+exhibit/VGSD
+exhilarate/XSDVNG
+exhilarating/Y
+exhilaration/M
+exhortation/SM
+exhort/DRSG
+exhorter/M
+exhumation/SM
+exhume/GRSD
+exhumer/M
+exigence/S
+exigency/SM
+exigent/SY
+exiguity/SM
+exiguous
+exile/SDGM
+existence/MS
+existent/I
+existentialism/MS
+existentialistic
+existentialist/MS
+existential/Y
+existents
+exist/SDG
+exit/MDSG
+exobiology/MS
+exocrine
+Exodus/M
+exodus/SM
+exogamous
+exogamy/M
+exogenous/Y
+exonerate/SDVGNX
+exoneration/M
+exorbitance/MS
+exorbitant/Y
+exorcise/SDG
+exorcism/SM
+exorcist/SM
+exorcizer/M
+exoskeleton/MS
+exosphere/SM
+exothermic
+exothermically
+exotica
+exotically
+exoticism/SM
+exoticness/M
+exotic/PS
+exp
+expandability/M
+expand/DRSGZB
+expanded/U
+expander/M
+expanse/DSXGNVM
+expansible
+expansionary
+expansionism/MS
+expansionist/MS
+expansion/M
+expansiveness/S
+expansive/YP
+expatiate/XSDNG
+expatiation/M
+expatriate/SDNGX
+expatriation/M
+expectancy/MS
+expectant/YS
+expectational
+expectation/MS
+expected/UPY
+expecting/Y
+expectorant/S
+expectorate/NGXDS
+expectoration/M
+expect/SBGD
+expedience/IS
+expediency/IMS
+expedients
+expedient/YI
+expediter/M
+expedite/ZDRSNGX
+expeditionary
+expedition/M
+expeditiousness/MS
+expeditious/YP
+expeditor's
+expellable
+expelled
+expelling
+expel/S
+expendable/S
+expended/U
+expender/M
+expenditure/SM
+expend/SDRGB
+expense/DSGVM
+expensive/IYP
+expensiveness/SMI
+experienced/U
+experience/ISDM
+experiencing
+experiential/Y
+experimentalism/M
+experimentalist/SM
+experimental/Y
+experimentation/SM
+experimenter/M
+experiment/GSMDRZ
+experted
+experting
+expertise/SM
+expertize/GD
+expertnesses
+expertness/IM
+expert/PISY
+expert's
+expiable/I
+expiate/XGNDS
+expiation/M
+expiatory
+expiration/MS
+expired/U
+expire/SDG
+expiry/MS
+explainable/UI
+explain/ADSG
+explained/U
+explainer/SM
+explanation/MS
+explanatory
+expletive/SM
+explicable/I
+explicate/VGNSDX
+explication/M
+explicative/Y
+explicitness/SM
+explicit/PSY
+explode/DSRGZ
+exploded/U
+exploder/M
+exploitation/MS
+exploitative
+exploited/U
+exploiter/M
+exploit/ZGVSMDRB
+exploration/MS
+exploratory
+explore/DSRBGZ
+explored/U
+explorer/M
+explosion/MS
+explosiveness/SM
+explosive/YPS
+expo/MS
+exponential/SY
+exponentiate/XSDNG
+exponentiation/M
+exponent/MS
+exportability
+exportable
+export/AGSD
+exportation/SM
+exporter/MS
+export's
+expose
+exposed/U
+exposer/M
+exposit/D
+exposition/SM
+expositor/MS
+expository
+expos/RSDZG
+expostulate/DSXNG
+expostulation/M
+exposure/SM
+expounder/M
+expound/ZGSDR
+expressed/U
+expresser/M
+express/GVDRSY
+expressibility/I
+expressible/I
+expressibly/I
+expressionism/SM
+expressionistic
+expressionist/S
+expressionless/YP
+expression/MS
+expressive/IYP
+expressiveness/MS
+expressiveness's/I
+expressway/SM
+expropriate/XDSGN
+expropriation/M
+expropriator/SM
+expulsion/MS
+expunge/GDSR
+expunger/M
+expurgated/U
+expurgate/SDGNX
+expurgation/M
+exquisiteness/SM
+exquisite/YPS
+ex/S
+ext
+extant
+extemporaneousness/MS
+extemporaneous/YP
+extempore/S
+extemporization/SM
+extemporizer/M
+extemporize/ZGSRD
+extendability/M
+extendedly
+extendedness/M
+extended/U
+extender/M
+extendibility/M
+extendibles
+extend/SGZDR
+extensibility/M
+extensible/I
+extensional/Y
+extension/SM
+extensiveness/SM
+extensive/PY
+extensor/MS
+extent/SM
+extenuate/XSDGN
+extenuation/M
+exterior/MYS
+exterminate/XNGDS
+extermination/M
+exterminator/SM
+externalities
+externalization/SM
+externalize/GDS
+external/YS
+extern/M
+extinct/DGVS
+extinction/MS
+extinguishable/I
+extinguish/BZGDRS
+extinguisher/M
+extirpate/XSDVNG
+extirpation/M
+extolled
+extoller/M
+extolling
+extol/S
+extort/DRSGV
+extorter/M
+extortionate/Y
+extortioner/M
+extortionist/SM
+extortion/ZSRM
+extracellular/Y
+extract/GVSBD
+extraction/SM
+extractive/Y
+extractor/SM
+extracurricular/S
+extradite/XNGSDB
+extradition/M
+extragalactic
+extralegal/Y
+extramarital
+extramural
+extraneousness/M
+extraneous/YP
+extraordinarily
+extraordinariness/M
+extraordinary/PS
+extrapolate/XVGNSD
+extrapolation/M
+extra/S
+extrasensory
+extraterrestrial/S
+extraterritorial
+extraterritoriality/MS
+extravagance/MS
+extravagant/Y
+extravaganza/SM
+extravehicular
+extravert's
+extrema
+extremal
+extreme/DSRYTP
+extremeness/MS
+extremism/SM
+extremist/MS
+extremity/SM
+extricable/I
+extricate/XSDNG
+extrication/M
+extrinsic
+extrinsically
+extroversion/SM
+extrovert/GMDS
+extrude/GDSR
+extruder/M
+extrusion/MS
+extrusive
+exuberance/MS
+exuberant/Y
+exudate/XNM
+exudation/M
+exude/GSD
+exultant/Y
+exultation/SM
+exult/DGS
+exulting/Y
+exurban
+exurbanite/SM
+exurbia/MS
+exurb/MS
+Exxon/M
+Eyck/M
+Eyde/M
+Eydie/M
+eyeball/GSMD
+eyebrow/MS
+eyed/P
+eyedropper/MS
+eyeful/MS
+eye/GDRSMZ
+eyeglass/MS
+eyelash/MS
+eyeless
+eyelet/GSMD
+eyelid/SM
+eyeliner/MS
+eyeopener/MS
+eyeopening
+eyepiece/SM
+eyer/M
+eyeshadow
+eyesight/MS
+eyesore/SM
+eyestrain/MS
+eyeteeth
+eyetooth/M
+eyewash/MS
+eyewitness/SM
+Eyre/M
+eyrie's
+Eysenck/M
+Ezechiel/M
+Ezekiel/M
+Ezequiel/M
+Eziechiele/M
+Ezmeralda/M
+Ezra/M
+Ezri/M
+F
+FAA
+Fabe/MR
+Fabergé/M
+Faber/M
+Fabiano/M
+Fabian/S
+Fabien/M
+Fabio/M
+fable/GMSRD
+fabler/M
+fabricate/SDXNG
+fabrication/M
+fabricator/MS
+fabric/MS
+fabulists
+fabulousness/M
+fabulous/YP
+facade/GMSD
+face/AGCSD
+facecloth
+facecloths
+faceless/P
+faceplate/M
+facer/CM
+face's
+facetiousness/MS
+facetious/YP
+facet/SGMD
+facial/YS
+facileness/M
+facile/YP
+facilitate/VNGXSD
+facilitation/M
+facilitator/SM
+facilitatory
+facility/MS
+facing/MS
+facsimileing
+facsimile/MSD
+factional
+factionalism/SM
+faction/SM
+factiousness/M
+factious/PY
+factitious
+fact/MS
+facto
+factoid/S
+factorial/MS
+factoring/A
+factoring's
+factorisable
+factorization/SM
+factorize/GSD
+factor/SDMJG
+factory/MS
+factotum/MS
+factuality/M
+factualness/M
+factual/PY
+faculty/MS
+faddish
+faddist/SM
+fadedly
+faded/U
+fadeout
+fader/M
+fade/S
+fading's
+fading/U
+fad/ZGSMDR
+Fae/M
+faerie/MS
+Faeroe/M
+faery's
+Fafnir/M
+fagged
+fagging
+faggoting's
+Fagin/M
+fag/MS
+fagoting/M
+fagot/MDSJG
+Fahd/M
+Fahrenheit/S
+faïence/S
+failing's
+failing/UY
+fail/JSGD
+faille/MS
+failsafe
+failure/SM
+Faina/M
+fain/GTSRD
+fainter/M
+fainthearted
+faintness/MS
+faint/YRDSGPT
+Fairbanks
+Fairchild/M
+faired
+Fairfax/M
+Fairfield/M
+fairgoer/S
+fairground/MS
+fairing/MS
+fairish
+Fairleigh/M
+fairless
+Fairlie/M
+Fair/M
+Fairmont/M
+fairness's
+fairness/US
+Fairport/M
+fairs
+fair/TURYP
+Fairview/M
+fairway/MS
+fairyland/MS
+fairy/MS
+fairytale
+Faisalabad
+Faisal/M
+faithed
+faithfulness/MSU
+faithfuls
+faithful/UYP
+faithing
+faithlessness/SM
+faithless/YP
+Faith/M
+faiths
+faith's
+faith/U
+fajitas
+faker/M
+fake/ZGDRS
+fakir/SM
+falafel
+falconer/M
+falconry/MS
+falcon/ZSRM
+Falito/M
+Falkland/MS
+Falk/M
+Falkner/M
+fallaciousness/M
+fallacious/PY
+fallacy/MS
+faller/M
+fallibility/MSI
+fallible/I
+fallibleness/MS
+fallibly/I
+falloff/S
+Fallon/M
+fallopian
+Fallopian/M
+fallout/MS
+fallowness/M
+fallow/PSGD
+fall/SGZMRN
+falsehood/SM
+falseness/SM
+false/PTYR
+falsetto/SM
+falsie/MS
+falsifiability/M
+falsifiable/U
+falsification/M
+falsifier/M
+falsify/ZRSDNXG
+falsity/MS
+Falstaff/M
+falterer/M
+faltering/UY
+falter/RDSGJ
+Falwell/M
+fa/M
+famed/C
+fame/DSMG
+fames/C
+familial
+familiarity/MUS
+familiarization/MS
+familiarized/U
+familiarizer/M
+familiarize/ZGRSD
+familiarizing/Y
+familiarly/U
+familiarness/M
+familiar/YPS
+family/MS
+famine/SM
+faming/C
+famish/GSD
+famously/I
+famousness/M
+famous/PY
+fanaticalness/M
+fanatical/YP
+fanaticism/MS
+fanatic/SM
+Fanchette/M
+Fanchon/M
+fancied
+Fancie/M
+fancier/SM
+fanciest
+fancifulness/MS
+fanciful/YP
+fancily
+fanciness/SM
+fancying
+fancy/IS
+Fancy/M
+fancywork/SM
+fandango/SM
+Fanechka/M
+fanfare/SM
+fanfold/M
+fang/DMS
+fangled
+Fania/M
+fanlight/SM
+Fan/M
+fanned
+Fannie/M
+Fanni/M
+fanning
+fanny/SM
+Fanny/SM
+fanout
+fan/SM
+fantail/SM
+fantasia/SM
+fantasist/M
+fantasize/SRDG
+fantastical/Y
+fantastic/S
+fantasy/GMSD
+Fanya/M
+fanzine/S
+FAQ/SM
+Faraday/M
+farad/SM
+Farah/M
+Fara/M
+Farand/M
+faraway
+Farber/M
+farce/SDGM
+farcical/Y
+fare/MS
+farer/M
+farewell/DGMS
+farfetchedness/M
+far/GDR
+Fargo/M
+Farica/M
+farinaceous
+farina/MS
+Farkas/M
+Farlay/M
+Farlee/M
+Farleigh/M
+Farley/M
+Farlie/M
+Farly/M
+farmer/M
+Farmer/M
+farmhand/S
+farmhouse/SM
+farming/M
+Farmington/M
+farmland/SM
+farm/MRDGZSJ
+farmstead/SM
+farmworker/S
+Far/MY
+farmyard/MS
+faro/MS
+farragoes
+farrago/M
+Farragut/M
+Farrah/M
+Farrakhan/M
+Farra/M
+Farrand/M
+Farrell/M
+Farrel/M
+farrier/SM
+Farris/M
+Farr/M
+farrow/DMGS
+farseeing
+farsightedness/SM
+farsighted/YP
+farther
+farthermost
+farthest
+farthing/SM
+fart/MDGS!
+fas
+fascia/SM
+fascicle/DSM
+fasciculate/DNX
+fasciculation/M
+fascinate/SDNGX
+fascinating/Y
+fascination/M
+fascism/MS
+Fascism's
+fascistic
+Fascist's
+fascist/SM
+fashionableness/M
+fashionable/PS
+fashionably/U
+fashion/ADSG
+fashioner/SM
+fashion's
+Fassbinder/M
+fastback/MS
+fastball/S
+fasten/AGUDS
+fastener/MS
+fastening/SM
+fast/GTXSPRND
+fastidiousness/MS
+fastidious/PY
+fastness/MS
+fatalism/MS
+fatalistic
+fatalistically
+fatalist/MS
+fatality/MS
+fatal/SY
+fatback/SM
+fatefulness/MS
+fateful/YP
+fate/MS
+Fates
+fatheaded/P
+fathead/SMD
+father/DYMGS
+fathered/U
+fatherhood/MS
+fatherland/SM
+fatherless
+fatherliness/M
+fatherly/P
+Father/SM
+fathomable/U
+fathomless
+fathom/MDSBG
+fatigued/U
+fatigue/MGSD
+fatiguing/Y
+Fatima/M
+fatness/SM
+fat/PSGMDY
+fatso/M
+fatted
+fattener/M
+fatten/JZGSRD
+fatter
+fattest/M
+fattiness/SM
+fatting
+fatty/RSPT
+fatuity/MS
+fatuousness/SM
+fatuous/YP
+fatwa/SM
+faucet/SM
+Faulknerian
+Faulkner/M
+fault/CGSMD
+faultfinder/MS
+faultfinding/MS
+faultily
+faultiness/MS
+faultlessness/SM
+faultless/PY
+faulty/RTP
+fauna/MS
+Faunie/M
+Faun/M
+faun/MS
+Fauntleroy/M
+Faustian
+Faustina/M
+Faustine/M
+Faustino/M
+Faust/M
+Faustus/M
+fauvism/S
+favorableness/MU
+favorable/UMPS
+favorably/U
+favoredness/M
+favored's/U
+favored/YPSM
+favorer/EM
+favor/ESMRDGZ
+favoring/MYS
+favorings/U
+favorite/SMU
+favoritism/MS
+favors/A
+Fawkes/M
+Fawne/M
+fawner/M
+fawn/GZRDMS
+Fawnia/M
+fawning/Y
+Fawn/M
+fax/GMDS
+Fax/M
+Faydra/M
+Faye/M
+Fayette/M
+Fayetteville/M
+Fayina/M
+Fay/M
+fay/MDRGS
+Fayre/M
+Faythe/M
+Fayth/M
+faze/DSG
+FBI
+FCC
+FD
+FDA
+FDIC
+FDR/M
+fealty/MS
+fearfuller
+fearfullest
+fearfulness/MS
+fearful/YP
+fearlessness/MS
+fearless/PY
+fear/RDMSG
+fearsomeness/M
+fearsome/PY
+feasibility/SM
+feasibleness/M
+feasible/UI
+feasibly/U
+feaster/M
+feast/GSMRD
+feater/C
+featherbed
+featherbedding/SM
+featherbrain/MD
+feathered/U
+feathering/M
+featherless
+featherlight
+Featherman/M
+feathertop
+featherweight/SM
+feathery/TR
+feather/ZMDRGS
+feat/MYRGTS
+feats/C
+featureless
+feature/MGSD
+Feb/M
+febrile
+February/MS
+fecal
+feces
+fecklessness/M
+feckless/PY
+fecundability
+fecundate/XSDGN
+fecundation/M
+fecund/I
+fecundity/SM
+federalism/SM
+Federalist
+federalist/MS
+federalization/MS
+federalize/GSD
+Federal/S
+federal/YS
+federated/U
+federate/FSDXVNG
+federation/FM
+federative/Y
+Federica/M
+Federico/M
+FedEx/M
+Fedora/M
+fedora/SM
+feds
+Fed/SM
+fed/U
+feebleness/SM
+feeble/TPR
+feebly
+feedback/SM
+feedbag/MS
+feeder/M
+feed/GRZJS
+feeding/M
+feedlot/SM
+feedstock
+feedstuffs
+feeing
+feeler/M
+feel/GZJRS
+feelingly/U
+feeling/MYP
+feelingness/M
+Fee/M
+fee/MDS
+feet/M
+feigned/U
+feigner/M
+feign/RDGS
+feint/MDSG
+feisty/RT
+Felder/M
+Feldman/M
+feldspar/MS
+Felecia/M
+Felicdad/M
+Felice/M
+Felicia/M
+Felicio/M
+felicitate/XGNSD
+felicitation/M
+felicitous/IY
+felicitousness/M
+felicity/IMS
+Felicity/M
+Felicle/M
+Felic/M
+Felike/M
+Feliks/M
+feline/SY
+Felipa/M
+Felipe/M
+Felisha/M
+Felita/M
+Felix/M
+Feliza/M
+Felizio/M
+fella/S
+fellatio/SM
+felled/A
+feller/M
+felling/A
+Fellini/M
+fellness/M
+fellowman
+fellowmen
+fellow/SGDYM
+fellowshipped
+fellowshipping
+fellowship/SM
+fell/PSGZTRD
+feloniousness/M
+felonious/PY
+felon/MS
+felony/MS
+felt/GSD
+felting/M
+Fe/M
+female/MPS
+femaleness/SM
+feminineness/M
+feminine/PYS
+femininity/MS
+feminism/MS
+feminist/MS
+femme/MS
+femoral
+fem/S
+femur/MS
+fenced/U
+fencepost/M
+fencer/M
+fence/SRDJGMZ
+fencing/M
+fender/CM
+fend/RDSCZG
+Fenelia/M
+fenestration/CSM
+Fenian/M
+fenland/M
+fen/MS
+fennel/SM
+Fenwick/M
+Feodora/M
+Feodor/M
+feral
+Ferber/M
+Ferdie/M
+Ferdinanda/M
+Ferdinande/M
+Ferdinand/M
+Ferdinando/M
+Ferd/M
+Ferdy/M
+fer/FLC
+Fergus/M
+Ferguson/M
+Ferlinghetti/M
+Fermat/M
+fermentation/MS
+fermented
+fermenter
+ferment/FSCM
+fermenting
+Fermi/M
+fermion/MS
+fermium/MS
+Fernanda/M
+Fernande/M
+Fernandez/M
+Fernandina/M
+Fernando/M
+Ferne/M
+fernery/M
+Fern/M
+fern/MS
+ferny/TR
+ferociousness/MS
+ferocious/YP
+ferocity/MS
+Ferrari/M
+Ferraro/M
+Ferreira/M
+Ferrell/M
+Ferrel/M
+Ferrer/M
+ferreter/M
+ferret/SMRDG
+ferric
+ferris
+Ferris
+ferrite/M
+ferro
+ferroelectric
+ferromagnetic
+ferromagnet/M
+ferrous
+ferrule/MGSD
+ferryboat/MS
+ferryman/M
+ferrymen
+ferry/SDMG
+fertileness/M
+fertile/YP
+fertility/IMS
+fertilization/ASM
+fertilized/U
+fertilizer/M
+fertilizes/A
+fertilize/SRDZG
+ferule/SDGM
+fervency/MS
+fervent/Y
+fervidness/M
+fervid/YP
+fervor/MS
+fess/KGFSD
+Fess/M
+fess's
+festal/S
+fester/GD
+festival/SM
+festiveness/SM
+festive/PY
+festivity/SM
+festoon/SMDG
+fest/RVZ
+fetal
+feta/MS
+fetcher/M
+fetching/Y
+fetch/RSDGZ
+feted
+fête/MS
+fetich's
+fetidness/SM
+fetid/YP
+feting
+fetishism/SM
+fetishistic
+fetishist/SM
+fetish/MS
+fetlock/MS
+fetter's
+fetter/UGSD
+fettle/GSD
+fettling/M
+fettuccine/S
+fetus/SM
+feudalism/MS
+feudalistic
+feudal/Y
+feudatory/M
+feud/MDSG
+feverishness/SM
+feverish/PY
+fever/SDMG
+fewness/MS
+few/PTRS
+Fey/M
+Feynman/M
+fey/RT
+fez/M
+Fez/M
+fezzes
+ff
+FHA
+fiancée/S
+fiancé/MS
+Fianna/M
+Fiann/M
+fiascoes
+fiasco/M
+Fiat/M
+fiat/MS
+fibbed
+fibber/MS
+fibbing
+fiberboard/MS
+fiber/DM
+fiberfill/S
+Fiberglas/M
+fiberglass/DSMG
+Fibonacci/M
+fibrillate/XGNDS
+fibrillation/M
+fibril/MS
+fibrin/MS
+fibroblast/MS
+fibroid/S
+fibroses
+fibrosis/M
+fibrousness/M
+fibrous/YP
+fib/SZMR
+fibulae
+fibula/M
+fibular
+FICA
+fices
+fiche/SM
+Fichte/M
+fichu/SM
+fickleness/MS
+fickle/RTP
+ficos
+fictionalization/MS
+fictionalize/DSG
+fictional/Y
+fiction/SM
+fictitiousness/M
+fictitious/PY
+fictive/Y
+ficus
+fiddle/GMZJRSD
+fiddler/M
+fiddlestick/SM
+fiddly
+fide/F
+Fidela/M
+Fidelia/M
+Fidelio/M
+fidelity/IMS
+Fidelity/M
+Fidel/M
+fidget/DSG
+fidgety
+Fidole/M
+Fido/M
+fiducial/Y
+fiduciary/MS
+fiefdom/S
+fief/MS
+fielded
+fielder/IM
+fielding
+Fielding/M
+Field/MGS
+fieldstone/M
+fieldworker/M
+fieldwork/ZMRS
+field/ZISMR
+fiendishness/M
+fiendish/YP
+fiend/MS
+fierceness/SM
+fierce/RPTY
+fierily
+fieriness/MS
+fiery/PTR
+fie/S
+fies/C
+fiesta/MS
+fife/DRSMZG
+fifer/M
+Fifi/M
+Fifine/M
+FIFO
+fifteen/HRMS
+fifteenths
+fifths
+fifth/Y
+fiftieths
+fifty/HSM
+Figaro/M
+figged
+figging
+fightback
+fighter/MIS
+fighting/IS
+fight/ZSJRG
+figment/MS
+fig/MLS
+Figueroa/M
+figural
+figuration/FSM
+figurativeness/M
+figurative/YP
+figure/GFESD
+figurehead/SM
+figurer/SM
+figure's
+figurine/SM
+figuring/S
+Fijian/SM
+Fiji/M
+filamentary
+filament/MS
+filamentous
+Filberte/M
+Filbert/M
+filbert/MS
+Filberto/M
+filch/SDG
+filed/AC
+file/KDRSGMZ
+filename/SM
+filer/KMCS
+files/AC
+filet's
+filial/UY
+Filia/M
+filibusterer/M
+filibuster/MDRSZG
+Filide/M
+filigreeing
+filigree/MSD
+filing/AC
+filings
+Filipino/SM
+Filip/M
+Filippa/M
+Filippo/M
+fill/BAJGSD
+filled/U
+filler/MS
+filleting/M
+fillet/MDSG
+filling/M
+fillip/MDGS
+Fillmore/M
+filly/SM
+filmdom/M
+Filmer/M
+filminess/SM
+filming/M
+filmmaker/S
+Filmore/M
+film/SGMD
+filmstrip/SM
+filmy/RTP
+Filofax/S
+filtered/U
+filterer/M
+filter/RDMSZGB
+filthily
+filthiness/SM
+filth/M
+filths
+filthy/TRSDGP
+filtrated/I
+filtrate/SDXMNG
+filtrates/I
+filtrating/I
+filtration/IMS
+finagler/M
+finagle/RSDZG
+finale/MS
+finalist/MS
+finality/MS
+finalization/SM
+finalize/GSD
+final/SY
+Fina/M
+financed/A
+finance/MGSDJ
+finances/A
+financial/Y
+financier/DMGS
+financing/A
+Finch/M
+finch/MS
+findable/U
+find/BRJSGZ
+finder/M
+finding/M
+Findlay/M
+Findley/M
+fine/FGSCRDA
+finely
+fineness/MS
+finery/MAS
+fine's
+finespun
+finesse/SDMG
+fingerboard/SM
+fingerer/M
+fingering/M
+fingerless
+fingerling/M
+fingernail/MS
+fingerprint/SGDM
+finger/SGRDMJ
+fingertip/MS
+finial/SM
+finical
+finickiness/S
+finicky/RPT
+fining/M
+finished/UA
+finisher/M
+finishes/A
+finish/JZGRSD
+finis/SM
+finite/ISPY
+finitely/C
+finiteness/MIC
+fink/GDMS
+Finland/M
+Finlay/M
+Finley/M
+Fin/M
+Finnbogadottir/M
+finned
+Finnegan/M
+finner
+finning
+Finnish
+Finn/MS
+finny/RT
+fin/TGMDRS
+Fiona/M
+Fionna/M
+Fionnula/M
+fiord's
+Fiorello/M
+Fiorenze/M
+Fiori/M
+f/IRAC
+firearm/SM
+fireball/SM
+fireboat/M
+firebomb/MDSG
+firebox/MS
+firebrand/MS
+firebreak/SM
+firebrick/SM
+firebug/SM
+firecracker/SM
+firedamp/SM
+fired/U
+firefight/JRGZS
+firefly/MS
+Firefox/M
+fireguard/M
+firehouse/MS
+firelight/GZSM
+fireman/M
+firemen
+fire/MS
+fireplace/MS
+fireplug/MS
+firepower/SM
+fireproof/SGD
+firer/M
+firesafe
+fireside/SM
+Firestone/M
+firestorm/SM
+firetrap/SM
+firetruck/S
+firewall/S
+firewater/SM
+firewood/MS
+firework/MS
+firing/M
+firkin/M
+firmament/MS
+firmer
+firmest
+firm/ISFDG
+firmly/I
+firmness/MS
+firm's
+firmware/MS
+firring
+firstborn/S
+firsthand
+first/SY
+firth/M
+firths
+fir/ZGJMDRHS
+fiscal/YS
+Fischbein/M
+Fischer/M
+fishbowl/MS
+fishcake/S
+fisher/M
+Fisher/M
+fisherman/M
+fishermen/M
+fishery/MS
+fishhook/MS
+fishily
+fishiness/MS
+fishing/M
+fish/JGZMSRD
+Fishkill/M
+fishmeal
+fishmonger/MS
+fishnet/SM
+fishpond/SM
+fishtail/DMGS
+fishtanks
+fishwife/M
+fishwives
+fishy/TPR
+Fiske/M
+Fisk/M
+fissile
+fissionable/S
+fission/BSDMG
+fissure/MGSD
+fistfight/SM
+fistful/MS
+fisticuff/SM
+fist/MDGS
+fistula/SM
+fistulous
+Fitchburg/M
+Fitch/M
+fitfulness/SM
+fitful/PY
+fitments
+fitness/USM
+fits/AK
+fit's/K
+fitted/UA
+fitter/SM
+fittest
+fitting/AU
+fittingly
+fittingness/M
+fittings
+fit/UYPS
+Fitzgerald/M
+Fitz/M
+Fitzpatrick/M
+Fitzroy/M
+fivefold
+five/MRS
+fiver/M
+fixable
+fixate/VNGXSD
+fixatifs
+fixation/M
+fixative/S
+fixedness/M
+fixed/YP
+fixer/SM
+fixes/I
+fixing/SM
+fixity/MS
+fixture/SM
+fix/USDG
+Fizeau/M
+fizzer/M
+fizzle/GSD
+fizz/SRDG
+fizzy/RT
+fjord/SM
+FL
+flabbergast/GSD
+flabbergasting/Y
+flabbily
+flabbiness/SM
+flabby/TPR
+flab/MS
+flaccidity/MS
+flaccid/Y
+flack/SGDM
+flagella/M
+flagellate/DSNGX
+flagellation/M
+flagellum/M
+flagged
+flaggingly/U
+flagging/SMY
+flagman/M
+flagmen
+flag/MS
+flagon/SM
+flagpole/SM
+flagrance/MS
+flagrancy/SM
+flagrant/Y
+flagship/MS
+flagstaff/MS
+flagstone/SM
+flail/SGMD
+flair/SM
+flaker/M
+flake/SM
+flakiness/MS
+flak/RDMGS
+flaky/PRT
+Fla/M
+flambé/D
+flambeing
+flambes
+flamboyance/MS
+flamboyancy/MS
+flamboyant/YS
+flamenco/SM
+flamen/M
+flameproof/DGS
+flamer/IM
+flame's
+flame/SIGDR
+flamethrower/SM
+flamingo/SM
+flaming/Y
+flammability/ISM
+flammable/SI
+flam/MRNDJGZ
+Flanagan/M
+Flanders/M
+flange/GMSD
+flanker/M
+flank/SGZRDM
+flan/MS
+flannel/DMGS
+flannelet/MS
+flannelette's
+flapjack/SM
+flap/MS
+flapped
+flapper/SM
+flapping
+flaps/M
+flare/SDG
+flareup/S
+flaring/Y
+flashback/SM
+flashbulb/SM
+flashcard/S
+flashcube/MS
+flasher/M
+flashgun/S
+flashily
+flashiness/SM
+flashing/M
+flash/JMRSDGZ
+flashlight/MS
+flashy/TPR
+flask/SM
+flatbed/S
+flatboat/MS
+flatcar/MS
+flatfeet
+flatfish/SM
+flatfoot/SGDM
+flathead/M
+flatiron/SM
+flatland/RS
+flatmate/M
+flat/MYPS
+flatness/MS
+flatted
+flattener/M
+flatten/SDRG
+flatter/DRSZG
+flatterer/M
+flattering/YU
+flattery/SM
+flattest/M
+flatting
+flattish
+Flatt/M
+flattop/MS
+flatulence/SM
+flatulent/Y
+flatus/SM
+flatware/MS
+flatworm/SM
+Flaubert/M
+flaunting/Y
+flaunt/SDG
+flautist/SM
+flavored/U
+flavorer/M
+flavorful
+flavoring/M
+flavorless
+flavor/SJDRMZG
+flavorsome
+flaw/GDMS
+flawlessness/MS
+flawless/PY
+flax/MSN
+flaxseed/M
+flayer/M
+flay/RDGZS
+fleabag/MS
+fleabites
+flea/SM
+fleawort/M
+fleck/GRDMS
+Fledermaus/M
+fledged/U
+fledge/GSD
+fledgling/SM
+fleecer/M
+fleece/RSDGMZ
+fleeciness/SM
+fleecy/RTP
+fleeing
+flee/RS
+fleetingly/M
+fleetingness/SM
+fleeting/YP
+fleet/MYRDGTPS
+fleetness/MS
+Fleischer/M
+Fleischman/M
+Fleisher/M
+Fleming/M
+Flemished/M
+Flemish/GDSM
+Flemishing/M
+Flem/JGM
+Flemming/M
+flesher/M
+fleshiness/M
+flesh/JMYRSDG
+fleshless
+fleshly/TR
+fleshpot/SM
+fleshy/TPR
+fletch/DRSGJ
+fletcher/M
+Fletcher/M
+fletching/M
+Fletch/MR
+Fleurette/M
+Fleur/M
+flew/S
+flews/M
+flexed/I
+flexibility/MSI
+flexible/I
+flexibly/I
+flexitime's
+flex/MSDAG
+flextime/S
+flexural
+flexure/M
+fl/GJD
+flibbertigibbet/MS
+flicker/GD
+flickering/Y
+flickery
+flick/GZSRD
+flier/M
+flight/GMDS
+flightiness/SM
+flightless
+flightpath
+flighty/RTP
+flimflammed
+flimflamming
+flimflam/MS
+flimsily
+flimsiness/MS
+flimsy/PTRS
+flincher/M
+flinch/GDRS
+flinching/U
+flinger/M
+fling/RMG
+Flin/M
+Flinn/M
+flintiness/M
+flintless
+flintlock/MS
+Flint/M
+flint/MDSG
+Flintstones
+flinty/TRP
+flipflop
+flippable
+flippancy/MS
+flippant/Y
+flipped
+flipper/SM
+flippest
+flipping
+flip/S
+flirtation/SM
+flirtatiousness/MS
+flirtatious/PY
+flirt/GRDS
+flit/S
+flitted
+flitting
+floater/M
+float/SRDGJZ
+floaty
+flocculate/GNDS
+flocculation/M
+flock/SJDMG
+floe/MS
+flogged
+flogger/SM
+flogging/SM
+flog/S
+Flo/M
+floodgate/MS
+floodlight/DGMS
+floodlit
+floodplain/S
+flood/SMRDG
+floodwater/SM
+floorboard/MS
+floorer/M
+flooring/M
+floor/SJRDMG
+floorspace
+floorwalker/SM
+floozy/SM
+flophouse/SM
+flop/MS
+flopped
+flopper/M
+floppily
+floppiness/SM
+flopping
+floppy/TMRSP
+floral/SY
+Flora/M
+Florance/M
+flora/SM
+Florella/M
+Florence/M
+Florencia/M
+Florentia/M
+Florentine/S
+Florenza/M
+florescence/MIS
+florescent/I
+Flore/SM
+floret/MS
+Florette/M
+Floria/M
+Florian/M
+Florida/M
+Floridan/S
+Floridian/S
+floridness/SM
+florid/YP
+Florie/M
+Florina/M
+Florinda/M
+Florine/M
+florin/MS
+Flori/SM
+florist/MS
+Flor/M
+Florrie/M
+Florri/M
+Florry/M
+Flory/M
+floss/GSDM
+Flossie/M
+Flossi/M
+Flossy/M
+flossy/RST
+flotation/SM
+flotilla/SM
+flotsam/SM
+flounce/GDS
+flouncing/M
+flouncy/RT
+flounder/SDG
+flourisher/M
+flourish/GSRD
+flourishing/Y
+flour/SGDM
+floury/TR
+flouter/M
+flout/GZSRD
+flowchart/SG
+flowed
+flowerbed/SM
+flower/CSGD
+flowerer/M
+floweriness/SM
+flowerless
+flowerpot/MS
+flower's
+Flowers
+flowery/TRP
+flowing/Y
+flow/ISG
+flown
+flowstone
+Floyd/M
+Flss/M
+flt
+flubbed
+flubbing
+flub/S
+fluctuate/XSDNG
+fluctuation/M
+fluency/MS
+fluently
+fluent/SF
+flue/SM
+fluffiness/SM
+fluff/SGDM
+fluffy/PRT
+fluidity/SM
+fluidized
+fluid/MYSP
+fluidness/M
+fluke/SDGM
+fluky/RT
+flume/SDGM
+flummox/DSG
+flu/MS
+flung
+flunkey's
+flunk/SRDG
+flunky/MS
+fluoresce/GSRD
+fluorescence/MS
+fluorescent/S
+fluoridate/XDSGN
+fluoridation/M
+fluoride/SM
+fluorimetric
+fluorinated
+fluorine/SM
+fluorite/MS
+fluorocarbon/MS
+fluoroscope/MGDS
+fluoroscopic
+flurry/GMDS
+flushness/M
+flush/TRSDPBG
+fluster/DSG
+fluter/M
+flute/SRDGMJ
+fluting/M
+flutist/MS
+flutter/DRSG
+flutterer/M
+fluttery
+fluxed/A
+fluxes/A
+flux/IMS
+fluxing
+flyaway
+flyblown
+flyby/M
+flybys
+flycatcher/MS
+flyer's
+fly/JGBDRSTZ
+flyleaf/M
+flyleaves
+Flynn/M
+flyover/MS
+flypaper/MS
+flysheet/S
+flyspeck/MDGS
+flyswatter/S
+flyway/MS
+flyweight/MS
+flywheel/MS
+FM
+Fm/M
+FNMA/M
+foal/MDSG
+foaminess/MS
+foam/MRDSG
+foamy/RPT
+fobbed
+fobbing
+fob/SM
+focal/F
+focally
+Foch/M
+foci's
+focused/AU
+focuser/M
+focuses/A
+focus/SRDMBG
+fodder/GDMS
+foe/SM
+foetid
+FOFL
+fogbound
+fogged/C
+foggily
+fogginess/MS
+fogging/C
+foggy/RPT
+foghorn/SM
+fogs/C
+fog/SM
+fogyish
+fogy/SM
+foible/MS
+foil/GSD
+foist/GDS
+Fokker/M
+foldaway/S
+folded/AU
+folder/M
+foldout/MS
+fold/RDJSGZ
+folds/UA
+Foley/M
+foliage/MSD
+foliate/CSDXGN
+foliation/CM
+folio/SDMG
+folklike
+folklore/MS
+folkloric
+folklorist/SM
+folk/MS
+folksiness/MS
+folksinger/S
+folksinging/S
+folksong/S
+folksy/TPR
+folktale/S
+folkway/S
+foll
+follicle/SM
+follicular
+follower/M
+follow/JSZBGRD
+followup's
+folly/SM
+Folsom
+fol/Y
+Fomalhaut/M
+fomentation/SM
+fomenter/M
+foment/RDSG
+Fonda/M
+fondant/SM
+fondle/GSRD
+fondler/M
+fondness/MS
+fond/PMYRDGTS
+fondue/MS
+Fons
+Fonsie/M
+Fontainebleau/M
+Fontaine/M
+Fontana/M
+fontanelle's
+fontanel/MS
+font/MS
+Fonzie/M
+Fonz/M
+foodie/S
+food/MS
+foodstuff/MS
+foolery/MS
+foolhardily
+foolhardiness/SM
+foolhardy/PTR
+foolishness/SM
+foolish/PRYT
+fool/MDGS
+foolproof
+foolscap/MS
+footage/SM
+football/SRDMGZ
+footbridge/SM
+Foote/M
+footer/M
+footfall/SM
+foothill/SM
+foothold/MS
+footing/M
+footless
+footlights
+footling
+footlocker/SM
+footloose
+footman/M
+footmarks
+footmen
+footnote/MSDG
+footpad/SM
+footpath/M
+footpaths
+footplate/M
+footprint/MS
+footrace/S
+footrest/MS
+footsie/SM
+foot/SMRDGZJ
+footsore
+footstep/SM
+footstool/SM
+footwear/M
+footwork/SM
+fop/MS
+fopped
+foppery/MS
+fopping
+foppishness/SM
+foppish/YP
+forage/GSRDMZ
+forager/M
+forayer/M
+foray/SGMRD
+forbade
+forbearance/SM
+forbearer/M
+forbear/MRSG
+Forbes/M
+forbidden
+forbiddingness/M
+forbidding/YPS
+forbid/S
+forbore
+forborne
+forced/Y
+forcefield/MS
+forcefulness/MS
+forceful/PY
+forceps/M
+forcer/M
+force/SRDGM
+forcibleness/M
+forcible/P
+forcibly
+fordable/U
+Fordham/M
+Ford/M
+ford/SMDBG
+forearm/GSDM
+forebear/MS
+forebode/GJDS
+forebodingness/M
+foreboding/PYM
+forecaster/M
+forecastle/MS
+forecast/SZGR
+foreclose/GSD
+foreclosure/MS
+forecourt/SM
+foredoom/SDG
+forefather/SM
+forefeet
+forefinger/MS
+forefoot/M
+forefront/SM
+foregoer/M
+foregoing/S
+foregone
+foregos
+foreground/MGDS
+forehand/S
+forehead/MS
+foreigner/M
+foreignness/SM
+foreign/PRYZS
+foreknew
+foreknow/GS
+foreknowledge/MS
+foreknown
+foreleg/MS
+forelimb/MS
+forelock/MDSG
+foreman/M
+Foreman/M
+foremast/SM
+foremen
+foremost
+forename/DSM
+forenoon/SM
+forensically
+forensic/S
+forensics/M
+foreordain/DSG
+forepart/MS
+forepaws
+forepeople
+foreperson/S
+foreplay/MS
+forequarter/SM
+forerunner/MS
+fore/S
+foresail/SM
+foresaw
+foreseeable/U
+foreseeing
+foreseen/U
+foreseer/M
+foresee/ZSRB
+foreshadow/SGD
+foreshore/M
+foreshorten/DSG
+foresightedness/SM
+foresighted/PY
+foresight/SMD
+foreskin/SM
+forestaller/M
+forestall/LGSRD
+forestallment/M
+forestation/MCS
+forestations/A
+forest/CSAGD
+Forester/M
+forester/SM
+forestland/S
+Forest/MR
+forestry/MS
+forest's
+foretaste/MGSD
+foreteller/M
+foretell/RGS
+forethought/MS
+foretold
+forevermore
+forever/PS
+forewarner/M
+forewarn/GSJRD
+forewent
+forewoman/M
+forewomen
+foreword/SM
+forfeiter/M
+forfeiture/MS
+forfeit/ZGDRMS
+forfend/GSD
+forgather/GSD
+forgave
+forged/A
+forge/JVGMZSRD
+forger/M
+forgery/MS
+forges/A
+forgetfulness/SM
+forgetful/PY
+forget/SV
+forgettable/U
+forgettably/U
+forgetting
+forging/M
+forgivable/U
+forgivably/U
+forgiven
+forgiveness/SM
+forgiver/M
+forgive/SRPBZG
+forgivingly
+forgivingness/M
+forgiving/UP
+forgoer/M
+forgoes
+forgone
+forgo/RSGZ
+forgot
+forgotten/U
+for/HT
+forkful/S
+fork/GSRDM
+forklift/DMSG
+forlornness/M
+forlorn/PTRY
+formability/AM
+formaldehyde/SM
+formalin/M
+formalism/SM
+formalistic
+formalist/SM
+formality/SMI
+formal/IY
+formalization/SM
+formalized/U
+formalizer/M
+formalizes/I
+formalize/ZGSRD
+formalness/M
+formals
+formant/MIS
+format/AVS
+formate/MXGNSD
+formation/AFSCIM
+formatively/I
+formativeness/IM
+formative/SYP
+format's
+formatted/UA
+formatter/A
+formatters
+formatter's
+formatting/A
+form/CGSAFDI
+formed/U
+former/FSAI
+formerly
+formfitting
+formic
+Formica/MS
+formidableness/M
+formidable/P
+formidably
+formlessness/MS
+formless/PY
+Formosa/M
+Formosan
+form's
+formulaic
+formula/SM
+formulate/AGNSDX
+formulated/U
+formulation/AM
+formulator/SM
+fornicate/GNXSD
+fornication/M
+fornicator/SM
+Forrester/M
+Forrest/RM
+forsaken
+forsake/SG
+forsook
+forsooth
+Forster/M
+forswear/SG
+forswore
+forsworn
+forsythia/MS
+Fortaleza/M
+forte/MS
+forthcome/JG
+forthcoming/U
+FORTH/M
+forthrightness/SM
+forthright/PYS
+forthwith
+fortieths
+fortification/MS
+fortified/U
+fortifier/SM
+fortify/ADSG
+fortiori
+fortissimo/S
+fortitude/SM
+fortnightly/S
+fortnight/MYS
+FORTRAN
+Fortran/M
+fortress/GMSD
+fort/SM
+fortuitousness/SM
+fortuitous/YP
+fortuity/MS
+fortunateness/M
+fortunate/YUS
+fortune/MGSD
+fortuneteller/SM
+fortunetelling/SM
+forty/SRMH
+forum/MS
+forwarder/M
+forwarding/M
+forwardness/MS
+forward/PTZSGDRY
+forwent
+fossiliferous
+fossilization/MS
+fossilized/U
+fossilize/GSD
+fossil/MS
+Foss/M
+fosterer/M
+Foster/M
+foster/SRDG
+Foucault/M
+fought
+foulard/SM
+foulmouth/D
+foulness/MS
+fouls/M
+foul/SYRDGTP
+foundational
+foundation/SM
+founded/UF
+founder/MDG
+founder's/F
+founding/F
+foundling/MS
+found/RDGZS
+foundry/MS
+founds/KF
+fountainhead/SM
+fountain/SMDG
+fount/MS
+fourfold
+Fourier/M
+fourpence/M
+fourpenny
+fourposter/SM
+fourscore/S
+four/SHM
+foursome/SM
+foursquare
+fourteener/M
+fourteen/SMRH
+fourteenths
+Fourth
+fourths
+Fourths
+fourth/Y
+fovea/M
+fowler/M
+Fowler/M
+fowling/M
+fowl/SGMRD
+foxfire/SM
+foxglove/SM
+Foxhall/M
+foxhole/SM
+foxhound/SM
+foxily
+foxiness/MS
+foxing/M
+fox/MDSG
+Fox/MS
+foxtail/M
+foxtrot/MS
+foxtrotted
+foxtrotting
+foxy/TRP
+foyer/SM
+FPO
+fps
+fr
+fracas/SM
+fractal/SM
+fractional/Y
+fractionate/DNG
+fractionation/M
+fractioned
+fractioning
+fraction/ISMA
+fractiousness/SM
+fractious/PY
+fracture/MGDS
+fragile/Y
+fragility/MS
+fragmentarily
+fragmentariness/M
+fragmentary/P
+fragmentation/MS
+fragment/SDMG
+Fragonard/M
+fragrance/SM
+fragrant/Y
+frailness/MS
+frail/STPYR
+frailty/MS
+framed/U
+framer/M
+frame/SRDJGMZ
+framework/SM
+framing/M
+Francaise/M
+France/MS
+Francene/M
+Francesca/M
+Francesco/M
+franchisee/S
+franchise/ESDG
+franchiser/SM
+franchise's
+Franchot/M
+Francie/M
+Francine/M
+Francis
+Francisca/M
+Franciscan/MS
+Francisco/M
+Franciska/M
+Franciskus/M
+francium/MS
+Francklin/M
+Francklyn/M
+Franck/M
+Francoise/M
+Francois/M
+Franco/M
+francophone/M
+franc/SM
+Francyne/M
+frangibility/SM
+frangible
+Frankel/M
+Frankenstein/MS
+franker/M
+Frankford/M
+Frankfort/M
+Frankfurter/M
+frankfurter/MS
+Frankfurt/RM
+Frankie/M
+frankincense/MS
+Frankish/M
+franklin/M
+Franklin/M
+Franklyn/M
+frankness/MS
+frank/SGTYRDP
+Frank/SM
+Franky/M
+Fran/MS
+Frannie/M
+Franni/M
+Franny/M
+Fransisco/M
+frantically
+franticness/M
+frantic/PY
+Frants/M
+Franzen/M
+Franz/NM
+frappé
+frappeed
+frappeing
+frappes
+Frasco/M
+Fraser/M
+Frasier/M
+Frasquito/M
+fraternal/Y
+fraternity/MSF
+fraternization/SM
+fraternize/GZRSD
+fraternizer/M
+fraternizing/U
+frat/MS
+fratricidal
+fratricide/MS
+fraud/CS
+fraud's
+fraudsters
+fraudulence/S
+fraudulent/YP
+fraught/SGD
+Fraulein/S
+Frau/MN
+fray/CSDG
+Frayda/M
+Frayne/M
+fray's
+Fraze/MR
+Frazer/M
+Frazier/M
+frazzle/GDS
+freakishness/SM
+freakish/YP
+freak/SGDM
+freaky/RT
+freckle/GMDS
+freckly/RT
+Freda/M
+Freddie/M
+Freddi/M
+Freddy/M
+Fredek/M
+Fredelia/M
+Frederica/M
+Frederich/M
+Fredericka/M
+Frederick/MS
+Frederic/M
+Frederico/M
+Fredericton/M
+Frederigo/M
+Frederik/M
+Frederique/M
+Fredholm/M
+Fredia/M
+Fredi/M
+Fred/M
+Fredra/M
+Fredrick/M
+Fredrickson/M
+Fredric/M
+Fredrika/M
+freebase/GDS
+freebie/MS
+freebooter/M
+freeboot/ZR
+freeborn
+freedman/M
+Freedman/M
+freedmen
+freedom/MS
+freehand/D
+freehanded/Y
+freeholder/M
+freehold/ZSRM
+freeing/S
+freelance/SRDGZM
+Freeland/M
+freeloader/M
+freeload/SRDGZ
+Free/M
+freeman/M
+Freeman/M
+freemasonry/M
+Freemasonry/MS
+Freemason/SM
+freemen
+Freemon/M
+freeness/M
+Freeport/M
+freestanding
+freestone/SM
+freestyle/SM
+freethinker/MS
+freethinking/S
+Freetown/M
+freeway/MS
+freewheeler/M
+freewheeling/P
+freewheel/SRDMGZ
+freewill
+free/YTDRSP
+freezable
+freezer/SM
+freeze/UGSA
+freezing/S
+Freida/M
+freighter/M
+freight/ZGMDRS
+Fremont/M
+Frenchman/M
+French/MDSG
+Frenchmen
+Frenchwoman/M
+Frenchwomen
+frenetically
+frenetic/S
+frenzied/Y
+frenzy/MDSG
+freon/S
+Freon/SM
+freq
+frequency/ISM
+frequented/U
+frequenter/MS
+frequentest
+frequenting
+frequent/IY
+frequentness/M
+frequents
+fresco/DMG
+frescoes
+fresh/AZSRNDG
+freshener/M
+freshen/SZGDR
+fresher/MA
+freshest
+freshet/SM
+freshly
+freshman/M
+freshmen
+freshness/MS
+freshwater/SM
+Fresnel/M
+Fresno/M
+fretboard
+fretfulness/MS
+fretful/PY
+fret/S
+fretsaw/S
+fretted
+fretting
+fretwork/MS
+Freudian/S
+Freud/M
+Freya/M
+Frey/M
+friableness/M
+friable/P
+friary/MS
+friar/YMS
+fricasseeing
+fricassee/MSD
+frication/M
+fricative/MS
+Frick/M
+frictional/Y
+frictionless/Y
+friction/MS
+Friday/SM
+fridge/SM
+fried/A
+Frieda/M
+Friedan/M
+friedcake/SM
+Friederike/M
+Friedman/M
+Friedrich/M
+Friedrick/M
+friendlessness/M
+friendless/P
+friendlies
+friendlily
+friendliness/USM
+friendly/PUTR
+friend/SGMYD
+friendship/MS
+frier's
+fries/M
+frieze/SDGM
+frigate/SM
+Frigga/M
+frigged
+frigging/S
+frighten/DG
+frightening/Y
+frightfulness/MS
+frightful/PY
+fright/GXMDNS
+Frigidaire/M
+frigidity/MS
+frigidness/SM
+frigid/YP
+frig/S
+frill/MDGS
+frilly/RST
+Fri/M
+fringe/IGSD
+fringe's
+frippery/SM
+Frisbee/MS
+Frisco/M
+Frisian/SM
+frisker/M
+friskily
+friskiness/SM
+frisk/RDGS
+frisky/RTP
+frisson/M
+Frito/M
+fritterer/M
+fritter/RDSG
+Fritz/M
+fritz/SM
+frivolity/MS
+frivolousness/SM
+frivolous/PY
+frizz/GYSD
+frizzle/DSG
+frizzly/RT
+frizzy/RT
+Fr/MD
+Frobisher/M
+frocking/M
+frock's
+frock/SUDGC
+frogged
+frogging
+frogman/M
+frogmarched
+frogmen
+frog/MS
+fro/HS
+Froissart/M
+frolicked
+frolicker/SM
+frolicking
+frolic/SM
+frolicsome
+from
+Fromm/M
+frond/SM
+frontage/MS
+frontal/SY
+Frontenac/M
+front/GSFRD
+frontier/SM
+frontiersman/M
+frontiersmen
+frontispiece/SM
+frontrunner's
+front's
+frontward/S
+frosh/M
+Frostbelt/M
+frostbite/MS
+frostbit/G
+frostbiting/M
+frostbitten
+frost/CDSG
+frosteds
+frosted/U
+frostily
+frostiness/SM
+frosting/MS
+Frost/M
+frost's
+frosty/PTR
+froth/GMD
+frothiness/SM
+froths
+frothy/TRP
+froufrou/MS
+frowardness/MS
+froward/P
+frowner/M
+frowning/Y
+frown/RDSG
+frowzily
+frowziness/SM
+frowzy/RPT
+frozenness/M
+frozen/YP
+froze/UA
+fructify/GSD
+fructose/MS
+Fruehauf/M
+frugality/SM
+frugal/Y
+fruitcake/SM
+fruiterer/M
+fruiter/RM
+fruitfuller
+fruitfullest
+fruitfulness/MS
+fruitful/UYP
+fruit/GMRDS
+fruitiness/MS
+fruition/SM
+fruitlessness/MS
+fruitless/YP
+fruity/RPT
+frumpish
+frump/MS
+frumpy/TR
+Frunze/M
+frustrater/M
+frustrate/RSDXNG
+frustrating/Y
+frustration/M
+frustum/SM
+Frye/M
+fryer/MS
+Fry/M
+fry/NGDS
+F's
+f's/KA
+FSLIC
+ft/C
+FTC
+FTP
+fuchsia/MS
+Fuchs/M
+fucker/M!
+fuck/GZJRDMS!
+FUD
+fuddle/GSD
+fudge/GMSD
+fuel/ASDG
+fueler/SM
+fuel's
+Fuentes/M
+fugal
+Fugger/M
+fugitiveness/M
+fugitive/SYMP
+fugue/GMSD
+fuhrer/S
+Fuji/M
+Fujitsu/M
+Fujiyama
+Fukuoka/M
+Fulani/M
+Fulbright/M
+fulcrum/SM
+fulfilled/U
+fulfiller/M
+fulfill/GLSRD
+fulfillment/MS
+fullback/SMG
+fuller/DMG
+Fuller/M
+Fullerton/M
+fullish
+fullness/MS
+full/RDPSGZT
+fullstops
+fullword/SM
+fully
+fulminate/XSDGN
+fulmination/M
+fulness's
+fulsomeness/SM
+fulsome/PY
+Fulton/M
+Fulvia/M
+fumble/GZRSD
+fumbler/M
+fumbling/Y
+fume/DSG
+fumigant/MS
+fumigate/NGSDX
+fumigation/M
+fumigator/SM
+fuming/Y
+fumy/TR
+Funafuti
+functionalism/M
+functionalist/SM
+functionality/S
+functional/YS
+functionary/MS
+function/GSMD
+functor/SM
+fundamentalism/SM
+fundamentalist/SM
+fundamental/SY
+fund/ASMRDZG
+funded/U
+fundholders
+fundholding
+funding/S
+Fundy/M
+funeral/MS
+funerary
+funereal/Y
+funfair/M
+fungal/S
+fungible/M
+fungicidal
+fungicide/SM
+fungi/M
+fungoid/S
+fungous
+fungus/M
+funicular/SM
+funk/GSDM
+funkiness/S
+funky/RTP
+fun/MS
+funned
+funnel/SGMD
+funner
+funnest
+funnily/U
+funniness/SM
+funning
+funny/RSPT
+furbelow/MDSG
+furbisher/M
+furbish/GDRSA
+furiousness/M
+furious/RYP
+furlong/MS
+furlough/DGM
+furloughs
+furl/UDGS
+furn
+furnace/GMSD
+furnished/U
+furnisher/MS
+furnish/GASD
+furnishing/SM
+furniture/SM
+furore/MS
+furor/MS
+fur/PMS
+furred
+furrier/M
+furriness/SM
+furring/SM
+furrow/DMGS
+furry/RTZP
+furtherance/MS
+furtherer/M
+furthermore
+furthermost
+further/TGDRS
+furthest
+furtiveness/SM
+furtive/PY
+fury/SM
+furze/SM
+fusebox/S
+fusee/SM
+fuse/FSDAGCI
+fuselage/SM
+fuse's/A
+Fushun/M
+fusibility/SM
+fusible/I
+fusiform
+fusilier/MS
+fusillade/SDMG
+fusion/KMFSI
+fussbudget/MS
+fusser/M
+fussily
+fussiness/MS
+fusspot/SM
+fuss/SRDMG
+fussy/PTR
+fustian/MS
+fustiness/MS
+fusty/RPT
+fut
+futileness/M
+futile/PY
+futility/MS
+futon/S
+future/SM
+futurism/SM
+futuristic/S
+futurist/S
+futurity/MS
+futurologist/S
+futurology/MS
+futz/GSD
+fuze's
+Fuzhou/M
+Fuzzbuster/M
+fuzzily
+fuzziness/SM
+fuzz/SDMG
+fuzzy/PRT
+fwd
+FWD
+fwy
+FY
+FYI
+GA
+gabardine/SM
+gabbed
+Gabbey/M
+Gabbie/M
+Gabbi/M
+gabbiness/S
+gabbing
+gabble/SDG
+Gabby/M
+gabby/TRP
+Gabe/M
+gaberdine's
+Gabey/M
+gabfest/MS
+Gabie/M
+Gabi/M
+gable/GMSRD
+Gable/M
+Gabonese
+Gabon/M
+Gaborone/M
+Gabriela/M
+Gabriele/M
+Gabriella/M
+Gabrielle/M
+Gabriellia/M
+Gabriell/M
+Gabriello/M
+Gabriel/M
+Gabrila/M
+gab/S
+Gaby/M
+Gacrux/M
+gadabout/MS
+gadded
+gadder/MS
+gadding
+gadfly/MS
+gadgetry/MS
+gadget/SM
+gadolinium/MS
+gad/S
+Gadsden/M
+Gaea/M
+Gaelan/M
+Gaelic/M
+Gael/SM
+Gae/M
+gaffe/MS
+gaffer/M
+gaff/SGZRDM
+gaga
+Gagarin/M
+gag/DRSG
+Gage/M
+gager/M
+gage/SM
+gagged
+gagging
+gaggle/SDG
+gagwriter/S
+gaiety/MS
+Gaile/M
+Gail/M
+gaily
+gain/ADGS
+gainer/SM
+Gaines/M
+Gainesville/M
+gainfulness/M
+gainful/YP
+gaining/S
+gainly/U
+gainsaid
+gainsayer/M
+gainsay/RSZG
+Gainsborough/M
+gaiter/M
+gait/GSZMRD
+Gaithersburg/M
+galactic
+Galahad/MS
+Galapagos/M
+gal/AS
+gala/SM
+Galatea/M
+Galatia/M
+Galatians/M
+Galaxy/M
+galaxy/MS
+Galbraith/M
+Galbreath/M
+gale/AS
+Gale/M
+galen
+galena/MS
+galenite/M
+Galen/M
+gale's
+Galibi/M
+Galilean/MS
+Galilee/M
+Galileo/M
+Galina/M
+Gallagher/M
+gallanted
+gallanting
+gallantry/MS
+gallants
+gallant/UY
+Gallard/M
+gallbladder/MS
+Gallegos/M
+galleon/SM
+galleria/S
+gallery/MSDG
+galley/MS
+Gallic
+Gallicism/SM
+gallimaufry/MS
+galling/Y
+gallium/SM
+gallivant/GDS
+Gall/M
+gallonage/M
+gallon/SM
+galloper/M
+gallop/GSRDZ
+Galloway/M
+gallows/M
+gall/SGMD
+gallstone/MS
+Gallup/M
+Gal/MN
+Galois/M
+galoot/MS
+galore/S
+galosh/GMSD
+gal's
+Galsworthy/M
+galumph/GD
+galumphs
+galvanic
+Galvani/M
+galvanism/MS
+galvanization/SM
+galvanize/SDG
+Galvan/M
+galvanometer/SM
+galvanometric
+Galven/M
+Galveston/M
+Galvin/M
+Ga/M
+Gamaliel/M
+Gama/M
+Gambia/M
+Gambian/S
+gambit/MS
+gamble/GZRSD
+Gamble/M
+gambler/M
+gambol/SGD
+gamecock/SM
+gamekeeper/MS
+gameness/MS
+game/PJDRSMYTZG
+gamesmanship/SM
+gamesmen
+gamester/M
+gamest/RZ
+gamete/MS
+gametic
+gamine/SM
+gaminess/MS
+gaming/M
+gamin/MS
+gamma/MS
+gammon/DMSG
+Gamow/M
+gamut/MS
+gamy/TRP
+gander/DMGS
+Gandhian
+Gandhi/M
+gangbusters
+ganger/M
+Ganges/M
+gang/GRDMS
+gangland/SM
+ganglia/M
+gangling
+ganglionic
+ganglion/M
+gangplank/SM
+gangrene/SDMG
+gangrenous
+gangster/SM
+Gangtok/M
+gangway/MS
+Gan/M
+gannet/SM
+Gannie/M
+Gannon/M
+Ganny/M
+gantlet/GMDS
+Gantry/M
+gantry/MS
+Ganymede/M
+GAO
+gaoler/M
+gaol/MRDGZS
+gaper/M
+gape/S
+gaping/Y
+gapped
+gapping
+gap/SJMDRG
+garage/GMSD
+Garald/M
+garbageman/M
+garbage/SDMG
+garbanzo/MS
+garb/DMGS
+garbler/M
+garble/RSDG
+Garbo/M
+Garcia/M
+garçon/SM
+gardener/M
+Gardener/M
+gardenia/SM
+gardening/M
+garden/ZGRDMS
+Gardie/M
+Gardiner/M
+Gard/M
+Gardner/M
+Gardy/M
+Garek/M
+Gare/MH
+Gareth/M
+Garey/M
+Garfield/M
+garfish/MS
+Garfunkel/M
+Gargantua/M
+gargantuan
+gargle/SDG
+gargoyle/DSM
+Garibaldi/M
+Garik/M
+garishness/MS
+garish/YP
+Garland/M
+garland/SMDG
+garlicked
+garlicking
+garlicky
+garlic/SM
+garment/MDGS
+Gar/MH
+Garner/M
+garner/SGD
+Garnet/M
+garnet/SM
+Garnette/M
+Garnett/M
+garnish/DSLG
+garnisheeing
+garnishee/SDM
+garnishment/MS
+Garold/M
+garote's
+garotte's
+Garrard/M
+garred
+Garrek/M
+Garreth/M
+Garret/M
+garret/SM
+Garrett/M
+Garrick/M
+Garrik/M
+garring
+Garrison/M
+garrison/SGMD
+garroter/M
+garrote/SRDMZG
+Garrot/M
+garrotte's
+Garrott/M
+garrulity/SM
+garrulousness/MS
+garrulous/PY
+Garry/M
+gar/SLM
+garter/SGDM
+Garth/M
+Garvey/M
+Garvin/M
+Garv/M
+Garvy/M
+Garwin/M
+Garwood/M
+Gary/M
+Garza/M
+gasbag/MS
+Gascony/M
+gaseousness/M
+gaseous/YP
+gases/C
+gas/FC
+gash/GTMSRD
+gasification/M
+gasifier/M
+gasify/SRDGXZN
+gasket/SM
+gaslight/DMS
+gasohol/S
+gasoline/MS
+gasometer/M
+Gaspard/M
+Gaspar/M
+Gasparo/M
+gasper/M
+Gasper/M
+gasp/GZSRD
+gasping/Y
+gas's
+gassed/C
+Gasser/M
+gasser/MS
+Gasset/M
+gassiness/M
+gassing/SM
+gassy/PTR
+Gaston/M
+gastric
+gastritides
+gastritis/MS
+gastroenteritides
+gastroenteritis/M
+gastrointestinal
+gastronome/SM
+gastronomic
+gastronomical/Y
+gastronomy/MS
+gastropod/SM
+gasworks/M
+gateau/MS
+gateaux
+gatecrash/GZSRD
+gatehouse/MS
+gatekeeper/SM
+gate/MGDS
+gatepost/SM
+Gates
+gateway/MS
+gathered/IA
+gatherer/M
+gathering/M
+gather/JRDZGS
+gathers/A
+Gatlinburg/M
+Gatling/M
+Gatorade/M
+gator/MS
+Gatsby/M
+Gatun/M
+gaucheness/SM
+gaucherie/SM
+gauche/TYPR
+gaucho/SM
+gaudily
+gaudiness/MS
+gaudy/PRST
+gaugeable
+gauger/M
+Gauguin/M
+Gaulish/M
+Gaulle/M
+Gaul/MS
+Gaultiero/M
+gauntlet/GSDM
+Gauntley/M
+gauntness/MS
+gaunt/PYRDSGT
+gauss/C
+gausses
+Gaussian
+Gauss/M
+gauss's
+Gautama/M
+Gauthier/M
+Gautier/M
+gauze/SDGM
+gauziness/MS
+gauzy/TRP
+Gavan/M
+gave
+gavel/GMDS
+Gaven/M
+Gavin/M
+Gav/MN
+gavotte/MSDG
+Gavra/M
+Gavrielle/M
+Gawain/M
+Gawen/M
+gawkily
+gawkiness/MS
+gawk/SGRDM
+gawky/RSPT
+Gayel/M
+Gayelord/M
+Gaye/M
+gayety's
+Gayla/M
+Gayleen/M
+Gaylene/M
+Gayler/M
+Gayle/RM
+Gaylord/M
+Gaylor/M
+Gay/M
+gayness/SM
+Gaynor/M
+gay/RTPS
+Gaza/M
+gazebo/SM
+gaze/DRSZG
+gazelle/MS
+gazer/M
+gazetteer/SGDM
+gazette/MGSD
+Gaziantep/M
+gazillion/S
+gazpacho/MS
+GB
+G/B
+Gdansk/M
+Gd/M
+GDP
+Gearalt/M
+Gearard/M
+gearbox/SM
+gear/DMJSG
+gearing/M
+gearshift/MS
+gearstick
+gearwheel/SM
+Geary/M
+gecko/MS
+GED
+geegaw's
+geeing
+geek/SM
+geeky/RT
+geese/M
+geest/M
+gee/TDS
+geezer/MS
+Gehenna/M
+Gehrig/M
+Geiger/M
+Geigy/M
+geisha/M
+gelatinousness/M
+gelatinous/PY
+gelatin/SM
+gelcap
+gelding/M
+geld/JSGD
+gelid
+gelignite/MS
+gelled
+gelling
+gel/MBS
+Gelya/M
+Ge/M
+GE/M
+Gemini/SM
+gemlike
+Gemma/M
+gemmed
+gemming
+gem/MS
+gemological
+gemologist/MS
+gemology/MS
+gemstone/SM
+gen
+Gena/M
+Genaro/M
+gendarme/MS
+gender/DMGS
+genderless
+genealogical/Y
+genealogist/SM
+genealogy/MS
+Gene/M
+gene/MS
+generalissimo/SM
+generalist/MS
+generality/MS
+generalizable/SM
+generalization/MS
+generalized/U
+generalize/GZBSRD
+generalizer/M
+general/MSPY
+generalness/M
+generalship/SM
+genera/M
+generate/CXAVNGSD
+generational
+generation/MCA
+generative/AY
+generators/A
+generator/SM
+generically
+generic/PS
+generosity/MS
+generously/U
+generousness/SM
+generous/PY
+Genesco/M
+genesis/M
+Genesis/M
+genes/S
+genetically
+geneticist/MS
+genetic/S
+genetics/M
+Genet/M
+Geneva/M
+Genevieve/M
+Genevra/M
+Genghis/M
+geniality/FMS
+genially/F
+genialness/M
+genial/PY
+Genia/M
+genies/K
+genie/SM
+genii/M
+genitalia
+genitals
+genital/YF
+genitive/SM
+genitourinary
+genius/SM
+Gen/M
+Genna/M
+Gennie/M
+Gennifer/M
+Genni/M
+Genny/M
+Genoa/SM
+genocidal
+genocide/SM
+Geno/M
+genome/SM
+genotype/MS
+Genovera/M
+genre/MS
+gent/AMS
+genteelness/MS
+genteel/PRYT
+gentian/SM
+gentile/S
+Gentile's
+gentility/MS
+gentlefolk/S
+gentlemanliness/M
+gentlemanly/U
+gentleman/YM
+gentlemen
+gentleness/SM
+gentle/PRSDGT
+gentlewoman/M
+gentlewomen/M
+gently
+gentrification/M
+gentrify/NSDGX
+Gentry/M
+gentry/MS
+genuflect/GDS
+genuflection/MS
+genuineness/SM
+genuine/PY
+genus
+Genvieve/M
+geocentric
+geocentrically
+geocentricism
+geochemical/Y
+geochemistry/MS
+geochronology/M
+geodesic/S
+geode/SM
+geodesy/MS
+geodetic/S
+Geoff/M
+Geoffrey/M
+Geoffry/M
+geog
+geographer/MS
+geographic
+geographical/Y
+geography/MS
+geologic
+geological/Y
+geologist/MS
+geology/MS
+geom
+Geo/M
+geomagnetic
+geomagnetically
+geomagnetism/SM
+geometer/MS
+geometrical/Y
+geometrician/M
+geometric/S
+geometry/MS
+geomorphological
+geomorphology/M
+geophysical/Y
+geophysicist/MS
+geophysics/M
+geopolitical/Y
+geopolitic/S
+geopolitics/M
+Georas/M
+Geordie/M
+Georgeanna/M
+Georgeanne/M
+Georgena/M
+George/SM
+Georgeta/M
+Georgetown/M
+Georgetta/M
+Georgette/M
+Georgia/M
+Georgiana/M
+Georgianna/M
+Georgianne/M
+Georgian/S
+Georgie/M
+Georgi/M
+Georgina/M
+Georgine/M
+Georg/M
+Georgy/M
+geostationary
+geosynchronous
+geosyncline/SM
+geothermal
+geothermic
+Geralda/M
+Geraldine/M
+Gerald/M
+geranium/SM
+Gerard/M
+Gerardo/M
+Gerber/M
+gerbil/MS
+Gerda/M
+Gerek/M
+Gerhardine/M
+Gerhard/M
+Gerhardt/M
+Gerianna/M
+Gerianne/M
+geriatric/S
+geriatrics/M
+Gerick/M
+Gerik/M
+Geri/M
+Geritol/M
+Gerladina/M
+Ger/M
+Germaine/M
+Germain/M
+Germana/M
+germane
+Germania/M
+Germanic/M
+germanium/SM
+germanized
+German/SM
+Germantown/M
+Germany/M
+Germayne/M
+germen/M
+germicidal
+germicide/MS
+germinal/Y
+germinated/U
+germinate/XVGNSD
+germination/M
+germinative/Y
+germ/MNS
+Gerome/M
+Geronimo/M
+gerontocracy/M
+gerontological
+gerontologist/SM
+gerontology/SM
+Gerrard/M
+Gerrie/M
+Gerrilee/M
+Gerri/M
+Gerry/M
+gerrymander/SGD
+Gershwin/MS
+Gerta/M
+Gertie/M
+Gerti/M
+Gert/M
+Gertruda/M
+Gertrude/M
+Gertrudis/M
+Gertrud/M
+Gerty/M
+gerundive/M
+gerund/SVM
+Gery/M
+gestalt/M
+gestapo/S
+Gestapo/SM
+gestate/SDGNX
+gestational
+gestation/M
+gesticulate/XSDVGN
+gesticulation/M
+gesticulative/Y
+gestural
+gesture/SDMG
+gesundheit
+getaway/SM
+Gethsemane/M
+get/S
+getter/SDM
+getting
+Getty/M
+Gettysburg/M
+getup/MS
+gewgaw/MS
+Gewürztraminer
+geyser/GDMS
+Ghanaian/MS
+Ghana/M
+Ghanian's
+ghastliness/MS
+ghastly/TPR
+ghat/MS
+Ghats/M
+Ghent/M
+Gherardo/M
+gherkin/SM
+ghetto/DGMS
+ghettoize/SDG
+Ghibelline/M
+ghostlike
+ghostliness/MS
+ghostly/TRP
+ghost/SMYDG
+ghostwrite/RSGZ
+ghostwritten
+ghostwrote
+ghoulishness/SM
+ghoulish/PY
+ghoul/SM
+GHQ
+GI
+Giacinta/M
+Giacobo/M
+Giacometti/M
+Giacomo/M
+Giacopo/M
+Giana/M
+Gianina/M
+Gian/M
+Gianna/M
+Gianni/M
+Giannini/M
+giantess/MS
+giantkiller
+giant/SM
+Giauque/M
+Giavani/M
+gibber/DGS
+gibberish/MS
+gibbet/MDSG
+Gibbie/M
+Gibb/MS
+Gibbon/M
+gibbon/MS
+gibbousness/M
+gibbous/YP
+Gibby/M
+gibe/GDRS
+giber/M
+giblet/MS
+Gib/M
+Gibraltar/MS
+Gibson/M
+giddap
+giddily
+giddiness/SM
+Giddings/M
+giddy/GPRSDT
+Gide/M
+Gideon/MS
+Gielgud/M
+Gienah/M
+Giffard/M
+Giffer/M
+Giffie/M
+Gifford/M
+Giff/RM
+Giffy/M
+giftedness/M
+gifted/PY
+gift/SGMD
+gigabyte/S
+gigacycle/MS
+gigahertz/M
+gigantically
+giganticness/M
+gigantic/P
+gigavolt
+gigawatt/M
+gigged
+gigging
+giggler/M
+giggle/RSDGZ
+giggling/Y
+giggly/TR
+Gigi/M
+gig/MS
+GIGO
+gigolo/MS
+gila
+Gila/M
+Gilberta/M
+Gilberte/M
+Gilbertina/M
+Gilbertine/M
+gilbert/M
+Gilbert/M
+Gilberto/M
+Gilbertson/M
+Gilburt/M
+Gilchrist/M
+Gilda/M
+gilder/M
+gilding/M
+gild/JSGZRD
+Gilead/M
+Gilemette/M
+Giles
+Gilgamesh/M
+Gilkson/M
+Gillan/M
+Gilles
+Gillespie/M
+Gillette/M
+Gilliam/M
+Gillian/M
+Gillie/M
+Gilligan/M
+Gilli/M
+Gill/M
+gill/SGMRD
+Gilly/M
+Gilmore/M
+Gil/MY
+gilt/S
+gimbaled
+gimbals
+Gimbel/M
+gimcrackery/SM
+gimcrack/S
+gimlet/MDSG
+gimme/S
+gimmick/GDMS
+gimmickry/MS
+gimmicky
+gimp/GSMD
+gimpy/RT
+Gina/M
+Ginelle/M
+Ginevra/M
+gingerbread/SM
+gingerliness/M
+gingerly/P
+Ginger/M
+ginger/SGDYM
+gingersnap/SM
+gingery
+gingham/SM
+gingivitis/SM
+Gingrich/M
+ginkgoes
+ginkgo/M
+ginmill
+gin/MS
+ginned
+Ginnie/M
+Ginnifer/M
+Ginni/M
+ginning
+Ginny/M
+Gino/M
+Ginsberg/M
+Ginsburg/M
+ginseng/SM
+Gioconda/M
+Giordano/M
+Giorgia/M
+Giorgi/M
+Giorgio/M
+Giorgione/M
+Giotto/M
+Giovanna/M
+Giovanni/M
+Gipsy's
+giraffe/MS
+Giralda/M
+Giraldo/M
+Giraud/M
+Giraudoux/M
+girded/U
+girder/M
+girdle/GMRSD
+girdler/M
+gird/RDSGZ
+girlfriend/MS
+girlhood/SM
+girlie/M
+girlishness/SM
+girlish/YP
+girl/MS
+giro/M
+girt/GDS
+girth/MDG
+girths
+Gisela/M
+Giselbert/M
+Gisele/M
+Gisella/M
+Giselle/M
+Gish/M
+gist/MS
+git/M
+Giuditta/M
+Giulia/M
+Giuliano/M
+Giulietta/M
+Giulio/M
+Giuseppe/M
+Giustina/M
+Giustino/M
+Giusto/M
+giveaway/SM
+giveback/S
+give/HZGRS
+given/SP
+giver/M
+giving/Y
+Giza/M
+Gizela/M
+gizmo's
+gizzard/SM
+Gk/M
+glacé/DGS
+glacial/Y
+glaciate/XNGDS
+glaciation/M
+glacier/SM
+glaciological
+glaciologist/M
+glaciology/M
+gladded
+gladden/GDS
+gladder
+gladdest
+gladding
+gladdy
+glade/SM
+gladiatorial
+gladiator/SM
+Gladi/M
+gladiola/MS
+gladioli
+gladiolus/M
+gladly/RT
+Glad/M
+gladness/MS
+gladsome/RT
+Gladstone/MS
+Gladys
+glad/YSP
+glamor/DMGS
+glamorization/MS
+glamorizer/M
+glamorize/SRDZG
+glamorousness/M
+glamorous/PY
+glance/GJSD
+glancing/Y
+glanders/M
+glandes
+glandular/Y
+gland/ZSM
+glans/M
+glare/SDG
+glaringness/M
+glaring/YP
+Glaser/M
+Glasgow/M
+glasnost/S
+glassblower/S
+glassblowing/MS
+glassful/MS
+glass/GSDM
+glasshouse/SM
+glassily
+glassiness/SM
+glassless
+Glass/M
+glassware/SM
+glasswort/M
+glassy/PRST
+Glastonbury/M
+Glaswegian/S
+glaucoma/SM
+glaucous
+glazed/U
+glazer/M
+glaze/SRDGZJ
+glazier/SM
+glazing/M
+gleam/MDGS
+gleaner/M
+gleaning/M
+glean/RDGZJS
+Gleason/M
+Gleda/M
+gleed/M
+glee/DSM
+gleefulness/MS
+gleeful/YP
+gleeing
+Glendale/M
+Glenda/M
+Glenden/M
+Glendon/M
+Glenine/M
+Glen/M
+Glenna/M
+Glennie/M
+Glennis/M
+Glenn/M
+glen/SM
+glibber
+glibbest
+glibness/MS
+glib/YP
+glide/JGZSRD
+glider/M
+glim/M
+glimmer/DSJG
+glimmering/M
+glimpse/DRSZMG
+glimpser/M
+glint/DSG
+glissandi
+glissando/M
+glisten/DSG
+glister/DGS
+glitch/MS
+glitter/GDSJ
+glittering/Y
+glittery
+glitz/GSD
+glitzy/TR
+gloaming/MS
+gloater/M
+gloating/Y
+gloat/SRDG
+globalism/S
+globalist/S
+global/SY
+globe/SM
+globetrotter/MS
+glob/GDMS
+globularity/M
+globularness/M
+globular/PY
+globule/MS
+globulin/MS
+glockenspiel/SM
+glommed
+gloom/GSMD
+gloomily
+gloominess/MS
+gloomy/RTP
+glop/MS
+glopped
+glopping
+gloppy/TR
+Gloria/M
+Gloriana/M
+Gloriane/M
+glorification/M
+glorifier/M
+glorify/XZRSDNG
+Glori/M
+glorious/IYP
+gloriousness/IM
+Glory/M
+glory/SDMG
+glossary/MS
+gloss/GSDM
+glossily
+glossiness/SM
+glossolalia/SM
+glossy/RSPT
+glottal
+glottalization/M
+glottis/MS
+Gloucester/M
+gloveless
+glover/M
+Glover/M
+glove/SRDGMZ
+glower/GD
+glow/GZRDMS
+glowing/Y
+glowworm/SM
+glucose/SM
+glue/DRSMZG
+glued/U
+gluer/M
+gluey
+gluier
+gluiest
+glummer
+glummest
+glumness/MS
+glum/SYP
+gluon/M
+glutamate/M
+gluten/M
+glutenous
+glutinousness/M
+glutinous/PY
+glut/SMNX
+glutted
+glutting
+glutton/MS
+gluttonous/Y
+gluttony/SM
+glyceride/M
+glycerinate/MD
+glycerine's
+glycerin/SM
+glycerolized/C
+glycerol/SM
+glycine/M
+glycogen/SM
+glycol/MS
+Glynda/M
+Glynis/M
+Glyn/M
+Glynnis/M
+Glynn/M
+glyph/M
+glyphs
+gm
+GM
+GMT
+gnarl/SMDG
+gnash/SDG
+gnat/MS
+gnawer/M
+gnaw/GRDSJ
+gnawing/M
+gneiss/SM
+Gnni/M
+gnomelike
+GNOME/M
+gnome/SM
+gnomic
+gnomish
+gnomonic
+gnosticism
+Gnosticism/M
+gnostic/K
+Gnostic/M
+GNP
+gnu/MS
+goad/MDSG
+goalie/SM
+goalkeeper/MS
+goalkeeping/M
+goalless
+goal/MDSG
+goalmouth/M
+goalpost/S
+goalscorer
+goalscoring
+goaltender/SM
+Goa/M
+goatee/SM
+goatherd/MS
+goat/MS
+goatskin/SM
+gobbed
+gobbet/MS
+gobbing
+gobbledegook's
+gobbledygook/S
+gobbler/M
+gobble/SRDGZ
+Gobi/M
+goblet/MS
+goblin/SM
+gob/SM
+Godard/M
+Godart/M
+godchild/M
+godchildren
+goddammit
+goddamn/GS
+Goddard/M
+Goddart/M
+goddaughter/SM
+godded
+goddess/MS
+godding
+Gödel/M
+godfather/GSDM
+godforsaken
+Godfree/M
+Godfrey/M
+Godfry/M
+godhead/S
+godhood/SM
+Godiva/M
+godlessness/MS
+godless/P
+godlikeness/M
+godlike/P
+godliness/UMS
+godly/UTPR
+God/M
+godmother/MS
+Godot/M
+godparent/SM
+godsend/MS
+god/SMY
+godson/MS
+Godspeed/S
+Godthaab/M
+Godunov/M
+Godwin/M
+Godzilla/M
+Goebbels/M
+Goering/M
+goer/MG
+goes
+Goethals/M
+Goethe/M
+gofer/SM
+Goff/M
+goggler/M
+goggle/SRDGZ
+Gogh/M
+Gog/M
+Gogol/M
+Goiania/M
+going/M
+goiter/SM
+Golan/M
+Golconda/M
+Golda/M
+Goldarina/M
+Goldberg/M
+goldbricker/M
+goldbrick/GZRDMS
+Golden/M
+goldenness/M
+goldenrod/SM
+goldenseal/M
+golden/TRYP
+goldfinch/MS
+goldfish/SM
+Goldia/M
+Goldie/M
+Goldilocks/M
+Goldi/M
+Goldina/M
+Golding/M
+Goldman/M
+goldmine/S
+gold/MRNGTS
+goldsmith/M
+Goldsmith/M
+goldsmiths
+Goldstein/M
+Goldwater/M
+Goldwyn/M
+Goldy/M
+Goleta/M
+golfer/M
+golf/RDMGZS
+Golgotha/M
+Goliath/M
+Goliaths
+golly/S
+Gomez/M
+Gomorrah/M
+Gompers/M
+go/MRHZGJ
+gonadal
+gonad/SM
+gondola/SM
+gondolier/MS
+Gondwanaland/M
+goner/M
+gone/RZN
+gong/SGDM
+gonion/M
+gonna
+gonorrheal
+gonorrhea/MS
+Gonzales/M
+Gonzalez/M
+Gonzalo/M
+Goober/M
+goober/MS
+goodbye/MS
+goodhearted
+goodie's
+goodish
+goodly/TR
+Good/M
+Goodman/M
+goodness/MS
+goodnight
+Goodrich/M
+good/SYP
+goodwill/MS
+Goodwin/M
+Goodyear/M
+goody/SM
+gooey
+goofiness/MS
+goof/SDMG
+goofy/RPT
+Google/M
+gooier
+gooiest
+gook/SM
+goo/MS
+goon/SM
+goop/SM
+gooseberry/MS
+goosebumps
+goose/M
+goos/SDG
+GOP
+Gopher
+gopher/SM
+Goran/M
+Goraud/M
+Gorbachev
+Gordan/M
+Gorden/M
+Gordian/M
+Gordie/M
+Gordimer/M
+Gordon/M
+Gordy/M
+gore/DSMG
+Gore/M
+Goren/M
+Gorey/M
+Gorgas
+gorged/E
+gorge/GMSRD
+gorgeousness/SM
+gorgeous/YP
+gorger/EM
+gorges/E
+gorging/E
+Gorgon/M
+gorgon/S
+Gorgonzola/M
+Gorham/M
+gorilla/MS
+gorily
+goriness/MS
+goring/M
+Gorky/M
+gormandizer/M
+gormandize/SRDGZ
+gormless
+gorp/S
+gorse/SM
+gory/PRT
+gos
+goshawk/MS
+gosh/S
+gosling/M
+gospeler/M
+gospel/MRSZ
+Gospel/SM
+gossamer/SM
+gossipy
+gossip/ZGMRDS
+gotcha/SM
+Göteborg/M
+Gotham/M
+Gothart/M
+Gothicism/M
+Gothic/S
+Goth/M
+Goths
+got/IU
+goto
+GOTO/MS
+gotta
+gotten/U
+Gottfried/M
+Goucher/M
+Gouda/SM
+gouge/GZSRD
+gouger/M
+goulash/SM
+Gould/M
+Gounod/M
+gourde/SM
+gourd/MS
+gourmand/MS
+gourmet/MS
+gout/SM
+gouty/RT
+governable/U
+governance/SM
+governed/U
+governess/SM
+govern/LBGSD
+governmental/Y
+government/MS
+Governor
+governor/MS
+governorship/SM
+gov/S
+govt
+gown/GSDM
+Goya/M
+GP
+GPA
+GPO
+GPSS
+gr
+grabbed
+grabber/SM
+grabbing/S
+grab/S
+Gracchus/M
+grace/ESDMG
+graceful/EYPU
+gracefuller
+gracefullest
+gracefulness/ESM
+Graceland/M
+gracelessness/MS
+graceless/PY
+Grace/M
+Gracia/M
+Graciela/M
+Gracie/M
+graciousness/SM
+gracious/UY
+grackle/SM
+gradate/DSNGX
+gradation/MCS
+grade/ACSDG
+graded/U
+Gradeigh/M
+gradely
+grader/MC
+grade's
+Gradey/M
+gradient/RMS
+grad/MRDGZJS
+gradualism/MS
+gradualist/MS
+gradualness/MS
+gradual/SYP
+graduand/SM
+graduate/MNGDSX
+graduation/M
+Grady/M
+Graehme/M
+Graeme/M
+Graffias/M
+graffiti
+graffito/M
+Graff/M
+grafter/M
+grafting/M
+graft/MRDSGZ
+Grafton/M
+Grahame/M
+Graham/M
+graham/SM
+Graig/M
+grail/S
+Grail/SM
+grainer/M
+grain/IGSD
+graininess/MS
+graining/M
+grain's
+grainy/RTP
+gram/KSM
+Gram/M
+grammarian/SM
+grammar/MS
+grammaticality/M
+grammaticalness/M
+grammatical/UY
+grammatic/K
+gramme/SM
+Grammy/S
+gramophone/SM
+Grampians
+grampus/SM
+Granada/M
+granary/MS
+grandam/SM
+grandaunt/MS
+grandchild/M
+grandchildren
+granddaddy/MS
+granddad/SM
+granddaughter/MS
+grandee/SM
+grandeur/MS
+grandfather/MYDSG
+grandiloquence/SM
+grandiloquent/Y
+grandiose/YP
+grandiosity/MS
+grandkid/SM
+grandma/MS
+grandmaster/MS
+grandmother/MYS
+grandnephew/MS
+grandness/MS
+grandniece/SM
+grandpa/MS
+grandparent/MS
+grandson/MS
+grandstander/M
+grandstand/SRDMG
+grand/TPSYR
+granduncle/MS
+Grange/MR
+grange/MSR
+Granger/M
+granite/MS
+granitic
+Gran/M
+Grannie/M
+Granny/M
+granny/MS
+granola/S
+grantee/MS
+granter/M
+Grantham/M
+Granthem/M
+Grantley/M
+Grant/M
+grantor's
+grant/SGZMRD
+grantsmanship/S
+granularity/SM
+granular/Y
+granulate/SDXVGN
+granulation/M
+granule/SM
+granulocytic
+Granville/M
+grapefruit/SM
+grape/SDGM
+grapeshot/M
+grapevine/MS
+grapheme/M
+graph/GMD
+graphical/Y
+graphicness/M
+graphic/PS
+graphics/M
+graphite/SM
+graphologist/SM
+graphology/MS
+graphs
+grapnel/SM
+grapple/DRSG
+grappler/M
+grappling/M
+grasper/M
+graspingness/M
+grasping/PY
+grasp/SRDBG
+grass/GZSDM
+grasshopper/SM
+grassland/MS
+Grass/M
+grassroots
+grassy/RT
+Grata/M
+gratefuller
+gratefullest
+gratefulness/USM
+grateful/YPU
+grater/M
+grates/I
+grate/SRDJGZ
+Gratia/M
+Gratiana/M
+graticule/M
+gratification/M
+gratified/U
+gratifying/Y
+gratify/NDSXG
+grating/YM
+gratis
+gratitude/IMS
+gratuitousness/MS
+gratuitous/PY
+gratuity/SM
+gravamen/SM
+gravedigger/SM
+gravel/SGMYD
+graven
+graveness/MS
+graver/M
+graveside/S
+Graves/M
+grave/SRDPGMZTY
+gravestone/SM
+graveyard/MS
+gravidness/M
+gravid/PY
+gravimeter/SM
+gravimetric
+gravitas
+gravitate/XVGNSD
+gravitational/Y
+gravitation/M
+graviton/SM
+gravity/MS
+gravy/SM
+graybeard/MS
+Grayce/M
+grayish
+Gray/M
+grayness/S
+gray/PYRDGTS
+Grayson/M
+graze/GZSRD
+grazer/M
+Grazia/M
+grazing/M
+grease/GMZSRD
+greasepaint/MS
+greaseproof
+greaser/M
+greasily
+greasiness/SM
+greasy/PRT
+greatcoat/DMS
+greaten/DG
+greathearted
+greatness/MS
+great/SPTYRN
+grebe/MS
+Grecian/S
+Greece/M
+greed/C
+greedily
+greediness/SM
+greeds
+greed's
+greedy/RTP
+Greek/SM
+Greeley/M
+greenback/MS
+greenbelt/S
+Greenberg/M
+Greenblatt/M
+Greenbriar/M
+Greene/M
+greenery/MS
+Greenfeld/M
+greenfield
+Greenfield/M
+greenfly/M
+greengage/SM
+greengrocer/SM
+greengrocery/M
+greenhorn/SM
+greenhouse/SM
+greening/M
+greenish/P
+Greenland/M
+Green/M
+greenmail/GDS
+greenness/MS
+Greenpeace/M
+greenroom/SM
+Greensboro/M
+Greensleeves/M
+Greensville/M
+greensward/SM
+green/SYRDMPGT
+Greentree/M
+Greenville/M
+Greenwich/M
+greenwood/MS
+Greer/M
+greeter/M
+greeting/M
+greets/A
+greet/SRDJGZ
+gregariousness/MS
+gregarious/PY
+Gregg/M
+Greggory/M
+Greg/M
+Gregoire/M
+Gregoor/M
+Gregorian
+Gregorio/M
+Gregorius/M
+Gregor/M
+Gregory/M
+gremlin/SM
+Grenada/M
+grenade/MS
+Grenadian/S
+grenadier/SM
+Grenadines
+grenadine/SM
+Grendel/M
+Grenier/M
+Grenoble/M
+Grenville/M
+Gresham/M
+Gretal/M
+Greta/M
+Gretchen/M
+Gretel/M
+Grete/M
+Grethel/M
+Gretna/M
+Gretta/M
+Gretzky/M
+grew/A
+greybeard/M
+greyhound/MS
+Grey/M
+greyness/M
+gridded
+griddlecake/SM
+griddle/DSGM
+gridiron/GSMD
+gridlock/DSG
+grids/A
+grid/SGM
+grief/MS
+Grieg/M
+Grier/M
+grievance/SM
+griever/M
+grieve/SRDGZ
+grieving/Y
+grievousness/SM
+grievous/PY
+Griffie/M
+Griffin/M
+griffin/SM
+Griffith/M
+Griff/M
+griffon's
+Griffy/M
+griller/M
+grille/SM
+grill/RDGS
+grillwork/M
+grimace/DRSGM
+grimacer/M
+Grimaldi/M
+grime/MS
+Grimes
+griminess/MS
+grimmer
+grimmest
+Grimm/M
+grimness/MS
+grim/PGYD
+grimy/TPR
+Grinch/M
+grind/ASG
+grinder/MS
+grinding/SY
+grindstone/SM
+gringo/SM
+grinned
+grinner/M
+grinning/Y
+grin/S
+griper/M
+gripe/S
+grippe/GMZSRD
+gripper/M
+gripping/Y
+grip/SGZMRD
+Griselda/M
+grisliness/SM
+grisly/RPT
+Gris/M
+Grissel/M
+gristle/SM
+gristliness/M
+gristly/TRP
+gristmill/MS
+grist/MYS
+Griswold/M
+grit/MS
+gritted
+gritter/MS
+grittiness/SM
+gritting
+gritty/PRT
+Griz/M
+grizzle/DSG
+grizzling/M
+grizzly/TRS
+Gr/M
+groaner/M
+groan/GZSRDM
+groat/SM
+grocer/MS
+grocery/MS
+groggily
+grogginess/SM
+groggy/RPT
+grog/MS
+groin/MGSD
+grokked
+grokking
+grok/S
+grommet/GMDS
+Gromyko/M
+groofs
+groomer/M
+groom/GZSMRD
+groomsman/M
+groomsmen
+Groot/M
+groover/M
+groove/SRDGM
+groovy/TR
+groper/M
+grope/SRDJGZ
+Gropius/M
+grosbeak/SM
+grosgrain/MS
+Gross
+Grosset/M
+gross/GTYSRDP
+Grossman/M
+grossness/MS
+Grosvenor/M
+Grosz/M
+grotesqueness/MS
+grotesque/PSY
+Grotius/M
+Groton/M
+grottoes
+grotto/M
+grouch/GDS
+grouchily
+grouchiness/MS
+grouchy/RPT
+groundbreaking/S
+grounded/U
+grounder/M
+groundhog/SM
+ground/JGZMDRS
+groundlessness/M
+groundless/YP
+groundnut/MS
+groundsheet/M
+groundskeepers
+groundsman/M
+groundswell/S
+groundwater/S
+groundwork/SM
+grouped/A
+grouper/M
+groupie/MS
+grouping/M
+groups/A
+group/ZJSMRDG
+grouse/GMZSRD
+grouser/M
+grouter/M
+grout/GSMRD
+groveler/M
+grovelike
+groveling/Y
+grovel/SDRGZ
+Grover/M
+Grove/RM
+grove/SRMZ
+grower/M
+grow/GZYRHS
+growing/I
+growingly
+growler/M
+growling/Y
+growl/RDGZS
+growly/RP
+grown/IA
+grownup/MS
+grows/A
+growth/IMA
+growths/IA
+grubbed
+grubber/SM
+grubbily
+grubbiness/SM
+grubbing
+grubby/RTP
+grub/MS
+grubstake/MSDG
+grudge/GMSRDJ
+grudger/M
+grudging/Y
+grueling/Y
+gruel/MDGJS
+gruesomeness/SM
+gruesome/RYTP
+gruffness/MS
+gruff/PSGTYRD
+grumble/GZJDSR
+grumbler/M
+grumbling/Y
+Grumman/M
+grumpily
+grumpiness/MS
+grump/MDGS
+grumpy/TPR
+Grundy/M
+Grünewald/M
+grunge/S
+grungy/RT
+grunion/SM
+grunter/M
+grunt/SGRD
+Grusky/M
+Grus/M
+Gruyère
+Gruyeres
+gryphon's
+g's
+G's
+gs/A
+GSA
+gt
+GU
+guacamole/MS
+Guadalajara/M
+Guadalcanal/M
+Guadalquivir/M
+Guadalupe/M
+Guadeloupe/M
+Guallatiri/M
+Gualterio/M
+Guamanian/SM
+Guam/M
+Guangzhou
+guanine/MS
+guano/MS
+Guantanamo/M
+Guarani/M
+guarani/SM
+guaranteeing
+guarantee/RSDZM
+guarantor/SM
+guaranty/MSDG
+guardedness/UM
+guarded/UYP
+guarder/M
+guardhouse/SM
+Guardia/M
+guardianship/MS
+guardian/SM
+guardrail/SM
+guard/RDSGZ
+guardroom/SM
+guardsman/M
+guardsmen
+Guarnieri/M
+Guatemala/M
+Guatemalan/S
+guava/SM
+Guayaquil/M
+gubernatorial
+Gucci/M
+gudgeon/M
+Guelph/M
+Guendolen/M
+Guenevere/M
+Guenna/M
+Guenther/M
+guernsey/S
+Guernsey/SM
+Guerra/M
+Guerrero/M
+guerrilla/MS
+guessable/U
+guess/BGZRSD
+guessed/U
+guesser/M
+guesstimate/DSMG
+guesswork/MS
+guest/SGMD
+Guevara/M
+guffaw/GSDM
+guff/SM
+Guggenheim/M
+Guglielma/M
+Guglielmo/M
+Guhleman/M
+GUI
+Guiana/M
+guidance/MS
+guidebook/SM
+guided/U
+guide/GZSRD
+guideline/SM
+guidepost/MS
+guider/M
+Guido/M
+Guilbert/M
+guilder/M
+guildhall/SM
+guild/SZMR
+guileful
+guilelessness/MS
+guileless/YP
+guile/SDGM
+Guillaume/M
+Guillema/M
+Guillemette/M
+guillemot/MS
+Guillermo/M
+guillotine/SDGM
+guiltily
+guiltiness/MS
+guiltlessness/M
+guiltless/YP
+guilt/SM
+guilty/PTR
+Gui/M
+Guinea/M
+Guinean/S
+guinea/SM
+Guinevere/M
+Guinna/M
+Guinness/M
+guise's
+guise/SDEG
+guitarist/SM
+guitar/SM
+Guiyang
+Guizot/M
+Gujarati/M
+Gujarat/M
+Gujranwala/M
+gulag/S
+gulch/MS
+gulden/MS
+gulf/DMGS
+Gullah/M
+gullet/MS
+gulley's
+gullibility/MS
+gullible
+Gulliver/M
+gull/MDSG
+gully/SDMG
+gulp/RDGZS
+gumboil/MS
+gumbo/MS
+gumboots
+gumdrop/SM
+gummed
+gumminess/M
+gumming/C
+gum/MS
+gummy/RTP
+gumption/SM
+gumshoeing
+gumshoe/SDM
+gumtree/MS
+Gunar/M
+gunboat/MS
+Gunderson/M
+gunfighter/M
+gunfight/SRMGZ
+gunfire/SM
+gunflint/M
+gunfought
+Gunilla/M
+gunk/SM
+gunky/RT
+Gun/M
+gunman/M
+gunmen
+gunmetal/MS
+gun/MS
+Gunnar/M
+gunned
+gunnel's
+Gunner/M
+gunner/SM
+gunnery/MS
+gunning/M
+gunnysack/SM
+gunny/SM
+gunpoint/MS
+gunpowder/SM
+gunrunner/MS
+gunrunning/MS
+gunship/S
+gunshot/SM
+gunslinger/M
+gunsling/GZR
+gunsmith/M
+gunsmiths
+Guntar/M
+Gunter/M
+Gunther/M
+gunwale/MS
+Guofeng/M
+guppy/SM
+Gupta/M
+gurgle/SDG
+Gurkha/M
+gurney/S
+guru/MS
+Gusella/M
+gusher/M
+gush/SRDGZ
+gushy/TR
+Gus/M
+Guss
+gusset/MDSG
+Gussie/M
+Gussi/M
+gussy/GSD
+Gussy/M
+Gustaf/M
+Gustafson/M
+Gusta/M
+gustatory
+Gustave/M
+Gustav/M
+Gustavo/M
+Gustavus/M
+gusted/E
+Gustie/M
+gustily
+Gusti/M
+gustiness/M
+gusting/E
+gust/MDGS
+gustoes
+gusto/M
+gusts/E
+Gusty/M
+gusty/RPT
+Gutenberg/M
+Guthrey/M
+Guthrie/M
+Guthry/M
+Gutierrez/M
+gutlessness/S
+gutless/P
+gutser/M
+gutsiness/M
+gut/SM
+guts/R
+gutsy/PTR
+gutted
+gutter/GSDM
+guttering/M
+guttersnipe/M
+gutting
+gutturalness/M
+guttural/SPY
+gutty/RSMT
+Guyana/M
+Guyanese
+Guy/M
+guy/MDRZGS
+Guzman/M
+guzzle/GZRSD
+guzzler/M
+g/VBX
+Gwalior/M
+Gwendolen/M
+Gwendoline/M
+Gwendolin/M
+Gwendolyn/M
+Gweneth/M
+Gwenette/M
+Gwen/M
+Gwenneth/M
+Gwennie/M
+Gwenni/M
+Gwenny/M
+Gwenora/M
+Gwenore/M
+Gwyneth/M
+Gwyn/M
+Gwynne/M
+gymkhana/SM
+gym/MS
+gymnasia's
+gymnasium/SM
+gymnastically
+gymnastic/S
+gymnastics/M
+gymnast/SM
+gymnosperm/SM
+gynecologic
+gynecological/MS
+gynecologist/SM
+gynecology/MS
+gypped
+gypper/S
+gypping
+gyp/S
+gypsite
+gypster/S
+gypsum/MS
+gypsy/SDMG
+Gypsy/SM
+gyrate/XNGSD
+gyration/M
+gyrator/MS
+gyrfalcon/SM
+gyrocompass/M
+gyro/MS
+gyroscope/SM
+gyroscopic
+gyve/GDS
+H
+Haag/M
+Haas/M
+Habakkuk/M
+habeas
+haberdasher/SM
+haberdashery/SM
+Haber/M
+Haberman/M
+Habib/M
+habiliment/SM
+habitability/MS
+habitableness/M
+habitable/P
+habitant/ISM
+habitation/MI
+habitations
+habitat/MS
+habit/IBDGS
+habit's
+habitualness/SM
+habitual/SYP
+habituate/SDNGX
+habituation/M
+habitué/MS
+hacienda/MS
+hacker/M
+Hackett/M
+hack/GZSDRBJ
+hackler/M
+hackle/RSDMG
+hackney/SMDG
+hacksaw/SDMG
+hackwork/S
+Hadamard/M
+Hadar/M
+Haddad/M
+haddock/MS
+hades
+Hades
+had/GD
+hadji's
+hadj's
+Hadlee/M
+Hadleigh/M
+Hadley/M
+Had/M
+hadn't
+Hadria/M
+Hadrian/M
+hadron/MS
+hadst
+haemoglobin's
+haemophilia's
+haemorrhage's
+Hafiz/M
+hafnium/MS
+haft/GSMD
+Hagan/M
+Hagar/M
+Hagen/M
+Hager/M
+Haggai/M
+haggardness/MS
+haggard/SYP
+hagged
+hagging
+haggish
+haggis/SM
+haggler/M
+haggle/RSDZG
+Hagiographa/M
+hagiographer/SM
+hagiography/MS
+hag/SMN
+Hagstrom/M
+Hague/M
+ha/H
+hahnium/S
+Hahn/M
+Haifa/M
+haiku/M
+Hailee/M
+hailer/M
+Hailey/M
+hail/SGMDR
+hailstone/SM
+hailstorm/SM
+Haily/M
+Haiphong/M
+hairball/SM
+hairbreadth/M
+hairbreadths
+hairbrush/SM
+haircare
+haircloth/M
+haircloths
+haircut/MS
+haircutting
+hairdo/SM
+hairdresser/SM
+hairdressing/SM
+hairdryer/S
+hairiness/MS
+hairlessness/M
+hairless/P
+hairlike
+hairline/SM
+hairnet/MS
+hairpiece/MS
+hairpin/MS
+hairsbreadth
+hairsbreadths
+hair/SDM
+hairsplitter/SM
+hairsplitting/MS
+hairspray
+hairspring/SM
+hairstyle/SMG
+hairstylist/S
+hairy/PTR
+Haitian/S
+Haiti/M
+hajjes
+hajji/MS
+hajj/M
+Hakeem/M
+hake/MS
+Hakim/M
+Hakka/M
+Hakluyt/M
+halalled
+halalling
+halal/S
+halberd/SM
+halcyon/S
+Haldane/M
+Haleakala/M
+Haleigh/M
+hale/ISRDG
+Hale/M
+haler/IM
+halest
+Halette/M
+Haley/M
+halfback/SM
+halfbreed
+halfheartedness/MS
+halfhearted/PY
+halfpence/S
+halfpenny/MS
+halfpennyworth
+half/PM
+halftime/S
+halftone/MS
+halfway
+halfword/MS
+halibut/SM
+halide/SM
+Halie/M
+Halifax/M
+Hali/M
+Halimeda/M
+halite/MS
+halitoses
+halitosis/M
+hallelujah
+hallelujahs
+Halley/M
+halliard's
+Hallie/M
+Halli/M
+Hallinan/M
+Hall/M
+Hallmark/M
+hallmark/SGMD
+hallo/GDS
+halloo's
+Halloween/MS
+hallowing
+hallows
+hallow/UD
+hall/SMR
+Hallsy/M
+hallucinate/VNGSDX
+hallucination/M
+hallucinatory
+hallucinogenic/S
+hallucinogen/SM
+hallway/SM
+Hally/M
+halocarbon
+halogenated
+halogen/SM
+halon
+halo/SDMG
+Halpern/M
+Halsey/M
+Hal/SMY
+Halsy/M
+halter/GDM
+halt/GZJSMDR
+halting/Y
+halve/GZDS
+halves/M
+halyard/MS
+Ha/M
+Hamal/M
+Haman/M
+hamburger/M
+Hamburg/MS
+hamburg/SZRM
+Hamel/M
+Hamey/M
+Hamhung/M
+Hamid/M
+Hamilcar/M
+Hamil/M
+Hamiltonian/MS
+Hamilton/M
+Hamish/M
+Hamitic/M
+Hamlen/M
+Hamlet/M
+hamlet/MS
+Hamlin/M
+Ham/M
+Hammad/M
+Hammarskjold/M
+hammed
+hammerer/M
+hammerhead/SM
+hammering/M
+hammerless
+hammerlock/MS
+Hammerstein/M
+hammertoe/SM
+hammer/ZGSRDM
+Hammett/M
+hamming
+hammock/MS
+Hammond/M
+Hammurabi/M
+hammy/RT
+Hamnet/M
+hampered/U
+hamper/GSD
+Hampshire/M
+Hampton/M
+ham/SM
+hamster/MS
+hamstring/MGS
+hamstrung
+Hamsun/M
+Hana/M
+Hanan/M
+Hancock/M
+handbagged
+handbagging
+handbag/MS
+handball/SM
+handbarrow/MS
+handbasin
+handbill/MS
+handbook/SM
+handbrake/M
+handcar/SM
+handcart/MS
+handclasp/MS
+handcraft/GMDS
+handcuff/GSD
+handcuffs/M
+handedness/M
+handed/PY
+Handel/M
+hander/S
+handful/SM
+handgun/SM
+handhold/M
+handicapped
+handicapper/SM
+handicapping
+handicap/SM
+handicraftsman/M
+handicraftsmen
+handicraft/SMR
+handily/U
+handiness/SM
+handiwork/MS
+handkerchief/MS
+handleable
+handlebar/SM
+handle/MZGRSD
+handler/M
+handless
+handling/M
+handmade
+handmaiden/M
+handmaid/NMSX
+handout/SM
+handover
+handpick/GDS
+handrail/SM
+hand's
+handsaw/SM
+handset/SM
+handshake/GMSR
+handshaker/M
+handshaking/M
+handsomely/U
+handsomeness/MS
+handsome/RPTY
+handspike/SM
+handspring/SM
+handstand/MS
+hand/UDSG
+handwork/SM
+handwoven
+handwrite/GSJ
+handwriting/M
+handwritten
+Handy/M
+handyman/M
+handymen
+handy/URT
+Haney/M
+hangar/SGDM
+hangdog/S
+hanged/A
+hanger/M
+hang/GDRZBSJ
+hanging/M
+hangman/M
+hangmen
+hangnail/MS
+hangout/MS
+hangover/SM
+hangs/A
+Hangul/M
+hangup/S
+Hangzhou
+Hankel/M
+hankerer/M
+hanker/GRDJ
+hankering/M
+hank/GZDRMS
+hankie/SM
+Hank/M
+hanky's
+Hannah/M
+Hanna/M
+Hannibal/M
+Hannie/M
+Hanni/MS
+Hanny/M
+Hanoi/M
+Hanoverian
+Hanover/M
+Hansel/M
+Hansen/M
+Hansiain/M
+Han/SM
+Hans/N
+hansom/MS
+Hanson/M
+Hanuka/S
+Hanukkah/M
+Hanukkahs
+Hapgood/M
+haphazardness/SM
+haphazard/SPY
+haplessness/MS
+hapless/YP
+haploid/S
+happed
+happening/M
+happen/JDGS
+happenstance/SM
+happily/U
+happiness/UMS
+happing
+Happy/M
+happy/UTPR
+Hapsburg/M
+hap/SMY
+Harald/M
+harangue/GDRS
+haranguer/M
+Harare
+harasser/M
+harass/LSRDZG
+harassment/SM
+Harbert/M
+harbinger/DMSG
+Harbin/M
+harborer/M
+harbor/ZGRDMS
+Harcourt/M
+hardback/SM
+hardball/SM
+hardboard/SM
+hardboiled
+hardbound
+hardcore/MS
+hardcover/SM
+hardened/U
+hardener/M
+hardening/M
+harden/ZGRD
+hardhat/S
+hardheadedness/SM
+hardheaded/YP
+hardheartedness/SM
+hardhearted/YP
+hardihood/MS
+hardily
+hardiness/SM
+Harding/M
+Hardin/M
+hardliner/S
+hardness/MS
+hardscrabble
+hardshell
+hardship/MS
+hardstand/S
+hardtack/MS
+hardtop/MS
+hardware/SM
+hardwire/DSG
+hardwood/MS
+hardworking
+Hardy/M
+hard/YNRPJGXTS
+hardy/PTRS
+harebell/MS
+harebrained
+harelip/MS
+harelipped
+hare/MGDS
+harem/SM
+Hargreaves/M
+hark/GDS
+Harland/M
+Harlan/M
+Harlem/M
+Harlene/M
+Harlen/M
+Harlequin
+harlequin/MS
+Harley/M
+Harlie/M
+Harli/M
+Harlin/M
+harlotry/MS
+harlot/SM
+Harlow/M
+Harman/M
+harmed/U
+harmer/M
+harmfulness/MS
+harmful/PY
+harmlessness/SM
+harmless/YP
+harm/MDRGS
+Harmonia/M
+harmonically
+harmonica/MS
+harmonic/S
+harmonics/M
+Harmonie/M
+harmonious/IPY
+harmoniousness/MS
+harmoniousness's/I
+harmonium/MS
+harmonization/A
+harmonizations
+harmonization's
+harmonized/U
+harmonizer/M
+harmonizes/UA
+harmonize/ZGSRD
+Harmon/M
+harmony/EMS
+Harmony/M
+harness/DRSMG
+harnessed/U
+harnesser/M
+harnesses/U
+Harold/M
+Haroun/M
+harper/M
+Harper/M
+harping/M
+harpist/SM
+harp/MDRJGZS
+Harp/MR
+harpooner/M
+harpoon/SZGDRM
+harpsichordist/MS
+harpsichord/SM
+harpy/SM
+Harpy/SM
+Harrell/M
+harridan/SM
+Harrie/M
+harrier/M
+Harriet/M
+Harrietta/M
+Harriette/M
+Harriett/M
+Harrington/M
+Harriot/M
+Harriott/M
+Harrisburg/M
+Harri/SM
+Harrisonburg/M
+Harrison/M
+harrower/M
+harrow/RDMGS
+harrumph/SDG
+Harry/M
+harry/RSDGZ
+harshen/GD
+harshness/SM
+harsh/TRNYP
+Harte/M
+Hartford/M
+Hartley/M
+Hartline/M
+Hart/M
+Hartman/M
+hart/MS
+Hartwell/M
+Harvard/M
+harvested/U
+harvester/M
+harvestman/M
+harvest/MDRZGS
+Harvey/MS
+Harv/M
+Harwell/M
+Harwilll/M
+has
+Hasbro/M
+hash/AGSD
+Hasheem/M
+hasher/M
+Hashim/M
+hashing/M
+hashish/MS
+hash's
+Hasidim
+Haskell/M
+Haskel/M
+Haskins/M
+Haslett/M
+hasn't
+hasp/GMDS
+hassle/MGRSD
+hassock/MS
+haste/MS
+hastener/M
+hasten/GRD
+hast/GXJDN
+Hastie/M
+hastily
+hastiness/MS
+Hastings/M
+Hasty/M
+hasty/RPT
+hatchback/SM
+hatcheck/S
+hatched/U
+hatcher/M
+hatchery/MS
+hatchet/MDSG
+hatching/M
+hatch/RSDJG
+Hatchure/M
+hatchway/MS
+hatefulness/MS
+hateful/YP
+hater/M
+hate/S
+Hatfield/M
+Hathaway/M
+hatless
+hat/MDRSZG
+hatred/SM
+hatstands
+hatted
+Hatteras/M
+hatter/SM
+Hattie/M
+Hatti/M
+hatting
+Hatty/M
+hauberk/SM
+Haugen/M
+haughtily
+haughtiness/SM
+haughty/TPR
+haulage/MS
+hauler/M
+haul/SDRGZ
+haunch/GMSD
+haunter/M
+haunting/Y
+haunt/JRDSZG
+Hauptmann/M
+Hausa/M
+Hausdorff/M
+Hauser/M
+hauteur/MS
+Havana/SM
+Havarti
+Havel/M
+haven/DMGS
+Haven/M
+haven't
+haver/G
+haversack/SM
+have/ZGSR
+havocked
+havocking
+havoc/SM
+Haw
+Hawaiian/S
+Hawaii/M
+hawker/M
+hawk/GZSDRM
+Hawking
+hawking/M
+Hawkins/M
+hawkishness/S
+hawkish/P
+Hawley/M
+haw/MDSG
+hawser/M
+haws/RZ
+Hawthorne/M
+hawthorn/MS
+haycock/SM
+Hayden/M
+Haydn/M
+Haydon/M
+Hayes
+hayfield/MS
+hay/GSMDR
+Hayley/M
+hayloft/MS
+haymow/MS
+Haynes
+hayrick/MS
+hayride/MS
+hayseed/MS
+Hay/SM
+haystack/SM
+haywain
+Hayward/M
+haywire/MS
+Haywood/M
+Hayyim/M
+hazard/MDGS
+hazardousness/M
+hazardous/PY
+haze/DSRJMZG
+Hazel/M
+hazel/MS
+hazelnut/SM
+Haze/M
+hazer/M
+hazily
+haziness/MS
+hazing/M
+Hazlett/M
+Hazlitt/M
+hazy/PTR
+HBO/M
+hdqrs
+HDTV
+headache/MS
+headband/SM
+headboard/MS
+headcount
+headdress/MS
+header/M
+headfirst
+headgear/SM
+headhunter/M
+headhunting/M
+headhunt/ZGSRDMJ
+headily
+headiness/S
+heading/M
+headlamp/S
+headland/MS
+headlessness/M
+headless/P
+headlight/MS
+headline/DRSZMG
+headliner/M
+headlock/MS
+headlong
+Head/M
+headman/M
+headmaster/MS
+headmastership/M
+headmen
+headmistress/MS
+headphone/SM
+headpiece/SM
+headpin/MS
+headquarter/GDS
+headrest/MS
+headroom/SM
+headscarf/M
+headset/SM
+headship/SM
+headshrinker/MS
+head/SJGZMDR
+headsman/M
+headsmen
+headstall/SM
+headstand/MS
+headstock/M
+headstone/MS
+headstrong
+headwaiter/SM
+headwall/S
+headwater/S
+headway/MS
+headwind/SM
+headword/MS
+heady/PTR
+heal/DRHSGZ
+healed/U
+healer/M
+Heall/M
+healthfully
+healthfulness/SM
+healthful/U
+healthily/U
+healthiness/MSU
+health/M
+healths
+healthy/URPT
+heap/SMDG
+heard/UA
+hearer/M
+hearing/AM
+hearken/SGD
+hearsay/SM
+hearse/M
+hears/SDAG
+Hearst/M
+heartache/SM
+heartbeat/MS
+heartbreak/GMS
+heartbreaking/Y
+heartbroke
+heartbroken
+heartburning/M
+heartburn/SGM
+hearted/Y
+hearten/EGDS
+heartening/EY
+heartfelt
+hearth/M
+hearthrug
+hearths
+hearthstone/MS
+heartily
+heartiness/SM
+heartland/SM
+heartlessness/SM
+heartless/YP
+heartrending/Y
+heartsickness/MS
+heartsick/P
+heart/SMDNXG
+heartstrings
+heartthrob/MS
+heartwarming
+Heartwood/M
+heartwood/SM
+hearty/TRSP
+hear/ZTSRHJG
+heatedly
+heated/UA
+heater/M
+heathendom/SM
+heathenish/Y
+heathenism/MS
+heathen/M
+heather/M
+Heather/M
+heathery
+Heathkit/M
+heathland
+Heathman/M
+Heath/MR
+heath/MRNZX
+heaths
+heatproof
+heats/A
+heat/SMDRGZBJ
+heatstroke/MS
+heatwave
+heave/DSRGZ
+heavenliness/M
+heavenly/PTR
+heaven/SYM
+heavenward/S
+heaver/M
+heaves/M
+heavily
+heaviness/MS
+Heaviside/M
+heavyhearted
+heavyset
+heavy/TPRS
+heavyweight/SM
+Hebe/M
+hebephrenic
+Hebert/M
+Heb/M
+Hebraic
+Hebraism/MS
+Hebrew/SM
+Hebrides/M
+Hecate/M
+hecatomb/M
+heckler/M
+heckle/RSDZG
+heck/S
+hectare/MS
+hectically
+hectic/S
+hectogram/MS
+hectometer/SM
+Hector/M
+hector/SGD
+Hecuba/M
+he'd
+Heda/M
+Hedda/M
+Heddie/M
+Heddi/M
+hedge/DSRGMZ
+hedgehog/MS
+hedgehopped
+hedgehopping
+hedgehop/S
+hedger/M
+hedgerow/SM
+hedging/Y
+Hedi/M
+hedonism/SM
+hedonistic
+hedonist/MS
+Hedvige/M
+Hedvig/M
+Hedwiga/M
+Hedwig/M
+Hedy/M
+heeded/U
+heedfulness/M
+heedful/PY
+heeding/U
+heedlessness/SM
+heedless/YP
+heed/SMGD
+heehaw/DGS
+heeler/M
+heeling/M
+heelless
+heel/SGZMDR
+Heep/M
+Hefner/M
+heft/GSD
+heftily
+heftiness/SM
+hefty/TRP
+Hegelian
+Hegel/M
+hegemonic
+hegemony/MS
+Hegira/M
+hegira/S
+Heida/M
+Heidegger/M
+Heidelberg/M
+Heidie/M
+Heidi/M
+heifer/MS
+Heifetz/M
+heighten/GD
+height/SMNX
+Heimlich/M
+Heindrick/M
+Heineken/M
+Heine/M
+Heinlein/M
+heinousness/SM
+heinous/PY
+Heinrich/M
+Heinrick/M
+Heinrik/M
+Heinze/M
+Heinz/M
+heiress/MS
+heirloom/MS
+heir/SDMG
+Heisenberg/M
+Heiser/M
+heister/M
+heist/GSMRD
+Hejira's
+Helaina/M
+Helaine/M
+held
+Helena/M
+Helene/M
+Helenka/M
+Helen/M
+Helga/M
+Helge/M
+helical/Y
+helices/M
+helicon/M
+Helicon/M
+helicopter/GSMD
+heliocentric
+heliography/M
+Heliopolis/M
+Helios/M
+heliosphere
+heliotrope/SM
+heliport/MS
+helium/MS
+helix/M
+he'll
+hellbender/M
+hellbent
+hellcat/SM
+hellebore/SM
+Hellene/SM
+Hellenic
+Hellenism/MS
+Hellenistic
+Hellenist/MS
+Hellenization/M
+Hellenize
+heller/M
+Heller/M
+Hellespont/M
+hellfire/M
+hell/GSMDR
+hellhole/SM
+Helli/M
+hellion/SM
+hellishness/SM
+hellish/PY
+Hellman/M
+hello/GMS
+Hell's
+helluva
+helmed
+helmet/GSMD
+Helmholtz/M
+helming
+helms
+helm's
+helmsman/M
+helmsmen
+helm/U
+Helmut/M
+Héloise/M
+helot/S
+helper/M
+helpfulness/MS
+helpful/UY
+help/GZSJDR
+helping/M
+helplessness/SM
+helpless/YP
+helpline/S
+helpmate/SM
+helpmeet's
+Helsa/M
+Helsinki/M
+helve/GMDS
+Helvetian/S
+Helvetius/M
+Helyn/M
+He/M
+hematite/MS
+hematologic
+hematological
+hematologist/SM
+hematology/MS
+heme/MS
+Hemingway/M
+hemisphere/MSD
+hemispheric
+hemispherical
+hemline/SM
+hemlock/MS
+hemmed
+hemmer/SM
+hemming
+hem/MS
+hemoglobin/MS
+hemolytic
+hemophiliac/SM
+hemophilia/SM
+hemorrhage/GMDS
+hemorrhagic
+hemorrhoid/MS
+hemostat/SM
+hemp/MNS
+h/EMS
+hemstitch/DSMG
+henceforth
+henceforward
+hence/S
+Hench/M
+henchman/M
+henchmen
+Henderson/M
+Hendrick/SM
+Hendrickson/M
+Hendrika/M
+Hendrik/M
+Hendrix/M
+henge/M
+Henka/M
+Henley/M
+hen/MS
+henna/MDSG
+Hennessey/M
+henning
+henpeck/GSD
+Henrie/M
+Henrieta/M
+Henrietta/M
+Henriette/M
+Henrik/M
+Henri/M
+Henryetta/M
+henry/M
+Henry/M
+Hensley/M
+Henson/M
+heparin/MS
+hepatic/S
+hepatitides
+hepatitis/M
+Hepburn/M
+Hephaestus/M
+Hephzibah/M
+hepper
+heppest
+Hepplewhite
+hep/S
+heptagonal
+heptagon/SM
+heptane/M
+heptathlon/S
+her
+Heracles/M
+Heraclitus/M
+heralded/U
+heraldic
+herald/MDSG
+heraldry/MS
+Hera/M
+herbaceous
+herbage/MS
+herbalism
+herbalist/MS
+herbal/S
+Herbart/M
+Herbert/M
+herbicidal
+herbicide/MS
+Herbie/M
+herbivore/SM
+herbivorous/Y
+Herb/M
+herb/MS
+Herby/M
+Herc/M
+Herculaneum/M
+herculean
+Herculean
+Hercule/MS
+Herculie/M
+herder/M
+Herder/M
+herd/MDRGZS
+herdsman/M
+herdsmen
+hereabout/S
+hereafter/S
+hereby
+hereditary
+heredity/MS
+Hereford/SM
+herein
+hereinafter
+here/IS
+hereof
+hereon
+here's
+heres/M
+heresy/SM
+heretical
+heretic/SM
+hereto
+heretofore
+hereunder
+hereunto
+hereupon
+herewith
+Heriberto/M
+heritable
+heritage/MS
+heritor/IM
+Herkimer/M
+Herman/M
+Hermann/M
+hermaphrodite/SM
+hermaphroditic
+Hermaphroditus/M
+hermeneutic/S
+hermeneutics/M
+Hermes
+hermetical/Y
+hermetic/S
+Hermia/M
+Hermie/M
+Hermina/M
+Hermine/M
+Herminia/M
+Hermione/M
+hermitage/SM
+Hermite/M
+hermitian
+hermit/MS
+Hermon/M
+Hermosa/M
+Hermosillo/M
+Hermy/M
+Hernandez/M
+Hernando/M
+hernial
+hernia/MS
+herniate/NGXDS
+Herod/M
+Herodotus/M
+heroes
+heroically
+heroics
+heroic/U
+heroine/SM
+heroin/MS
+heroism/SM
+Herold/M
+hero/M
+heron/SM
+herpes/M
+herpetologist/SM
+herpetology/MS
+Herrera/M
+Herrick/M
+herringbone/SDGM
+Herring/M
+herring/SM
+Herrington/M
+Herr/MG
+Herschel/M
+Hersch/M
+herself
+Hersey/M
+Hershel/M
+Hershey/M
+Hersh/M
+Herta/M
+Hertha/M
+hertz/M
+Hertz/M
+Hertzog/M
+Hertzsprung/M
+Herve/M
+Hervey/M
+Herzegovina/M
+Herzl/M
+hes
+Hesiod/M
+hesitance/S
+hesitancy/SM
+hesitantly
+hesitant/U
+hesitater/M
+hesitate/XDRSNG
+hesitating/UY
+hesitation/M
+Hesperus/M
+Hesse/M
+Hessian/MS
+Hess/M
+Hester/M
+Hesther/M
+Hestia/M
+Heston/M
+heterodox
+heterodoxy/MS
+heterodyne
+heterogamous
+heterogamy/M
+heterogeneity/SM
+heterogeneousness/M
+heterogeneous/PY
+heterosexuality/SM
+heterosexual/YMS
+heterostructure
+heterozygous
+Hettie/M
+Hetti/M
+Hetty/M
+Heublein/M
+heuristically
+heuristic/SM
+Heusen/M
+Heuser/M
+he/VMZ
+hew/DRZGS
+Hewe/M
+hewer/M
+Hewet/M
+Hewett/M
+Hewie/M
+Hewitt/M
+Hewlett/M
+Hew/M
+hexachloride/M
+hexadecimal/YS
+hexafluoride/M
+hexagonal/Y
+hexagon/SM
+hexagram/SM
+hexameter/SM
+hex/DSRG
+hexer/M
+hey
+heyday/MS
+Heyerdahl/M
+Heywood/M
+Hezekiah/M
+hf
+HF
+Hf/M
+Hg/M
+hgt
+hgwy
+HHS
+HI
+Hialeah/M
+hiatus/SM
+Hiawatha/M
+hibachi/MS
+hibernate/XGNSD
+hibernation/M
+hibernator/SM
+Hibernia/M
+Hibernian/S
+hibiscus/MS
+hiccup/MDGS
+hickey/SM
+Hickey/SM
+Hickman/M
+Hickok/M
+hickory/MS
+hick/SM
+Hicks/M
+hi/D
+hidden/U
+hideaway/SM
+hidebound
+hideousness/SM
+hideous/YP
+hideout/MS
+hider/M
+hide/S
+hiding/M
+hid/ZDRGJ
+hieing
+hierarchal
+hierarchic
+hierarchical/Y
+hierarchy/SM
+hieratic
+hieroglyph
+hieroglyphic/S
+hieroglyphics/M
+hieroglyphs
+Hieronymus/M
+hie/S
+hifalutin
+Higashiosaka
+Higgins/M
+highball/GSDM
+highborn
+highboy/MS
+highbrow/SM
+highchair/SM
+highfalutin
+Highfield/M
+highhandedness/SM
+highhanded/PY
+highish
+Highlander/SM
+Highlands
+highland/ZSRM
+highlight/GZRDMS
+Highness/M
+highness/MS
+highpoint
+high/PYRT
+highroad/MS
+highs
+hight
+hightail/DGS
+highwayman/M
+highwaymen
+highway/MS
+hijacker/M
+hijack/JZRDGS
+hiker/M
+hike/ZGDSR
+Hilario/M
+hilariousness/MS
+hilarious/YP
+hilarity/MS
+Hilarius/M
+Hilary/M
+Hilbert/M
+Hildagarde/M
+Hildagard/M
+Hilda/M
+Hildebrand/M
+Hildegaard/M
+Hildegarde/M
+Hilde/M
+Hildy/M
+Hillard/M
+Hillary/M
+hillbilly/MS
+Hillcrest/M
+Hillel/M
+hiller/M
+Hillery/M
+hill/GSMDR
+Hilliard/M
+Hilliary/M
+Hillie/M
+Hillier/M
+hilliness/SM
+Hill/M
+hillman
+hillmen
+hillock/SM
+Hillsboro/M
+Hillsdale/M
+hillside/SM
+hilltop/MS
+hillwalking
+Hillyer/M
+Hilly/RM
+hilly/TRP
+hilt/MDGS
+Hilton/M
+Hi/M
+Himalaya/MS
+Himalayan/S
+Himmler/M
+him/S
+himself
+Hinayana/M
+Hinda/M
+Hindemith/M
+Hindenburg/M
+hindered/U
+hinderer/M
+hinder/GRD
+Hindi/M
+hindmost
+hindquarter/SM
+hindrance/SM
+hind/RSZ
+hindsight/SM
+Hinduism/SM
+Hindu/MS
+Hindustani/MS
+Hindustan/M
+Hines/M
+hinger
+hinge's
+hinge/UDSG
+Hinkle/M
+Hinsdale/M
+hinterland/MS
+hinter/M
+hint/GZMDRS
+Hinton/M
+Hinze/M
+hipbone/SM
+hipness/S
+Hipparchus/M
+hipped
+hipper
+hippest
+hippie/MTRS
+hipping/M
+Hippocrates/M
+Hippocratic
+hippodrome/MS
+hippo/MS
+hippopotamus/SM
+hip/PSM
+hippy's
+hipster/MS
+hiragana
+Hiram/M
+hire/AGSD
+hireling/SM
+hirer/SM
+Hirey/M
+hiring/S
+Hirohito/M
+Hiroshi/M
+Hiroshima/M
+Hirsch/M
+hirsuteness/MS
+hirsute/P
+his
+Hispanic/SM
+Hispaniola/M
+hiss/DSRMJG
+hisser/M
+hissing/M
+Hiss/M
+histamine/SM
+histidine/SM
+histochemic
+histochemical
+histochemistry/M
+histogram/MS
+histological
+histologist/MS
+histology/SM
+historian/MS
+historic
+historicalness/M
+historical/PY
+historicism/M
+historicist/M
+historicity/MS
+historiographer/SM
+historiography/MS
+history/MS
+histrionically
+histrionic/S
+histrionics/M
+hist/SDG
+Hitachi/M
+Hitchcock/M
+hitcher/MS
+hitchhike/RSDGZ
+hitch/UGSD
+hither
+hitherto
+Hitler/SM
+hitless
+hit/MS
+hittable
+hitter/SM
+hitting
+Hittite/SM
+HIV
+hive/MGDS
+h'm
+HM
+HMO
+Hmong
+HMS
+hoarder/M
+hoarding/M
+hoard/RDJZSGM
+hoarfrost/SM
+hoariness/MS
+hoar/M
+hoarseness/SM
+hoarse/RTYP
+hoary/TPR
+hoaxer/M
+hoax/GZMDSR
+Hobard/M
+Hobart/M
+hobbed
+Hobbes/M
+hobbing
+hobbit
+hobbler/M
+hobble/ZSRDG
+Hobbs/M
+hobbyhorse/SM
+hobbyist/SM
+hobby/SM
+Hobday/M
+Hobey/M
+hobgoblin/MS
+Hobie/M
+hobnail/GDMS
+hobnobbed
+hobnobbing
+hobnob/S
+Hoboken/M
+hobo/SDMG
+hob/SM
+hoc
+hocker/M
+hockey/SM
+hock/GDRMS
+Hockney/M
+hockshop/SM
+hodge/MS
+Hodge/MS
+hodgepodge/SM
+Hodgkin/M
+ho/DRYZ
+hod/SM
+Hoebart/M
+hoecake/SM
+hoedown/MS
+hoeing
+hoer/M
+hoe/SM
+Hoffa/M
+Hoff/M
+Hoffman/M
+Hofstadter/M
+Hogan/M
+hogan/SM
+Hogarth/M
+hogback/MS
+hogged
+hogger
+hogging
+hoggish/Y
+hogshead/SM
+hog/SM
+hogtie/SD
+hogtying
+hogwash/SM
+Hohenlohe/M
+Hohenstaufen/M
+Hohenzollern/M
+Hohhot/M
+hoister/M
+hoist/GRDS
+hoke/DSG
+hokey/PRT
+hokier
+hokiest
+Hokkaido/M
+hokum/MS
+Hokusai/M
+Holbein/M
+Holbrook/M
+Holcomb/M
+holdall/MS
+Holden/M
+holder/M
+Holder/M
+holding/IS
+holding's
+hold/NRBSJGZ
+holdout/SM
+holdover/SM
+holdup/MS
+hole/MGDS
+holey
+holiday/GRDMS
+Holiday/M
+holidaymaker/S
+holier/U
+Holiness/MS
+holiness/MSU
+holistic
+holistically
+hollandaise
+Hollandaise/M
+Hollander/M
+Holland/RMSZ
+holler/GDS
+Hollerith/M
+Holley/M
+Hollie/M
+Holli/SM
+Hollister/M
+Holloway/M
+hollowness/MS
+hollow/RDYTGSP
+hollowware/M
+Hollyanne/M
+hollyhock/MS
+Holly/M
+holly/SM
+Hollywood/M
+Holman/M
+Holmes
+holmium/MS
+Holm/M
+Holocaust
+holocaust/MS
+Holocene
+hologram/SM
+holograph/GMD
+holographic
+holographs
+holography/MS
+Holstein/MS
+holster/MDSG
+Holst/M
+Holt/M
+Holyoke/M
+holy/SRTP
+holystone/MS
+Holzman/M
+Ho/M
+homage/MGSRD
+homager/M
+hombre/SM
+homburg/SM
+homebody/MS
+homebound
+homeboy/S
+homebuilder/S
+homebuilding
+homebuilt
+homecoming/MS
+home/DSRMYZG
+homegrown
+homeland/SM
+homelessness/SM
+homeless/P
+homelike
+homeliness/SM
+homely/RPT
+homemade
+homemake/JRZG
+homemaker/M
+homemaking/M
+homeomorphic
+homeomorphism/MS
+homeomorph/M
+homeopath
+homeopathic
+homeopaths
+homeopathy/MS
+homeostases
+homeostasis/M
+homeostatic
+homeowner/S
+homeownership
+homepage
+Homere/M
+homer/GDM
+Homeric
+homerists
+Homer/M
+homeroom/MS
+Homerus/M
+homeschooling/S
+homesickness/MS
+homesick/P
+homespun/S
+homesteader/M
+homestead/GZSRDM
+homestretch/SM
+hometown/SM
+homeward
+homeworker/M
+homework/ZSMR
+homeyness/MS
+homey/PS
+homicidal/Y
+homicide/SM
+homier
+homiest
+homiletic/S
+homily/SM
+hominess's
+homing/M
+hominid/MS
+hominy/SM
+Hom/MR
+homogamy/M
+homogenate/MS
+homogeneity/ISM
+homogeneous/PY
+homogenization/MS
+homogenize/DRSGZ
+homogenizer/M
+homograph/M
+homographs
+homological
+homologous
+homologue/M
+homology/MS
+homomorphic
+homomorphism/SM
+homonym/SM
+homophobia/S
+homophobic
+homophone/MS
+homopolymers
+homosexuality/SM
+homosexual/YMS
+homo/SM
+homotopy
+homozygous/Y
+honcho/DSG
+Honda/M
+Hondo/M
+Honduran/S
+Honduras/M
+Honecker/M
+hone/SM
+honestly/E
+honest/RYT
+honesty/ESM
+honeybee/SM
+honeycomb/SDMG
+honeydew/SM
+honey/GSMD
+honeylocust
+Honey/M
+honeymooner/M
+honeymoon/RDMGZS
+honeysuckle/MS
+Honeywell/M
+hong/M
+Honiara/M
+honker/M
+honk/GZSDRM
+honky/SM
+Hon/M
+hon/MDRSZTG
+Honolulu/M
+honorableness/SM
+honorable/PSM
+honorables/U
+honorablies/U
+honorably/UE
+honorarily
+honorarium/SM
+honorary/S
+honored/U
+honoree/S
+honor/ERDBZGS
+honorer/EM
+Honoria/M
+honorific/S
+Honor/M
+honor's
+honors/A
+Honshu/M
+hooch/MS
+hoodedness/M
+hooded/P
+hoodlum/SM
+Hood/M
+hood/MDSG
+hoodoo/DMGS
+hoodwinker/M
+hoodwink/SRDG
+hooey/SM
+hoof/DRMSG
+hoofer/M
+hoofmark/S
+hookah/M
+hookahs
+hookedness/M
+hooked/P
+Hooke/MR
+hooker/M
+Hooker/M
+hookey's
+hook/GZDRMS
+hooks/U
+hookup/SM
+hookworm/MS
+hooky/SRMT
+hooliganism/SM
+hooligan/SM
+hooper/M
+Hooper/M
+hoopla/SM
+hoop/MDRSG
+hooray/SMDG
+hoosegow/MS
+Hoosier/SM
+hootch's
+hootenanny/SM
+hooter/M
+hoot/MDRSGZ
+Hoover/MS
+hooves/M
+hoped/U
+hopefulness/MS
+hopeful/SPY
+hopelessness/SM
+hopeless/YP
+Hope/M
+hoper/M
+hope/SM
+Hopewell/M
+Hopi/SM
+Hopkinsian/M
+Hopkins/M
+hopped
+Hopper/M
+hopper/MS
+hopping/M
+hoppled
+hopples
+hopscotch/MDSG
+hop/SMDRG
+Horace/M
+Horacio/M
+Horatia/M
+Horatio/M
+Horatius/M
+horde/DSGM
+horehound/MS
+horizon/MS
+horizontal/YS
+Hormel/M
+hormonal/Y
+hormone/MS
+Hormuz/M
+hornbeam/M
+hornblende/MS
+Hornblower/M
+hornedness/M
+horned/P
+Horne/M
+hornet/MS
+horn/GDRMS
+horniness/M
+hornless
+hornlike
+Horn/M
+hornpipe/MS
+horny/TRP
+horologic
+horological
+horologist/MS
+horology/MS
+horoscope/MS
+Horowitz/M
+horrendous/Y
+horribleness/SM
+horrible/SP
+horribly
+horridness/M
+horrid/PY
+horrific
+horrifically
+horrify/DSG
+horrifying/Y
+horror/MS
+hors/DSGX
+horseback/MS
+horsedom
+horseflesh/M
+horsefly/MS
+horsehair/SM
+horsehide/SM
+horselaugh/M
+horselaughs
+horseless
+horselike
+horsely
+horseman/M
+horsemanship/MS
+horsemen
+horseplayer/M
+horseplay/SMR
+horsepower/SM
+horseradish/SM
+horse's
+horseshoeing
+horseshoe/MRSD
+horseshoer/M
+horsetail/SM
+horse/UGDS
+horsewhipped
+horsewhipping
+horsewhip/SM
+horsewoman/M
+horsewomen
+horsey
+horsier
+horsiest
+horsing/M
+Horst/M
+hortatory
+Horten/M
+Hortense/M
+Hortensia/M
+horticultural
+horticulture/SM
+horticulturist/SM
+Hort/MN
+Horton/M
+Horus/M
+hosanna/SDG
+Hosea/M
+hose/M
+hosepipe
+hos/GDS
+hosier/MS
+hosiery/SM
+hosp
+hospice/MS
+hospitable/I
+hospitably/I
+hospitality/MS
+hospitality's/I
+hospitalization/MS
+hospitalize/GSD
+hospital/MS
+hostage/MS
+hosteler/M
+hostelry/MS
+hostel/SZGMRD
+hostess/MDSG
+hostile/YS
+hostility/SM
+hostler/MS
+Host/MS
+host/MYDGS
+hotbed/MS
+hotblooded
+hotbox/MS
+hotcake/S
+hotchpotch/M
+hotelier/MS
+hotelman/M
+hotel/MS
+hotfoot/DGS
+hothead/DMS
+hotheadedness/SM
+hotheaded/PY
+hothouse/MGDS
+hotness/MS
+hotplate/SM
+hotpot/M
+hot/PSY
+hotrod
+hotshot/S
+hotted
+Hottentot/SM
+hotter
+hottest
+hotting
+Houdaille/M
+Houdini/M
+hough/M
+hounder/M
+hounding/M
+hound/MRDSG
+hourglass/MS
+houri/MS
+hourly/S
+hour/YMS
+house/ASDG
+houseboat/SM
+housebound
+houseboy/SM
+housebreaker/M
+housebreaking/M
+housebreak/JSRZG
+housebroke
+housebroken
+housebuilding
+housecleaning/M
+houseclean/JDSG
+housecoat/MS
+housefly/MS
+houseful/SM
+householder/M
+household/ZRMS
+househusband/S
+housekeeper/M
+housekeeping/M
+housekeep/JRGZ
+houselights
+House/M
+housemaid/MS
+houseman/M
+housemen
+housemother/MS
+housemoving
+houseparent/SM
+houseplant/S
+houser
+house's
+housetop/MS
+housewares
+housewarming/MS
+housewifeliness/M
+housewifely/P
+housewife/YM
+housewives
+houseworker/M
+housework/ZSMR
+housing/MS
+Housman/M
+Houston/M
+Houyhnhnm/M
+HOV
+hovel/GSMD
+hovercraft/M
+hoverer/M
+hover/GRD
+hove/ZR
+Howard/M
+howbeit
+howdah/M
+howdahs
+howdy/GSD
+Howell/MS
+Howe/M
+however
+Howey/M
+Howie/M
+howitzer/MS
+howler/M
+howl/GZSMDR
+Howrah/M
+how/SM
+howsoever
+hoyden/DMGS
+hoydenish
+Hoyle/SM
+hoy/M
+Hoyt/M
+hp
+HP
+HQ
+hr
+HR
+HRH
+Hrothgar/M
+hrs
+h's
+H's
+HS
+HST
+ht
+HTML
+Hts/M
+HTTP
+Huang/M
+huarache/SM
+hubba
+Hubbard/M
+Hubble/M
+hubbub/SM
+hubby/SM
+hubcap/SM
+Huber/M
+Hube/RM
+Hubert/M
+Huberto/M
+Hubey/M
+Hubie/M
+hub/MS
+hubris/SM
+huckleberry/SM
+Huck/M
+huckster/SGMD
+HUD
+Huddersfield/M
+huddler/M
+huddle/RSDMG
+Hudson/M
+hue/MDS
+Huerta/M
+Huey/M
+huffily
+huffiness/SM
+Huff/M
+Huffman/M
+huff/SGDM
+huffy/TRP
+hugeness/MS
+huge/YP
+hugged
+hugger
+hugging/S
+Huggins
+Hughie/M
+Hugh/MS
+Hugibert/M
+Hugo/M
+hug/RTS
+Huguenot/SM
+Hugues/M
+huh
+huhs
+Hui/M
+Huitzilopitchli/M
+hula/MDSG
+Hulda/M
+hulk/GDMS
+hullabaloo/SM
+huller/M
+hulling/M
+Hull/M
+hull/MDRGZS
+hullo/GSDM
+humane/IY
+humaneness/SM
+humaner
+humanest
+human/IPY
+humanism/SM
+humanistic
+humanist/SM
+humanitarianism/SM
+humanitarian/S
+humanity/ISM
+humanization/CSM
+humanized/C
+humanizer/M
+humanize/RSDZG
+humanizes/IAC
+humanizing/C
+humankind/M
+humannesses
+humanness/IM
+humanoid/S
+humans
+Humbert/M
+Humberto/M
+humbleness/SM
+humble/TZGPRSDJ
+humbly
+Humboldt/M
+humbugged
+humbugging
+humbug/MS
+humdinger/MS
+humdrum/S
+Hume/M
+humeral/S
+humeri
+humerus/M
+Humfrey/M
+Humfrid/M
+Humfried/M
+humidification/MC
+humidifier/CM
+humidify/RSDCXGNZ
+humidistat/M
+humidity/MS
+humidor/MS
+humid/Y
+humiliate/SDXNG
+humiliating/Y
+humiliation/M
+humility/MS
+hummed
+Hummel/M
+hummer/SM
+humming
+hummingbird/SM
+hummock/MDSG
+hummocky
+hummus/S
+humongous
+humored/U
+humorist/MS
+humorlessness/MS
+humorless/PY
+humorousness/MS
+humorous/YP
+humor/RDMZGS
+humpback/SMD
+hump/GSMD
+humph/DG
+Humphrey/SM
+humphs
+Humpty/M
+hum/S
+humus/SM
+Humvee
+hunchback/DSM
+hunch/GMSD
+hundredfold/S
+hundred/SHRM
+hundredths
+hundredweight/SM
+Hunfredo/M
+hung/A
+Hungarian/MS
+Hungary/M
+hunger/SDMG
+Hung/M
+hungover
+hungrily
+hungriness/SM
+hungry/RTP
+hunker/DG
+hunky/RST
+hunk/ZRMS
+Hun/MS
+hunter/M
+Hunter/M
+hunt/GZJDRS
+hunting/M
+Huntington/M
+Huntlee/M
+Huntley/M
+Hunt/MR
+huntress/MS
+huntsman/M
+huntsmen
+Huntsville/M
+hurdle/JMZGRSD
+hurdler/M
+hurl/DRGZJS
+Hurlee/M
+Hurleigh/M
+hurler/M
+Hurley/M
+hurling/M
+Huron/SM
+hurray/SDG
+hurricane/MS
+hurriedness/M
+hurried/UY
+hurry/RSDG
+Hurst/M
+hurter/M
+hurtfulness/MS
+hurtful/PY
+hurting/Y
+hurtle/SDG
+hurts
+hurt/U
+Hurwitz/M
+Hus
+Husain's
+husbander/M
+husband/GSDRYM
+husbandman/M
+husbandmen
+husbandry/SM
+Husein/M
+hush/DSG
+husker/M
+huskily
+huskiness/MS
+husking/M
+husk/SGZDRM
+husky/RSPT
+hussar/MS
+Hussein/M
+Husserl/M
+hussy/SM
+hustings/M
+hustler/M
+hustle/RSDZG
+Huston/M
+Hutchins/M
+Hutchinson/M
+Hutchison/M
+hutch/MSDG
+hut/MS
+hutted
+hutting
+Hutton/M
+Hutu/M
+Huxley/M
+Huygens/M
+huzzah/GD
+huzzahs
+hwy
+Hyacintha/M
+Hyacinthe/M
+Hyacinthia/M
+Hyacinthie/M
+hyacinth/M
+Hyacinth/M
+hyacinths
+Hyades
+hyaena's
+Hyannis/M
+Hyatt/M
+hybridism/SM
+hybridization/S
+hybridize/GSD
+hybrid/MS
+Hyde/M
+Hyderabad/M
+Hydra/M
+hydra/MS
+hydrangea/SM
+hydrant/SM
+hydrate/CSDNGX
+hydrate's
+hydration/MC
+hydraulically
+hydraulicked
+hydraulicking
+hydraulic/S
+hydraulics/M
+hydrazine/M
+hydride/MS
+hydrocarbon/SM
+hydrocephali
+hydrocephalus/MS
+hydrochemistry
+hydrochloric
+hydrochloride/M
+hydrodynamical
+hydrodynamic/S
+hydrodynamics/M
+hydroelectric
+hydroelectrically
+hydroelectricity/SM
+hydrofluoric
+hydrofoil/MS
+hydrogenate/CDSGN
+hydrogenate's
+hydrogenation/MC
+hydrogenations
+hydrogen/MS
+hydrogenous
+hydrological/Y
+hydrologist/MS
+hydrology/SM
+hydrolysis/M
+hydrolyzed/U
+hydrolyze/GSD
+hydromagnetic
+hydromechanics/M
+hydrometer/SM
+hydrometry/MS
+hydrophilic
+hydrophobia/SM
+hydrophobic
+hydrophone/SM
+hydroplane/DSGM
+hydroponic/S
+hydroponics/M
+hydro/SM
+hydrosphere/MS
+hydrostatic/S
+hydrostatics/M
+hydrotherapy/SM
+hydrothermal/Y
+hydrous
+hydroxide/MS
+hydroxy
+hydroxylate/N
+hydroxyl/SM
+hydroxyzine/M
+hyena/MS
+hygiene/MS
+hygienically
+hygienic/S
+hygienics/M
+hygienist/MS
+hygrometer/SM
+hygroscopic
+hying
+Hy/M
+Hyman/M
+hymeneal/S
+Hymen/M
+hymen/MS
+Hymie/M
+hymnal/SM
+hymnbook/S
+hymn/GSDM
+Hynda/M
+hype/MZGDSR
+hyperactive/S
+hyperactivity/SM
+hyperbola/MS
+hyperbole/MS
+hyperbolic
+hyperbolically
+hyperboloidal
+hyperboloid/SM
+hypercellularity
+hypercritical/Y
+hypercube/MS
+hyperemia/M
+hyperemic
+hyperfine
+hypergamous/Y
+hypergamy/M
+hyperglycemia/MS
+hyperinflation
+Hyperion/M
+hypermarket/SM
+hypermedia/S
+hyperplane/SM
+hyperplasia/M
+hypersensitiveness/MS
+hypersensitive/P
+hypersensitivity/MS
+hypersonic
+hyperspace/M
+hypersphere/M
+hypertension/MS
+hypertensive/S
+hypertext/SM
+hyperthyroid
+hyperthyroidism/MS
+hypertrophy/MSDG
+hypervelocity
+hyperventilate/XSDGN
+hyperventilation/M
+hyphenated/U
+hyphenate/NGXSD
+hyphenation/M
+hyphen/DMGS
+hypnoses
+hypnosis/M
+hypnotherapy/SM
+hypnotically
+hypnotic/S
+hypnotism/MS
+hypnotist/SM
+hypnotize/SDG
+hypoactive
+hypoallergenic
+hypocellularity
+hypochondriac/SM
+hypochondria/MS
+hypocrisy/SM
+hypocrite/MS
+hypocritical/Y
+hypodermic/S
+hypo/DMSG
+hypoglycemia/SM
+hypoglycemic/S
+hypophyseal
+hypophysectomized
+hypotenuse/MS
+hypothalami
+hypothalamic
+hypothalamically
+hypothalamus/M
+hypothermia/SM
+hypotheses
+hypothesis/M
+hypothesizer/M
+hypothesize/ZGRSD
+hypothetic
+hypothetical/Y
+hypothyroid
+hypothyroidism/SM
+hypoxia/M
+hyssop/MS
+hysterectomy/MS
+hysteresis/M
+hysteria/SM
+hysterical/YU
+hysteric/SM
+Hyundai/M
+Hz
+i
+I
+IA
+Iaccoca/M
+Iago/M
+Iain/M
+Ia/M
+iambi
+iambic/S
+iamb/MS
+iambus/SM
+Ian/M
+Ianthe/M
+Ibadan/M
+Ibbie/M
+Ibby/M
+Iberia/M
+Iberian/MS
+Ibero/M
+ibex/MS
+ibid
+ibidem
+ibis/SM
+IBM/M
+Ibo/M
+Ibrahim/M
+Ibsen/M
+ibuprofen/S
+Icarus/M
+ICBM/S
+ICC
+iceberg/SM
+iceboat/MS
+icebound
+icebox/MS
+icebreaker/SM
+icecap/SM
+ice/GDSC
+Icelander/M
+Icelandic
+Iceland/MRZ
+Ice/M
+iceman/M
+icemen
+icepack
+icepick/S
+ice's
+Ichabod/M
+ichneumon/M
+ichthyologist/MS
+ichthyology/MS
+icicle/SM
+icily
+iciness/SM
+icing/MS
+icky/RT
+iconic
+icon/MS
+iconoclasm/MS
+iconoclastic
+iconoclast/MS
+iconography/MS
+icosahedra
+icosahedral
+icosahedron/M
+ictus/SM
+ICU
+icy/RPT
+I'd
+ID
+Idahoan/S
+Idahoes
+Idaho/MS
+Idalia/M
+Idalina/M
+Idaline/M
+Ida/M
+idealism/MS
+idealistic
+idealistically
+idealist/MS
+idealization/MS
+idealized/U
+idealize/GDRSZ
+idealizer/M
+ideal/MYS
+idealogical
+idea/SM
+ideate/SN
+ideation/M
+Idelle/M
+Idell/M
+idem
+idempotent/S
+identicalness/M
+identical/YP
+identifiability
+identifiable/U
+identifiably
+identification/M
+identified/U
+identifier/M
+identify/XZNSRDG
+identity/SM
+ideogram/MS
+ideographic
+ideograph/M
+ideographs
+ideological/Y
+ideologist/SM
+ideologue/S
+ideology/SM
+ides
+Idette/M
+idiocy/MS
+idiolect/M
+idiomatically
+idiomatic/P
+idiom/MS
+idiopathic
+idiosyncrasy/SM
+idiosyncratic
+idiosyncratically
+idiotic
+idiotically
+idiot/MS
+idleness/MS
+idle/PZTGDSR
+idler/M
+id/MY
+idolater/MS
+idolatress/S
+idolatrous
+idolatry/SM
+idolization/SM
+idolized/U
+idolizer/M
+idolize/ZGDRS
+idol/MS
+ids
+IDs
+idyllic
+idyllically
+idyll/MS
+IE
+IEEE
+Ieyasu/M
+if
+iffiness/S
+iffy/TPR
+Ifni/M
+ifs
+Iggie/M
+Iggy/M
+igloo/MS
+Ignace/M
+Ignacio/M
+Ignacius/M
+Ignatius/M
+Ignazio/M
+Ignaz/M
+igneous
+ignitable
+ignite/ASDG
+igniter/M
+ignition/MS
+ignobleness/M
+ignoble/P
+ignobly
+ignominious/Y
+ignominy/MS
+ignoramus/SM
+ignorance/MS
+ignorantness/M
+ignorant/SPY
+ignorer/M
+ignore/SRDGB
+Igor/M
+iguana/MS
+Iguassu/M
+ii
+iii
+Ijsselmeer/M
+Ike/M
+Ikey/M
+Ikhnaton/M
+ikon's
+IL
+Ilaire/M
+Ila/M
+Ilario/M
+ilea
+Ileana/M
+Ileane/M
+ileitides
+ileitis/M
+Ilene/M
+ileum/M
+ilia
+iliac
+Iliad/MS
+Ilise/M
+ilium/M
+Ilka/M
+ilk/MS
+I'll
+Illa/M
+illegality/MS
+illegal/YS
+illegibility/MS
+illegible
+illegibly
+illegitimacy/SM
+illegitimate/SDGY
+illiberality/SM
+illiberal/Y
+illicitness/MS
+illicit/YP
+illimitableness/M
+illimitable/P
+Illinoisan/MS
+Illinois/M
+illiquid
+illiteracy/MS
+illiterateness/M
+illiterate/PSY
+Ill/M
+illness/MS
+illogicality/SM
+illogicalness/M
+illogical/PY
+illogic/M
+ill/PS
+illume/DG
+illuminate/XSDVNG
+Illuminati
+illuminatingly
+illuminating/U
+illumination/M
+illumine/BGSD
+illusionary
+illusion/ES
+illusionist/MS
+illusion's
+illusiveness/M
+illusive/PY
+illusoriness/M
+illusory/P
+illustrated/U
+illustrate/VGNSDX
+illustration/M
+illustrative/Y
+illustrator/SM
+illustriousness/SM
+illustrious/PY
+illus/V
+illy
+Ilona/M
+Ilsa/M
+Ilse/M
+Ilysa/M
+Ilyse/M
+Ilyssa/M
+Ilyushin/M
+I'm
+image/DSGM
+Imagen/M
+imagery/MS
+imaginableness
+imaginable/U
+imaginably/U
+imaginariness/M
+imaginary/PS
+imagination/MS
+imaginativeness/M
+imaginative/UY
+imagined/U
+imaginer/M
+imagine/RSDJBG
+imagoes
+imago/M
+imam/MS
+imbalance/SDM
+imbecile/YMS
+imbecilic
+imbecility/MS
+imbiber/M
+imbibe/ZRSDG
+imbrication/SM
+Imbrium/M
+imbroglio/MS
+imbruing
+imbue/GDS
+Imelda/M
+IMF
+IMHO
+imitable/I
+imitate/SDVNGX
+imitation/M
+imitativeness/MS
+imitative/YP
+imitator/SM
+immaculateness/SM
+immaculate/YP
+immanence/S
+immanency/MS
+immanent/Y
+Immanuel/M
+immateriality/MS
+immaterialness/MS
+immaterial/PY
+immatureness/M
+immature/SPY
+immaturity/MS
+immeasurableness/M
+immeasurable/P
+immeasurably
+immediacy/MS
+immediateness/SM
+immediate/YP
+immemorial/Y
+immenseness/M
+immense/PRTY
+immensity/MS
+immerse/RSDXNG
+immersible
+immersion/M
+immigrant/SM
+immigrate/NGSDX
+immigration/M
+imminence/SM
+imminentness/M
+imminent/YP
+immobile
+immobility/MS
+immobilization/MS
+immobilize/DSRG
+immoderateness/M
+immoderate/NYP
+immoderation/M
+immodest/Y
+immodesty/SM
+immolate/SDNGX
+immolation/M
+immorality/MS
+immoral/Y
+immortality/SM
+immortalized/U
+immortalize/GDS
+immortal/SY
+immovability/SM
+immovableness/M
+immovable/PS
+immovably
+immune/S
+immunity/SM
+immunization/MS
+immunize/GSD
+immunoassay/M
+immunodeficiency/S
+immunodeficient
+immunologic
+immunological/Y
+immunologist/SM
+immunology/MS
+immure/GSD
+immutability/MS
+immutableness/M
+immutable/P
+immutably
+IMNSHO
+IMO
+Imogene/M
+Imogen/M
+Imojean/M
+impaction/SM
+impactor/SM
+impact/VGMRDS
+impaired/U
+impairer/M
+impair/LGRDS
+impairment/SM
+impala/MS
+impale/GLRSD
+impalement/SM
+impaler/M
+impalpable
+impalpably
+impanel/DGS
+impartation/M
+impart/GDS
+impartiality/SM
+impartial/Y
+impassableness/M
+impassable/P
+impassably
+impasse/SXBMVN
+impassibility/SM
+impassible
+impassibly
+impassion/DG
+impassioned/U
+impassiveness/MS
+impassive/YP
+impassivity/MS
+impasto/SM
+impatience/SM
+impatiens/M
+impatient/Y
+impeachable/U
+impeach/DRSZGLB
+impeacher/M
+impeachment/MS
+impeccability/SM
+impeccable/S
+impeccably
+impecuniousness/MS
+impecunious/PY
+impedance/MS
+impeded/U
+impeder/M
+impede/S
+imped/GRD
+impedimenta
+impediment/SM
+impelled
+impeller/MS
+impelling
+impel/S
+impend/DGS
+impenetrability/MS
+impenetrableness/M
+impenetrable/P
+impenetrably
+impenitence/MS
+impenitent/YS
+imperativeness/M
+imperative/PSY
+imperceivable
+imperceptibility/MS
+imperceptible
+imperceptibly
+imperceptive
+imperf
+imperfectability
+imperfection/MS
+imperfectness/SM
+imperfect/YSVP
+imperialism/MS
+imperialistic
+imperialistically
+imperialist/SM
+imperial/YS
+imperil/GSLD
+imperilment/SM
+imperiousness/MS
+imperious/YP
+imperishableness/M
+imperishable/SP
+imperishably
+impermanence/MS
+impermanent/Y
+impermeability/SM
+impermeableness/M
+impermeable/P
+impermeably
+impermissible
+impersonality/M
+impersonalized
+impersonal/Y
+impersonate/XGNDS
+impersonation/M
+impersonator/SM
+impertinence/SM
+impertinent/YS
+imperturbability/SM
+imperturbable
+imperturbably
+imperviousness/M
+impervious/PY
+impetigo/MS
+impetuosity/MS
+impetuousness/MS
+impetuous/YP
+impetus/MS
+impiety/MS
+impinge/LS
+impingement/MS
+imping/GD
+impiousness/SM
+impious/PY
+impishness/MS
+impish/YP
+implacability/SM
+implacableness/M
+implacable/P
+implacably
+implantation/SM
+implant/BGSDR
+implanter/M
+implausibility/MS
+implausible
+implausibly
+implementability
+implementable/U
+implementation/A
+implementations
+implementation's
+implemented/AU
+implementer/M
+implementing/A
+implementor/MS
+implement/SMRDGZB
+implicant/SM
+implicate/VGSD
+implication/M
+implicative/PY
+implicitness/SM
+implicit/YP
+implied/Y
+implode/GSD
+implore/GSD
+imploring/Y
+implosion/SM
+implosive/S
+imply/GNSDX
+impoliteness/MS
+impolite/YP
+impoliticness/M
+impolitic/PY
+imponderableness/M
+imponderable/PS
+importance/SM
+important/Y
+importation/MS
+importer/M
+importing/A
+import/SZGBRD
+importunateness/M
+importunate/PYGDS
+importuner/M
+importune/SRDZYG
+importunity/SM
+imposable
+impose/ASDG
+imposer/SM
+imposingly
+imposing/U
+imposition/SM
+impossibility/SM
+impossibleness/M
+impossible/PS
+impossibly
+imposter's
+impostor/SM
+impost/SGMD
+imposture/SM
+impotence/MS
+impotency/S
+impotent/SY
+impound/GDS
+impoundments
+impoverisher/M
+impoverish/LGDRS
+impoverishment/SM
+impracticableness/M
+impracticable/P
+impracticably
+impracticality/SM
+impracticalness/M
+impractical/PY
+imprecate/NGXSD
+imprecation/M
+impreciseness/MS
+imprecise/PYXN
+imprecision/M
+impregnability/MS
+impregnableness/M
+impregnable/P
+impregnably
+impregnate/DSXNG
+impregnation/M
+impresario/SM
+impress/DRSGVL
+impressed/U
+impresser/M
+impressibility/MS
+impressible
+impressionability/SM
+impressionableness/M
+impressionable/P
+impression/BMS
+impressionism/SM
+impressionistic
+impressionist/MS
+impressiveness/MS
+impressive/YP
+impressment/M
+imprimatur/SM
+imprinter/M
+imprinting/M
+imprint/SZDRGM
+imprison/GLDS
+imprisonment/MS
+improbability/MS
+improbableness/M
+improbable/P
+improbably
+impromptu/S
+improperness/M
+improper/PY
+impropitious
+impropriety/SM
+improved/U
+improvement/MS
+improver/M
+improve/SRDGBL
+improvidence/SM
+improvident/Y
+improvisational
+improvisation/MS
+improvisatory
+improviser/M
+improvise/RSDZG
+imprudence/SM
+imprudent/Y
+imp/SGMDRY
+impudence/MS
+impudent/Y
+impugner/M
+impugn/SRDZGB
+impulse/XMVGNSD
+impulsion/M
+impulsiveness/MS
+impulsive/YP
+impunity/SM
+impureness/M
+impure/RPTY
+impurity/MS
+imputation/SM
+impute/SDBG
+Imus/M
+IN
+inaction
+inactive
+inadequate/S
+inadvertence/MS
+inadvertent/Y
+inalienability/MS
+inalienably
+inalterableness/M
+inalterable/P
+Ina/M
+inamorata/MS
+inane/SRPYT
+inanimateness/S
+inanimate/P
+inanity/MS
+inappeasable
+inappropriate/P
+inarticulate/P
+in/AS
+inasmuch
+inaugural/S
+inaugurate/XSDNG
+inauguration/M
+inauthenticity
+inbound/G
+inbred/S
+inbreed/JG
+incalculableness/M
+incalculably
+incandescence/SM
+incandescent/YS
+incant
+incantation/SM
+incantatory
+incapable/S
+incapacitate/GNSD
+incapacitation/M
+incarcerate/XGNDS
+incarceration/M
+incarnadine/GDS
+incarnate/AGSDNX
+incarnation/AM
+Inca/SM
+incendiary/S
+incense/MGDS
+incentive/ESM
+incentively
+incept/DGVS
+inception/MS
+inceptive/Y
+inceptor/M
+incessant/Y
+incest/SM
+incestuousness/MS
+incestuous/PY
+inch/GMDS
+inchoate/DSG
+Inchon/M
+inchworm/MS
+incidence/MS
+incidental/YS
+incident/SM
+incinerate/XNGSD
+incineration/M
+incinerator/SM
+incipience/SM
+incipiency/M
+incipient/Y
+incise/SDVGNX
+incision/M
+incisiveness/MS
+incisive/YP
+incisor/MS
+incitement/MS
+inciter/M
+incite/RZL
+incl
+inclination/ESM
+incline/EGSD
+incliner/M
+inclining/M
+include/GDS
+inclusion/MS
+inclusiveness/MS
+inclusive/PY
+Inc/M
+incognito/S
+incoherency/M
+income/M
+incommode/DG
+incommunicado
+incomparable
+incompetent/MS
+incomplete/P
+inconceivability/MS
+inconceivableness/M
+inconceivable/P
+incondensable
+incongruousness/S
+inconsiderableness/M
+inconsiderable/P
+inconsistence
+inconsolableness/M
+inconsolable/P
+inconsolably
+incontestability/SM
+incontestably
+incontrovertibly
+inconvenience/DG
+inconvertibility
+inconvertible
+incorporable
+incorporated/UE
+incorporate/GASDXN
+incorrect/P
+incorrigibility/MS
+incorrigibleness/M
+incorrigible/SP
+incorrigibly
+incorruptible/S
+incorruptibly
+increase/JB
+increaser/M
+increasing/Y
+incredibleness/M
+incredible/P
+incremental/Y
+incrementation
+increment/DMGS
+incriminate/XNGSD
+incrimination/M
+incriminatory
+incrustation/SM
+inc/T
+incubate/XNGVDS
+incubation/M
+incubator/MS
+incubus/MS
+inculcate/SDGNX
+inculcation/M
+inculpate/SDG
+incumbency/MS
+incumbent/S
+incunabula
+incunabulum
+incurable/S
+incurious
+incursion/SM
+ind
+indebtedness/SM
+indebted/P
+indefatigableness/M
+indefatigable/P
+indefatigably
+indefeasible
+indefeasibly
+indefinableness/M
+indefinable/PS
+indefinite/S
+indelible
+indelibly
+indemnification/M
+indemnify/NXSDG
+indemnity/SM
+indentation/SM
+indented/U
+indenter/M
+indention/SM
+indent/R
+indenture/DG
+Independence/M
+indescribableness/M
+indescribable/PS
+indescribably
+indestructibleness/M
+indestructible/P
+indestructibly
+indeterminably
+indeterminacy/MS
+indeterminism
+indexation/S
+indexer/M
+index/MRDZGB
+India/M
+Indiana/M
+Indianan/S
+Indianapolis/M
+Indianian/S
+Indian/SM
+indicant/MS
+indicate/DSNGVX
+indication/M
+indicative/SY
+indicator/MS
+indices's
+indicter/M
+indictment/SM
+indict/SGLBDR
+indifference
+indigence/MS
+indigenousness/M
+indigenous/YP
+indigent/SY
+indigestible/S
+indignant/Y
+indignation/MS
+indigo/SM
+Indira/M
+indirect/PG
+indiscreet/P
+indiscriminateness/M
+indiscriminate/PY
+indispensability/MS
+indispensableness/M
+indispensable/SP
+indispensably
+indisputableness/M
+indisputable/P
+indissolubleness/M
+indissoluble/P
+indissolubly
+indistinguishableness/M
+indistinguishable/P
+indite/SDG
+indium/SM
+individualism/MS
+individualistic
+individualistically
+individualist/MS
+individuality/MS
+individualization/SM
+individualize/DRSGZ
+individualized/U
+individualizer/M
+individualizes/U
+individualizing/Y
+individual/YMS
+individuate/DSXGN
+individuation/M
+indivisibleness/M
+indivisible/SP
+indivisibly
+Ind/M
+Indochina/M
+Indochinese
+indoctrinate/GNXSD
+indoctrination/M
+indoctrinator/SM
+indolence/SM
+indolent/Y
+indomitableness/M
+indomitable/P
+indomitably
+Indonesia/M
+Indonesian/S
+indoor
+Indore/M
+Indra/M
+indubitableness/M
+indubitable/P
+indubitably
+inducement/MS
+inducer/M
+induce/ZGLSRD
+inducible
+inductance/MS
+inductee/SM
+induct/GV
+induction/SM
+inductiveness/M
+inductive/PY
+inductor/MS
+indulge/GDRS
+indulgence/SDGM
+indulgent/Y
+indulger/M
+Indus/M
+industrialism/MS
+industrialist/MS
+industrialization/MS
+industrialized/U
+industrialize/SDG
+industrial/SY
+industriousness/SM
+industrious/YP
+industry/SM
+Indy/SM
+inebriate/NGSDX
+inebriation/M
+inedible
+ineducable
+ineffability/MS
+ineffableness/M
+ineffable/P
+ineffably
+inelastic
+ineligibly
+ineluctable
+ineluctably
+ineptitude/SM
+ineptness/MS
+inept/YP
+inequivalent
+inerrant
+inertial/Y
+inertia/SM
+inertness/MS
+inert/SPY
+Ines
+inescapably
+Inesita/M
+Inessa/M
+inestimably
+inevitability/MS
+inevitableness/M
+inevitable/P
+inevitably
+inexact/P
+inexhaustibleness/M
+inexhaustible/P
+inexhaustibly
+inexorability/M
+inexorableness/M
+inexorable/P
+inexorably
+inexpedience/M
+inexplicableness/M
+inexplicable/P
+inexplicably
+inexplicit
+inexpressibility/M
+inexpressibleness/M
+inexpressible/PS
+inextricably
+Inez/M
+infamous
+infamy/SM
+infancy/M
+infanticide/MS
+infantile
+infant/MS
+infantryman/M
+infantrymen
+infantry/SM
+infarction/SM
+infarct/SM
+infatuate/XNGSD
+infatuation/M
+infauna
+infected/U
+infecter
+infect/ESGDA
+infection/EASM
+infectiousness/MS
+infectious/PY
+infective
+infer/B
+inference/GMSR
+inferential/Y
+inferiority/MS
+inferior/SMY
+infernal/Y
+inferno/MS
+inferred
+inferring
+infertile
+infestation/MS
+infester/M
+infest/GSDR
+infidel/SM
+infighting/M
+infill/MG
+infiltrate/V
+infiltrator/MS
+infinitesimal/SY
+infinite/V
+infinitival
+infinitive/YMS
+infinitude/MS
+infinitum
+infinity/SM
+infirmary/SM
+infirmity/SM
+infix/M
+inflammableness/M
+inflammable/P
+inflammation/MS
+inflammatory
+inflatable/MS
+inflate/NGBDRSX
+inflater/M
+inflationary
+inflation/ESM
+inflect/GVDS
+inflectional/Y
+inflection/SM
+inflexibleness/M
+inflexible/P
+inflexion/SM
+inflict/DRSGV
+inflicter/M
+infliction/SM
+inflow/M
+influenced/U
+influencer/M
+influence/SRDGM
+influent
+influential/SY
+influenza/MS
+infomercial/S
+Informatica/M
+informatics
+informational
+information/ES
+informativeness/S
+informative/UY
+informatory
+informed/U
+informer/M
+info/SM
+infotainment/S
+infra
+infrared/SM
+infrasonic
+infrastructural
+infrastructure/MS
+infrequence/S
+infringe/LR
+infringement/SM
+infringer/M
+infuriate/GNYSD
+infuriating/Y
+infuriation/M
+infuser/M
+infuse/RZ
+infusibleness/M
+infusible/P
+inf/ZT
+Ingaberg/M
+Ingaborg/M
+Inga/M
+Ingamar/M
+Ingar/M
+Ingeberg/M
+Ingeborg/M
+Ingelbert/M
+Ingemar/M
+ingeniousness/MS
+ingenious/YP
+ingénue/S
+ingenuity/SM
+ingenuous/EY
+ingenuousness/MS
+Inger/M
+Inge/RM
+Ingersoll/M
+ingest/DGVS
+ingestible
+ingestion/SM
+Inglebert/M
+inglenook/MS
+Inglewood/M
+Inglis/M
+Ingmar/M
+ingoing
+ingot/SMDG
+ingrained/Y
+Ingra/M
+Ingram/M
+ingrate/M
+ingratiate/DSGNX
+ingratiating/Y
+ingratiation/M
+ingredient/SM
+Ingres/M
+ingression/M
+ingress/MS
+Ingrid/M
+Ingrim/M
+ingrown/P
+inguinal
+Ingunna/M
+inhabitable/U
+inhabitance
+inhabited/U
+inhabiter/M
+inhabit/R
+inhalant/S
+inhalation/SM
+inhalator/SM
+inhale/Z
+inhere/DG
+inherent/Y
+inheritableness/M
+inheritable/P
+inheritance/EMS
+inherit/BDSG
+inherited/E
+inheriting/E
+inheritor/S
+inheritress/MS
+inheritrix/MS
+inherits/E
+inhibit/DVGS
+inhibited/U
+inhibiter's
+inhibition/MS
+inhibitor/MS
+inhibitory
+inhomogeneous
+inhospitableness/M
+inhospitable/P
+inhospitality
+Inigo/M
+inimical/Y
+inimitableness/M
+inimitable/P
+inimitably
+inion
+iniquitousness/M
+iniquitous/PY
+iniquity/MS
+initialer/M
+initial/GSPRDY
+initialization/A
+initializations
+initialization's
+initialize/ASDG
+initialized/U
+initializer/S
+initiates
+initiate/UD
+initiating
+initiation/SM
+initiative/SM
+initiator/MS
+initiatory
+injectable/U
+inject/GVSDB
+injection/MS
+injector/SM
+injunctive
+injured/U
+injurer/M
+injure/SRDZG
+injuriousness/M
+injurious/YP
+inkblot/SM
+inker/M
+inkiness/MS
+inkling/SM
+inkstand/SM
+inkwell/SM
+inky/TP
+ink/ZDRJ
+inland
+inlander/M
+inlay/RG
+inletting
+inly/G
+inmost
+Inna/M
+innards
+innateness/SM
+innate/YP
+innermost/S
+innersole/S
+innerspring
+innervate/GNSDX
+innervation/M
+inner/Y
+inning/M
+Innis/M
+innkeeper/MS
+innocence/SM
+Innocent/M
+innocent/SYRT
+innocuousness/MS
+innocuous/PY
+innovate/SDVNGX
+innovation/M
+innovative/P
+innovator/MS
+innovatory
+Innsbruck/M
+innuendo/MDGS
+innumerability/M
+innumerableness/M
+innumerable/P
+innumerably
+innumerate
+inn/ZGDRSJ
+inoculate/ASDG
+inoculation/MS
+inoculative
+inoffensive/P
+Inonu/M
+inopportuneness/M
+inopportune/P
+inordinateness/M
+inordinate/PY
+inorganic
+inpatient
+In/PM
+input/MRDG
+inquirer/M
+inquire/ZR
+inquiring/Y
+inquiry/MS
+inquisitional
+inquisition/MS
+Inquisition/MS
+inquisitiveness/MS
+inquisitive/YP
+inquisitorial/Y
+inquisitor/MS
+INRI
+inrush/M
+ins
+INS
+insalubrious
+insanitary
+insatiability/MS
+insatiableness/M
+insatiable/P
+insatiably
+inscribe/Z
+inscription/SM
+inscrutability/SM
+inscrutableness/SM
+inscrutable/P
+inscrutably
+inseam
+insecticidal
+insecticide/MS
+insectivore/SM
+insectivorous
+insecureness/M
+insecure/P
+inseminate/NGXSD
+insemination/M
+insensateness/M
+insensate/P
+insensible/P
+insentient
+inseparable/S
+insert/ADSG
+inserter/M
+insertion/AMS
+insetting
+inshore
+insider/M
+inside/Z
+insidiousness/MS
+insidious/YP
+insightful/Y
+insigne's
+insignia/SM
+insignificant
+insinuate/VNGXSD
+insinuating/Y
+insinuation/M
+insinuator/SM
+insipidity/MS
+insipid/Y
+insistence/SM
+insistent/Y
+insisting/Y
+insist/SGD
+insociable
+insofar
+insole/M
+insolence/SM
+insolent/YS
+insolubleness/M
+insoluble/P
+insolubly
+insomniac/S
+insomnia/MS
+insomuch
+insouciance/SM
+insouciant/Y
+inspect/AGSD
+inspection/SM
+inspective
+inspectorate/MS
+inspector/SM
+inspirational/Y
+inspiration/MS
+inspired/U
+inspire/R
+inspirer/M
+inspiring/U
+inspirit/DG
+Inst
+installable
+install/ADRSG
+installation/SM
+installer/MS
+installment/MS
+instance/GD
+instantaneousness/M
+instantaneous/PY
+instantiated/U
+instantiate/SDXNG
+instantiation/M
+instant/SRYMP
+instate/AGSD
+inst/B
+instead
+instigate/XSDVGN
+instigation/M
+instigator/SM
+instillation/SM
+instinctive/Y
+instinctual
+instinct/VMS
+instituter/M
+institutes/M
+institute/ZXVGNSRD
+institutionalism/M
+institutionalist/M
+institutionalization/SM
+institutionalize/GDS
+institutional/Y
+institution/AM
+institutor's
+instr
+instruct/DSVG
+instructed/U
+instructional
+instruction/MS
+instructiveness/M
+instructive/PY
+instructor/MS
+instrumentalist/MS
+instrumentality/SM
+instrumental/SY
+instrumentation/SM
+instrument/GMDS
+insubordinate
+insubstantial
+insufferable
+insufferably
+insularity/MS
+insular/YS
+insulate/DSXNG
+insulated/U
+insulation/M
+insulator/MS
+insulin/MS
+insult/DRSG
+insulter/M
+insulting/Y
+insuperable
+insuperably
+insupportableness/M
+insupportable/P
+insurance/MS
+insurance's/A
+insure/BZGS
+insured/S
+insurer/M
+insurgence/SM
+insurgency/MS
+insurgent/MS
+insurmountably
+insurrectionist/SM
+insurrection/SM
+intactness/M
+intact/P
+intaglio/GMDS
+intake/M
+intangible/M
+integer/MS
+integrability/M
+integrable
+integral/SYM
+integrand/MS
+integrate/AGNXEDS
+integration/EMA
+integrative/E
+integrator/MS
+integrity/SM
+integument/SM
+intellective/Y
+intellect/MVS
+intellectualism/MS
+intellectuality/M
+intellectualize/GSD
+intellectualness/M
+intellectual/YPS
+intelligence/MSR
+intelligencer/M
+intelligentsia/MS
+intelligent/UY
+intelligibilities
+intelligibility/UM
+intelligibleness/MU
+intelligible/PU
+intelligibly/U
+Intel/M
+Intelsat/M
+intemperate/P
+intendant/MS
+intendedness/M
+intended/SYP
+intender/M
+intensification/M
+intensifier/M
+intensify/GXNZRSD
+intensional/Y
+intensiveness/MS
+intensive/PSY
+intentionality/M
+intentional/UY
+intention/SDM
+intentness/SM
+intent/YP
+interaction/MS
+interactive/PY
+interactivity
+interact/VGDS
+interaxial
+interbank
+interbred
+interbreed/GS
+intercalate/GNVDS
+intercalation/M
+intercase
+intercaste
+interceder/M
+intercede/SRDG
+intercensal
+intercept/DGS
+interception/MS
+interceptor/MS
+intercession/MS
+intercessor/SM
+intercessory
+interchangeability/M
+interchangeableness/M
+interchangeable/P
+interchangeably
+interchange/DSRGJ
+interchanger/M
+intercity
+interclass
+intercohort
+intercollegiate
+intercommunicate/SDXNG
+intercommunication/M
+intercom/SM
+interconnectedness/M
+interconnected/P
+interconnect/GDS
+interconnection/SM
+interconnectivity
+intercontinental
+interconversion/M
+intercorrelated
+intercourse/SM
+Interdata/M
+interdenominational
+interdepartmental/Y
+interdependence/MS
+interdependency/SM
+interdependent/Y
+interdiction/MS
+interdict/MDVGS
+interdisciplinary
+interested/UYE
+interest/GEMDS
+interestingly/U
+interestingness/M
+interesting/YP
+inter/ESTL
+interface/SRDGM
+interfacing/M
+interfaith
+interference/MS
+interferer/M
+interfere/SRDG
+interfering/Y
+interferometer/SM
+interferometric
+interferometry/M
+interferon/MS
+interfile/GSD
+intergalactic
+intergenerational
+intergeneration/M
+interglacial
+intergovernmental
+intergroup
+interim/S
+interindex
+interindustry
+interior/SMY
+interj
+interject/GDS
+interjectional
+interjection/MS
+interlace/GSD
+interlard/SGD
+interlayer/G
+interleave/SDG
+interleukin/S
+interlibrary
+interlinear/S
+interline/JGSD
+interlingual
+interlingua/M
+interlining/M
+interlink/GDS
+interlisp/M
+interlobular
+interlocker/M
+interlock/RDSG
+interlocutor/MS
+interlocutory
+interlope/GZSRD
+interloper/M
+interlude/MSDG
+intermarriage/MS
+intermarry/GDS
+intermediary/MS
+intermediateness/M
+intermediate/YMNGSDP
+intermediation/M
+interment/SME
+intermeshed
+intermetrics
+intermezzi
+intermezzo/SM
+interminably
+intermingle/DSG
+intermission/MS
+intermittent/Y
+intermix/GSRD
+intermodule
+intermolecular/Y
+internalization/SM
+internalize/GDS
+internal/SY
+Internationale/M
+internationalism/SM
+internationalist/SM
+internationality/M
+internationalization/MS
+internationalize/DSG
+international/YS
+internecine
+internee/SM
+interne's
+Internet/M
+INTERNET/M
+internetwork
+internist/SM
+intern/L
+internment/SM
+internship/MS
+internuclear
+interocular
+interoffice
+interoperability
+interpenetrates
+interpersonal/Y
+interplanetary
+interplay/GSMD
+interpol
+interpolate/XGNVBDS
+interpolation/M
+Interpol/M
+interpose/GSRD
+interposer/M
+interposition/MS
+interpretable/U
+interpret/AGSD
+interpretation/MSA
+interpretative/Y
+interpreted/U
+interpreter/SM
+interpretive/Y
+interpretor/S
+interprocess
+interprocessor
+interquartile
+interracial
+interred/E
+interregional
+interregnum/MS
+interrelatedness/M
+interrelated/PY
+interrelate/GNDSX
+interrelation/M
+interrelationship/SM
+interring/E
+interrogate/DSXGNV
+interrogation/M
+interrogative/SY
+interrogator/SM
+interrogatory/S
+interrupted/U
+interrupter/M
+interruptibility
+interruptible
+interruption/MS
+interrupt/VGZRDS
+interscholastic
+intersect/GDS
+intersection/MS
+intersession/MS
+interspecies
+intersperse/GNDSX
+interspersion/M
+interstage
+interstate/S
+interstellar
+interstice/SM
+interstitial/SY
+intersurvey
+intertask
+intertwine/GSD
+interurban/S
+interval/MS
+intervene/GSRD
+intervener/M
+intervenor/M
+interventionism/MS
+interventionist/S
+intervention/MS
+interview/AMD
+interviewed/U
+interviewee/SM
+interviewer/SM
+interviewing
+interviews
+intervocalic
+interweave/GS
+interwove
+interwoven
+intestacy/SM
+intestinal/Y
+intestine/SM
+inti
+intifada
+intimacy/SM
+intimal
+intimateness/M
+intimater/M
+intimate/XYNGPDRS
+intimation/M
+intimidate/SDXNG
+intimidating/Y
+intimidation/M
+into
+intolerableness/M
+intolerable/P
+intolerant/PS
+intonate/NX
+intonation/M
+intoxicant/MS
+intoxicate/DSGNX
+intoxicated/Y
+intoxication/M
+intra
+intracellular
+intracity
+intraclass
+intracohort
+intractability/M
+intractableness/M
+intractable/P
+intradepartmental
+intrafamily
+intragenerational
+intraindustry
+intraline
+intrametropolitan
+intramural/Y
+intramuscular/Y
+intranasal
+intransigence/MS
+intransigent/YS
+intransitive/S
+intraoffice
+intraprocess
+intrapulmonary
+intraregional
+intrasectoral
+intrastate
+intratissue
+intrauterine
+intravenous/YS
+intrepidity/SM
+intrepidness/M
+intrepid/YP
+intricacy/SM
+intricateness/M
+intricate/PY
+intrigue/DRSZG
+intriguer/M
+intriguing/Y
+intrinsically
+intrinsic/S
+introduce/ADSG
+introducer/M
+introduction/ASM
+introductory
+introit/SM
+introject/SD
+intro/S
+introspection/MS
+introspectiveness/M
+introspective/YP
+introspect/SGVD
+introversion/SM
+introvert/SMDG
+intruder/M
+intrude/ZGDSR
+intrusion/SM
+intrusiveness/MS
+intrusive/SYP
+intubate/NGDS
+intubation/M
+intuit/GVDSB
+intuitionist/M
+intuitiveness/MS
+intuitive/YP
+int/ZR
+Inuit/MS
+inundate/SXNG
+inundation/M
+inure/GDS
+invader/M
+invade/ZSRDG
+invalid/GSDM
+invalidism/MS
+invariable/P
+invariant/M
+invasion/SM
+invasive/P
+invectiveness/M
+invective/PSMY
+inveigh/DRG
+inveigher/M
+inveighs
+inveigle/DRSZG
+inveigler/M
+invent/ADGS
+invented/U
+invention/ASM
+inventiveness/MS
+inventive/YP
+inventor/MS
+inventory/SDMG
+Inverness/M
+inverse/YV
+inverter/M
+invertible
+invert/ZSGDR
+invest/ADSLG
+investigate/XDSNGV
+investigation/MA
+investigator/MS
+investigatory
+investiture/SM
+investment/ESA
+investment's/A
+investor/SM
+inveteracy/MS
+inveterate/Y
+inviability
+invidiousness/MS
+invidious/YP
+invigilate/GD
+invigilator/SM
+invigorate/ANGSD
+invigorating/Y
+invigoration/AM
+invigorations
+invincibility/SM
+invincibleness/M
+invincible/P
+invincibly
+inviolability/MS
+inviolably
+inviolateness/M
+inviolate/YP
+inviscid
+invisibleness/M
+invisible/S
+invitational/S
+invitation/MS
+invited/U
+invitee/S
+inviter/M
+invite/SRDG
+inviting/Y
+invocable
+invocate
+invoked/A
+invoke/GSRDBZ
+invoker/M
+invokes/A
+involuntariness/S
+involuntary/P
+involute/XYN
+involution/M
+involutorial
+involvedly
+involved/U
+involve/GDSRL
+involvement/SM
+involver/M
+invulnerability/M
+invulnerableness/M
+inwardness/M
+inward/PY
+ioctl
+iodate/MGND
+iodation/M
+iodide/MS
+iodinate/DNG
+iodine/MS
+iodize/GSD
+Iolande/M
+Iolanthe/M
+Io/M
+Iona/M
+Ionesco/M
+Ionian/M
+ionic/S
+Ionic/S
+ionization's
+ionization/SU
+ionized/UC
+ionize/GNSRDJXZ
+ionizer's
+ionizer/US
+ionizes/U
+ionizing/U
+ionosphere/SM
+ionospheric
+ion's/I
+ion/SMU
+Iorgo/MS
+Iormina/M
+Iosep/M
+iota/SM
+IOU
+Iowan/S
+Iowa/SM
+IPA
+ipecac/MS
+Iphigenia/M
+ipso
+Ipswich/M
+IQ
+Iqbal/M
+Iquitos/M
+Ira/M
+Iranian/MS
+Iran/M
+Iraqi/SM
+Iraq/M
+IRA/S
+irascibility/SM
+irascible
+irascibly
+irateness/S
+irate/RPYT
+ireful
+Ireland/M
+ire/MGDS
+Irena/M
+Irene/M
+irenic/S
+iridescence/SM
+iridescent/Y
+irides/M
+iridium/MS
+irids
+Irina/M
+Iris
+iris/GDSM
+Irishman/M
+Irishmen
+Irish/R
+Irishwoman/M
+Irishwomen
+Irita/M
+irk/GDS
+irksomeness/SM
+irksome/YP
+Irkutsk/M
+Ir/M
+Irma/M
+ironclad/S
+iron/DRMPSGJ
+ironer/M
+ironic
+ironicalness/M
+ironical/YP
+ironing/M
+ironmonger/M
+ironmongery/M
+ironside/MS
+ironstone/MS
+ironware/SM
+ironwood/SM
+ironworker/M
+ironwork/MRS
+irony/SM
+Iroquoian/MS
+Iroquois/M
+irradiate/XSDVNG
+irradiation/M
+irrationality/MS
+irrationalness/M
+irrational/YSP
+Irrawaddy/M
+irreclaimable
+irreconcilability/MS
+irreconcilableness/M
+irreconcilable/PS
+irreconcilably
+irrecoverableness/M
+irrecoverable/P
+irrecoverably
+irredeemable/S
+irredeemably
+irredentism/M
+irredentist/M
+irreducibility/M
+irreducible
+irreducibly
+irreflexive
+irrefutable
+irrefutably
+irregardless
+irregularity/SM
+irregular/YS
+irrelevance/SM
+irrelevancy/MS
+irrelevant/Y
+irreligious
+irremediableness/M
+irremediable/P
+irremediably
+irremovable
+irreparableness/M
+irreparable/P
+irreparably
+irreplaceable/P
+irrepressible
+irrepressibly
+irreproachableness/M
+irreproachable/P
+irreproachably
+irreproducibility
+irreproducible
+irresistibility/M
+irresistibleness/M
+irresistible/P
+irresistibly
+irresoluteness/SM
+irresolute/PNXY
+irresolution/M
+irresolvable
+irrespective/Y
+irresponsibility/SM
+irresponsibleness/M
+irresponsible/PS
+irresponsibly
+irretrievable
+irretrievably
+irreverence/MS
+irreverent/Y
+irreversible
+irreversibly
+irrevocableness/M
+irrevocable/P
+irrevocably
+irrigable
+irrigate/DSXNG
+irrigation/M
+irritability/MS
+irritableness/M
+irritable/P
+irritably
+irritant/S
+irritate/DSXNGV
+irritated/Y
+irritating/Y
+irritation/M
+irrupt/GVSD
+irruption/SM
+IRS
+Irtish/M
+Irvine/M
+Irving/M
+Irvin/M
+Irv/MG
+Irwin/M
+Irwinn/M
+is
+i's
+Isaac/SM
+Isaak/M
+Isabelita/M
+Isabella/M
+Isabelle/M
+Isabel/M
+Isacco/M
+Isac/M
+Isadora/M
+Isadore/M
+Isador/M
+Isahella/M
+Isaiah/M
+Isak/M
+Isa/M
+ISBN
+Iscariot/M
+Iseabal/M
+Isfahan/M
+Isherwood/M
+Ishim/M
+Ishmael/M
+Ishtar/M
+Isiahi/M
+Isiah/M
+Isidora/M
+Isidore/M
+Isidor/M
+Isidoro/M
+Isidro/M
+isinglass/MS
+Isis/M
+Islamabad/M
+Islamic/S
+Islam/SM
+islander/M
+island/GZMRDS
+Islandia/M
+isle/MS
+islet/SM
+isl/GD
+Ismael/M
+ism/MCS
+isn't
+ISO
+isobaric
+isobar/MS
+Isobel/M
+isochronal/Y
+isochronous/Y
+isocline/M
+isocyanate/M
+isodine
+isolate/SDXNG
+isolationism/SM
+isolationistic
+isolationist/SM
+isolation/M
+isolator/MS
+Isolde/M
+isomeric
+isomerism/SM
+isomer/SM
+isometrically
+isometric/S
+isometrics/M
+isomorphic
+isomorphically
+isomorphism/MS
+isomorph/M
+isoperimetrical
+isopleth/M
+isopleths
+isosceles
+isostatic
+isothermal/Y
+isotherm/MS
+isotonic
+isotope/SM
+isotopic
+isotropic
+isotropically
+isotropy/M
+Ispahan's
+ispell/M
+Ispell/M
+Israeli/MS
+Israelite/SM
+Israel/MS
+Issac/M
+Issiah/M
+Issie/M
+Issi/M
+issuable
+issuance/MS
+issuant
+issued/A
+issue/GMZDSR
+issuer/AMS
+issues/A
+issuing/A
+Issy/M
+Istanbul/M
+isthmian/S
+isthmus/SM
+Istvan/M
+Isuzu/M
+It
+IT
+Itaipu/M
+ital
+Italianate/GSD
+Italian/MS
+italicization/MS
+italicized/U
+italicize/GSD
+italic/S
+Ital/M
+Italy/M
+Itasca/M
+itch/GMDS
+itchiness/MS
+Itch/M
+itchy/RTP
+ITcorp/M
+ITCorp/M
+it'd
+Itel/M
+itemization/SM
+itemized/U
+itemize/GZDRS
+itemizer/M
+itemizes/A
+item/MDSG
+iterate/ASDXVGN
+iteration/M
+iterative/YA
+iterator/MS
+Ithaca/M
+Ithacan
+itinerant/SY
+itinerary/MS
+it'll
+it/MUS
+Ito/M
+its
+itself
+ITT
+IUD/S
+IV
+Iva/M
+Ivanhoe/M
+Ivan/M
+Ivar/M
+I've
+Ive/MRS
+Iver/M
+Ivette/M
+Ivett/M
+Ivie/M
+iv/M
+Ivonne/M
+Ivor/M
+Ivory/M
+ivory/SM
+IVs
+Ivy/M
+ivy/MDS
+ix
+Izaak/M
+Izabel/M
+Izak/M
+Izanagi/M
+Izanami/M
+Izhevsk/M
+Izmir/M
+Izvestia/M
+Izzy/M
+jabbed
+jabberer/M
+jabber/JRDSZG
+jabbing
+Jabez/M
+Jablonsky/M
+jabot/MS
+jab/SM
+jacaranda/MS
+Jacenta/M
+Jacinda/M
+Jacinta/M
+Jacintha/M
+Jacinthe/M
+jackal/SM
+jackass/SM
+jackboot/DMS
+jackdaw/SM
+Jackelyn/M
+jacketed/U
+jacket/GSMD
+jack/GDRMS
+jackhammer/MDGS
+Jackie/M
+Jacki/M
+jackknife/MGSD
+jackknives
+Jacklin/M
+Jacklyn/M
+Jack/M
+Jackman/M
+jackpot/MS
+Jackqueline/M
+Jackquelin/M
+jackrabbit/DGS
+Jacksonian
+Jackson/SM
+Jacksonville/M
+jackstraw/MS
+Jacky/M
+Jaclin/M
+Jaclyn/M
+Jacobean
+Jacobian/M
+Jacobi/M
+Jacobin/M
+Jacobite/M
+Jacobo/M
+Jacobsen/M
+Jacob/SM
+Jacobs/N
+Jacobson/M
+Jacobus
+Jacoby/M
+jacquard/MS
+Jacquard/SM
+Jacqueline/M
+Jacquelin/M
+Jacquelyn/M
+Jacquelynn/M
+Jacquenetta/M
+Jacquenette/M
+Jacques/M
+Jacquetta/M
+Jacquette/M
+Jacquie/M
+Jacqui/M
+jacuzzi
+Jacuzzi/S
+Jacynth/M
+Jada/M
+jadedness/SM
+jaded/PY
+jadeite/SM
+Jade/M
+jade/MGDS
+Jaeger/M
+Jae/M
+jaggedness/SM
+jagged/RYTP
+Jagger/M
+jaggers
+jagging
+jag/S
+jaguar/MS
+jailbird/MS
+jailbreak/SM
+jailer/M
+jail/GZSMDR
+Jaime/M
+Jaimie/M
+Jaine/M
+Jainism/M
+Jain/M
+Jaipur/M
+Jakarta/M
+Jake/MS
+Jakie/M
+Jakob/M
+jalapeño/S
+jalopy/SM
+jalousie/MS
+Jamaal/M
+Jamaica/M
+Jamaican/S
+Jamal/M
+Jamar/M
+jambalaya/MS
+jamb/DMGS
+jamboree/MS
+Jamel/M
+Jame/MS
+Jameson/M
+Jamestown/M
+Jamesy/M
+Jamey/M
+Jamie/M
+Jamill/M
+Jamil/M
+Jami/M
+Jamima/M
+Jamison/M
+Jammal/M
+jammed/U
+Jammie/M
+jamming/U
+jam/SM
+Janacek/M
+Jana/M
+Janaya/M
+Janaye/M
+Jandy/M
+Janean/M
+Janeczka/M
+Janeen/M
+Janeiro/M
+Janek/M
+Janela/M
+Janella/M
+Janelle/M
+Janell/M
+Janel/M
+Jane/M
+Janene/M
+Janenna/M
+Janessa/M
+Janesville/M
+Janeta/M
+Janet/M
+Janetta/M
+Janette/M
+Janeva/M
+Janey/M
+jangler/M
+jangle/RSDGZ
+jangly
+Jania/M
+Janice/M
+Janie/M
+Janifer/M
+Janina/M
+Janine/M
+Janis/M
+janissary/MS
+Janith/M
+janitorial
+janitor/SM
+Janka/M
+Jan/M
+Janna/M
+Jannelle/M
+Jannel/M
+Jannie/M
+Janos/M
+Janot/M
+Jansenist/M
+Jansen/M
+January/MS
+Janus/M
+Jany/M
+Japanese/SM
+Japan/M
+japanned
+japanner
+japanning
+japan/SM
+jape/DSMG
+Japura/M
+Jaquelin/M
+Jaquelyn/M
+Jaquenetta/M
+Jaquenette/M
+Jaquith/M
+Jarad/M
+jardinière/MS
+Jard/M
+Jareb/M
+Jared/M
+jarful/S
+jargon/SGDM
+Jarib/M
+Jarid/M
+Jarlsberg
+jar/MS
+Jarrad/M
+jarred
+Jarred/M
+Jarret/M
+Jarrett/M
+Jarrid/M
+jarring/SY
+Jarrod/M
+Jarvis/M
+Jase/M
+Jasen/M
+Jasmina/M
+Jasmine/M
+jasmine/MS
+Jasmin/M
+Jason/M
+Jasper/M
+jasper/MS
+Jastrow/M
+Jasun/M
+jato/SM
+jaundice/DSMG
+jaundiced/U
+jauntily
+jauntiness/MS
+jaunt/MDGS
+jaunty/SRTP
+Javanese
+Java/SM
+javelin/SDMG
+Javier/M
+jawbone/SDMG
+jawbreaker/SM
+jawline
+jaw/SMDG
+Jaxartes/M
+Jayapura/M
+jaybird/SM
+Jaycee/SM
+Jaye/M
+Jay/M
+Jaymee/M
+Jayme/M
+Jaymie/M
+Jaynell/M
+Jayne/M
+jay/SM
+Jayson/M
+jaywalker/M
+jaywalk/JSRDZG
+Jazmin/M
+jazziness/M
+jazzmen
+jazz/MGDS
+jazzy/PTR
+JCS
+jct
+JD
+Jdavie/M
+jealousness/M
+jealous/PY
+jealousy/MS
+Jeana/M
+Jeanelle/M
+Jeane/M
+Jeanette/M
+Jeanie/M
+Jeanine/M
+Jean/M
+jean/MS
+Jeanna/M
+Jeanne/M
+Jeannette/M
+Jeannie/M
+Jeannine/M
+Jecho/M
+Jedd/M
+Jeddy/M
+Jedediah/M
+Jedidiah/M
+Jedi/M
+Jed/M
+jeep/GZSMD
+Jeep/S
+jeerer/M
+jeering/Y
+jeer/SJDRMG
+Jeeves/M
+jeez
+Jefferey/M
+Jeffersonian/S
+Jefferson/M
+Jeffery/M
+Jeffie/M
+Jeff/M
+Jeffrey/SM
+Jeffry/M
+Jeffy/M
+jehad's
+Jehanna/M
+Jehoshaphat/M
+Jehovah/M
+Jehu/M
+jejuna
+jejuneness/M
+jejune/PY
+jejunum/M
+Jekyll/M
+Jelene/M
+jell/GSD
+Jello/M
+jello's
+jellybean/SM
+jellyfish/MS
+jellying/M
+jellylike
+jellyroll/S
+jelly/SDMG
+Jemie/M
+Jemimah/M
+Jemima/M
+Jemmie/M
+jemmy/M
+Jemmy/M
+Jena/M
+Jenda/M
+Jenelle/M
+Jenica/M
+Jeniece/M
+Jenifer/M
+Jeniffer/M
+Jenilee/M
+Jeni/M
+Jenine/M
+Jenkins/M
+Jen/M
+Jenna/M
+Jennee/M
+Jenner/M
+jennet/SM
+Jennette/M
+Jennica/M
+Jennie/M
+Jennifer/M
+Jennilee/M
+Jenni/M
+Jennine/M
+Jennings/M
+Jenn/RMJ
+Jenny/M
+jenny/SM
+Jeno/M
+Jensen/M
+Jens/N
+jeopard
+jeopardize/GSD
+jeopardy/MS
+Jephthah/M
+Jerad/M
+Jerald/M
+Jeralee/M
+Jeramey/M
+Jeramie/M
+Jere/M
+Jereme/M
+jeremiad/SM
+Jeremiah/M
+Jeremiahs
+Jeremias/M
+Jeremie/M
+Jeremy/M
+Jericho/M
+Jeri/M
+jerker/M
+jerk/GSDRJ
+jerkily
+jerkiness/SM
+jerkin/SM
+jerkwater/S
+jerky/RSTP
+Jermaine/M
+Jermain/M
+Jermayne/M
+Jeroboam/M
+Jerold/M
+Jerome/M
+Jeromy/M
+Jerrie/M
+Jerrilee/M
+Jerrilyn/M
+Jerri/M
+Jerrine/M
+Jerrod/M
+Jerrold/M
+Jerrome/M
+jerrybuilt
+Jerrylee/M
+jerry/M
+Jerry/M
+jersey/MS
+Jersey/MS
+Jerusalem/M
+Jervis/M
+Jes
+Jessalin/M
+Jessalyn/M
+Jessa/M
+Jessamine/M
+jessamine's
+Jessamyn/M
+Jessee/M
+Jesselyn/M
+Jesse/M
+Jessey/M
+Jessica/M
+Jessie/M
+Jessika/M
+Jessi/M
+jess/M
+Jess/M
+Jessy/M
+jest/DRSGZM
+jester/M
+jesting/Y
+Jesuit/SM
+Jesus
+Jeth/M
+Jethro/M
+jetliner/MS
+jet/MS
+jetport/SM
+jetsam/MS
+jetted/M
+jetting/M
+jettison/DSG
+jetty/RSDGMT
+jeweler/M
+jewelery/S
+jewel/GZMRDS
+Jewelled/M
+Jewelle/M
+jewellery's
+Jewell/MD
+Jewel/M
+jewelry/MS
+Jewess/SM
+Jewishness/MS
+Jewish/P
+Jew/MS
+Jewry/MS
+Jezebel/MS
+j/F
+JFK/M
+jg/M
+jibbed
+jibbing
+jibe/S
+jib/MDSG
+Jidda/M
+jiff/S
+jiffy/SM
+jigged
+jigger/SDMG
+jigging/M
+jiggle/SDG
+jiggly/TR
+jig/MS
+jigsaw/GSDM
+jihad/SM
+Jilin
+Jillana/M
+Jillane/M
+Jillayne/M
+Jilleen/M
+Jillene/M
+Jillian/M
+Jillie/M
+Jilli/M
+Jill/M
+Jilly/M
+jilt/DRGS
+jilter/M
+Jimenez/M
+Jim/M
+Jimmie/M
+jimmy/GSDM
+Jimmy/M
+jimsonweed/S
+Jinan
+jingler/M
+jingle/RSDG
+jingly/TR
+jingoism/SM
+jingoistic
+jingoist/SM
+jingo/M
+Jinnah/M
+jinni's
+jinn/MS
+Jinny/M
+jinrikisha/SM
+jinx/GMDS
+jitney/MS
+jitterbugged
+jitterbugger
+jitterbugging
+jitterbug/SM
+jitter/S
+jittery/TR
+jiujitsu's
+Jivaro/M
+jive/MGDS
+Joachim/M
+Joana/M
+Joane/M
+Joanie/M
+Joan/M
+Joanna/M
+Joanne/SM
+Joann/M
+Joaquin/M
+jobbed
+jobber/MS
+jobbery/M
+jobbing/M
+Jobey/M
+jobholder/SM
+Jobie/M
+Jobi/M
+Jobina/M
+joblessness/MS
+jobless/P
+Jobrel/M
+job/SM
+Job/SM
+Jobye/M
+Joby/M
+Jobyna/M
+Jocasta/M
+Joceline/M
+Jocelin/M
+Jocelyne/M
+Jocelyn/M
+jockey/SGMD
+jock/GDMS
+Jock/M
+Jocko/M
+jockstrap/MS
+jocoseness/MS
+jocose/YP
+jocosity/SM
+jocularity/SM
+jocular/Y
+jocundity/SM
+jocund/Y
+Jodee/M
+jodhpurs
+Jodie/M
+Jodi/M
+Jody/M
+Joeann/M
+Joela/M
+Joelie/M
+Joella/M
+Joelle/M
+Joellen/M
+Joell/MN
+Joelly/M
+Joellyn/M
+Joel/MY
+Joelynn/M
+Joe/M
+Joesph/M
+Joete/M
+joey/M
+Joey/M
+jogged
+jogger/SM
+jogging/S
+joggler/M
+joggle/SRDG
+Jogjakarta/M
+jog/S
+Johan/M
+Johannah/M
+Johanna/M
+Johannes
+Johannesburg/M
+Johann/M
+Johansen/M
+Johanson/M
+Johna/MH
+Johnathan/M
+Johnath/M
+Johnathon/M
+Johnette/M
+Johnie/M
+Johnna/M
+Johnnie/M
+johnnycake/SM
+Johnny/M
+johnny/SM
+Johnsen/M
+john/SM
+John/SM
+Johns/N
+Johnson/M
+Johnston/M
+Johnstown/M
+Johny/M
+Joice/M
+join/ADGFS
+joined/U
+joiner/FSM
+joinery/MS
+jointed/EYP
+jointedness/ME
+joint/EGDYPS
+jointer/M
+jointly/F
+joint's
+jointures
+joist/GMDS
+Jojo/M
+joke/MZDSRG
+joker/M
+jokey
+jokier
+jokiest
+jokily
+joking/Y
+Jolee/M
+Joleen/M
+Jolene/M
+Joletta/M
+Jolie/M
+Joliet's
+Joli/M
+Joline/M
+Jolla/M
+jollification/MS
+jollily
+jolliness/SM
+jollity/MS
+jolly/TSRDGP
+Jolson/M
+jolt/DRGZS
+jolter/M
+Joly/M
+Jolyn/M
+Jolynn/M
+Jo/MY
+Jonah/M
+Jonahs
+Jonas
+Jonathan/M
+Jonathon/M
+Jonell/M
+Jone/MS
+Jones/S
+Jonie/M
+Joni/MS
+Jon/M
+jonquil/MS
+Jonson/M
+Joplin/M
+Jordain/M
+Jordana/M
+Jordanian/S
+Jordan/M
+Jordanna/M
+Jordon/M
+Jorey/M
+Jorgan/M
+Jorge/M
+Jorgensen/M
+Jorgenson/M
+Jorie/M
+Jori/M
+Jorrie/M
+Jorry/M
+Jory/M
+Joscelin/M
+Josee/M
+Josefa/M
+Josefina/M
+Josef/M
+Joseito/M
+Jose/M
+Josepha/M
+Josephina/M
+Josephine/M
+Joseph/M
+Josephs
+Josephson/M
+Josephus/M
+Josey/M
+josh/DSRGZ
+josher/M
+Joshia/M
+Josh/M
+Joshuah/M
+Joshua/M
+Josiah/M
+Josias/M
+Josie/M
+Josi/M
+Josselyn/M
+joss/M
+jostle/SDG
+Josue/M
+Josy/M
+jot/S
+jotted
+jotter/SM
+jotting/SM
+Joule/M
+joule/SM
+jounce/SDG
+jouncy/RT
+Jourdain/M
+Jourdan/M
+journalese/MS
+journal/GSDM
+journalism/SM
+journalistic
+journalist/SM
+journalize/DRSGZ
+journalized/U
+journalizer/M
+journey/DRMZSGJ
+journeyer/M
+journeyman/M
+journeymen
+jouster/M
+joust/ZSMRDG
+Jovanovich/M
+Jove/M
+joviality/SM
+jovial/Y
+Jovian
+jowl/SMD
+jowly/TR
+Joya/M
+Joyan/M
+Joyann/M
+Joycean
+Joycelin/M
+Joyce/M
+Joye/M
+joyfuller
+joyfullest
+joyfulness/SM
+joyful/PY
+joylessness/MS
+joyless/PY
+Joy/M
+joy/MDSG
+Joyner/M
+joyousness/MS
+joyous/YP
+joyridden
+joyride/SRZMGJ
+joyrode
+joystick/S
+Jozef/M
+JP
+Jpn
+Jr/M
+j's
+J's
+Jsandye/M
+Juana/M
+Juanita/M
+Juan/M
+Juarez
+Jubal/M
+jubilant/Y
+jubilate/XNGDS
+jubilation/M
+jubilee/SM
+Judah/M
+Judaic
+Judaical
+Judaism/SM
+Judas/S
+juddered
+juddering
+Judd/M
+Judea/M
+Jude/M
+judge/AGDS
+judger/M
+judge's
+judgeship/SM
+judgmental/Y
+judgment/MS
+judicable
+judicatory/S
+judicature/MS
+judicial/Y
+judiciary/S
+judicious/IYP
+judiciousness/SMI
+Judie/M
+Judi/MH
+Juditha/M
+Judith/M
+Jud/M
+judo/MS
+Judon/M
+Judson/M
+Judye/M
+Judy/M
+jugate/F
+jugful/SM
+jugged
+Juggernaut/M
+juggernaut/SM
+jugging
+juggler/M
+juggle/RSDGZ
+jugglery/MS
+jug/MS
+jugular/S
+juice/GMZDSR
+juicer/M
+juicily
+juiciness/MS
+juicy/TRP
+Juieta/M
+jujitsu/MS
+jujube/SM
+juju/M
+jujutsu's
+jukebox/SM
+juke/GS
+Julee/M
+Jule/MS
+julep/SM
+Julia/M
+Juliana/M
+Juliane/M
+Julian/M
+Julianna/M
+Julianne/M
+Juliann/M
+Julie/M
+julienne/GSD
+Julienne/M
+Julieta/M
+Juliet/M
+Julietta/M
+Juliette/M
+Juli/M
+Julina/M
+Juline/M
+Julio/M
+Julissa/M
+Julita/M
+Julius/M
+Jul/M
+Julys
+July/SM
+jumble/GSD
+jumbo/MS
+jumper/M
+jump/GZDRS
+jumpily
+jumpiness/MS
+jumpsuit/S
+jumpy/PTR
+jun
+junco/MS
+junction/IMESF
+juncture/SFM
+Juneau/M
+June/MS
+Junette/M
+Jungfrau/M
+Jungian
+jungle/SDM
+Jung/M
+Junia/M
+Junie/M
+Junina/M
+juniority/M
+junior/MS
+Junior/S
+juniper/SM
+junkerdom
+Junker/SM
+junketeer/SGDM
+junket/SMDG
+junk/GZDRMS
+junkie/RSMT
+junkyard/MS
+Jun/M
+Juno/M
+junta/MS
+Jupiter/M
+Jurassic
+juridic
+juridical/Y
+juried
+jurisdictional/Y
+jurisdiction/SM
+jurisprudence/SM
+jurisprudent
+jurisprudential/Y
+juristic
+jurist/MS
+juror/MS
+Jurua/M
+jury/IMS
+jurying
+juryman/M
+jurymen
+jurywoman/M
+jurywomen
+justed
+Justen/M
+juster/M
+justest
+Justice/M
+justice/MIS
+justiciable
+justifiability/M
+justifiable/U
+justifiably/U
+justification/M
+justified/UA
+justifier/M
+justify/GDRSXZN
+Justina/M
+Justine/M
+justing
+Justinian/M
+Justin/M
+Justinn/M
+Justino/M
+Justis/M
+justness/MS
+justness's/U
+justs
+just/UPY
+Justus/M
+jute/SM
+Jutish
+Jutland/M
+jut/S
+jutted
+jutting
+Juvenal/M
+juvenile/SM
+juxtapose/SDG
+juxtaposition/SM
+JV
+J/X
+Jyoti/M
+Kaaba/M
+kabob/SM
+kaboom
+Kabuki
+kabuki/SM
+Kabul/M
+Kacey/M
+Kacie/M
+Kacy/M
+Kaddish/M
+kaddish/S
+Kaela/M
+kaffeeklatch
+kaffeeklatsch/S
+Kafkaesque
+Kafka/M
+kaftan's
+Kagoshima/M
+Kahaleel/M
+Kahlil/M
+Kahlua/M
+Kahn/M
+Kaia/M
+Kaifeng/M
+Kaila/M
+Kaile/M
+Kailey/M
+Kai/M
+Kaine/M
+Kain/M
+kaiser/MS
+Kaiser/SM
+Kaitlin/M
+Kaitlyn/M
+Kaitlynn/M
+Kaja/M
+Kajar/M
+Kakalina/M
+Kalahari/M
+Kala/M
+Kalamazoo/M
+Kalashnikov/M
+Kalb/M
+Kaleb/M
+Kaleena/M
+kaleidescope
+kaleidoscope/SM
+kaleidoscopic
+kaleidoscopically
+Kale/M
+kale/MS
+Kalgoorlie/M
+Kalie/M
+Kalila/M
+Kalil/M
+Kali/M
+Kalina/M
+Kalinda/M
+Kalindi/M
+Kalle/M
+Kalli/M
+Kally/M
+Kalmyk
+Kalvin/M
+Kama/M
+Kamchatka/M
+Kamehameha/M
+Kameko/M
+Kamikaze/MS
+kamikaze/SM
+Kamilah/M
+Kamila/M
+Kamillah/M
+Kampala/M
+Kampuchea/M
+Kanchenjunga/M
+Kandace/M
+Kandahar/M
+Kandinsky/M
+Kandy/M
+Kane/M
+kangaroo/SGMD
+Kania/M
+Kankakee/M
+Kan/MS
+Kannada/M
+Kano/M
+Kanpur/M
+Kansan/S
+Kansas
+Kantian
+Kant/M
+Kanya/M
+Kaohsiung/M
+kaolinite/M
+kaolin/MS
+Kaplan/M
+kapok/SM
+Kaposi/M
+kappa/MS
+kaput/M
+Karachi/M
+Karaganda/M
+Karakorum/M
+karakul/MS
+Karalee/M
+Karalynn/M
+Kara/M
+Karamazov/M
+karaoke/S
+karate/MS
+karat/SM
+Karee/M
+Kareem/M
+Karel/M
+Kare/M
+Karena/M
+Karenina/M
+Karen/M
+Karia/M
+Karie/M
+Karil/M
+Karilynn/M
+Kari/M
+Karim/M
+Karina/M
+Karine/M
+Karin/M
+Kariotta/M
+Karisa/M
+Karissa/M
+Karita/M
+Karla/M
+Karlan/M
+Karlee/M
+Karleen/M
+Karlene/M
+Karlen/M
+Karlie/M
+Karlik/M
+Karlis
+Karl/MNX
+Karloff/M
+Karlotta/M
+Karlotte/M
+Karly/M
+Karlyn/M
+karma/SM
+Karmen/M
+karmic
+Karna/M
+Karney/M
+Karola/M
+Karole/M
+Karolina/M
+Karoline/M
+Karol/M
+Karoly/M
+Karon/M
+Karo/YM
+Karp/M
+Karrah/M
+Karrie/M
+Karroo/M
+Karry/M
+kart/MS
+Karylin/M
+Karyl/M
+Kary/M
+Karyn/M
+Kasai/M
+Kasey/M
+Kashmir/SM
+Kaspar/M
+Kasparov/M
+Kasper/M
+Kass
+Kassandra/M
+Kassey/M
+Kassia/M
+Kassie/M
+Kassi/M
+katakana
+Katalin/M
+Kata/M
+Katee/M
+Katelyn/M
+Kate/M
+Katerina/M
+Katerine/M
+Katey/M
+Katha/M
+Katharina/M
+Katharine/M
+Katharyn/M
+Kathe/M
+Katherina/M
+Katherine/M
+Katheryn/M
+Kathiawar/M
+Kathie/M
+Kathi/M
+Kathleen/M
+Kathlin/M
+Kath/M
+Kathmandu
+Kathrine/M
+Kathryne/M
+Kathryn/M
+Kathye/M
+Kathy/M
+Katie/M
+Kati/M
+Katina/M
+Katine/M
+Katinka/M
+Katleen/M
+Katlin/M
+Kat/M
+Katmai/M
+Katmandu's
+Katowice/M
+Katrina/M
+Katrine/M
+Katrinka/M
+Kattie/M
+Katti/M
+Katuscha/M
+Katusha/M
+Katya/M
+katydid/SM
+Katy/M
+Katz/M
+Kauai/M
+Kauffman/M
+Kaufman/M
+Kaunas/M
+Kaunda/M
+Kawabata/M
+Kawasaki/M
+kayak/SGDM
+Kaycee/M
+Kaye/M
+Kayla/M
+Kaylee/M
+Kayle/M
+Kayley/M
+Kaylil/M
+Kaylyn/M
+Kay/M
+Kayne/M
+kayo/DMSG
+Kazakh/M
+Kazakhstan
+Kazan/M
+Kazantzakis/M
+kazoo/SM
+Kb
+KB
+KC
+kcal/M
+kc/M
+KDE/M
+Keane/M
+Kean/M
+Kearney/M
+Keary/M
+Keaton/M
+Keats/M
+kebab/SM
+Keck/M
+Keefe/MR
+Keefer/M
+Keegan/M
+Keelby/M
+Keeley/M
+keel/GSMDR
+keelhaul/SGD
+Keelia/M
+Keely/M
+Keenan/M
+Keene/M
+keener/M
+keen/GTSPYDR
+keening/M
+Keen/M
+keenness/MS
+keeper/M
+keep/GZJSR
+keeping/M
+keepsake/SM
+Keewatin/M
+kegged
+kegging
+keg/MS
+Keillor/M
+Keir/M
+Keisha/M
+Keith/M
+Kelbee/M
+Kelby/M
+Kelcey/M
+Kelcie/M
+Kelci/M
+Kelcy/M
+Kele/M
+Kelila/M
+Kellby/M
+Kellen/M
+Keller/M
+Kelley/M
+Kellia/M
+Kellie/M
+Kelli/M
+Kellina/M
+Kellogg/M
+Kellsie/M
+Kellyann/M
+Kelly/M
+kelp/GZMDS
+Kelsey/M
+Kelsi/M
+Kelsy/M
+Kelt's
+Kelvin/M
+kelvin/MS
+Kelwin/M
+Kemerovo/M
+Kempis/M
+Kemp/M
+Kendall/M
+Kendal/M
+Kendell/M
+Kendra/M
+Kendre/M
+Kendrick/MS
+Kenilworth/M
+Ken/M
+Kenmore/M
+ken/MS
+Kenna/M
+Kennan/M
+Kennecott/M
+kenned
+Kennedy/M
+kennel/GSMD
+Kenneth/M
+Kennett/M
+Kennie/M
+kenning
+Kennith/M
+Kenn/M
+Kenny/M
+keno/M
+Kenon/M
+Kenosha/M
+Kensington/M
+Kent/M
+Kenton/M
+Kentuckian/S
+Kentucky/M
+Kenya/M
+Kenyan/S
+Kenyatta/M
+Kenyon/M
+Keogh/M
+Keokuk/M
+kepi/SM
+Kepler/M
+kept
+keratin/MS
+kerbside
+Kerby/M
+kerchief/MDSG
+Kerensky/M
+Kerianne/M
+Keriann/M
+Keri/M
+Kerk/M
+Ker/M
+Kermie/M
+Kermit/M
+Kermy/M
+kerned
+kernel/GSMD
+kerning
+Kern/M
+kerosene/MS
+Kerouac/M
+Kerrie/M
+Kerrill/M
+Kerri/M
+Kerrin/M
+Kerr/M
+Kerry/M
+Kerstin/M
+Kerwin/M
+Kerwinn/M
+Kesley/M
+Keslie/M
+Kessiah/M
+Kessia/M
+Kessler/M
+kestrel/SM
+ketch/MS
+ketchup/SM
+ketone/M
+ketosis/M
+Kettering/M
+Kettie/M
+Ketti/M
+kettledrum/SM
+kettleful
+kettle/SM
+Ketty/M
+Kevan/M
+Keven/M
+Kevina/M
+Kevin/M
+Kevlar
+Kev/MN
+Kevon/M
+Kevorkian/M
+Kevyn/M
+Kewaskum/M
+Kewaunee/M
+Kewpie/M
+keyboardist/S
+keyboard/RDMZGS
+keyclick/SM
+keyhole/MS
+Key/M
+Keynesian/M
+Keynes/M
+keynoter/M
+keynote/SRDZMG
+keypad/MS
+keypuncher/M
+keypunch/ZGRSD
+keyring
+key/SGMD
+keystone/SM
+keystroke/SDMG
+keyword/SM
+k/FGEIS
+kg
+K/G
+KGB
+Khabarovsk/M
+Khachaturian/M
+khaki/SM
+Khalid/M
+Khalil/M
+Khan/M
+khan/MS
+Kharkov/M
+Khartoum/M
+Khayyam/M
+Khmer/M
+Khoisan/M
+Khomeini/M
+Khorana/M
+Khrushchev/SM
+Khufu/M
+Khulna/M
+Khwarizmi/M
+Khyber/M
+kHz/M
+KIA
+Kiah/M
+Kial/M
+kibble/GMSD
+kibbutzim
+kibbutz/M
+kibitzer/M
+kibitz/GRSDZ
+kibosh/GMSD
+Kickapoo/M
+kickback/SM
+kickball/MS
+kicker/M
+kick/GZDRS
+kickoff/SM
+kickstand/MS
+kicky/RT
+kidded
+kidder/SM
+kiddie/SD
+kidding/YM
+kiddish
+Kidd/M
+kiddo/SM
+kiddying
+kiddy's
+kidless
+kid/MS
+kidnaper's
+kidnaping's
+kidnap/MSJ
+kidnapped
+kidnapper/SM
+kidnapping/S
+kidney/MS
+kidskin/SM
+Kieffer/M
+kielbasa/SM
+kielbasi
+Kiele/M
+Kiel/M
+Kienan/M
+kier/I
+Kierkegaard/M
+Kiersten/M
+Kieth/M
+Kiev/M
+Kigali/M
+Kikelia/M
+Kikuyu/M
+Kilauea/M
+Kile/M
+Kiley/M
+Kilian/M
+Kilimanjaro/M
+kill/BJGZSDR
+killdeer/SM
+Killebrew/M
+killer/M
+Killian/M
+Killie/M
+killing/Y
+killjoy/S
+Killy/M
+kiln/GDSM
+kilobaud/M
+kilobit/S
+kilobuck
+kilobyte/S
+kilocycle/MS
+kilogauss/M
+kilogram/MS
+kilohertz/M
+kilohm/M
+kilojoule/MS
+kiloliter/MS
+kilometer/SM
+kilo/SM
+kiloton/SM
+kilovolt/SM
+kilowatt/SM
+kiloword
+kilter/M
+kilt/MDRGZS
+Ki/M
+Kimball/M
+Kimbell/M
+Kimberlee/M
+Kimberley/M
+Kimberli/M
+Kimberly/M
+Kimberlyn/M
+Kimble/M
+Kimbra/M
+Kim/M
+Kimmie/M
+Kimmi/M
+Kimmy/M
+kimono/MS
+Kincaid/M
+kinda
+kindergarten/MS
+kindergärtner/SM
+kinder/U
+kindheartedness/MS
+kindhearted/YP
+kindle/AGRSD
+kindler/M
+kindliness/SM
+kindliness's/U
+kindling/M
+kindly/TUPR
+kindness's
+kindness/US
+kind/PSYRT
+kindred/S
+kinematic/S
+kinematics/M
+kinesics/M
+kine/SM
+kinesthesis
+kinesthetically
+kinesthetic/S
+kinetically
+kinetic/S
+kinetics/M
+kinfolk/S
+kingbird/M
+kingdom/SM
+kingfisher/MS
+kinglet/M
+kingliness/M
+kingly/TPR
+King/M
+kingpin/MS
+Kingsbury/M
+king/SGYDM
+kingship/SM
+Kingsley/M
+Kingsly/M
+Kingston/M
+Kingstown/M
+Kingwood/M
+kink/GSDM
+kinkily
+kinkiness/SM
+kinky/PRT
+Kin/M
+kin/MS
+Kinna/M
+Kinney/M
+Kinnickinnic/M
+Kinnie/M
+Kinny/M
+Kinsey/M
+kinsfolk/S
+Kinshasa/M
+Kinshasha/M
+kinship/SM
+Kinsley/M
+kinsman/M
+kinsmen/M
+kinswoman/M
+kinswomen
+kiosk/SM
+Kiowa/SM
+Kipling/M
+Kip/M
+kip/MS
+Kippar/M
+kipped
+kipper/DMSG
+Kipper/M
+Kippie/M
+kipping
+Kipp/MR
+Kippy/M
+Kira/M
+Kirbee/M
+Kirbie/M
+Kirby/M
+Kirchhoff/M
+Kirchner/M
+Kirchoff/M
+Kirghistan/M
+Kirghizia/M
+Kirghiz/M
+Kiribati
+Kiri/M
+Kirinyaga/M
+kirk/GDMS
+Kirkland/M
+Kirk/M
+Kirkpatrick/M
+Kirkwood/M
+Kirov/M
+kirsch/S
+Kirsteni/M
+Kirsten/M
+Kirsti/M
+Kirstin/M
+Kirstyn/M
+Kisangani/M
+Kishinev/M
+kismet/SM
+kiss/DSRBJGZ
+Kissee/M
+kisser/M
+Kissiah/M
+Kissie/M
+Kissinger/M
+Kitakyushu/M
+kitbag's
+kitchener/M
+Kitchener/M
+kitchenette/SM
+kitchen/GDRMS
+kitchenware/SM
+kiter/M
+kite/SM
+kith/MDG
+kiths
+Kit/M
+kit/MDRGS
+kitsch/MS
+kitschy
+kitted
+kittenishness/M
+kittenish/YP
+kitten/SGDM
+Kittie/M
+Kitti/M
+kitting
+kittiwakes
+Kitty/M
+kitty/SM
+Kiwanis/M
+kiwifruit/S
+kiwi/SM
+Kizzee/M
+Kizzie/M
+KKK
+kl
+Klan/M
+Klansman/M
+Klara/M
+Klarika/M
+Klarrisa/M
+Klaus/M
+klaxon/M
+Klee/M
+Kleenex/SM
+Klein/M
+Kleinrock/M
+Klemens/M
+Klement/M
+Kleon/M
+kleptomaniac/SM
+kleptomania/MS
+Kliment/M
+Kline/M
+Klingon/M
+Klondike/SDMG
+kludger/M
+kludge/RSDGMZ
+kludgey
+klutziness/S
+klutz/SM
+klutzy/TRP
+Klux/M
+klystron/MS
+km
+kn
+knacker/M
+knack/SGZRDM
+knackwurst/MS
+Knapp/M
+knapsack/MS
+Knauer/M
+knavery/MS
+knave/SM
+knavish/Y
+kneader/M
+knead/GZRDS
+kneecap/MS
+kneecapped
+kneecapping
+knee/DSM
+kneeing
+kneeler/M
+kneel/GRS
+kneepad/SM
+knell/SMDG
+knelt
+Knesset/M
+knew
+Kngwarreye/M
+Knickerbocker/MS
+knickerbocker/S
+knickknack/SM
+knick/ZR
+Knievel/M
+knife/DSGM
+knighthood/MS
+knightliness/MS
+knightly/P
+Knight/M
+knight/MDYSG
+knish/MS
+knit/AU
+knits
+knitted
+knitter/MS
+knitting/SM
+knitwear/M
+knives/M
+knobbly
+knobby/RT
+Knobeloch/M
+knob/MS
+knockabout/M
+knockdown/S
+knocker/M
+knock/GZSJRD
+knockoff/S
+knockout/MS
+knockwurst's
+knoll/MDSG
+Knopf/M
+Knossos/M
+knothole/SM
+knot/MS
+knotted
+knottiness/M
+knotting/M
+knotty/TPR
+knowable/U
+knower/M
+know/GRBSJ
+knowhow
+knowingly/U
+knowing/RYT
+knowings/U
+knowledgeableness/M
+knowledgeable/P
+knowledgeably
+knowledge/SM
+Knowles
+known/SU
+Knox/M
+Knoxville/M
+knuckleball/R
+knuckle/DSMG
+knuckleduster
+knucklehead/MS
+Knudsen/M
+Knudson/M
+knurl/DSG
+Knuth/M
+Knutsen/M
+Knutson/M
+KO
+koala/SM
+Kobayashi/M
+Kobe/M
+Kochab/M
+Koch/M
+Kodachrome/M
+Kodak/SM
+Kodaly/M
+Kodiak/M
+Koenig/M
+Koenigsberg/M
+Koenraad/M
+Koestler/M
+Kohinoor/M
+Kohler/M
+Kohl/MR
+kohlrabies
+kohlrabi/M
+kola/SM
+Kolyma/M
+Kommunizma/M
+Kong/M
+Kongo/M
+Konrad/M
+Konstance/M
+Konstantine/M
+Konstantin/M
+Konstanze/M
+kookaburra/SM
+kook/GDMS
+kookiness/S
+kooky/PRT
+Koo/M
+Koontz/M
+kopeck/MS
+Koppers/M
+Koralle/M
+Koral/M
+Kora/M
+Koranic
+Koran/SM
+Kordula/M
+Korea/M
+Korean/S
+Korella/M
+Kore/M
+Koren/M
+Koressa/M
+Korey/M
+Korie/M
+Kori/M
+Kornberg/M
+Korney/M
+Korrie/M
+Korry/M
+Kort/M
+Kory/M
+Korzybski/M
+Kosciusko/M
+kosher/DGS
+Kossuth/M
+Kosygin/M
+Kovacs/M
+Kowalewski/M
+Kowalski/M
+Kowloon/M
+kowtow/SGD
+KP
+kph
+kraal/SMDG
+Kraemer/M
+kraft/M
+Kraft/M
+Krakatau's
+Krakatoa/M
+Krakow/M
+Kramer/M
+Krasnodar/M
+Krasnoyarsk/M
+Krause/M
+kraut/S!
+Krebs/M
+Kremlin/M
+Kremlinologist/MS
+Kremlinology/MS
+Kresge/M
+Krieger/M
+kriegspiel/M
+krill/MS
+Kringle/M
+Krisha/M
+Krishnah/M
+Krishna/M
+Kris/M
+Krispin/M
+Krissie/M
+Krissy/M
+Kristal/M
+Krista/M
+Kristan/M
+Kristel/M
+Kriste/M
+Kristen/M
+Kristian/M
+Kristie/M
+Kristien/M
+Kristi/MN
+Kristina/M
+Kristine/M
+Kristin/M
+Kristofer/M
+Kristoffer/M
+Kristofor/M
+Kristoforo/M
+Kristo/MS
+Kristopher/M
+Kristy/M
+Kristyn/M
+Kr/M
+Kroc/M
+Kroger/M
+króna/M
+Kronecker/M
+krone/RM
+kronor
+krónur
+Kropotkin/M
+Krueger/M
+Kruger/M
+Krugerrand/S
+Krupp/M
+Kruse/M
+krypton/SM
+Krystalle/M
+Krystal/M
+Krysta/M
+Krystle/M
+Krystyna/M
+ks
+K's
+KS
+k's/IE
+kt
+Kublai/M
+Kubrick/M
+kuchen/MS
+kudos/M
+kudzu/SM
+Kuenning/M
+Kuhn/M
+Kuibyshev/M
+Ku/M
+Kumar/M
+kumquat/SM
+Kunming/M
+Kuomintang/M
+Kurdish/M
+Kurdistan/SM
+Kurd/SM
+Kurosawa/M
+Kurtis/M
+Kurt/M
+kurtosis/M
+Kusch/M
+Kuwaiti/SM
+Kuwait/M
+Kuznetsk/M
+Kuznets/M
+kvetch/DSG
+kw
+kW
+Kwakiutl/M
+Kwangchow's
+Kwangju/M
+Kwanzaa/S
+kWh
+KY
+Kyla/M
+kyle/M
+Kyle/M
+Kylen/M
+Kylie/M
+Kylila/M
+Kylynn/M
+Ky/MH
+Kym/M
+Kynthia/M
+Kyoto/M
+Kyrgyzstan
+Kyrstin/M
+Kyushu/M
+L
+LA
+Laban/M
+labeled/U
+labeler/M
+label/GAZRDS
+labellings/A
+label's
+labial/YS
+labia/M
+labile
+labiodental
+labium/M
+laboratory/MS
+laboredness/M
+labored/PMY
+labored's/U
+laborer/M
+laboring/MY
+laborings/U
+laboriousness/MS
+laborious/PY
+labor/RDMJSZG
+laborsaving
+Labradorean/S
+Labrador/SM
+lab/SM
+Lab/SM
+laburnum/SM
+labyrinthine
+labyrinth/M
+labyrinths
+laced/U
+Lacee/M
+lace/MS
+lacerate/NGVXDS
+laceration/M
+lacer/M
+laces/U
+lacewing/MS
+Lacey/M
+Lachesis/M
+lachrymal/S
+lachrymose
+Lacie/M
+lacing/M
+lackadaisic
+lackadaisical/Y
+Lackawanna/M
+lacker/M
+lackey/SMDG
+lack/GRDMS
+lackluster/S
+Lac/M
+laconic
+laconically
+lacquerer/M
+lacquer/ZGDRMS
+lacrosse/MS
+lac/SGMDR
+lactate/MNGSDX
+lactational/Y
+lactation/M
+lacteal
+lactic
+lactose/MS
+lacunae
+lacuna/M
+Lacy/M
+lacy/RT
+ladder/GDMS
+laddie/MS
+laded/U
+ladened
+ladening
+laden/U
+lade/S
+lading/M
+ladle/SDGM
+Ladoga/M
+Ladonna/M
+lad/XGSJMND
+ladybird/SM
+ladybug/MS
+ladyfinger/SM
+ladylike/U
+ladylove/MS
+Ladyship/MS
+ladyship/SM
+lady/SM
+Lady/SM
+Laetitia/M
+laetrile/S
+Lafayette/M
+Lafitte/M
+lager/DMG
+laggard/MYSP
+laggardness/M
+lagged
+lagging/MS
+lagniappe/SM
+lagoon/MS
+Lagos/M
+Lagrange/M
+Lagrangian/M
+Laguerre/M
+Laguna/M
+lag/ZSR
+Lahore/M
+laid/AI
+Laidlaw/M
+lain
+Laina/M
+Lainey/M
+Laird/M
+laird/MS
+lair/GDMS
+laissez
+laity/SM
+Laius/M
+lake/DSRMG
+Lakehurst/M
+Lakeisha/M
+laker/M
+lakeside
+Lakewood/M
+Lakisha/M
+Lakshmi/M
+lallygagged
+lallygagging
+lallygag/S
+Lalo/M
+La/M
+Lamaism/SM
+Lamarck/M
+Lamar/M
+lamasery/MS
+lama/SM
+Lamaze
+lambada/S
+lambaste/SDG
+lambda/SM
+lambency/MS
+lambent/Y
+Lambert/M
+lambkin/MS
+Lamb/M
+Lamborghini/M
+lambskin/MS
+lamb/SRDMG
+lambswool
+lamebrain/SM
+lamed/M
+lameness/MS
+lamentableness/M
+lamentable/P
+lamentably
+lamentation/SM
+lament/DGSB
+lamented/U
+lame/SPY
+la/MHLG
+laminae
+lamina/M
+laminar
+laminate/XNGSD
+lamination/M
+lam/MDRSTG
+lammed
+lammer
+lamming
+Lammond/M
+Lamond/M
+Lamont/M
+L'Amour
+lampblack/SM
+lamplighter/M
+lamplight/ZRMS
+lampooner/M
+lampoon/RDMGS
+Lamport/M
+lamppost/SM
+lamprey/MS
+lamp/SGMRD
+lampshade/MS
+LAN
+Lanae/M
+Lanai/M
+lanai/SM
+Lana/M
+Lancashire/M
+Lancaster/M
+Lancelot/M
+Lance/M
+lancer/M
+lance/SRDGMZ
+lancet/MS
+landau/MS
+lander/I
+landfall/SM
+landfill/DSG
+landforms
+landholder/M
+landhold/JGZR
+landing/M
+Landis/M
+landlady/MS
+landless
+landlines
+landlocked
+landlord/MS
+landlubber/SM
+Land/M
+landmark/GSMD
+landmass/MS
+Landon/M
+landowner/MS
+landownership/M
+landowning/SM
+Landry/M
+Landsat
+landscape/GMZSRD
+landscaper/M
+lands/I
+landslide/MS
+landslid/G
+landslip
+landsman/M
+landsmen
+land/SMRDJGZ
+Landsteiner/M
+landward/S
+Landwehr/M
+Lane/M
+lane/SM
+Lanette/M
+Laney/M
+Langeland/M
+Lange/M
+Langerhans/M
+Langford/M
+Langland/M
+Langley/M
+Lang/M
+Langmuir/M
+Langsdon/M
+Langston/M
+language/MS
+languidness/MS
+languid/PY
+languisher/M
+languishing/Y
+languish/SRDG
+languorous/Y
+languor/SM
+Lanie/M
+Lani/M
+Lanita/M
+lankiness/SM
+lankness/MS
+lank/PTYR
+lanky/PRT
+Lanna/M
+Lannie/M
+Lanni/M
+Lanny/M
+lanolin/MS
+Lansing/M
+lantern/GSDM
+lanthanide/M
+lanthanum/MS
+lanyard/MS
+Lanzhou
+Laocoon/M
+Lao/SM
+Laotian/MS
+lapboard/MS
+lapdog/S
+lapel/MS
+lapidary/MS
+lapin/MS
+Laplace/M
+Lapland/ZMR
+lapped
+lappet/MS
+lapping
+Lapp/SM
+lapsed/A
+lapse/KSDMG
+lapser/MA
+lapses/A
+lapsing/A
+lap/SM
+laps/SRDG
+laptop/SM
+lapwing/MS
+Laraine/M
+Lara/M
+Laramie/M
+larboard/MS
+larcenist/S
+larcenous
+larceny/MS
+larch/MS
+larder/M
+lard/MRDSGZ
+Lardner/M
+lardy/RT
+Laredo/M
+largehearted
+largemouth
+largeness/SM
+large/SRTYP
+largess/SM
+largish
+largo/S
+lariat/MDGS
+Lari/M
+Larina/M
+Larine/M
+Larisa/M
+Larissa/M
+larker/M
+lark/GRDMS
+Lark/M
+larkspur/MS
+Larousse/M
+Larry/M
+Larsen/M
+Lars/NM
+Larson/M
+larvae
+larval
+larva/M
+laryngeal/YS
+larynges
+laryngitides
+laryngitis/M
+larynx/M
+Laryssa/M
+lasagna/S
+lasagne's
+Lascaux/M
+lasciviousness/MS
+lascivious/YP
+lase
+laser/M
+lashed/U
+lasher/M
+lashing/M
+lash/JGMSRD
+Lassa/M
+Lassen/M
+Lassie/M
+lassie/SM
+lassitude/MS
+lassoer/M
+lasso/GRDMS
+las/SRZG
+lass/SM
+laster/M
+lastingness/M
+lasting/PY
+last/JGSYRD
+Laszlo/M
+Latasha/M
+Latashia/M
+latching/M
+latchkey/SM
+latch's
+latch/UGSD
+latecomer/SM
+lated/A
+late/KA
+lately
+latency/MS
+lateness/MS
+latent/YS
+later/A
+lateral/GDYS
+lateralization
+Lateran/M
+latest/S
+LaTeX/M
+latex/MS
+lathe/M
+latherer/M
+lather/RDMG
+lathery
+lathing/M
+lath/MSRDGZ
+Lathrop/M
+laths
+Latia/M
+latices/M
+Latina/SM
+Latinate
+Latino/S
+Latin/RMS
+latish
+Latisha/M
+latitude/SM
+latitudinal/Y
+latitudinarian/S
+latitudinary
+Lat/M
+Latonya/M
+Latoya/M
+Latrena/M
+Latrina/M
+latrine/MS
+Latrobe/M
+lat/SDRT
+latter/YM
+latte/SR
+lattice/SDMG
+latticework/MS
+latticing/M
+Lattimer/M
+Latvia/M
+Latvian/S
+laudably
+laudanum/MS
+laudatory
+Lauderdale/M
+lauder/M
+Lauder/M
+Laud/MR
+laud/RDSBG
+lauds/M
+Laue/M
+laughableness/M
+laughable/P
+laughably
+laugh/BRDZGJ
+laugher/M
+laughing/MY
+laughingstock/SM
+laughs
+laughter/MS
+Laughton/M
+Launce/M
+launch/AGSD
+launcher/MS
+launching/S
+launchpad/S
+laundered/U
+launderer/M
+launderette/MS
+launder/SDRZJG
+laundress/MS
+laundrette/S
+laundromat/S
+Laundromat/SM
+laundryman/M
+laundrymen
+laundry/MS
+laundrywoman/M
+laundrywomen
+Lauraine/M
+Lauralee/M
+Laural/M
+laura/M
+Laura/M
+Laurasia/M
+laureate/DSNG
+laureateship/SM
+Lauree/M
+Laureen/M
+Laurella/M
+Laurel/M
+laurel/SGMD
+Laure/M
+Laurena/M
+Laurence/M
+Laurene/M
+Lauren/SM
+Laurentian
+Laurent/M
+Lauretta/M
+Laurette/M
+Laurianne/M
+Laurice/M
+Laurie/M
+Lauri/M
+Lauritz/M
+Lauryn/M
+Lausanne/M
+lavage/MS
+lavaliere/MS
+Laval/M
+lava/SM
+lavatory/MS
+lave/GDS
+Lavena/M
+lavender/MDSG
+Laverna/M
+Laverne/M
+Lavern/M
+Lavina/M
+Lavinia/M
+Lavinie/M
+lavishness/MS
+lavish/SRDYPTG
+Lavoisier/M
+Lavonne/M
+Lawanda/M
+lawbreaker/SM
+lawbreaking/MS
+Lawford/M
+lawfulness/SMU
+lawful/PUY
+lawgiver/MS
+lawgiving/M
+lawlessness/MS
+lawless/PY
+Law/M
+lawmaker/MS
+lawmaking/SM
+lawman/M
+lawmen
+lawnmower/S
+lawn/SM
+Lawrence/M
+Lawrenceville/M
+lawrencium/SM
+Lawry/M
+law/SMDG
+Lawson/M
+lawsuit/MS
+Lawton/M
+lawyer/DYMGS
+laxativeness/M
+laxative/PSYM
+laxer/A
+laxes/A
+laxity/SM
+laxness/SM
+lax/PTSRY
+layabout/MS
+Layamon/M
+layaway/S
+lay/CZGSR
+layered/C
+layer/GJDM
+layering/M
+layer's/IC
+layette/SM
+Layla/M
+Lay/M
+layman/M
+laymen
+Layne/M
+Layney/M
+layoff/MS
+layout/SM
+layover/SM
+laypeople
+layperson/S
+lays/AI
+Layton/M
+layup/MS
+laywoman/M
+laywomen
+Lazare/M
+Lazar/M
+Lazaro/M
+Lazarus/M
+laze/DSG
+lazily
+laziness/MS
+lazuli/M
+lazybones/M
+lazy/PTSRDG
+lb
+LBJ/M
+lbs
+LC
+LCD
+LCM
+LDC
+leachate
+Leach/M
+leach/SDG
+Leadbelly/M
+leaded/U
+leadenness/M
+leaden/PGDY
+leaderless
+leader/M
+leadership/MS
+lead/SGZXJRDN
+leadsman/M
+leadsmen
+leafage/MS
+leaf/GSDM
+leafhopper/M
+leafiness/M
+leafless
+leaflet/SDMG
+leafstalk/SM
+leafy/PTR
+leaguer/M
+league/RSDMZG
+Leah/M
+leakage/SM
+leaker/M
+Leakey/M
+leak/GSRDM
+leakiness/MS
+leaky/PRT
+Lea/M
+lea/MS
+Leander/M
+Leandra/M
+leaner/M
+leaning/M
+Lean/M
+Leanna/M
+Leanne/M
+leanness/MS
+Leann/M
+Leanora/M
+Leanor/M
+lean/YRDGTJSP
+leaper/M
+leapfrogged
+leapfrogging
+leapfrog/SM
+leap/RDGZS
+Lear/M
+learnedly
+learnedness/M
+learned/UA
+learner/M
+learning/M
+learns/UA
+learn/SZGJRD
+Leary/M
+lease/ARSDG
+leaseback/MS
+leaseholder/M
+leasehold/SRMZ
+leaser/MA
+lease's
+leash's
+leash/UGSD
+leasing/M
+leas/SRDGZ
+least/S
+leastwise
+leatherette/S
+leather/MDSG
+leathern
+leatherneck/SM
+leathery
+leaven/DMJGS
+leavened/U
+leavening/M
+Leavenworth/M
+leaver/M
+leaves/M
+leave/SRDJGZ
+leaving/M
+Lebanese
+Lebanon/M
+Lebbie/M
+lebensraum
+Lebesgue/M
+Leblanc/M
+lecher/DMGS
+lecherousness/MS
+lecherous/YP
+lechery/MS
+lecithin/SM
+lectern/SM
+lecturer/M
+lecture/RSDZMG
+lectureship/SM
+led
+Leda/M
+Lederberg/M
+ledger/DMG
+ledge/SRMZ
+LED/SM
+Leeanne/M
+Leeann/M
+leech/MSDG
+Leeds/M
+leek/SM
+Leelah/M
+Leela/M
+Leeland/M
+Lee/M
+lee/MZRS
+Leena/M
+leer/DG
+leeriness/MS
+leering/Y
+leery/PTR
+Leesa/M
+Leese/M
+Leeuwenhoek/M
+Leeward/M
+leeward/S
+leeway/MS
+leftism/SM
+leftist/SM
+leftmost
+leftover/MS
+Left/S
+left/TRS
+leftward/S
+Lefty/M
+lefty/SM
+legacy/MS
+legalese/MS
+legalism/SM
+legalistic
+legality/MS
+legalization/MS
+legalize/DSG
+legalized/U
+legal/SY
+legate/AXCNGSD
+legatee/MS
+legate's/C
+legation/AMC
+legato/SM
+legendarily
+legendary/S
+Legendre/M
+legend/SM
+legerdemain/SM
+Leger/SM
+legged
+legginess/MS
+legging/MS
+leggy/PRT
+leghorn/SM
+Leghorn/SM
+legibility/MS
+legible
+legibly
+legionary/S
+legionnaire/SM
+legion/SM
+legislate/SDXVNG
+legislation/M
+legislative/SY
+legislator/SM
+legislature/MS
+legitimacy/MS
+legitimate/SDNGY
+legitimation/M
+legitimatize/SDG
+legitimization/MS
+legitimize/RSDG
+legit/S
+legless
+legman/M
+legmen
+leg/MS
+Lego/M
+Legra/M
+Legree/M
+legroom/MS
+legstraps
+legume/SM
+leguminous
+legwork/SM
+Lehigh/M
+Lehman/M
+Leia/M
+Leibniz/M
+Leicester/SM
+Leiden/M
+Leif/M
+Leigha/M
+Leigh/M
+Leighton/M
+Leilah/M
+Leila/M
+lei/MS
+Leipzig/M
+Leisha/M
+leisureliness/MS
+leisurely/P
+leisure/SDYM
+leisurewear
+leitmotif/SM
+leitmotiv/MS
+Lek/M
+Lelah/M
+Lela/M
+Leland/M
+Lelia/M
+Lemaitre/M
+Lemar/M
+Lemke/M
+Lem/M
+lemma/MS
+lemme/GJ
+Lemmie/M
+lemming/M
+Lemmy/M
+lemonade/SM
+lemon/GSDM
+lemony
+Lemuel/M
+Lemuria/M
+lemur/MS
+Lena/M
+Lenard/M
+Lenci/M
+lender/M
+lend/SRGZ
+Lenee/M
+Lenette/M
+lengthener/M
+lengthen/GRD
+lengthily
+lengthiness/MS
+length/MNYX
+lengths
+lengthwise
+lengthy/TRP
+lenience/S
+leniency/MS
+lenient/SY
+Leningrad/M
+Leninism/M
+Leninist
+Lenin/M
+lenitive/S
+Lenka/M
+Len/M
+Le/NM
+Lenna/M
+Lennard/M
+Lennie/M
+Lennon/M
+Lenny/M
+Lenoir/M
+Leno/M
+Lenora/M
+Lenore/M
+lens/SRDMJGZ
+lent/A
+lenticular
+lentil/SM
+lento/S
+Lent/SMN
+Leodora/M
+Leoine/M
+Leola/M
+Leoline/M
+Leo/MS
+Leona/M
+Leonanie/M
+Leonard/M
+Leonardo/M
+Leoncavallo/M
+Leonelle/M
+Leonel/M
+Leone/M
+Leonerd/M
+Leonhard/M
+Leonidas/M
+Leonid/M
+Leonie/M
+leonine
+Leon/M
+Leonora/M
+Leonore/M
+Leonor/M
+Leontine/M
+Leontyne/M
+leopardess/SM
+leopard/MS
+leopardskin
+Leopold/M
+Leopoldo/M
+Leopoldville/M
+Leora/M
+leotard/MS
+leper/SM
+Lepidus/M
+Lepke/M
+leprechaun/SM
+leprosy/MS
+leprous
+lepta
+lepton/SM
+Lepus/M
+Lerner/M
+Leroi/M
+Leroy/M
+Lesa/M
+lesbianism/MS
+lesbian/MS
+Leshia/M
+lesion/DMSG
+Lesley/M
+Leslie/M
+Lesli/M
+Lesly/M
+Lesotho/M
+lessee/MS
+lessen/GDS
+Lesseps/M
+lesser
+lesses
+Lessie/M
+lessing
+lesson/DMSG
+lessor/MS
+less/U
+Lester/M
+lest/R
+Les/Y
+Lesya/M
+Leta/M
+letdown/SM
+lethality/M
+lethal/YS
+Letha/M
+lethargic
+lethargically
+lethargy/MS
+Lethe/M
+Lethia/M
+Leticia/M
+Letisha/M
+let/ISM
+Letitia/M
+Letizia/M
+Letta/M
+letterbox/S
+lettered/U
+letterer/M
+letterhead/SM
+lettering/M
+letter/JSZGRDM
+letterman/M
+Letterman/M
+lettermen
+letterpress/MS
+Lettie/M
+Letti/M
+letting/S
+lettuce/SM
+Letty/M
+letup/MS
+leukemia/SM
+leukemic/S
+leukocyte/MS
+Leupold/M
+Levant/M
+leveeing
+levee/SDM
+leveled/U
+leveler/M
+levelheadedness/S
+levelheaded/P
+leveling/U
+levelness/SM
+level/STZGRDYP
+leverage/MGDS
+lever/SDMG
+Levesque/M
+Levey/M
+Leviathan
+leviathan/MS
+levier/M
+Levi/MS
+Levine/M
+Levin/M
+levitate/XNGDS
+levitation/M
+Leviticus/M
+Levitt/M
+levity/MS
+Lev/M
+Levon/M
+Levy/M
+levy/SRDZG
+lewdness/MS
+lewd/PYRT
+Lewellyn/M
+Lewes
+Lewie/M
+Lewinsky/M
+lewis/M
+Lewis/M
+Lewiss
+Lew/M
+lex
+lexeme/MS
+lexical/Y
+lexicographer/MS
+lexicographic
+lexicographical/Y
+lexicography/SM
+lexicon/SM
+Lexie/M
+Lexi/MS
+Lexine/M
+Lexington/M
+Lexus/M
+Lexy/M
+Leyden/M
+Leyla/M
+Lezley/M
+Lezlie/M
+lg
+Lhasa/SM
+Lhotse/M
+liability/SAM
+liable/AP
+liaise/GSD
+liaison/SM
+Lia/M
+Liam/M
+Liana/M
+Liane/M
+Lian/M
+Lianna/M
+Lianne/M
+liar/MS
+libation/SM
+libbed
+Libbey/M
+Libbie/M
+Libbi/M
+libbing
+Libby/M
+libeler/M
+libel/GMRDSZ
+libelous/Y
+Liberace/M
+liberalism/MS
+liberality/MS
+liberalization/SM
+liberalized/U
+liberalize/GZSRD
+liberalizer/M
+liberalness/MS
+liberal/YSP
+liberate/NGDSCX
+liberationists
+liberation/MC
+liberator/SCM
+Liberia/M
+Liberian/S
+libertarianism/M
+libertarian/MS
+libertine/MS
+liberty/MS
+libidinal
+libidinousness/M
+libidinous/PY
+libido/MS
+Lib/M
+lib/MS
+librarian/MS
+library/MS
+Libra/SM
+libretoes
+libretos
+librettist/MS
+libretto/MS
+Libreville/M
+Librium/M
+Libya/M
+Libyan/S
+lice/M
+licensed/AU
+licensee/SM
+license/MGBRSD
+licenser/M
+licenses/A
+licensing/A
+licensor/M
+licentiate/MS
+licentiousness/MS
+licentious/PY
+Licha/M
+lichee's
+lichen/DMGS
+Lichtenstein/M
+Lichter/M
+licit/Y
+licked/U
+lickerish
+licker/M
+lick/GRDSJ
+licking/M
+licorice/SM
+Lida/M
+lidded
+lidding
+Lidia/M
+lidless
+lid/MS
+lido/MS
+Lieberman/M
+Liebfraumilch/M
+Liechtenstein/RMZ
+lied/MR
+lie/DRS
+Lief/M
+liefs/A
+lief/TSR
+Liege/M
+liege/SR
+Lie/M
+lien/SM
+lier/IMA
+lies/A
+Liesa/M
+lieu/SM
+lieut
+lieutenancy/MS
+lieutenant/SM
+Lieut/M
+lifeblood/SM
+lifeboat/SM
+lifebuoy/S
+lifeforms
+lifeguard/MDSG
+lifelessness/SM
+lifeless/PY
+lifelikeness/M
+lifelike/P
+lifeline/SM
+lifelong
+life/MZR
+lifer/M
+lifesaver/SM
+lifesaving/S
+lifespan/S
+lifestyle/S
+lifetaking
+lifetime/MS
+lifework/MS
+LIFO
+lifter/M
+lift/GZMRDS
+liftoff/MS
+ligament/MS
+ligand/MS
+ligate/XSDNG
+ligation/M
+ligature/DSGM
+light/ADSCG
+lighted/U
+lightener/M
+lightening/M
+lighten/ZGDRS
+lighter/CM
+lightered
+lightering
+lighters
+lightest
+lightface/SDM
+lightheaded
+lightheartedness/MS
+lighthearted/PY
+lighthouse/MS
+lighting/MS
+lightly
+lightness/MS
+lightning/SMD
+lightproof
+light's
+lightship/SM
+lightweight/S
+ligneous
+lignite/MS
+lignum
+likability/MS
+likableness/MS
+likable/P
+likeability's
+liked/E
+likelihood/MSU
+likely/UPRT
+likeness/MSU
+liken/GSD
+liker/E
+liker's
+likes/E
+likest
+like/USPBY
+likewise
+liking/SM
+lilac/MS
+Lilah/M
+Lila/SM
+Lilia/MS
+Liliana/M
+Liliane/M
+Lilian/M
+Lilith/M
+Liliuokalani/M
+Lilla/M
+Lille/M
+Lillian/M
+Lillie/M
+Lilli/MS
+lilliputian/S
+Lilliputian/SM
+Lilliput/M
+Lilllie/M
+Lilly/M
+Lil/MY
+Lilongwe/M
+lilting/YP
+lilt/MDSG
+Lilyan/M
+Lily/M
+lily/MSD
+Lima/M
+Limbaugh/M
+limbered/U
+limberness/SM
+limber/RDYTGP
+limbers/U
+limbic
+limbless
+Limbo
+limbo/GDMS
+limb/SGZRDM
+Limburger/SM
+limeade/SM
+lime/DSMG
+limekiln/M
+limelight/DMGS
+limerick/SM
+limestone/SM
+limitability
+limitably
+limitation/MCS
+limit/CSZGRD
+limitedly/U
+limitedness/M
+limited/PSY
+limiter/M
+limiting/S
+limitlessness/SM
+limitless/PY
+limit's
+limn/GSD
+Limoges/M
+limo/S
+limousine/SM
+limper/M
+limpet/SM
+limpidity/MS
+limpidness/SM
+limpid/YP
+limpness/MS
+Limpopo/M
+limp/SGTPYRD
+Li/MY
+limy/TR
+linage/MS
+Lina/M
+linchpin/MS
+Linc/M
+Lincoln/SM
+Linda/M
+Lindbergh/M
+Lindberg/M
+linden/MS
+Lindholm/M
+Lindie/M
+Lindi/M
+Lind/M
+Lindon/M
+Lindquist/M
+Lindsay/M
+Lindsey/M
+Lindstrom/M
+Lindsy/M
+Lindy/M
+line/AGDS
+lineage/SM
+lineal/Y
+Linea/M
+lineament/MS
+linearity/MS
+linearize/SDGNB
+linear/Y
+linebacker/SM
+lined/U
+linefeed
+Linell/M
+lineman/M
+linemen
+linen/SM
+liner/SM
+line's
+linesman/M
+linesmen
+Linet/M
+Linette/M
+lineup/S
+lingerer/M
+lingerie/SM
+lingering/Y
+linger/ZGJRD
+lingoes
+lingo/M
+lingual/SY
+lingua/M
+linguine
+linguini's
+linguistically
+linguistic/S
+linguistics/M
+linguist/SM
+ling/ZR
+liniment/MS
+lining/SM
+linkable
+linkage/SM
+linked/A
+linker/S
+linking/S
+Link/M
+link's
+linkup/S
+link/USGD
+Lin/M
+Linnaeus/M
+Linnea/M
+Linnell/M
+Linnet/M
+linnet/SM
+Linnie/M
+Linn/M
+Linoel/M
+linoleum/SM
+lino/M
+Linotype/M
+linseed/SM
+lintel/SM
+linter/M
+Linton/M
+lint/SMR
+linty/RST
+Linus/M
+Linux/M
+Linwood/M
+Linzy/M
+Lionello/M
+Lionel/M
+lioness/SM
+lionhearted
+lionization/SM
+lionizer/M
+lionize/ZRSDG
+Lion/M
+lion/MS
+lipase/M
+lipid/MS
+lip/MS
+liposuction/S
+lipped
+lipper
+Lippi/M
+lipping
+Lippmann/M
+lippy/TR
+lipread/GSRJ
+Lipschitz/M
+Lipscomb/M
+lipstick/MDSG
+Lipton/M
+liq
+liquefaction/SM
+liquefier/M
+liquefy/DRSGZ
+liqueur/DMSG
+liquidate/GNXSD
+liquidation/M
+liquidator/SM
+liquidity/SM
+liquidizer/M
+liquidize/ZGSRD
+liquidness/M
+liquid/SPMY
+liquorice/SM
+liquorish
+liquor/SDMG
+lira/M
+Lira/M
+lire
+Lisabeth/M
+Lisa/M
+Lisbeth/M
+Lisbon/M
+Lise/M
+Lisetta/M
+Lisette/M
+Lisha/M
+Lishe/M
+Lisle/M
+lisle/SM
+lisper/M
+lisp/MRDGZS
+Lissajous/M
+Lissa/M
+Lissie/M
+Lissi/M
+Liss/M
+lissomeness/M
+lissome/P
+lissomness/M
+Lissy/M
+listed/U
+listener/M
+listen/ZGRD
+Listerine/M
+lister/M
+Lister/M
+listing/M
+list/JMRDNGZXS
+listlessness/SM
+listless/PY
+Liston/M
+Liszt/M
+Lita/M
+litany/MS
+litchi/SM
+literacy/MS
+literalism/M
+literalistic
+literalness/MS
+literal/PYS
+literariness/SM
+literary/P
+literate/YNSP
+literati
+literation/M
+literature/SM
+liter/M
+lite/S
+litheness/SM
+lithe/PRTY
+lithesome
+lithium/SM
+lithograph/DRMGZ
+lithographer/M
+lithographic
+lithographically
+lithographs
+lithography/MS
+lithology/M
+lithosphere/MS
+lithospheric
+Lithuania/M
+Lithuanian/S
+litigant/MS
+litigate/NGXDS
+litigation/M
+litigator/SM
+litigiousness/MS
+litigious/PY
+litmus/SM
+litotes/M
+lit/RZS
+littérateur/S
+litterbug/SM
+litter/SZGRDM
+Little/M
+littleneck/M
+littleness/SM
+little/RSPT
+Littleton/M
+Litton/M
+littoral/S
+liturgical/Y
+liturgic/S
+liturgics/M
+liturgist/MS
+liturgy/SM
+Liuka/M
+livability/MS
+livableness/M
+livable/U
+livably
+Liva/M
+lived/A
+livelihood/SM
+liveliness/SM
+livelong/S
+lively/RTP
+liveness/M
+liven/SDG
+liver/CSGD
+liveried
+liverish
+Livermore/M
+Liverpool/M
+Liverpudlian/MS
+liver's
+liverwort/SM
+liverwurst/SM
+livery/CMS
+liveryman/MC
+liverymen/C
+lives/A
+lives's
+livestock/SM
+live/YHZTGJDSRPB
+Livia/M
+lividness/M
+livid/YP
+livingness/M
+Livingstone/M
+Livingston/M
+living/YP
+Liv/M
+Livonia/M
+Livvie/M
+Livvy/M
+Livvyy/M
+Livy/M
+Lizabeth/M
+Liza/M
+lizard/MS
+Lizbeth/M
+Lizette/M
+Liz/M
+Lizzie/M
+Lizzy/M
+l/JGVXT
+Ljubljana/M
+LL
+llama/SM
+llano/SM
+LLB
+ll/C
+LLD
+Llewellyn/M
+Lloyd/M
+Llywellyn/M
+LNG
+lo
+loadable
+loaded/A
+loader/MU
+loading/MS
+load's/A
+loads/A
+loadstar's
+loadstone's
+load/SURDZG
+loafer/M
+Loafer/S
+loaf/SRDMGZ
+loam/SMDG
+loamy/RT
+loaner/M
+loaning/M
+loan/SGZRDMB
+loansharking/S
+loanword/S
+loathe
+loather/M
+loathing/M
+loath/JPSRDYZG
+loathness/M
+loathsomeness/MS
+loathsome/PY
+loaves/M
+Lobachevsky/M
+lobar
+lobbed
+lobber/MS
+lobbing
+lobby/GSDM
+lobbyist/MS
+lobe/SM
+lob/MDSG
+lobotomist
+lobotomize/GDS
+lobotomy/MS
+lobster/MDGS
+lobularity
+lobular/Y
+lobule/SM
+locale/MS
+localisms
+locality/MS
+localization/MS
+localized/U
+localizer/M
+localizes/U
+localize/ZGDRS
+local/SGDY
+locatable
+locate/AXESDGN
+locater/M
+locational/Y
+location/EMA
+locative/S
+locator's
+Lochinvar/M
+loch/M
+lochs
+loci/M
+lockable
+Lockean/M
+locked/A
+Locke/M
+locker/SM
+locket/SM
+Lockhart/M
+Lockheed/M
+Lockian/M
+locking/S
+lockjaw/SM
+Lock/M
+locknut/M
+lockout/MS
+lock's
+locksmithing/M
+locksmith/MG
+locksmiths
+lockstep/S
+lock/UGSD
+lockup/MS
+Lockwood/M
+locomotion/SM
+locomotive/YMS
+locomotor
+locomotory
+loco/SDMG
+locoweed/MS
+locus/M
+locust/SM
+locution/MS
+lode/SM
+lodestar/MS
+lodestone/MS
+lodged/E
+lodge/GMZSRDJ
+Lodge/M
+lodgepole
+lodger/M
+lodges/E
+lodging/M
+lodgment/M
+Lodovico/M
+Lodowick/M
+Lodz
+Loeb/M
+Loella/M
+Loewe/M
+Loewi/M
+lofter/M
+loftily
+loftiness/SM
+loft/SGMRD
+lofty/PTR
+loganberry/SM
+Logan/M
+logarithmic
+logarithmically
+logarithm/MS
+logbook/MS
+loge/SMNX
+logged/U
+loggerhead/SM
+logger/SM
+loggia/SM
+logging/MS
+logicality/MS
+logicalness/M
+logical/SPY
+logician/SM
+logic/SM
+login/S
+logion/M
+logistical/Y
+logistic/MS
+logjam/SM
+LOGO
+logo/SM
+logotype/MS
+logout
+logrolling/SM
+log's/K
+log/SM
+logy/RT
+Lohengrin/M
+loincloth/M
+loincloths
+loin/SM
+Loire/M
+Loise/M
+Lois/M
+loiterer/M
+loiter/RDJSZG
+Loki/M
+Lola/M
+Loleta/M
+Lolita/M
+loller/M
+lollipop/MS
+loll/RDGS
+Lolly/M
+lolly/SM
+Lombardi/M
+Lombard/M
+Lombardy/M
+Lomb/M
+Lome
+Lona/M
+Londonderry/M
+Londoner/M
+London/RMZ
+Lonee/M
+loneliness/SM
+lonely/TRP
+loneness/M
+lone/PYZR
+loner/M
+lonesomeness/MS
+lonesome/PSY
+longboat/MS
+longbow/SM
+longed/K
+longeing
+longer/K
+longevity/MS
+Longfellow/M
+longhair/SM
+longhand/SM
+longhorn/SM
+longing/MY
+longish
+longitude/MS
+longitudinal/Y
+long/JGTYRDPS
+Long/M
+longness/M
+longshoreman/M
+longshoremen
+longsighted
+longs/K
+longstanding
+Longstreet/M
+longsword
+longterm
+longtime
+Longueuil/M
+longueur/SM
+longways
+longword/SM
+Loni/M
+Lon/M
+Lonna/M
+Lonnard/M
+Lonnie/M
+Lonni/M
+Lonny/M
+loofah/M
+loofahs
+lookahead
+lookalike/S
+looker/M
+look/GZRDS
+lookout/MS
+lookup/SM
+looming/M
+Loomis/M
+loom/MDGS
+loon/MS
+loony/SRT
+looper/M
+loophole/MGSD
+loop/MRDGS
+loopy/TR
+loosed/U
+looseleaf
+loosener/M
+looseness/MS
+loosen/UDGS
+loose/SRDPGTY
+looses/U
+loosing/M
+looter/M
+loot/MRDGZS
+loper/M
+lope/S
+Lopez/M
+lopped
+lopper/MS
+lopping
+lop/SDRG
+lopsidedness/SM
+lopsided/YP
+loquaciousness/MS
+loquacious/YP
+loquacity/SM
+Loraine/M
+Lorain/M
+Loralee/M
+Loralie/M
+Loralyn/M
+Lora/M
+Lorant/M
+lording/M
+lordliness/SM
+lordly/PTR
+Lord/MS
+lord/MYDGS
+lordship/SM
+Lordship/SM
+Loree/M
+Loreen/M
+Lorelei/M
+Lorelle/M
+lore/MS
+Lorena/M
+Lorene/M
+Loren/SM
+Lorentzian/M
+Lorentz/M
+Lorenza/M
+Lorenz/M
+Lorenzo/M
+Loretta/M
+Lorette/M
+lorgnette/SM
+Loria/M
+Lorianna/M
+Lorianne/M
+Lorie/M
+Lorilee/M
+Lorilyn/M
+Lori/M
+Lorinda/M
+Lorine/M
+Lorin/M
+loris/SM
+Lorita/M
+lorn
+Lorna/M
+Lorne/M
+Lorraine/M
+Lorrayne/M
+Lorre/M
+Lorrie/M
+Lorri/M
+Lorrin/M
+lorryload/S
+Lorry/M
+lorry/SM
+Lory/M
+Los
+loser/M
+lose/ZGJBSR
+lossage
+lossless
+loss/SM
+lossy/RT
+lost/P
+Lothaire/M
+Lothario/MS
+lotion/MS
+Lot/M
+lot/MS
+Lotta/M
+lotted
+Lotte/M
+lotter
+lottery/MS
+Lottie/M
+Lotti/M
+lotting
+Lott/M
+lotto/MS
+Lotty/M
+lotus/SM
+louden/DG
+loudhailer/S
+loudly/RT
+loudmouth/DM
+loudmouths
+loudness/MS
+loudspeaker/SM
+loudspeaking
+loud/YRNPT
+Louella/M
+Louie/M
+Louisa/M
+Louise/M
+Louisette/M
+Louisiana/M
+Louisianan/S
+Louisianian/S
+Louis/M
+Louisville/M
+Lou/M
+lounger/M
+lounge/SRDZG
+Lourdes/M
+lour/GSD
+louse/CSDG
+louse's
+lousewort/M
+lousily
+lousiness/MS
+lousy/PRT
+loutishness/M
+loutish/YP
+Loutitia/M
+lout/SGMD
+louver/DMS
+L'Ouverture
+Louvre/M
+lovableness/MS
+lovable/U
+lovably
+lovebird/SM
+lovechild
+Lovecraft/M
+love/DSRMYZGJB
+loved/U
+Lovejoy/M
+Lovelace/M
+Loveland/M
+lovelessness/M
+loveless/YP
+lovelies
+lovelinesses
+loveliness/UM
+Lovell/M
+lovelornness/M
+lovelorn/P
+lovely/URPT
+Love/M
+lovemaking/SM
+lover/YMG
+lovesick
+lovestruck
+lovingly
+lovingness/M
+loving/U
+lowborn
+lowboy/SM
+lowbrow/MS
+lowdown/S
+Lowell/M
+Lowe/M
+lowercase/GSD
+lower/DG
+lowermost
+Lowery/M
+lowish
+lowland/RMZS
+Lowlands/M
+lowlife/SM
+lowlight/MS
+lowliness/MS
+lowly/PTR
+lowness/MS
+low/PDRYSZTG
+Lowrance/M
+lox/MDSG
+loyaler
+loyalest
+loyal/EY
+loyalism/SM
+loyalist/SM
+loyalty/EMS
+Loyang/M
+Loydie/M
+Loyd/M
+Loy/M
+Loyola/M
+lozenge/SDM
+LP
+LPG
+LPN/S
+Lr
+ls
+l's
+L's
+LSD
+ltd
+Ltd/M
+Lt/M
+Luanda/M
+Luann/M
+luau/MS
+lubber/YMS
+Lubbock/M
+lube/DSMG
+lubricant/SM
+lubricate/VNGSDX
+lubrication/M
+lubricator/MS
+lubricious/Y
+lubricity/SM
+Lubumbashi/M
+Lucais/M
+Luca/MS
+Luce/M
+lucent/Y
+Lucerne/M
+Lucho/M
+Lucia/MS
+Luciana/M
+Lucian/M
+Luciano/M
+lucidity/MS
+lucidness/MS
+lucid/YP
+Lucie/M
+Lucien/M
+Lucienne/M
+Lucifer/M
+Lucila/M
+Lucile/M
+Lucilia/M
+Lucille/M
+Luci/MN
+Lucina/M
+Lucinda/M
+Lucine/M
+Lucio/M
+Lucita/M
+Lucite/MS
+Lucius/M
+luck/GSDM
+luckier/U
+luckily/U
+luckiness/UMS
+luckless
+Lucknow/M
+Lucky/M
+lucky/RSPT
+lucrativeness/SM
+lucrative/YP
+lucre/MS
+Lucretia/M
+Lucretius/M
+lucubrate/GNSDX
+lucubration/M
+Lucy/M
+Luddite/SM
+Ludhiana/M
+ludicrousness/SM
+ludicrous/PY
+Ludlow/M
+Ludmilla/M
+ludo/M
+Ludovico/M
+Ludovika/M
+Ludvig/M
+Ludwig/M
+Luella/M
+Luelle/M
+luff/GSDM
+Lufthansa/M
+Luftwaffe/M
+luge/MC
+Luger/M
+luggage/SM
+lugged
+lugger/SM
+lugging
+Lugosi/M
+lug/RS
+lugsail/SM
+lugubriousness/MS
+lugubrious/YP
+Luigi/M
+Luisa/M
+Luise/M
+Luis/M
+Lukas/M
+Luke/M
+lukewarmness/SM
+lukewarm/PY
+Lula/M
+Lulita/M
+lullaby/GMSD
+lull/SDG
+lulu/M
+Lulu/M
+Lu/M
+lumbago/SM
+lumbar/S
+lumberer/M
+lumbering/M
+lumberjack/MS
+lumberman/M
+lumbermen
+lumber/RDMGZSJ
+lumberyard/MS
+lumen/M
+Lumière/M
+luminance/M
+luminary/MS
+luminescence/SM
+luminescent
+luminosity/MS
+luminousness/M
+luminous/YP
+lummox/MS
+lumper/M
+lumpiness/MS
+lumpishness/M
+lumpish/YP
+lump/SGMRDN
+lumpy/TPR
+lunacy/MS
+Luna/M
+lunar/S
+lunary
+lunate/YND
+lunatic/S
+lunation/M
+luncheonette/SM
+luncheon/SMDG
+luncher/M
+lunch/GMRSD
+lunchpack
+lunchroom/MS
+lunchtime/MS
+Lundberg/M
+Lund/M
+Lundquist/M
+lune/M
+lunge/MS
+lunger/M
+lungfish/SM
+lungful
+lung/SGRDM
+lunkhead/SM
+Lupe/M
+lupine/SM
+Lupus/M
+lupus/SM
+Lura/M
+lurcher/M
+lurch/RSDG
+lure/DSRG
+lurer/M
+Lurette/M
+lurex
+Luria/M
+luridness/SM
+lurid/YP
+lurker/M
+lurk/GZSRD
+Lurleen/M
+Lurlene/M
+Lurline/M
+Lusaka/M
+Lusa/M
+lusciousness/MS
+luscious/PY
+lushness/MS
+lush/YSRDGTP
+Lusitania/M
+luster/GDM
+lustering/M
+lusterless
+lustfulness/M
+lustful/PY
+lustily
+lustiness/MS
+lust/MRDGZS
+lustrousness/M
+lustrous/PY
+lusty/PRT
+lutanist/MS
+lute/DSMG
+lutenist/MS
+Lutero/M
+lutetium/MS
+Lutheranism/MS
+Lutheran/SM
+Luther/M
+luting/M
+Lutz
+Luxembourgian
+Luxembourg/RMZ
+Luxemburg's
+luxe/MS
+luxuriance/MS
+luxuriant/Y
+luxuriate/GNSDX
+luxuriation/M
+luxuriousness/SM
+luxurious/PY
+luxury/MS
+Luz/M
+Luzon/M
+L'vov
+Lyallpur/M
+lyceum/MS
+lychee's
+lycopodium/M
+Lycra/S
+Lycurgus/M
+Lyda/M
+Lydia/M
+Lydian/S
+Lydie/M
+Lydon/M
+lye/JSMG
+Lyell/M
+lying/Y
+Lyle/M
+Lyly/M
+Lyman/M
+Lyme/M
+lymphatic/S
+lymph/M
+lymphocyte/SM
+lymphoid
+lymphoma/MS
+lymphs
+Ly/MY
+Lynchburg/M
+lyncher/M
+lynching/M
+Lynch/M
+lynch/ZGRSDJ
+Lynda/M
+Lyndell/M
+Lyndel/M
+Lynde/M
+Lyndon/M
+Lyndsay/M
+Lyndsey/M
+Lyndsie/M
+Lyndy/M
+Lynea/M
+Lynelle/M
+Lynette/M
+Lynett/M
+Lyn/M
+Lynna/M
+Lynnea/M
+Lynnelle/M
+Lynnell/M
+Lynne/M
+Lynnet/M
+Lynnette/M
+Lynnett/M
+Lynn/M
+Lynsey/M
+lynx/MS
+Lyon/SM
+Lyra/M
+lyrebird/MS
+lyre/SM
+lyricalness/M
+lyrical/YP
+lyricism/SM
+lyricist/SM
+lyric/S
+Lysenko/M
+lysine/M
+Lysistrata/M
+Lysol/M
+Lyssa/M
+LyX/M
+MA
+Maalox/M
+ma'am
+Mabelle/M
+Mabel/M
+Mable/M
+Mab/M
+macabre/Y
+macadamize/SDG
+macadam/SM
+Macao/M
+macaque/SM
+macaroni/SM
+macaroon/MS
+Macarthur/M
+MacArthur/M
+Macaulay/M
+macaw/SM
+Macbeth/M
+Maccabees/M
+Maccabeus/M
+Macdonald/M
+MacDonald/M
+MacDraw/M
+Macedonia/M
+Macedonian/S
+Macedon/M
+mace/MS
+Mace/MS
+macerate/DSXNG
+maceration/M
+macer/M
+Macgregor/M
+MacGregor/M
+machete/SM
+Machiavellian/S
+Machiavelli/M
+machinate/SDXNG
+machination/M
+machinelike
+machine/MGSDB
+machinery/SM
+machinist/MS
+machismo/SM
+Mach/M
+macho/S
+Machs
+Macias/M
+Macintosh/M
+MacIntosh/M
+macintosh's
+Mackenzie/M
+MacKenzie/M
+mackerel/SM
+Mackinac/M
+Mackinaw
+mackinaw/SM
+mackintosh/SM
+mack/M
+Mack/M
+MacLeish/M
+Macmillan/M
+MacMillan/M
+Macon/SM
+MacPaint/M
+macramé/S
+macrobiotic/S
+macrobiotics/M
+macrocosm/MS
+macrodynamic
+macroeconomic/S
+macroeconomics/M
+macromolecular
+macromolecule/SM
+macron/MS
+macrophage/SM
+macroscopic
+macroscopically
+macrosimulation
+macro/SM
+macrosocioeconomic
+Mac/SGMD
+mac/SGMDR
+Macy/M
+Madagascan/SM
+Madagascar/M
+Madalena/M
+Madalyn/M
+Mada/M
+madame/M
+Madame/MS
+madam/SM
+madcap/S
+Maddalena/M
+madded
+madden/GSD
+maddening/Y
+Madden/M
+madder/MS
+maddest
+Maddie/M
+Maddi/M
+madding
+Maddox/M
+Maddy/M
+made/AU
+Madeira/SM
+Madelaine/M
+Madeleine/M
+Madelena/M
+Madelene/M
+Madelina/M
+Madeline/M
+Madelin/M
+Madella/M
+Madelle/M
+Madel/M
+Madelon/M
+Madelyn/M
+mademoiselle/MS
+Madge/M
+madhouse/SM
+Madhya/M
+Madison/M
+Madlen/M
+Madlin/M
+madman/M
+madmen
+madness/SM
+Madonna/MS
+mad/PSY
+Madras
+madras/SM
+Madrid/M
+madrigal/MSG
+Madsen/M
+Madurai/M
+madwoman/M
+madwomen
+Mady/M
+Maegan/M
+Maelstrom/M
+maelstrom/SM
+Mae/M
+maestro/MS
+Maeterlinck/M
+Mafia/MS
+mafia/S
+mafiosi
+mafioso/M
+Mafioso/S
+MAG
+magazine/DSMG
+Magdaia/M
+Magdalena/M
+Magdalene/M
+Magdalen/M
+Magda/M
+Magellanic
+Magellan/M
+magenta/MS
+magged
+Maggee/M
+Maggie/M
+Maggi/M
+magging
+maggot/MS
+maggoty/RT
+Maggy/M
+magi
+magical/Y
+magician/MS
+magicked
+magicking
+magic/SM
+Magill/M
+Magi/M
+Maginot/M
+magisterial/Y
+magistracy/MS
+magistrate/MS
+Mag/M
+magma/SM
+magnanimity/SM
+magnanimosity
+magnanimous/PY
+magnate/SM
+magnesia/MS
+magnesite/M
+magnesium/SM
+magnetically
+magnetic/S
+magnetics/M
+magnetism/SM
+magnetite/SM
+magnetizable
+magnetization/ASCM
+magnetize/CGDS
+magnetized/U
+magnetodynamics
+magnetohydrodynamical
+magnetohydrodynamics/M
+magnetometer/MS
+magneto/MS
+magnetosphere/M
+magnetron/M
+magnet/SM
+magnification/M
+magnificence/SM
+magnificent/Y
+magnified/U
+magnify/DRSGNXZ
+magniloquence/MS
+magniloquent
+Magnitogorsk/M
+magnitude/SM
+magnolia/SM
+Magnum
+magnum/SM
+Magnuson/M
+Magog/M
+Magoo/M
+magpie/SM
+Magritte/M
+Magruder/M
+mag/S
+Magsaysay/M
+Maguire/SM
+Magus/M
+Magyar/MS
+Mahabharata
+Mahala/M
+Mahalia/M
+maharajah/M
+maharajahs
+maharanee's
+maharani/MS
+Maharashtra/M
+maharishi/SM
+mahatma/SM
+Mahavira/M
+Mahayana/M
+Mahayanist
+Mahdi/M
+Mahfouz/M
+Mahican/SM
+mahjong's
+Mahler/M
+Mahmoud/M
+Mahmud/M
+mahogany/MS
+Mahomet's
+mahout/SM
+Maia/M
+Maible/M
+maidenhair/MS
+maidenhead/SM
+maidenhood/SM
+maidenly/P
+maiden/YM
+maidservant/MS
+maid/SMNX
+maier
+Maier/M
+Maiga/M
+Maighdiln/M
+Maigret/M
+mailbag/MS
+mailbox/MS
+mail/BSJGZMRD
+mailer/M
+Mailer/M
+Maillol/M
+maillot/SM
+mailman/M
+mailmen
+Maiman/M
+maimedness/M
+maimed/P
+maimer/M
+Maimonides/M
+Mai/MR
+maim/SGZRD
+mainbrace/M
+Maine/MZR
+Mainer/M
+mainframe/MS
+mainlander/M
+mainland/SRMZ
+mainliner/M
+mainline/RSDZG
+mainly
+mainmast/SM
+main/SA
+mainsail/SM
+mains/M
+mainspring/SM
+mainstay/MS
+mainstream/DRMSG
+maintainability
+maintainable/U
+maintain/BRDZGS
+maintained/U
+maintainer/M
+maintenance/SM
+maintop/SM
+maiolica's
+Maire/M
+Mair/M
+Maisey/M
+Maisie/M
+maisonette/MS
+Maison/M
+Maitilde/M
+maize/MS
+Maj
+Maje/M
+majestic
+majestically
+majesty/MS
+Majesty/MS
+majolica/SM
+Majorca/M
+major/DMGS
+majordomo/S
+majorette/SM
+majority/SM
+Major/M
+Majuro/M
+makable
+Makarios/M
+makefile/S
+makeover/S
+Maker/M
+maker/SM
+makeshift/S
+make/UGSA
+makeup/MS
+making/SM
+Malabar/M
+Malabo/M
+Malacca/M
+Malachi/M
+malachite/SM
+maladapt/DV
+maladjust/DLV
+maladjustment/MS
+maladministration
+maladroitness/MS
+maladroit/YP
+malady/MS
+Malagasy/M
+malaise/SM
+Mala/M
+Malamud/M
+malamute/SM
+Malanie/M
+malaprop
+malapropism/SM
+Malaprop/M
+malarial
+malaria/MS
+malarious
+malarkey/SM
+malathion/S
+Malawian/S
+Malawi/M
+Malayalam/M
+Malaya/M
+Malayan/MS
+Malaysia/M
+Malaysian/S
+Malay/SM
+Malchy/M
+Malcolm/M
+malcontentedness/M
+malcontented/PY
+malcontent/SMD
+Maldive/SM
+Maldivian/S
+Maldonado/M
+maledict
+malediction/MS
+malefaction/MS
+malefactor/MS
+malefic
+maleficence/MS
+maleficent
+Male/M
+Malena/M
+maleness/MS
+male/PSM
+malevolence/S
+malevolencies
+malevolent/Y
+malfeasance/SM
+malfeasant
+malformation/MS
+malformed
+malfunction/SDG
+Malia/M
+Malian/S
+Malibu/M
+malice/MGSD
+maliciousness/MS
+malicious/YU
+malignancy/SM
+malignant/YS
+malign/GSRDYZ
+malignity/MS
+Mali/M
+Malina/M
+Malinda/M
+Malinde/M
+malingerer/M
+malinger/GZRDS
+Malinowski/M
+Malissa/M
+Malissia/M
+mallard/SM
+Mallarmé/M
+malleability/SM
+malleableness/M
+malleable/P
+mallet/MS
+Mallissa/M
+Mallorie/M
+Mallory/M
+mallow/MS
+mall/SGMD
+Mal/M
+malnourished
+malnutrition/SM
+malocclusion/MS
+malodorous
+Malone/M
+Malorie/M
+Malory/M
+malposed
+malpractice/SM
+Malraux/M
+Malta/M
+malted/S
+Maltese
+Malthusian/S
+Malthus/M
+malting/M
+maltose/SM
+maltreat/GDSL
+maltreatment/S
+malt/SGMD
+malty/RT
+Malva/M
+Malvina/M
+Malvin/M
+Malynda/M
+mama/SM
+mamba/SM
+mambo/GSDM
+Mame/M
+Mamet/M
+ma/MH
+Mamie/M
+mammalian/SM
+mammal/SM
+mammary
+mamma's
+mammogram/S
+mammography/S
+Mammon's
+mammon/SM
+mammoth/M
+mammoths
+mammy/SM
+Mamore/M
+manacle/SDMG
+manageability/S
+manageableness
+manageable/U
+managed/U
+management/SM
+manageress/M
+managerial/Y
+manager/M
+managership/M
+manage/ZLGRSD
+Managua/M
+Manama/M
+mañana/M
+mananas
+Manasseh/M
+manatee/SM
+Manaus's
+Manchester/M
+Manchu/MS
+Manchuria/M
+Manchurian/S
+Mancini/M
+manciple/M
+Mancunian/MS
+mandala/SM
+Mandalay/M
+Manda/M
+mandamus/GMSD
+Mandarin
+mandarin/MS
+mandate/SDMG
+mandatory/S
+Mandela
+Mandelbrot/M
+Mandel/M
+mandible/MS
+mandibular
+Mandie/M
+Mandi/M
+Mandingo/M
+mandolin/MS
+mandrake/MS
+mandrel/SM
+mandrill/SM
+Mandy/M
+manège/GSD
+mane/MDS
+Manet/M
+maneuverability/MS
+maneuverer/M
+maneuver/MRDSGB
+Manfred/M
+manful/Y
+manganese/MS
+mange/GMSRDZ
+manger/M
+manginess/S
+mangler/M
+mangle/RSDG
+mangoes
+mango/M
+mangrove/MS
+mangy/PRT
+manhandle/GSD
+Manhattan/SM
+manhole/MS
+manhood/MS
+manhunt/SM
+maniacal/Y
+maniac/SM
+mania/SM
+manically
+Manichean/M
+manic/S
+manicure/MGSD
+manicurist/SM
+manifestation/SM
+manifesto/GSDM
+manifest/YDPGS
+manifolder/M
+manifold/GPYRDMS
+manifoldness/M
+manikin/MS
+Manila/MS
+manila/S
+manilla's
+Mani/M
+manioc/SM
+manipulability
+manipulable
+manipulate/SDXBVGN
+manipulative/PM
+manipulator/MS
+manipulatory
+Manitoba/M
+Manitoulin/M
+Manitowoc/M
+mankind/M
+Mankowski/M
+Manley/M
+manlike
+manliness/SM
+manliness's/U
+manly/URPT
+manna/MS
+manned/U
+mannequin/MS
+mannered/U
+mannerism/SM
+mannerist/M
+mannerliness/MU
+mannerly/UP
+manner/SDYM
+Mann/GM
+Mannheim/M
+Mannie/M
+mannikin's
+Manning/M
+manning/U
+mannishness/SM
+mannish/YP
+Manny/M
+Manolo/M
+Mano/M
+manometer/SM
+Manon/M
+manorial
+manor/MS
+manpower/SM
+manqué/M
+man's
+mansard/SM
+manservant/M
+manse/XNM
+Mansfield/M
+mansion/M
+manslaughter/SM
+Man/SM
+Manson/M
+mans/S
+manta/MS
+Mantegna/M
+mantelpiece/MS
+mantel/SM
+mantes
+mantilla/MS
+mantissa/SM
+mantis/SM
+mantle/ESDG
+Mantle/M
+mantle's
+mantling/M
+mantra/MS
+mantrap/SM
+manual/SMY
+Manuela/M
+Manuel/M
+manufacture/JZGDSR
+manufacturer/M
+manumission/MS
+manumit/S
+manumitted
+manumitting
+manure/RSDMZG
+manuscript/MS
+man/USY
+Manville/M
+Manx
+many
+Manya/M
+Maoism/MS
+Maoist/S
+Mao/M
+Maori/SM
+Maplecrest/M
+maple/MS
+mapmaker/S
+mappable
+mapped/UA
+mapper/S
+mapping/MS
+Mapplethorpe/M
+maps/AU
+map/SM
+Maputo/M
+Marabel/M
+marabou/MS
+marabout's
+Maracaibo/M
+maraca/MS
+Mara/M
+maraschino/SM
+Marathi
+marathoner/M
+Marathon/M
+marathon/MRSZ
+Marat/M
+marauder/M
+maraud/ZGRDS
+marbleize/GSD
+marble/JRSDMG
+marbler/M
+marbling/M
+Marceau/M
+Marcela/M
+Marcelia/M
+Marcelino/M
+Marcella/M
+Marcelle/M
+Marcellina/M
+Marcelline/M
+Marcello/M
+Marcellus/M
+Marcel/M
+Marcelo/M
+Marchall/M
+Marchelle/M
+marcher/M
+marchioness/SM
+March/MS
+march/RSDZG
+Marcia/M
+Marciano/M
+Marcie/M
+Marcile/M
+Marcille/M
+Marci/M
+Marc/M
+Marconi/M
+Marco/SM
+Marcotte/M
+Marcus/M
+Marcy/M
+Mardi/SM
+Marduk/M
+Mareah/M
+mare/MS
+Marena/M
+Maren/M
+Maressa/M
+Margalit/M
+Margalo/M
+Marga/M
+Margareta/M
+Margarete/M
+Margaretha/M
+Margarethe/M
+Margaret/M
+Margaretta/M
+Margarette/M
+margarine/MS
+Margarita/M
+margarita/SM
+Margarito/M
+Margaux/M
+Margeaux/M
+Marge/M
+Margery/M
+Marget/M
+Margette/M
+Margie/M
+Margi/M
+marginalia
+marginality
+marginalization
+marginalize/SDG
+marginal/YS
+margin/GSDM
+Margit/M
+Margo/M
+Margot/M
+Margrethe/M
+Margret/M
+Marguerite/M
+Margy/M
+mariachi/SM
+maria/M
+Maria/M
+Mariam/M
+Mariana/SM
+Marian/MS
+Marianna/M
+Marianne/M
+Mariann/M
+Mariano/M
+Maribelle/M
+Maribel/M
+Maribeth/M
+Maricela/M
+Marice/M
+Maridel/M
+Marieann/M
+Mariejeanne/M
+Mariele/M
+Marielle/M
+Mariellen/M
+Mariel/M
+Marie/M
+Marietta/M
+Mariette/M
+Marigold/M
+marigold/MS
+Marijn/M
+Marijo/M
+marijuana/SM
+Marika/M
+Marilee/M
+Marilin/M
+Marillin/M
+Marilyn/M
+marimba/SM
+Mari/MS
+marinade/MGDS
+Marina/M
+marina/MS
+marinara/SM
+marinate/NGXDS
+marination/M
+mariner/M
+Marine/S
+marine/ZRS
+Marin/M
+Marinna/M
+Marino/M
+Mario/M
+marionette/MS
+Marion/M
+Mariquilla/M
+Marisa/M
+Mariska/M
+Marisol/M
+Marissa/M
+Maritain/M
+marital/Y
+Marita/M
+maritime/R
+Maritsa/M
+Maritza/M
+Mariupol/M
+Marius/M
+Mariya/M
+Marja/M
+Marje/M
+Marjie/M
+Marji/M
+Marj/M
+marjoram/SM
+Marjorie/M
+Marjory/M
+Marjy/M
+Markab/M
+markdown/SM
+marked/AU
+markedly
+marker/M
+marketability/SM
+marketable/U
+Marketa/M
+marketeer/S
+marketer/M
+market/GSMRDJBZ
+marketing/M
+marketplace/MS
+mark/GZRDMBSJ
+Markham/M
+marking/M
+Markism/M
+markkaa
+markka/M
+Mark/MS
+Markos
+Markov
+Markovian
+Markovitz/M
+marks/A
+marksman/M
+marksmanship/S
+marksmen
+markup/SM
+Markus/M
+Marla/M
+Marlane/M
+Marlboro/M
+Marlborough/M
+Marleah/M
+Marlee/M
+Marleen/M
+Marlena/M
+Marlene/M
+Marley/M
+Marlie/M
+Marline/M
+marlinespike/SM
+Marlin/M
+marlin/SM
+marl/MDSG
+Marlo/M
+Marlon/M
+Marlowe/M
+Marlow/M
+Marlyn/M
+Marmaduke/M
+marmalade/MS
+Marmara/M
+marmoreal
+marmoset/MS
+marmot/SM
+Marna/M
+Marne/M
+Marney/M
+Marnia/M
+Marnie/M
+Marni/M
+maroon/GRDS
+marquee/MS
+Marquesas/M
+marque/SM
+marquess/MS
+marquetry/SM
+Marquette/M
+Marquez/M
+marquise/M
+marquisette/MS
+Marquis/M
+marquis/SM
+Marquita/M
+Marrakesh/M
+marred/U
+marriageability/SM
+marriageable
+marriage/ASM
+married/US
+Marrilee/M
+marring
+Marriott/M
+Marris/M
+Marrissa/M
+marrowbone/MS
+marrow/GDMS
+marry/SDGA
+mar/S
+Marseillaise/SM
+Marseilles
+Marseille's
+marshal/GMDRSZ
+Marshalled/M
+marshaller
+Marshall/GDM
+Marshalling/M
+marshallings
+Marshal/M
+Marsha/M
+marshiness/M
+marshland/MS
+Marsh/M
+marshmallow/SM
+marsh/MS
+marshy/PRT
+Marsiella/M
+Mar/SMN
+marsupial/MS
+Martainn/M
+Marta/M
+Martelle/M
+Martel/M
+marten/M
+Marten/M
+Martguerita/M
+Martha/M
+Marthe/M
+Marthena/M
+Martial
+martial/Y
+Martian/S
+Martica/M
+Martie/M
+Marti/M
+Martina/M
+martinet/SM
+Martinez/M
+martingale/MS
+martini/MS
+Martinique/M
+Martin/M
+Martino/M
+martin/SM
+Martinson/M
+Martita/M
+mart/MDNGXS
+Mart/MN
+Marty/M
+Martyn/M
+Martynne/M
+martyrdom/SM
+martyr/GDMS
+Marva/M
+marvel/DGS
+Marvell/M
+marvelous/PY
+Marve/M
+Marven/M
+Marvin/M
+Marv/NM
+Marwin/M
+Marxian/S
+Marxism/SM
+Marxist/SM
+Marx/M
+Marya/M
+Maryanna/M
+Maryanne/M
+Maryann/M
+Marybelle/M
+Marybeth/M
+Maryellen/M
+Maryjane/M
+Maryjo/M
+Maryland/MZR
+Marylee/M
+Marylinda/M
+Marylin/M
+Maryl/M
+Marylou/M
+Marylynne/M
+Mary/M
+Maryrose/M
+Marys
+Marysa/M
+marzipan/SM
+Masada/M
+Masai/M
+Masaryk/M
+masc
+Mascagni/M
+mascara/SGMD
+mascot/SM
+masculineness/M
+masculine/PYS
+masculinity/SM
+Masefield/M
+maser/M
+Maseru/M
+MASH
+Masha/M
+Mashhad/M
+mash/JGZMSRD
+m/ASK
+masked/U
+masker/M
+mask/GZSRDMJ
+masks/U
+masochism/MS
+masochistic
+masochistically
+masochist/MS
+masonic
+Masonic
+Masonite/M
+masonry/MS
+mason/SDMG
+Mason/SM
+masquerader/M
+masquerade/RSDGMZ
+masquer/M
+masque/RSMZ
+Massachusetts/M
+massacre/DRSMG
+massager/M
+massage/SRDMG
+Massasoit/M
+Massenet/M
+masseur/MS
+masseuse/SM
+Massey/M
+massif/SM
+Massimiliano/M
+Massimo/M
+massing/R
+massiveness/SM
+massive/YP
+massless
+mas/SRZ
+Mass/S
+mass/VGSD
+mastectomy/MS
+masterclass
+mastered/A
+masterfulness/M
+masterful/YP
+master/JGDYM
+masterliness/M
+masterly/P
+mastermind/GDS
+masterpiece/MS
+mastership/M
+Master/SM
+masterstroke/MS
+masterwork/S
+mastery/MS
+mast/GZSMRD
+masthead/SDMG
+masticate/SDXGN
+mastication/M
+mastic/SM
+mastiff/MS
+mastodon/MS
+mastoid/S
+masturbate/SDNGX
+masturbation/M
+masturbatory
+matador/SM
+Mata/M
+matchable/U
+match/BMRSDZGJ
+matchbook/SM
+matchbox/SM
+matched/UA
+matcher/M
+matches/A
+matchless/Y
+matchlock/MS
+matchmake/GZJR
+matchmaker/M
+matchmaking/M
+matchplay
+match's/A
+matchstick/MS
+matchwood/SM
+mated/U
+mate/IMS
+Matelda/M
+Mateo/M
+materialism/SM
+materialistic
+materialistically
+materialist/SM
+materiality/M
+materialization/SM
+materialize/CDS
+materialized/A
+materializer/SM
+materializes/A
+materializing
+materialness/M
+material/SPYM
+matériel/MS
+mater/M
+maternal/Y
+maternity/MS
+mates/U
+mathematical/Y
+Mathematica/M
+mathematician/SM
+mathematic/S
+mathematics/M
+Mathematik/M
+Mather/M
+Mathe/RM
+Mathew/MS
+Mathewson/M
+Mathian/M
+Mathias
+Mathieu/M
+Mathilda/M
+Mathilde/M
+Mathis
+math/M
+maths
+Matias/M
+Matilda/M
+Matilde/M
+matinée/S
+mating/M
+matins/M
+Matisse/SM
+matriarchal
+matriarch/M
+matriarchs
+matriarchy/MS
+matrices
+matricidal
+matricide/MS
+matriculate/XSDGN
+matriculation/M
+matrimonial/Y
+matrimony/SM
+matrix/M
+matron/YMS
+mat/SJGMDR
+Matsumoto/M
+matte/JGMZSRD
+Mattel/M
+Matteo/M
+matter/GDM
+Matterhorn/M
+Matthaeus/M
+Mattheus/M
+Matthew/MS
+Matthias
+Matthieu/M
+Matthiew/M
+Matthus/M
+Mattias/M
+Mattie/M
+Matti/M
+matting/M
+mattins's
+Matt/M
+mattock/MS
+mattress/MS
+matt's
+Matty/M
+maturate/DSNGVX
+maturational
+maturation/M
+matureness/M
+maturer/M
+mature/RSDTPYG
+maturity/MS
+matzo/SHM
+matzot
+Maude/M
+Maudie/M
+maudlin/Y
+Maud/M
+Maugham/M
+Maui/M
+mauler/M
+maul/RDGZS
+maunder/GDS
+Maupassant/M
+Maura/M
+Maureene/M
+Maureen/M
+Maure/M
+Maurene/M
+Mauriac/M
+Maurice/M
+Mauricio/M
+Maurie/M
+Maurine/M
+Maurise/M
+Maurita/M
+Mauritania/M
+Mauritanian/S
+Mauritian/S
+Mauritius/M
+Maurits/M
+Maurizia/M
+Maurizio/M
+Maurois/M
+Mauro/M
+Maury/M
+Mauser/M
+mausoleum/SM
+mauve/SM
+maven/S
+maverick/SMDG
+mavin's
+Mavis/M
+Mavra/M
+mawkishness/SM
+mawkish/PY
+Mawr/M
+maw/SGMD
+max/GDS
+Maxie/M
+maxillae
+maxilla/M
+maxillary/S
+Maxi/M
+maximality
+maximal/SY
+maxima's
+Maximilian/M
+Maximilianus/M
+Maximilien/M
+maximization/SM
+maximizer/M
+maximize/RSDZG
+Maxim/M
+Maximo/M
+maxim/SM
+maximum/MYS
+Maxine/M
+maxi/S
+Max/M
+Maxtor/M
+Maxwellian
+maxwell/M
+Maxwell/M
+Maxy/M
+Maya/MS
+Mayan/S
+Maybelle/M
+maybe/S
+mayday/S
+may/EGS
+Maye/M
+mayer
+Mayer/M
+mayest
+Mayfair/M
+Mayflower/M
+mayflower/SM
+mayfly/MS
+mayhap
+mayhem/MS
+Maynard/M
+Mayne/M
+Maynord/M
+mayn't
+Mayo/M
+mayonnaise/MS
+mayoral
+mayoralty/MS
+mayoress/MS
+Mayor/M
+mayor/MS
+mayorship/M
+mayo/S
+maypole/MS
+Maypole/SM
+Mayra/M
+May/SMR
+mayst
+Mazama/M
+Mazarin/M
+Mazatlan/M
+Mazda/M
+mazedness/SM
+mazed/YP
+maze/MGDSR
+mazurka/SM
+Mazzini/M
+Mb
+MB
+MBA
+Mbabane/M
+Mbini/M
+MC
+McAdam/MS
+McAllister/M
+McBride/M
+McCabe/M
+McCain/M
+McCall/M
+McCarthyism/M
+McCarthy/M
+McCartney/M
+McCarty/M
+McCauley/M
+McClain/M
+McClellan/M
+McClure/M
+McCluskey/M
+McConnell/M
+McCormick/M
+McCoy/SM
+McCracken/M
+McCray/M
+McCullough/M
+McDaniel/M
+McDermott/M
+McDonald/M
+McDonnell/M
+McDougall/M
+McDowell/M
+McElhaney/M
+McEnroe/M
+McFadden/M
+McFarland/M
+McGee/M
+McGill/M
+McGovern/M
+McGowan/M
+McGrath/M
+McGraw/M
+McGregor/M
+McGuffey/M
+McGuire/M
+MCI/M
+McIntosh/M
+McIntyre/M
+McKay/M
+McKee/M
+McKenzie/M
+McKesson/M
+McKinley/M
+McKinney/M
+McKnight/M
+McLanahan/M
+McLaughlin/M
+McLean/M
+McLeod/M
+McLuhan/M
+McMahon/M
+McMartin/M
+McMillan/M
+McNamara/M
+McNaughton/M
+McNeil/M
+McPherson/M
+MD
+Md/M
+mdse
+MDT
+ME
+Meade/M
+Mead/M
+meadowland
+meadowlark/SM
+meadow/MS
+Meadows
+meadowsweet/M
+mead/SM
+Meagan/M
+meagerness/SM
+meager/PY
+Meaghan/M
+meagres
+mealiness/MS
+meal/MDGS
+mealtime/MS
+mealybug/S
+mealymouthed
+mealy/PRST
+meander/JDSG
+meaneing
+meanie/MS
+meaningfulness/SM
+meaningful/YP
+meaninglessness/SM
+meaningless/PY
+meaning/M
+meanness/S
+means/M
+meantime/SM
+meant/U
+meanwhile/S
+Meany/M
+mean/YRGJTPS
+meany's
+Meara/M
+measle/SD
+measles/M
+measly/TR
+measurable/U
+measurably
+measure/BLMGRSD
+measured/Y
+measureless
+measurement/SM
+measurer/M
+measures/A
+measuring/A
+meas/Y
+meataxe
+meatball/MS
+meatiness/MS
+meatless
+meatloaf
+meatloaves
+meat/MS
+meatpacking/S
+meaty/RPT
+Mecca/MS
+mecca/S
+mechanical/YS
+mechanic/MS
+mechanism/SM
+mechanistic
+mechanistically
+mechanist/M
+mechanization/SM
+mechanized/U
+mechanizer/M
+mechanize/RSDZGB
+mechanizes/U
+mechanochemically
+Mechelle/M
+med
+medalist/MS
+medallion/MS
+medal/SGMD
+Medan/M
+meddle/GRSDZ
+meddlesome
+Medea/M
+Medellin
+Medfield/M
+mediaeval's
+medial/AY
+medials
+median/YMS
+media/SM
+mediateness/M
+mediate/PSDYVNGX
+mediation/ASM
+mediator/SM
+Medicaid/SM
+medical/YS
+medicament/MS
+Medicare/MS
+medicate/DSXNGV
+medication/M
+Medici/MS
+medicinal/SY
+medicine/DSMG
+medico/SM
+medic/SM
+medievalist/MS
+medieval/YMS
+Medina/M
+mediocre
+mediocrity/MS
+meditate/NGVXDS
+meditation/M
+meditativeness/M
+meditative/PY
+Mediterranean/MS
+mediumistic
+medium/SM
+medley/SM
+medulla/SM
+Medusa/M
+meed/MS
+meekness/MS
+meek/TPYR
+meerschaum/MS
+meeter/M
+meetinghouse/S
+meeting/M
+meet/JGSYR
+me/G
+mega
+megabit/MS
+megabuck/S
+megabyte/S
+megacycle/MS
+megadeath/M
+megadeaths
+megahertz/M
+megalithic
+megalith/M
+megaliths
+megalomaniac/SM
+megalomania/SM
+megalopolis/SM
+Megan/M
+megaphone/SDGM
+megaton/MS
+megavolt/M
+megawatt/SM
+megaword/S
+Megen/M
+Meggie/M
+Meggi/M
+Meggy/M
+Meghan/M
+Meghann/M
+Meg/MN
+megohm/MS
+Mehetabel/M
+Meier/M
+Meighen/M
+Meiji/M
+Mei/MR
+meioses
+meiosis/M
+meiotic
+Meir/M
+Meister/M
+Meistersinger/M
+Mejia/M
+Mekong/M
+Mela/M
+Melamie/M
+melamine/SM
+melancholia/SM
+melancholic/S
+melancholy/MS
+Melanesia/M
+Melanesian/S
+melange/S
+Melania/M
+Melanie/M
+melanin/MS
+melanoma/SM
+Melantha/M
+Melany/M
+Melba/M
+Melbourne/M
+Melcher/M
+Melchior/M
+meld/SGD
+mêlée/MS
+Melendez/M
+Melesa/M
+Melessa/M
+Melicent/M
+Melina/M
+Melinda/M
+Melinde/M
+meliorate/XSDVNG
+melioration/M
+Melisa/M
+Melisande/M
+Melisandra/M
+Melisenda/M
+Melisent/M
+Melissa/M
+Melisse/M
+Melita/M
+Melitta/M
+Mella/M
+Mellicent/M
+Mellie/M
+mellifluousness/SM
+mellifluous/YP
+Melli/M
+Mellisa/M
+Mellisent/M
+Melloney/M
+Mellon/M
+mellowness/MS
+mellow/TGRDYPS
+Melly/M
+Mel/MY
+Melodee/M
+melodically
+melodic/S
+Melodie/M
+melodiousness/S
+melodious/YP
+melodrama/SM
+melodramatically
+melodramatic/S
+Melody/M
+melody/MS
+Melonie/M
+melon/MS
+Melony/M
+Melosa/M
+Melpomene/M
+meltdown/S
+melter/M
+melting/Y
+Melton/M
+melt/SAGD
+Melva/M
+Melville/M
+Melvin/M
+Melvyn/M
+Me/M
+member/DMS
+membered/AE
+members/EA
+membership/SM
+membrane/MSD
+membranous
+memento/SM
+Memling/M
+memoir/MS
+memorabilia
+memorability/SM
+memorableness/M
+memorable/P
+memorably
+memorandum/SM
+memorialize/DSG
+memorialized/U
+memorial/SY
+memoriam
+memorization/MS
+memorized/U
+memorizer/M
+memorize/RSDZG
+memorizes/A
+memoryless
+memory/MS
+memo/SM
+Memphis/M
+menace/GSD
+menacing/Y
+menagerie/SM
+menage/S
+Menander/M
+menarche/MS
+Menard/M
+Mencius/M
+Mencken/M
+mendaciousness/M
+mendacious/PY
+mendacity/MS
+Mendeleev/M
+mendelevium/SM
+Mendelian
+Mendel/M
+Mendelssohn/M
+mender/M
+Mendez/M
+mendicancy/MS
+mendicant/S
+Mendie/M
+mending/M
+Mendocino/M
+Mendoza/M
+mend/RDSJGZ
+Mendy/M
+Menelaus/M
+Menes/M
+menfolk/S
+menhaden/M
+menial/YS
+meningeal
+meninges
+meningitides
+meningitis/M
+meninx
+menisci
+meniscus/M
+Menkalinan/M
+Menkar/M
+Menkent/M
+Menlo/M
+men/MS
+Mennonite/SM
+Menominee
+menopausal
+menopause/SM
+menorah/M
+menorahs
+Menotti/M
+Mensa/M
+Mensch/M
+mensch/S
+menservants/M
+mens/SDG
+menstrual
+menstruate/NGDSX
+menstruation/M
+mensurable/P
+mensuration/MS
+menswear/M
+mentalist/MS
+mentality/MS
+mental/Y
+mentholated
+menthol/SM
+mentionable/U
+mentioned/U
+mentioner/M
+mention/ZGBRDS
+mentor/DMSG
+Menuhin/M
+menu/SM
+Menzies/M
+meow/DSG
+Mephistopheles/M
+Merak/M
+Mercado/M
+mercantile
+Mercator/M
+Mercedes
+mercenariness/M
+mercenary/SMP
+mercerize/SDG
+Mercer/M
+mercer/SM
+merchandiser/M
+merchandise/SRDJMZG
+merchantability
+merchantman/M
+merchantmen
+merchant/SBDMG
+Mercie/M
+mercifully/U
+mercifulness/M
+merciful/YP
+mercilessness/SM
+merciless/YP
+Merci/M
+Merck/M
+mercurial/SPY
+mercuric
+Mercurochrome/M
+mercury/MS
+Mercury/MS
+Mercy/M
+mercy/SM
+Meredeth/M
+Meredithe/M
+Meredith/M
+Merell/M
+meretriciousness/SM
+meretricious/YP
+mere/YS
+merganser/MS
+merger/M
+merge/SRDGZ
+Meridel/M
+meridian/MS
+meridional
+Meridith/M
+Meriel/M
+Merilee/M
+Merill/M
+Merilyn/M
+meringue/MS
+merino/MS
+Meris
+Merissa/M
+merited/U
+meritocracy/MS
+meritocratic
+meritocrats
+meritoriousness/MS
+meritorious/PY
+merit/SCGMD
+Meriwether/M
+Merla/M
+Merle/M
+Merlina/M
+Merline/M
+merlin/M
+Merlin/M
+Merl/M
+mermaid/MS
+merman/M
+mermen
+Merna/M
+Merola/M
+meromorphic
+Merralee/M
+Merrel/M
+Merriam/M
+Merrick/M
+Merridie/M
+Merrielle/M
+Merrie/M
+Merrilee/M
+Merrile/M
+Merrili/M
+Merrill/M
+merrily
+Merrily/M
+Merrimack/M
+Merrimac/M
+merriment/MS
+merriness/S
+Merritt/M
+Merry/M
+merrymaker/MS
+merrymaking/SM
+merry/RPT
+Mersey/M
+mer/TGDR
+Merton/M
+Mervin/M
+Merv/M
+Merwin/M
+Merwyn/M
+Meryl/M
+Mesa
+Mesabi/M
+mesa/SM
+mescaline/SM
+mescal/SM
+mesdames/M
+mesdemoiselles/M
+Meshed's
+meshed/U
+mesh/GMSD
+mesmeric
+mesmerism/SM
+mesmerized/U
+mesmerizer/M
+mesmerize/SRDZG
+Mesolithic/M
+mesomorph/M
+mesomorphs
+meson/MS
+Mesopotamia/M
+Mesopotamian/S
+mesosphere/MS
+mesozoic
+Mesozoic
+mesquite/MS
+mes/S
+message/SDMG
+messeigneurs
+messenger/GSMD
+Messerschmidt/M
+mess/GSDM
+Messiaen/M
+messiah
+Messiah/M
+messiahs
+Messiahs
+messianic
+Messianic
+messieurs/M
+messily
+messiness/MS
+messmate/MS
+Messrs/M
+messy/PRT
+mestizo/MS
+meta
+metabolic
+metabolically
+metabolism/MS
+metabolite/SM
+metabolize/GSD
+metacarpal/S
+metacarpi
+metacarpus/M
+metacircular
+metacircularity
+metalanguage/MS
+metalization/SM
+metalized
+metallic/S
+metalliferous
+metallings
+metallography/M
+metalloid/M
+metallurgic
+metallurgical/Y
+metallurgist/S
+metallurgy/MS
+metal/SGMD
+metalsmith/MS
+metalworking/M
+metalwork/RMJGSZ
+Meta/M
+metamathematical
+metamorphic
+metamorphism/SM
+metamorphose/GDS
+metamorphosis/M
+metaphoric
+metaphorical/Y
+metaphor/MS
+metaphosphate/M
+metaphysical/Y
+metaphysic/SM
+metastability/M
+metastable
+metastases
+metastasis/M
+metastasize/DSG
+metastatic
+metatarsal/S
+metatarsi
+metatarsus/M
+metatheses
+metathesis/M
+metathesized
+metathesizes
+metathesizing
+metavariable
+metempsychoses
+metempsychosis/M
+meteoric
+meteorically
+meteorite/SM
+meteoritic/S
+meteoritics/M
+meteoroid/SM
+meteorologic
+meteorological
+meteorologist/S
+meteorology/MS
+meteor/SM
+meter/GDM
+mete/ZDGSR
+methadone/SM
+methane/MS
+methanol/SM
+methinks
+methionine/M
+methodicalness/SM
+methodical/YP
+methodism
+Methodism/SM
+methodist/MS
+Methodist/MS
+method/MS
+methodological/Y
+methodologists
+methodology/MS
+methought
+Methuen/M
+Methuselah/M
+Methuselahs
+methylated
+methylene/M
+methyl/SM
+meticulousness/MS
+meticulous/YP
+métier/S
+metonymy/M
+Metrecal/M
+metrical/Y
+metricate/SDNGX
+metricize/GSD
+metrics/M
+metric/SM
+metronome/MS
+metropolis/SM
+metropolitanization
+metropolitan/S
+metro/SM
+mets
+Metternich/M
+mettle/SDM
+mettlesome
+met/U
+Metzler/M
+Meuse/M
+mewl/GSD
+mew/SGD
+mews/SM
+Mex
+Mexicali/M
+Mexican/S
+Mexico/M
+Meyerbeer/M
+Meyer/SM
+mezzanine/MS
+mezzo/S
+MFA
+mfg
+mfr/S
+mg
+M/GB
+Mg/M
+MGM/M
+mgr
+Mgr
+MHz
+MI
+MIA
+Mia/M
+Miami/SM
+Miaplacidus/M
+miasmal
+miasma/SM
+Micaela/M
+Micah/M
+mica/MS
+micelles
+mice/M
+Michaela/M
+Michaelangelo/M
+Michaelina/M
+Michaeline/M
+Michaella/M
+Michaelmas/MS
+Michael/SM
+Michaelson/M
+Michail/M
+Michale/M
+Michal/M
+Micheal/M
+Micheil/M
+Michelangelo/M
+Michele/M
+Michelina/M
+Micheline/M
+Michelin/M
+Michelle/M
+Michell/M
+Michel/M
+Michelson/M
+Michigander/S
+Michiganite/S
+Michigan/M
+Mich/M
+Mickelson/M
+Mickey/M
+mickey/SM
+Mickie/M
+Micki/M
+Mick/M
+Micky/M
+Mic/M
+Micmac/M
+micra's
+microamp
+microanalysis/M
+microanalytic
+microbe/MS
+microbial
+microbicidal
+microbicide/M
+microbiological
+microbiologist/MS
+microbiology/SM
+microbrewery/S
+microchemistry/M
+microchip/S
+microcircuit/MS
+microcode/GSD
+microcomputer/MS
+microcosmic
+microcosm/MS
+microdensitometer
+microdot/MS
+microeconomic/S
+microeconomics/M
+microelectronic/S
+microelectronics/M
+microfiber/S
+microfiche/M
+microfilm/DRMSG
+microfossils
+micrography/M
+microgroove/MS
+microhydrodynamics
+microinstruction/SM
+microjoule
+microlevel
+microlight/S
+micromanage/GDSL
+micromanagement/S
+micrometeorite/MS
+micrometeoritic
+micrometer/SM
+Micronesia/M
+Micronesian/S
+micron/MS
+microorganism/SM
+microphone/SGM
+Microport/M
+microprocessing
+microprocessor/SM
+microprogrammed
+microprogramming
+microprogram/SM
+micro/S
+microscope/SM
+microscopic
+microscopical/Y
+microscopy/MS
+microsecond/MS
+microsimulation/S
+Microsystems
+micros/M
+Microsoft/M
+microsomal
+microstore
+microsurgery/SM
+MicroVAXes
+MicroVAX/M
+microvolt/SM
+microwaveable
+microwave/BMGSD
+microword/S
+midair/MS
+midas
+Midas/M
+midband/M
+midday/MS
+midden/SM
+middest
+middlebrow/SM
+Middlebury/M
+middle/GJRSD
+middleman/M
+middlemen
+middlemost
+Middlesex/M
+Middleton/M
+Middletown/M
+middleweight/SM
+middling/Y
+middy/SM
+Mideastern
+Mideast/M
+midfield/RM
+Midge/M
+midge/SM
+midget/MS
+midi/S
+midland/MRS
+Midland/MS
+midlife
+midlives
+midmorn/G
+midmost/S
+midnight/SYM
+midpoint/MS
+midrange
+midrib/MS
+midriff/MS
+mid/S
+midscale
+midsection/M
+midshipman/M
+midshipmen
+midship/S
+midspan
+midstream/MS
+midst/SM
+midsummer/MS
+midterm/MS
+midtown/MS
+Midway/M
+midway/S
+midweek/SYM
+Midwesterner/M
+Midwestern/ZR
+Midwest/M
+midwicket
+midwifery/SM
+midwife/SDMG
+midwinter/YMS
+midwives
+midyear/MS
+mien/M
+miff/GDS
+mightily
+mightiness/MS
+mightn't
+might/S
+mighty/TPR
+mignon
+mignonette/SM
+Mignon/M
+Mignonne/M
+migraine/SM
+migrant/MS
+migrate/ASDG
+migration/MS
+migrative
+migratory/S
+MIG/S
+Miguela/M
+Miguelita/M
+Miguel/M
+mikado/MS
+Mikaela/M
+Mikael/M
+mike/DSMG
+Mikel/M
+Mike/M
+Mikey/M
+Mikhail/M
+Mikkel/M
+Mikol/M
+Mikoyan/M
+milady/MS
+Milagros/M
+Milanese
+Milan/M
+milch/M
+mildew/DMGS
+mildness/MS
+Mildred/M
+Mildrid/M
+mild/STYRNP
+mileage/SM
+Milena/M
+milepost/SM
+miler/M
+mile/SM
+Mile/SM
+milestone/MS
+Milford/M
+Milicent/M
+milieu/SM
+Milissent/M
+militancy/MS
+militantness/M
+militant/YPS
+militarily
+militarism/SM
+militaristic
+militarist/MS
+militarization/SCM
+militarize/SDCG
+military
+militate/SDG
+militiaman/M
+militiamen
+militia/SM
+Milka/M
+Milken/M
+milker/M
+milk/GZSRDM
+milkiness/MS
+milkmaid/SM
+milkman/M
+milkmen
+milkshake/S
+milksop/SM
+milkweed/MS
+milky/RPT
+millage/S
+Millard/M
+Millay/M
+millenarian
+millenarianism/M
+millennial
+millennialism
+millennium/MS
+millepede's
+miller/M
+Miller/M
+Millet/M
+millet/MS
+milliamp
+milliampere/S
+milliard/MS
+millibar/MS
+Millicent/M
+millidegree/S
+Millie/M
+milligram/MS
+millijoule/S
+Millikan/M
+milliliter/MS
+Milli/M
+millimeter/SM
+milliner/SM
+millinery/MS
+milling/M
+millionaire/MS
+million/HDMS
+millionth/M
+millionths
+millipede/SM
+millisecond/MS
+Millisent/M
+millivoltmeter/SM
+millivolt/SM
+milliwatt/S
+millpond/MS
+millrace/SM
+mill/SGZMRD
+Mill/SMR
+millstone/SM
+millstream/SM
+millwright/MS
+Milly/M
+mil/MRSZ
+Mil/MY
+Milne/M
+Milo/M
+Milquetoast/S
+milquetoast/SM
+Miltiades/M
+Miltie/M
+Milt/M
+milt/MDSG
+Miltonic
+Milton/M
+Miltown/M
+Milty/M
+Milwaukee/M
+Milzie/M
+MIMD
+mime/DSRMG
+mimeograph/GMDS
+mimeographs
+mimer/M
+mimesis/M
+mimetic
+mimetically
+mimicked
+mimicker/SM
+mimicking
+mimicry/MS
+mimic/S
+Mimi/M
+mi/MNX
+Mimosa/M
+mimosa/SM
+Mina/M
+minaret/MS
+minatory
+mincemeat/MS
+mincer/M
+mince/SRDGZJ
+mincing/Y
+Minda/M
+Mindanao/M
+mind/ARDSZG
+mindbogglingly
+minded/P
+minder/M
+mindfully
+mindfulness/MS
+mindful/U
+mindlessness/SM
+mindless/YP
+Mindoro/M
+min/DRZGJ
+mind's
+mindset/S
+Mindy/M
+minefield/MS
+mineralization/C
+mineralized/U
+mineralogical
+mineralogist/SM
+mineralogy/MS
+mineral/SM
+miner/M
+Miner/M
+Minerva/M
+mineshaft
+mine/SNX
+minestrone/MS
+minesweeper/MS
+Minetta/M
+Minette/M
+mineworkers
+mingle/SDG
+Ming/M
+Mingus/M
+miniature/GMSD
+miniaturist/SM
+miniaturization/MS
+miniaturize/SDG
+minibike/S
+minibus/SM
+minicab/M
+minicam/MS
+minicomputer/SM
+minidress/SM
+minify/GSD
+minimalism/S
+minimalistic
+minimalist/MS
+minimality
+minimal/SY
+minima's
+minimax/M
+minimization/MS
+minimized/U
+minimizer/M
+minimize/RSDZG
+minim/SM
+minimum/MS
+mining/M
+minion/M
+mini/S
+miniseries
+miniskirt/MS
+ministerial/Y
+minister/MDGS
+ministrant/S
+ministration/SM
+ministry/MS
+minivan/S
+miniver/M
+minke
+mink/SM
+Min/MR
+Minna/M
+Minnaminnie/M
+Minneapolis/M
+Minne/M
+minnesinger/MS
+Minnesota/M
+Minnesotan/S
+Minnie/M
+Minni/M
+Minn/M
+Minnnie/M
+minnow/SM
+Minny/M
+Minoan/S
+Minolta/M
+minor/DMSG
+minority/MS
+Minor/M
+Minos
+Minotaur/M
+minotaur/S
+Minot/M
+minoxidil/S
+Minsk/M
+Minsky/M
+minster/SM
+minstrel/SM
+minstrelsy/MS
+mintage/SM
+Mintaka/M
+Minta/M
+minter/M
+mint/GZSMRD
+minty/RT
+minuend/SM
+minuet/SM
+Minuit/M
+minuscule/SM
+minus/S
+minuteman
+Minuteman/M
+minutemen
+minuteness/SM
+minute/RSDPMTYG
+minutiae
+minutia/M
+minx/MS
+Miocene
+MIPS
+Miquela/M
+Mirabeau/M
+Mirabella/M
+Mirabelle/M
+Mirabel/M
+Mirach/M
+miracle/MS
+miraculousness/M
+miraculous/PY
+mirage/GSDM
+Mira/M
+Miranda/M
+Miran/M
+Mireielle/M
+Mireille/M
+Mirella/M
+Mirelle/M
+mire/MGDS
+Mirfak/M
+Miriam/M
+Mirilla/M
+Mir/M
+Mirna/M
+Miro
+mirror/DMGS
+mirthfulness/SM
+mirthful/PY
+mirthlessness/M
+mirthless/YP
+mirth/M
+mirths
+MIRV/DSG
+miry/RT
+Mirzam/M
+misaddress/SDG
+misadventure/SM
+misalign/DSGL
+misalignment/MS
+misalliance/MS
+misanalysed
+misanthrope/MS
+misanthropic
+misanthropically
+misanthropist/S
+misanthropy/SM
+misapplier/M
+misapply/GNXRSD
+misapprehend/GDS
+misapprehension/MS
+misappropriate/GNXSD
+misbegotten
+misbehaver/M
+misbehave/RSDG
+misbehavior/SM
+misbrand/DSG
+misc
+miscalculate/XGNSD
+miscalculation/M
+miscall/SDG
+miscarriage/MS
+miscarry/SDG
+miscast/GS
+miscegenation/SM
+miscellanea
+miscellaneous/PY
+miscellany/MS
+Mischa/M
+mischance/MGSD
+mischief/MDGS
+mischievousness/MS
+mischievous/PY
+miscibility/S
+miscible/C
+misclassification/M
+misclassified
+misclassifying
+miscode/SDG
+miscommunicate/NDS
+miscomprehended
+misconceive/GDS
+misconception/MS
+misconduct/GSMD
+misconfiguration
+misconstruction/MS
+misconstrue/DSG
+miscopying
+miscount/DGS
+miscreant/MS
+miscue/MGSD
+misdeal/SG
+misdealt
+misdeed/MS
+misdemeanant/SM
+misdemeanor/SM
+misdiagnose/GSD
+misdid
+misdirect/GSD
+misdirection/MS
+misdirector/S
+misdoes
+misdo/JG
+misdone
+miserableness/SM
+miserable/SP
+miserably
+miser/KM
+miserliness/MS
+miserly/P
+misery/MS
+mises/KC
+misfeasance/MS
+misfeature/M
+misfield
+misfile/SDG
+misfire/SDG
+misfit/MS
+misfitted
+misfitting
+misfortune/SM
+misgauge/GDS
+misgiving/MYS
+misgovern/LDGS
+misgovernment/S
+misguidance/SM
+misguidedness/M
+misguided/PY
+misguide/DRSG
+misguider/M
+Misha/M
+mishandle/SDG
+mishap/MS
+mishapped
+mishapping
+misheard
+mishear/GS
+mishitting
+mishmash/SM
+misidentification/M
+misidentify/GNSD
+misinformation/SM
+misinform/GDS
+misinterpretation/MS
+misinterpreter/M
+misinterpret/RDSZG
+misjudge/DSG
+misjudging/Y
+misjudgment/MS
+Miskito
+mislabel/DSG
+mislaid
+mislay/GS
+misleader/M
+mislead/GRJS
+misleading/Y
+misled
+mismanage/LGSD
+mismanagement/MS
+mismatch/GSD
+misname/GSD
+misnomer/GSMD
+misogamist/MS
+misogamy/MS
+misogynistic
+misogynist/MS
+misogynous
+misogyny/MS
+misperceive/SD
+misplace/GLDS
+misplacement/MS
+misplay/GSD
+mispositioned
+misprint/SGDM
+misprision/SM
+mispronounce/DSG
+mispronunciation/MS
+misquotation/MS
+misquote/GDS
+misreader/M
+misread/RSGJ
+misrelated
+misremember/DG
+misreport/DGS
+misrepresentation/MS
+misrepresenter/M
+misrepresent/SDRG
+misroute/DS
+misrule/SDG
+missal/ESM
+misshape/DSG
+misshapenness/SM
+misshapen/PY
+Missie/M
+missile/MS
+missilery/SM
+mission/AMS
+missionary/MS
+missioned
+missioner/SM
+missioning
+missis's
+Mississauga/M
+Mississippian/S
+Mississippi/M
+missive/MS
+Missoula/M
+Missourian/S
+Missouri/M
+misspeak/SG
+misspecification
+misspecified
+misspelling/M
+misspell/SGJD
+misspend/GS
+misspent
+misspoke
+misspoken
+mis/SRZ
+miss/SDEGV
+Miss/SM
+misstate/GLDRS
+misstatement/MS
+misstater/M
+misstep/MS
+misstepped
+misstepping
+missus/SM
+Missy/M
+mistakable/U
+mistake/BMGSR
+mistaken/Y
+mistaker/M
+mistaking/Y
+Mistassini/M
+mister/GDM
+Mister/SM
+mistily
+Misti/M
+mistime/GSD
+mistiness/S
+mistletoe/MS
+mist/MRDGZS
+mistook
+mistral/MS
+mistranslated
+mistranslates
+mistranslating
+mistranslation/SM
+mistreat/DGSL
+mistreatment/SM
+Mistress/MS
+mistress/MSY
+mistrial/SM
+mistruster/M
+mistrustful/Y
+mistrust/SRDG
+Misty/M
+mistype/SDGJ
+misty/PRT
+misunderstander/M
+misunderstanding/M
+misunderstand/JSRZG
+misunderstood
+misuser/M
+misuse/RSDMG
+miswritten
+Mitchael/M
+Mitchell/M
+Mitchel/M
+Mitch/M
+miterer/M
+miter/GRDM
+mite/SRMZ
+Mitford/M
+Mithra/M
+Mithridates/M
+mitigated/U
+mitigate/XNGVDS
+mitigation/M
+MIT/M
+mitoses
+mitosis/M
+mitotic
+MITRE/SM
+Mitsubishi/M
+mitten/M
+Mitterrand/M
+mitt/XSMN
+Mitty/M
+Mitzi/M
+mitzvahs
+mixable
+mix/AGSD
+mixed/U
+mixer/SM
+mixture/SM
+Mizar/M
+mizzenmast/SM
+mizzen/MS
+Mk
+mks
+ml
+Mlle/M
+mm
+MM
+MMe
+Mme/SM
+MN
+mnemonically
+mnemonics/M
+mnemonic/SM
+Mnemosyne/M
+Mn/M
+MO
+moan/GSZRDM
+moat/SMDG
+mobbed
+mobber
+mobbing
+mobcap/SM
+Mobile/M
+mobile/S
+mobility/MS
+mobilizable
+mobilization/AMCS
+mobilize/CGDS
+mobilized/U
+mobilizer/MS
+mobilizes/A
+Mobil/M
+mob/MS
+mobster/MS
+Mobutu/M
+moccasin/SM
+mocha/SM
+mockers/M
+mockery/MS
+mock/GZSRD
+mockingbird/MS
+mocking/Y
+mo/CSK
+modality/MS
+modal/Y
+modeled/A
+modeler/M
+modeling/M
+models/A
+model/ZGSJMRD
+mode/MS
+modem/SM
+moderated/U
+moderateness/SM
+moderate/PNGDSXY
+moderation/M
+moderator/MS
+modernism/MS
+modernistic
+modernist/S
+modernity/SM
+modernization/MS
+modernized/U
+modernizer/M
+modernize/SRDGZ
+modernizes/U
+modernness/SM
+modern/PTRYS
+Modesta/M
+Modestia/M
+Modestine/M
+Modesto/M
+modest/TRY
+Modesty/M
+modesty/MS
+modicum/SM
+modifiability/M
+modifiableness/M
+modifiable/U
+modification/M
+modified/U
+modifier/M
+modify/NGZXRSD
+Modigliani/M
+modishness/MS
+modish/YP
+mod/TSR
+Modula/M
+modularity/SM
+modularization
+modularize/SDG
+modular/SY
+modulate/ADSNCG
+modulation/CMS
+modulator/ACSM
+module/SM
+moduli
+modulo
+modulus/M
+modus
+Moe/M
+Moen/M
+Mogadiscio's
+Mogadishu
+mogul/MS
+Mogul/MS
+mohair/SM
+Mohamed/M
+Mohammad/M
+Mohammedanism/MS
+Mohammedan/SM
+Mohammed's
+Mohandas/M
+Mohandis/M
+Mohawk/MS
+Mohegan/S
+Mohican's
+Moho/M
+Mohorovicic/M
+Mohr/M
+moiety/MS
+moil/SGD
+Moina/M
+Moines/M
+Moira/M
+moire/MS
+Moise/MS
+Moiseyev/M
+Moishe/M
+moistener/M
+moisten/ZGRD
+moistness/MS
+moist/TXPRNY
+moisture/MS
+moisturize/GZDRS
+Mojave/M
+molal
+molarity/SM
+molar/MS
+molasses/MS
+Moldavia/M
+Moldavian/S
+moldboard/SM
+molder/DG
+moldiness/SM
+molding/M
+mold/MRDJSGZ
+Moldova
+moldy/PTR
+molecularity/SM
+molecular/Y
+molecule/MS
+molehill/SM
+mole/MTS
+moleskin/MS
+molestation/SM
+molested/U
+molester/M
+molest/RDZGS
+Moliere
+Molina/M
+Moline/M
+Mollee/M
+Mollie/M
+mollification/M
+mollify/XSDGN
+Molli/M
+Moll/M
+moll/MS
+mollusc's
+mollusk/S
+mollycoddler/M
+mollycoddle/SRDG
+Molly/M
+molly/SM
+Molnar/M
+Moloch/M
+Molokai/M
+Molotov/M
+molter/M
+molt/RDNGZS
+Moluccas
+molybdenite/M
+molybdenum/MS
+Mombasa/M
+momenta
+momentarily
+momentariness/SM
+momentary/P
+moment/MYS
+momentousness/MS
+momentous/YP
+momentum/SM
+momma/S
+Mommy/M
+mommy/SM
+Mo/MN
+mom/SM
+Monaco/M
+monadic
+monad/SM
+Monah/M
+Mona/M
+monarchic
+monarchical
+monarchism/MS
+monarchistic
+monarchist/MS
+monarch/M
+monarchs
+monarchy/MS
+Monash/M
+monastery/MS
+monastical/Y
+monasticism/MS
+monastic/S
+monaural/Y
+Mondale/M
+Monday/MS
+Mondrian/M
+Monegasque/SM
+Monera/M
+monetarily
+monetarism/S
+monetarist/MS
+monetary
+monetization/CMA
+monetize/CGADS
+Monet/M
+moneybag/SM
+moneychangers
+moneyer/M
+moneylender/SM
+moneymaker/MS
+moneymaking/MS
+money/SMRD
+Monfort/M
+monger/SGDM
+Mongolia/M
+Mongolian/S
+Mongolic/M
+mongolism/SM
+mongoloid/S
+Mongoloid/S
+Mongol/SM
+mongoose/SM
+mongrel/SM
+Monica/M
+monies/M
+Monika/M
+moniker/MS
+Monique/M
+monism/MS
+monist/SM
+monition/SM
+monitored/U
+monitor/GSMD
+monitory/S
+monkeyshine/S
+monkey/SMDG
+monkish
+Monk/M
+monk/MS
+monkshood/SM
+Monmouth/M
+monochromatic
+monochromator
+monochrome/MS
+monocle/SDM
+monoclinic
+monoclonal/S
+monocotyledonous
+monocotyledon/SM
+monocular/SY
+monodic
+monodist/S
+monody/MS
+monogamist/MS
+monogamous/PY
+monogamy/MS
+monogrammed
+monogramming
+monogram/MS
+monograph/GMDS
+monographs
+monolingualism
+monolingual/S
+monolithic
+monolithically
+monolith/M
+monoliths
+monologist/S
+monologue/GMSD
+monomaniacal
+monomaniac/MS
+monomania/MS
+monomeric
+monomer/SM
+monomial/SM
+mono/MS
+Monongahela/M
+mononuclear
+mononucleoses
+mononucleosis/M
+monophonic
+monoplane/MS
+monopole/S
+monopolistic
+monopolist/MS
+monopolization/MS
+monopolized/U
+monopolize/GZDSR
+monopolizes/U
+monopoly/MS
+monorail/SM
+monostable
+monosyllabic
+monosyllable/MS
+monotheism/SM
+monotheistic
+monotheist/S
+monotone/SDMG
+monotonic
+monotonically
+monotonicity
+monotonousness/MS
+monotonous/YP
+monotony/MS
+monovalent
+monoxide/SM
+Monroe/M
+Monro/M
+Monrovia/M
+Monsanto/M
+monseigneur
+monsieur/M
+Monsignori
+Monsignor/MS
+monsignor/S
+Mon/SM
+monsoonal
+monsoon/MS
+monster/SM
+monstrance/ASM
+monstrosity/SM
+monstrousness/M
+monstrous/YP
+montage/SDMG
+Montague/M
+Montaigne/M
+Montana/M
+Montanan/MS
+Montcalm/M
+Montclair/M
+Monte/M
+Montenegrin
+Montenegro/M
+Monterey/M
+Monterrey/M
+Montesquieu/M
+Montessori/M
+Monteverdi/M
+Montevideo/M
+Montezuma
+Montgomery/M
+monthly/S
+month/MY
+months
+Monticello/M
+Monti/M
+Mont/M
+Montmartre/M
+Montoya/M
+Montpelier/M
+Montrachet/M
+Montreal/M
+Montserrat/M
+Monty/M
+monumentality/M
+monumental/Y
+monument/DMSG
+mooch/ZSRDG
+moodily
+moodiness/MS
+mood/MS
+Moody/M
+moody/PTR
+Moog
+moo/GSD
+moonbeam/SM
+Mooney/M
+moon/GDMS
+moonless
+moonlight/GZDRMS
+moonlighting/M
+moonlit
+Moon/M
+moonscape/MS
+moonshiner/M
+moonshine/SRZM
+moonshot/MS
+moonstone/SM
+moonstruck
+moonwalk/SDG
+Moore/M
+moor/GDMJS
+mooring/M
+Moorish
+moorland/MS
+Moor/MS
+moose/M
+moot/RDGS
+moped/MS
+moper/M
+mope/S
+mopey
+mopier
+mopiest
+mopish
+mopped
+moppet/MS
+mopping
+mop/SZGMDR
+moraine/MS
+morale/MS
+Morales/M
+moralistic
+moralistically
+moralist/MS
+morality/UMS
+moralization/CS
+moralize/CGDRSZ
+moralled
+moraller
+moralling
+moral/SMY
+Mora/M
+Moran/M
+morass/SM
+moratorium/SM
+Moravia/M
+Moravian
+moray/SM
+morbidity/SM
+morbidness/S
+morbid/YP
+mordancy/MS
+mordant/GDYS
+Mordecai/M
+Mord/M
+Mordred/M
+Mordy/M
+more/DSN
+Moreen/M
+Morehouse/M
+Moreland/M
+morel/SM
+More/M
+Morena/M
+Moreno/M
+moreover
+Morey/M
+Morgana/M
+Morganica/M
+Morgan/MS
+Morganne/M
+morgen/M
+Morgen/M
+morgue/SM
+Morgun/M
+Moria/M
+Moriarty/M
+moribundity/M
+moribund/Y
+Morie/M
+Morin/M
+morion/M
+Morison/M
+Morissa/M
+Morita/M
+Moritz/M
+Morlee/M
+Morley/M
+Morly/M
+Mormonism/MS
+Mormon/SM
+Morna/M
+morning/MY
+morn/SGJDM
+Moroccan/S
+Morocco/M
+morocco/SM
+Moro/M
+moronic
+moronically
+Moroni/M
+moron/SM
+moroseness/MS
+morose/YP
+morpheme/DSMG
+morphemic/S
+Morpheus/M
+morph/GDJ
+morphia/S
+morphine/MS
+morphism/MS
+morphologic
+morphological/Y
+morphology/MS
+morphophonemic/S
+morphophonemics/M
+morphs
+Morrie/M
+morris
+Morris/M
+Morrison/M
+Morristown/M
+Morrow/M
+morrow/MS
+Morry/M
+morsel/GMDS
+Morse/M
+mortality/SM
+mortal/SY
+mortarboard/SM
+mortar/GSDM
+Morten/M
+mortgageable
+mortgagee/SM
+mortgage/MGDS
+mortgagor/SM
+mortice's
+mortician/SM
+Mortie/M
+mortification/M
+mortified/Y
+mortifier/M
+mortify/DRSXGN
+Mortimer/M
+mortise/MGSD
+Mort/MN
+Morton/M
+mortuary/MS
+Morty/M
+Mosaic
+mosaicked
+mosaicking
+mosaic/MS
+Moscone/M
+Moscow/M
+Moseley/M
+Moselle/M
+Mose/MSR
+Moser/M
+mosey/SGD
+Moshe/M
+Moslem's
+Mosley/M
+mosque/SM
+mosquitoes
+mosquito/M
+mos/S
+mossback/MS
+Mossberg/M
+Moss/M
+moss/SDMG
+mossy/SRT
+most/SY
+Mosul/M
+mote/ASCNK
+motel/MS
+mote's
+motet/SM
+mothball/DMGS
+motherboard/MS
+motherfucker/MS!
+motherfucking/!
+motherhood/SM
+mothering/M
+motherland/SM
+motherless
+motherliness/MS
+motherly/P
+mother/RDYMZG
+moths
+moth/ZMR
+motif/MS
+motile/S
+motility/MS
+motional/K
+motioner/M
+motion/GRDMS
+motionlessness/S
+motionless/YP
+motion's/ACK
+motions/K
+motivated/U
+motivate/XDSNGV
+motivational/Y
+motivation/M
+motivator/S
+motiveless
+motive/MGSD
+motley/S
+motlier
+motliest
+mot/MSV
+motocross/SM
+motorbike/SDGM
+motorboat/MS
+motorcade/MSDG
+motorcar/MS
+motorcycle/GMDS
+motorcyclist/SM
+motor/DMSG
+motoring/M
+motorist/SM
+motorization/SM
+motorize/DSG
+motorized/U
+motorman/M
+motormen
+motormouth
+motormouths
+Motorola/M
+motorway/SM
+Motown/M
+mottle/GSRD
+mottler/M
+Mott/M
+mottoes
+motto/M
+moue/DSMG
+moulder/DSG
+moult/GSD
+mound/GMDS
+mountable
+mountaineering/M
+mountaineer/JMDSG
+mountainousness/M
+mountainous/PY
+mountainside/MS
+mountain/SM
+mountaintop/SM
+Mountbatten/M
+mountebank/SGMD
+mounted/U
+mount/EGACD
+mounter/SM
+mounties
+Mountie/SM
+mounting/MS
+Mount/M
+mounts/AE
+mourner/M
+mournfuller
+mournfullest
+mournfulness/S
+mournful/YP
+mourning/M
+mourn/ZGSJRD
+mouser/M
+mouse/SRDGMZ
+mousetrapped
+mousetrapping
+mousetrap/SM
+mousiness/MS
+mousing/M
+mousse/MGSD
+Moussorgsky/M
+mousy/PRT
+Mouthe/M
+mouthful/MS
+mouthiness/SM
+mouth/MSRDG
+mouthorgan
+mouthpiece/SM
+mouths
+mouthwash/SM
+mouthwatering
+mouthy/PTR
+Mouton/M
+mouton/SM
+movable/ASP
+movableness/AM
+move/ARSDGZB
+moved/U
+movement/SM
+mover/AM
+moviegoer/S
+movie/SM
+moving/YS
+mower/M
+Mowgli/M
+mowing/M
+mow/SDRZG
+moxie/MS
+Moyer/M
+Moyna/M
+Moyra/M
+Mozambican/S
+Mozambique/M
+Mozart/M
+Mozelle/M
+Mozes/M
+Mozilla/M
+mozzarella/MS
+mp
+MP
+mpg
+mph
+MPH
+MRI
+Mr/M
+Mrs
+ms
+M's
+MS
+MSG
+Msgr/M
+m's/K
+Ms/S
+MST
+MSW
+mt
+MT
+mtg
+mtge
+Mt/M
+MTS
+MTV
+Muawiya/M
+Mubarak/M
+muchness/M
+much/SP
+mucilage/MS
+mucilaginous
+mucker/M
+muck/GRDMS
+muckraker/M
+muckrake/ZMDRSG
+mucky/RT
+mucosa/M
+mucous
+mucus/SM
+mudded
+muddily
+muddiness/SM
+mudding
+muddle/GRSDZ
+muddleheaded/P
+muddlehead/SMD
+muddler/M
+muddy/TPGRSD
+mudflat/S
+mudguard/SM
+mudlarks
+mud/MS
+mudroom/S
+mudslide/S
+mudslinger/M
+mudslinging/M
+mudsling/JRGZ
+Mueller/M
+Muenster
+muenster/MS
+muesli/M
+muezzin/MS
+muff/GDMS
+Muffin/M
+muffin/SM
+muffler/M
+muffle/ZRSDG
+Mufi/M
+Mufinella/M
+mufti/MS
+Mugabe/M
+mugged
+mugger/SM
+mugginess/S
+mugging/S
+muggy/RPT
+mugshot/S
+mug/SM
+mugwump/MS
+Muhammadanism/S
+Muhammadan/SM
+Muhammad/M
+Muire/M
+Muir/M
+Mukden/M
+mukluk/SM
+mulattoes
+mulatto/M
+mulberry/MS
+mulch/GMSD
+mulct/SDG
+Mulder/M
+mule/MGDS
+muleskinner/S
+muleteer/MS
+mulishness/MS
+mulish/YP
+mullah/M
+mullahs
+mullein/MS
+Mullen/M
+muller/M
+Muller/M
+mullet/MS
+Mulligan/M
+mulligan/SM
+mulligatawny/SM
+Mullikan/M
+Mullins
+mullion/MDSG
+mull/RDSG
+Multan/M
+multi
+Multibus/M
+multicellular
+multichannel/M
+multicollinearity/M
+multicolor/SDM
+multicolumn
+multicomponent
+multicomputer/MS
+Multics/M
+MULTICS/M
+multicultural
+multiculturalism/S
+multidimensional
+multidimensionality
+multidisciplinary
+multifaceted
+multifamily
+multifariousness/SM
+multifarious/YP
+multifigure
+multiform
+multifunction/D
+multilateral/Y
+multilayer
+multilevel/D
+multilingual
+multilingualism/S
+multimedia/S
+multimegaton/M
+multimeter/M
+multimillionaire/SM
+multinational/S
+multinomial/M
+multiphase
+multiple/SM
+multiplet/SM
+multiplex/GZMSRD
+multiplexor's
+multipliable
+multiplicand/SM
+multiplication/M
+multiplicative/YS
+multiplicity/MS
+multiplier/M
+multiply/ZNSRDXG
+multiprocess/G
+multiprocessor/MS
+multiprogram
+multiprogrammed
+multiprogramming/MS
+multipurpose
+multiracial
+multistage
+multistory/S
+multisyllabic
+multitasking/S
+multitude/MS
+multitudinousness/M
+multitudinous/YP
+multiuser
+multivalent
+multivalued
+multivariate
+multiversity/M
+multivitamin/S
+mu/M
+mumbler/M
+mumbletypeg/S
+mumble/ZJGRSD
+Mumford/M
+mummed
+mummer/SM
+mummery/MS
+mummification/M
+mummify/XSDGN
+mumming
+mum/MS
+mummy/GSDM
+mumps/M
+muncher/M
+Münchhausen/M
+munchies
+Munch/M
+munch/ZRSDG
+Muncie/M
+mundane/YSP
+Mundt/M
+munge/JGZSRD
+Munich/M
+municipality/SM
+municipal/YS
+munificence/MS
+munificent/Y
+munition/SDG
+Munmro/M
+Munoz/M
+Munroe/M
+Munro/M
+mun/S
+Munsey/M
+Munson/M
+Munster/MS
+Muong/M
+muon/M
+Muppet/M
+muralist/SM
+mural/SM
+Murasaki/M
+Murat/M
+Murchison/M
+Murcia/M
+murderer/M
+murderess/S
+murder/GZRDMS
+murderousness/M
+murderous/YP
+Murdoch/M
+Murdock/M
+Mureil/M
+Murial/M
+muriatic
+Murielle/M
+Muriel/M
+Murillo/M
+murkily
+murkiness/S
+murk/TRMS
+murky/RPT
+Murmansk/M
+murmurer/M
+murmuring/U
+murmurous
+murmur/RDMGZSJ
+Murphy/M
+murrain/SM
+Murray/M
+Murrow/M
+Murrumbidgee/M
+Murry/M
+Murvyn/M
+muscatel/MS
+Muscat/M
+muscat/SM
+musclebound
+muscle/SDMG
+Muscovite/M
+muscovite/MS
+Muscovy/M
+muscularity/SM
+muscular/Y
+musculature/SM
+muse
+Muse/M
+muser/M
+musette/SM
+museum/MS
+mus/GJDSR
+musher/M
+mushiness/MS
+mush/MSRDG
+mushroom/DMSG
+mushy/PTR
+Musial/M
+musicale/SM
+musicality/SM
+musicals
+musical/YU
+musician/MYS
+musicianship/MS
+musicked
+musicking
+musicological
+musicologist/MS
+musicology/MS
+music/SM
+musing/Y
+Muskegon/M
+muskeg/SM
+muskellunge/SM
+musketeer/MS
+musketry/MS
+musket/SM
+musk/GDMS
+muskie/M
+muskiness/MS
+muskmelon/MS
+muskox/N
+muskrat/MS
+musky/RSPT
+Muslim/MS
+muslin/MS
+mussel/MS
+Mussolini/MS
+Mussorgsky/M
+muss/SDG
+mussy/RT
+mustache/DSM
+mustachio/MDS
+mustang/MS
+mustard/MS
+muster/GD
+mustily
+mustiness/MS
+mustn't
+must/RDGZS
+must've
+musty/RPT
+mutability/SM
+mutableness/M
+mutable/P
+mutably
+mutagen/SM
+mutant/MS
+mutate/XVNGSD
+mutational/Y
+mutation/M
+mutator/S
+muted/Y
+muteness/S
+mute/PDSRBYTG
+mutilate/XDSNG
+mutilation/M
+mutilator/MS
+mutineer/SMDG
+mutinous/Y
+mutiny/MGSD
+Mutsuhito/M
+mutterer/M
+mutter/GZRDJ
+muttonchops
+mutton/SM
+mutt/ZSMR
+mutuality/S
+mutual/SY
+muumuu/MS
+muzak
+Muzak/SM
+Muzo/M
+muzzled/U
+muzzle/MGRSD
+muzzler/M
+MVP
+MW
+Myanmar
+Mycah/M
+Myca/M
+Mycenaean
+Mycenae/M
+Mychal/M
+mycologist/MS
+mycology/MS
+myelitides
+myelitis/M
+Myer/MS
+myers
+mylar
+Mylar/S
+Myles/M
+Mylo/M
+My/M
+myna/SM
+Mynheer/M
+myocardial
+myocardium/M
+myopia/MS
+myopically
+myopic/S
+Myrah/M
+Myra/M
+Myranda/M
+Myrdal/M
+myriad/S
+Myriam/M
+Myrilla/M
+Myrle/M
+Myrlene/M
+myrmidon/S
+Myrna/M
+Myron/M
+myrrh/M
+myrrhs
+Myrta/M
+Myrtia/M
+Myrtice/M
+Myrtie/M
+Myrtle/M
+myrtle/SM
+Myrvyn/M
+Myrwyn/M
+mys
+my/S
+myself
+Mysore/M
+mysteriousness/MS
+mysterious/YP
+mystery/MDSG
+mystical/Y
+mysticism/MS
+mystic/SM
+mystification/M
+mystifier/M
+mystify/CSDGNX
+mystifying/Y
+mystique/MS
+Myst/M
+mythic
+mythical/Y
+myth/MS
+mythographer/SM
+mythography/M
+mythological/Y
+mythologist/MS
+mythologize/CSDG
+mythology/SM
+myths
+N
+NAACP
+nabbed
+nabbing
+Nabisco/M
+nabob/SM
+Nabokov/M
+nab/S
+nacelle/SM
+nacho/S
+NaCl/M
+nacre/MS
+nacreous
+Nada/M
+Nadean/M
+Nadeen/M
+Nader/M
+Nadia/M
+Nadine/M
+nadir/SM
+Nadiya/M
+Nadya/M
+Nady/M
+nae/VM
+Nagasaki/M
+nagged
+nagger/S
+nagging/Y
+nag/MS
+Nagoya/M
+Nagpur/M
+Nagy/M
+Nahuatl/SM
+Nahum/M
+naiad/SM
+naifs
+nailbrush/SM
+nailer/M
+nail/SGMRD
+Naipaul/M
+Nair/M
+Nairobi/M
+Naismith/M
+naive/SRTYP
+naiveté/SM
+naivety/MS
+Nakamura/M
+Nakayama/M
+nakedness/MS
+naked/TYRP
+Nakoma/M
+Nalani/M
+Na/M
+Namath/M
+nameable/U
+name/ADSG
+namedrop
+namedropping
+named's
+named/U
+nameless/PY
+namely
+nameplate/MS
+namer/SM
+name's
+namesake/SM
+Namibia/M
+Namibian/S
+naming/M
+Nam/M
+Nanak/M
+Nana/M
+Nananne/M
+Nancee/M
+Nance/M
+Nancey/M
+Nanchang/M
+Nancie/M
+Nanci/M
+Nancy/M
+Nanete/M
+Nanette/M
+Nanice/M
+Nani/M
+Nanine/M
+Nanjing
+Nanking's
+Nan/M
+Nannette/M
+Nannie/M
+Nanni/M
+Nanny/M
+nanny/SDMG
+nanometer/MS
+Nanon/M
+Nanook/M
+nanosecond/SM
+Nansen/M
+Nantes/M
+Nantucket/M
+Naoma/M
+Naomi/M
+napalm/MDGS
+nape/SM
+Naphtali/M
+naphthalene/MS
+naphtha/SM
+Napier/M
+napkin/SM
+Naples/M
+napless
+Nap/M
+Napoleonic
+napoleon/MS
+Napoleon/MS
+napped
+napper/MS
+Nappie/M
+napping
+Nappy/M
+nappy/TRSM
+nap/SM
+Nara/M
+Narbonne/M
+narc/DGS
+narcissism/MS
+narcissistic
+narcissist/MS
+narcissus/M
+Narcissus/M
+narcoleptic
+narcoses
+narcosis/M
+narcotic/SM
+narcotization/S
+narcotize/GSD
+Nariko/M
+Nari/M
+nark's
+Narmada/M
+Narragansett/M
+narrate/VGNSDX
+narration/M
+narrative/MYS
+narratology
+narrator/SM
+narrowing/P
+narrowness/SM
+narrow/RDYTGPS
+narwhal/MS
+nary
+nasality/MS
+nasalization/MS
+nasalize/GDS
+nasal/YS
+NASA/MS
+nascence/ASM
+nascent/A
+NASDAQ
+Nash/M
+Nashua/M
+Nashville/M
+Nassau/M
+Nasser/M
+nastily
+nastiness/MS
+nasturtium/SM
+nasty/TRSP
+natal
+Natala/M
+Natalee/M
+Natale/M
+Natalia/M
+Natalie/M
+Natalina/M
+Nataline/M
+natalist
+natality/M
+Natal/M
+Natalya/M
+Nata/M
+Nataniel/M
+Natasha/M
+Natassia/M
+Natchez
+natch/S
+Nate/XMN
+Nathalia/M
+Nathalie/M
+Nathanael/M
+Nathanial/M
+Nathaniel/M
+Nathanil/M
+Nathan/MS
+nationalism/SM
+nationalistic
+nationalistically
+nationalist/MS
+nationality/MS
+nationalization/MS
+nationalize/CSDG
+nationalized/AU
+nationalizer/SM
+national/YS
+nationhood/SM
+nation/MS
+nationwide
+nativeness/M
+native/PYS
+Natividad/M
+Nativity/M
+nativity/MS
+Natka/M
+natl
+Nat/M
+NATO/SM
+natter/SGD
+nattily
+nattiness/SM
+Natty/M
+natty/TRP
+naturalism/MS
+naturalistic
+naturalist/MS
+naturalization/SM
+naturalized/U
+naturalize/GSD
+naturalness/US
+natural/PUY
+naturals
+nature/ASDCG
+nature's
+naturist
+Naugahyde/S
+naughtily
+naughtiness/SM
+naught/MS
+naughty/TPRS
+Naur/M
+Nauru/M
+nausea/SM
+nauseate/DSG
+nauseating/Y
+nauseousness/SM
+nauseous/P
+nautical/Y
+nautilus/MS
+Navaho's
+Navajoes
+Navajo/S
+naval/Y
+Navarro/M
+navel/MS
+nave/SM
+navigability/SM
+navigableness/M
+navigable/P
+navigate/DSXNG
+navigational
+navigation/M
+navigator/MS
+Navona/M
+Navratilova/M
+navvy/M
+Navy/S
+navy/SM
+nay/MS
+naysayer/S
+Nazarene/MS
+Nazareth/M
+Nazi/SM
+Nazism/S
+NB
+NBA
+NBC
+Nb/M
+NBS
+NC
+NCAA
+NCC
+NCO
+NCR
+ND
+N'Djamena
+Ndjamena/M
+Nd/M
+Ne
+NE
+Neala/M
+Neale/M
+Neall/M
+Neal/M
+Nealon/M
+Nealson/M
+Nealy/M
+Neanderthal/S
+neap/DGS
+Neapolitan/SM
+nearby
+nearly/RT
+nearness/MS
+nearside/M
+nearsightedness/S
+nearsighted/YP
+near/TYRDPSG
+neaten/DG
+neath
+neatness/MS
+neat/YRNTXPS
+Neb/M
+Nebraska/M
+Nebraskan/MS
+Nebr/M
+Nebuchadnezzar/MS
+nebulae
+nebula/M
+nebular
+nebulousness/SM
+nebulous/PY
+necessaries
+necessarily/U
+necessary/U
+necessitate/DSNGX
+necessitation/M
+necessitous
+necessity/SM
+neckband/M
+neckerchief/MS
+neck/GRDMJS
+necking/M
+necklace/DSMG
+neckline/MS
+necktie/MS
+necrology/SM
+necromancer/MS
+necromancy/MS
+necromantic
+necrophiliac/S
+necrophilia/M
+necropolis/SM
+necropsy/M
+necroses
+necrosis/M
+necrotic
+nectarine/SM
+nectarous
+nectar/SM
+nectary/MS
+Neda/M
+Nedda/M
+Neddie/M
+Neddy/M
+Nedi/M
+Ned/M
+née
+needed/U
+needer/M
+needful/YSP
+Needham/M
+neediness/MS
+needlecraft/M
+needle/GMZRSD
+needlepoint/SM
+needlessness/S
+needless/YP
+needlewoman/M
+needlewomen
+needlework/RMS
+needn't
+need/YRDGS
+needy/TPR
+Neel/M
+Neely/M
+ne'er
+nefariousness/MS
+nefarious/YP
+Nefen/M
+Nefertiti/M
+negated/U
+negater/M
+negate/XRSDVNG
+negation/M
+negativeness/SM
+negative/PDSYG
+negativism/MS
+negativity/MS
+negator/MS
+Negev/M
+neglecter/M
+neglectfulness/SM
+neglectful/YP
+neglect/SDRG
+negligee/SM
+negligence/MS
+negligent/Y
+negligibility/M
+negligible
+negligibly
+negotiability/MS
+negotiable/A
+negotiant/M
+negotiate/ASDXGN
+negotiation/MA
+negotiator/MS
+Negress/MS
+negritude/MS
+Negritude/S
+Negroes
+negroid
+Negroid/S
+Negro/M
+neg/S
+Nehemiah/M
+Nehru/M
+neighbored/U
+neighborer/M
+neighborhood/SM
+neighborlinesses
+neighborliness/UM
+neighborly/UP
+neighbor/SMRDYZGJ
+neigh/MDG
+neighs
+Neila/M
+Neile/M
+Neilla/M
+Neille/M
+Neill/M
+Neil/SM
+neither
+Nelda/M
+Nelia/M
+Nelie/M
+Nelle/M
+Nellie/M
+Nelli/M
+Nell/M
+Nelly/M
+Nelsen/M
+Nels/N
+Nelson/M
+nelson/MS
+nematic
+nematode/SM
+Nembutal/M
+nemeses
+nemesis
+Nemesis/M
+neoclassical
+neoclassicism/MS
+neoclassic/M
+neocolonialism/MS
+neocortex/M
+neodymium/MS
+Neogene
+neolithic
+Neolithic/M
+neologism/SM
+neomycin/M
+neonatal/Y
+neonate/MS
+neon/DMS
+neophyte/MS
+neoplasm/SM
+neoplastic
+neoprene/SM
+Nepalese
+Nepali/MS
+Nepal/M
+nepenthe/MS
+nephew/MS
+nephrite/SM
+nephritic
+nephritides
+nephritis/M
+nepotism/MS
+nepotist/S
+Neptune/M
+neptunium/MS
+nerd/S
+nerdy/RT
+Nereid/M
+Nerf/M
+Nerissa/M
+Nerita/M
+Nero/M
+Neron/M
+Nerta/M
+Nerte/M
+Nertie/M
+Nerti/M
+Nert/M
+Nerty/M
+Neruda/M
+nervelessness/SM
+nerveless/YP
+nerve's
+nerve/UGSD
+nerviness/SM
+nerving/M
+nervousness/SM
+nervous/PY
+nervy/TPR
+Nessa/M
+Nessie/M
+Nessi/M
+Nessy/M
+Nesta/M
+nester/M
+Nester/M
+Nestle/M
+nestler/M
+nestle/RSDG
+nestling/M
+Nestorius/M
+Nestor/M
+nest/RDGSBM
+netball/M
+nether
+Netherlander/SM
+Netherlands/M
+nethermost
+netherworld/S
+Netscape/M
+net/SM
+Netta/M
+Nettie/M
+Netti/M
+netting/M
+nett/JGRDS
+Nettle/M
+nettle/MSDG
+nettlesome
+Netty/M
+network/SJMDG
+Netzahualcoyotl/M
+Neumann/M
+neuralgia/MS
+neuralgic
+neural/Y
+neurasthenia/MS
+neurasthenic/S
+neuritic/S
+neuritides
+neuritis/M
+neuroanatomy
+neurobiology/M
+neurological/Y
+neurologist/MS
+neurology/SM
+neuromuscular
+neuronal
+neurone/S
+neuron/MS
+neuropathology/M
+neurophysiology/M
+neuropsychiatric
+neuroses
+neurosis/M
+neurosurgeon/MS
+neurosurgery/SM
+neurotically
+neurotic/S
+neurotransmitter/S
+neuter/JZGRD
+neutralise's
+neutralism/MS
+neutralist/S
+neutrality/MS
+neutralization/MS
+neutralized/U
+neutralize/GZSRD
+neutral/PYS
+neutrino/MS
+neutron/MS
+neut/ZR
+Nevada/M
+Nevadan/S
+Nevadian/S
+Neva/M
+never
+nevermore
+nevertheless
+nevi
+Nevile/M
+Neville/M
+Nevil/M
+Nevin/SM
+Nevis/M
+Nev/M
+Nevsa/M
+Nevsky/M
+nevus/M
+Newark/M
+newbie/S
+newborn/S
+Newbury/M
+Newburyport/M
+Newcastle/M
+newcomer/MS
+newed/A
+Newell/M
+newel/MS
+newer/A
+newfangled
+newfound
+newfoundland
+Newfoundlander/M
+Newfoundland/SRMZ
+newish
+newline/SM
+newlywed/MS
+Newman/M
+newness/MS
+Newport/M
+news/A
+newsagent/MS
+newsboy/SM
+newscaster/M
+newscasting/M
+newscast/SRMGZ
+newsdealer/MS
+newsed
+newses
+newsflash/S
+newsgirl/S
+newsgroup/SM
+newsing
+newsletter/SM
+NeWS/M
+newsman/M
+newsmen
+newspaperman/M
+newspapermen
+newspaper/SMGD
+newspaperwoman/M
+newspaperwomen
+newsprint/MS
+new/SPTGDRY
+newsreader/MS
+newsreel/SM
+newsroom/S
+news's
+newsstand/MS
+Newsweekly/M
+newsweekly/S
+Newsweek/MY
+newswire
+newswoman/M
+newswomen
+newsworthiness/SM
+newsworthy/RPT
+newsy/TRS
+newt/MS
+Newtonian
+Newton/M
+newton/SM
+Nexis/M
+next
+nexus/SM
+Neysa/M
+NF
+NFC
+NFL
+NFS
+Ngaliema/M
+Nguyen/M
+NH
+NHL
+niacin/SM
+Niagara/M
+Niall/M
+Nial/M
+Niamey/M
+nibbed
+nibbing
+nibbler/M
+nibble/RSDGZ
+Nibelung/M
+nib/SM
+Nicaean
+Nicaragua/M
+Nicaraguan/S
+Niccolo/M
+Nice/M
+Nicene
+niceness/MS
+nicety/MS
+nice/YTPR
+niche/SDGM
+Nicholas
+Nichole/M
+Nicholle/M
+Nichol/MS
+Nicholson/M
+nichrome
+nickelodeon/SM
+nickel/SGMD
+nicker/GD
+Nickey/M
+nick/GZRDMS
+Nickie/M
+Nicki/M
+Nicklaus/M
+Nick/M
+nicknack's
+nickname/MGDRS
+nicknamer/M
+Nickolai/M
+Nickola/MS
+Nickolaus/M
+Nicko/M
+Nicky/M
+Nicobar/M
+Nicodemus/M
+Nicolai/MS
+Nicola/MS
+Nicolea/M
+Nicole/M
+Nicolette/M
+Nicoli/MS
+Nicolina/M
+Nicoline/M
+Nicolle/M
+Nicol/M
+Nico/M
+Nicosia/M
+nicotine/MS
+Niebuhr/M
+niece/MS
+Niel/MS
+Nielsen/M
+Niels/N
+Nielson/M
+Nietzsche/M
+Nieves/M
+nifty/TRS
+Nigel/M
+Nigeria/M
+Nigerian/S
+Nigerien
+Niger/M
+niggardliness/SM
+niggardly/P
+niggard/SGMDY
+nigger/SGDM!
+niggler/M
+niggle/RSDGZJ
+niggling/Y
+nigh/RDGT
+nighs
+nightcap/SM
+nightclothes
+nightclubbed
+nightclubbing
+nightclub/MS
+nightdress/MS
+nightfall/SM
+nightgown/MS
+nighthawk/MS
+nightie/MS
+Nightingale/M
+nightingale/SM
+nightlife/MS
+nightlong
+nightmare/MS
+nightmarish/Y
+nightshade/SM
+nightshirt/MS
+night/SMYDZ
+nightspot/MS
+nightstand/SM
+nightstick/S
+nighttime/S
+nightwear/M
+nighty's
+NIH
+nihilism/MS
+nihilistic
+nihilist/MS
+Nijinsky/M
+Nikaniki/M
+Nike/M
+Niki/M
+Nikita/M
+Nikkie/M
+Nikki/M
+Nikko/M
+Nikolai/M
+Nikola/MS
+Nikolaos/M
+Nikolaus/M
+Nikolayev's
+Nikoletta/M
+Nikolia/M
+Nikolos/M
+Niko/MS
+Nikon/M
+Nile/SM
+nilled
+nilling
+Nil/MS
+nil/MYS
+nilpotent
+Nilsen/M
+Nils/N
+Nilson/M
+Nilsson/M
+Ni/M
+nimbi
+nimbleness/SM
+nimble/TRP
+nimbly
+nimbus/DM
+NIMBY
+Nimitz/M
+Nimrod/MS
+Nina/M
+nincompoop/MS
+ninefold
+nine/MS
+ninepence/M
+ninepin/S
+ninepins/M
+nineteen/SMH
+nineteenths
+ninetieths
+Ninetta/M
+Ninette/M
+ninety/MHS
+Nineveh/M
+ninja/S
+Ninnetta/M
+Ninnette/M
+ninny/SM
+Ninon/M
+Nintendo/M
+ninth
+ninths
+Niobe/M
+niobium/MS
+nipped
+nipper/DMGS
+nippiness/S
+nipping/Y
+nipple/GMSD
+Nipponese
+Nippon/M
+nippy/TPR
+nip/S
+Nirenberg/M
+nirvana/MS
+Nirvana/S
+nisei
+Nisei/MS
+Nissa/M
+Nissan/M
+Nisse/M
+Nissie/M
+Nissy/M
+Nita/M
+niter/M
+nitpick/DRSJZG
+nitrate/MGNXSD
+nitration/M
+nitric
+nitride/MGS
+nitriding/M
+nitrification/SM
+nitrite/MS
+nitrocellulose/MS
+nitrogenous
+nitrogen/SM
+nitroglycerin/MS
+nitrous
+nitwit/MS
+nit/ZSMR
+Niven/M
+nixer/M
+nix/GDSR
+Nixie/M
+Nixon/M
+NJ
+Nkrumah/M
+NLRB
+nm
+NM
+no/A
+NOAA
+Noach/M
+Noah/M
+Noak/M
+Noami/M
+Noam/M
+Nobelist/SM
+nobelium/MS
+Nobel/M
+Nobe/M
+Nobie/M
+nobility/MS
+Noble/M
+nobleman/M
+noblemen
+nobleness/SM
+noblesse/M
+noble/TPSR
+noblewoman
+noblewomen
+nob/MY
+nobody/MS
+Noby/M
+nocturnal/SY
+nocturne/SM
+nodal/Y
+nodded
+nodding
+noddle/MSDG
+noddy/M
+node/MS
+NoDoz/M
+nod/SM
+nodular
+nodule/SM
+Noelani/M
+Noella/M
+Noelle/M
+Noell/M
+Noellyn/M
+Noel/MS
+noel/S
+Noelyn/M
+Noe/M
+Noemi/M
+noes/S
+noggin/SM
+nohow
+noise/GMSD
+noiselessness/SM
+noiseless/YP
+noisemaker/M
+noisemake/ZGR
+noisily
+noisiness/MS
+noisome
+noisy/TPR
+Nola/M
+Nolana/M
+Noland/M
+Nolan/M
+Nolie/M
+Nollie/M
+Noll/M
+Nolly/M
+No/M
+nomadic
+nomad/SM
+Nome/M
+nomenclature/MS
+Nomi/M
+nominalized
+nominal/K
+nominally
+nominals
+nominate/CDSAXNG
+nomination/MAC
+nominative/SY
+nominator/CSM
+nominee/MS
+non
+nonabrasive
+nonabsorbent/S
+nonacademic/S
+nonacceptance/MS
+nonacid/MS
+nonactive
+nonadaptive
+nonaddictive
+nonadhesive
+nonadjacent
+nonadjustable
+nonadministrative
+nonage/MS
+nonagenarian/MS
+nonaggression/SM
+nonagricultural
+Nonah/M
+nonalcoholic/S
+nonaligned
+nonalignment/SM
+nonallergic
+Nona/M
+nonappearance/MS
+nonassignable
+nonathletic
+nonattendance/SM
+nonautomotive
+nonavailability/SM
+nonbasic
+nonbeliever/SM
+nonbelligerent/S
+nonblocking
+nonbreakable
+nonburnable
+nonbusiness
+noncaloric
+noncancerous
+noncarbohydrate/M
+nonce/MS
+nonchalance/SM
+nonchalant/YP
+nonchargeable
+nonclerical/S
+nonclinical
+noncollectable
+noncombatant/MS
+noncombustible/S
+noncommercial/S
+noncommissioned
+noncommittal/Y
+noncom/MS
+noncommunicable
+noncompeting
+noncompetitive
+noncompliance/MS
+noncomplying/S
+noncomprehending
+nonconducting
+nonconductor/MS
+nonconforming
+nonconformist/SM
+nonconformity/SM
+nonconsecutive
+nonconservative
+nonconstructive
+noncontagious
+noncontiguous
+noncontinuous
+noncontributing
+noncontributory
+noncontroversial
+nonconvertible
+noncooperation/SM
+noncorroding/S
+noncorrosive
+noncredit
+noncriminal/S
+noncritical
+noncrystalline
+noncumulative
+noncustodial
+noncyclic
+nondairy
+nondecreasing
+nondeductible
+nondelivery/MS
+nondemocratic
+nondenominational
+nondepartmental
+nondepreciating
+nondescript/YS
+nondestructive/Y
+nondetachable
+nondeterminacy
+nondeterminate/Y
+nondeterminism
+nondeterministic
+nondeterministically
+nondisciplinary
+nondisclosure/SM
+nondiscrimination/SM
+nondiscriminatory
+nondramatic
+nondrinker/SM
+nondrying
+nondurable
+noneconomic
+noneducational
+noneffective/S
+nonelastic
+nonelectrical
+nonelectric/S
+nonemergency
+nonempty
+nonenforceable
+nonentity/MS
+nonequivalence/M
+nonequivalent/S
+none/S
+nones/M
+nonessential/S
+nonesuch/SM
+nonetheless
+nonevent/MS
+nonexchangeable
+nonexclusive
+nonexempt
+nonexistence/MS
+nonexistent
+nonexplosive/S
+nonextensible
+nonfactual
+nonfading
+nonfat
+nonfatal
+nonfattening
+nonferrous
+nonfictional
+nonfiction/SM
+nonflammable
+nonflowering
+nonfluctuating
+nonflying
+nonfood/M
+nonfreezing
+nonfunctional
+nongovernmental
+nongranular
+nonhazardous
+nonhereditary
+nonhuman
+nonidentical
+Nonie/M
+Noni/M
+noninclusive
+nonindependent
+nonindustrial
+noninfectious
+noninflammatory
+noninflationary
+noninflected
+nonintellectual/S
+noninteracting
+noninterchangeable
+noninterference/MS
+nonintervention/SM
+nonintoxicating
+nonintuitive
+noninvasive
+nonionic
+nonirritating
+nonjudgmental
+nonjudicial
+nonlegal
+nonlethal
+nonlinearity/MS
+nonlinear/Y
+nonlinguistic
+nonliterary
+nonliving
+nonlocal
+nonmagical
+nonmagnetic
+nonmalignant
+nonmember/SM
+nonmetallic
+nonmetal/MS
+nonmigratory
+nonmilitant/S
+nonmilitary
+Nonnah/M
+Nonna/M
+nonnarcotic/S
+nonnative/S
+nonnegative
+nonnegotiable
+nonnuclear
+nonnumerical/S
+nonobjective
+nonobligatory
+nonobservance/MS
+nonobservant
+nonoccupational
+nonoccurence
+nonofficial
+nonogenarian
+nonoperational
+nonoperative
+nonorthogonal
+nonorthogonality
+nonparallel/S
+nonparametric
+nonpareil/SM
+nonparticipant/SM
+nonparticipating
+nonpartisan/S
+nonpaying
+nonpayment/SM
+nonperformance/SM
+nonperforming
+nonperishable/S
+nonperson/S
+nonperturbing
+nonphysical/Y
+nonplus/S
+nonplussed
+nonplussing
+nonpoisonous
+nonpolitical
+nonpolluting
+nonporous
+nonpracticing
+nonprejudicial
+nonprescription
+nonprocedural/Y
+nonproductive
+nonprofessional/S
+nonprofit/SB
+nonprogrammable
+nonprogrammer
+nonproliferation/SM
+nonpublic
+nonpunishable
+nonracial
+nonradioactive
+nonrandom
+nonreactive
+nonreciprocal/S
+nonreciprocating
+nonrecognition/SM
+nonrecoverable
+nonrecurring
+nonredeemable
+nonreducing
+nonrefillable
+nonrefundable
+nonreligious
+nonrenewable
+nonrepresentational
+nonresidential
+nonresident/SM
+nonresidual
+nonresistance/SM
+nonresistant/S
+nonrespondent/S
+nonresponse
+nonrestrictive
+nonreturnable/S
+nonrhythmic
+nonrigid
+nonsalaried
+nonscheduled
+nonscientific
+nonscoring
+nonseasonal
+nonsectarian
+nonsecular
+nonsegregated
+nonsense/MS
+nonsensicalness/M
+nonsensical/PY
+nonsensitive
+nonsexist
+nonsexual
+nonsingular
+nonskid
+nonslip
+nonsmoker/SM
+nonsmoking
+nonsocial
+nonspeaking
+nonspecialist/MS
+nonspecializing
+nonspecific
+nonspiritual/S
+nonstaining
+nonstandard
+nonstarter/SM
+nonstick
+nonstop
+nonstrategic
+nonstriking
+nonstructural
+nonsuccessive
+nonsupervisory
+nonsupport/GS
+nonsurgical
+nonsustaining
+nonsympathizer/M
+nontarnishable
+nontaxable/S
+nontechnical/Y
+nontenured
+nonterminal/MS
+nonterminating
+nontermination/M
+nontheatrical
+nonthinking/S
+nonthreatening
+nontoxic
+nontraditional
+nontransferable
+nontransparent
+nontrivial
+nontropical
+nonuniform
+nonunion/S
+nonuser/SM
+nonvenomous
+nonverbal/Y
+nonveteran/MS
+nonviable
+nonviolence/SM
+nonviolent/Y
+nonvirulent
+nonvocal
+nonvocational
+nonvolatile
+nonvolunteer/S
+nonvoter/MS
+nonvoting
+nonwhite/SM
+nonworking
+nonyielding
+nonzero
+noodle/GMSD
+nook/MS
+noonday/MS
+noon/GDMS
+nooning/M
+noontide/MS
+noontime/MS
+noose/SDGM
+nope/S
+NORAD/M
+noradrenalin
+noradrenaline/M
+Norah/M
+Nora/M
+Norbert/M
+Norberto/M
+Norbie/M
+Norby/M
+Nordhoff/M
+Nordic/S
+Nordstrom/M
+Norean/M
+Noreen/M
+Norene/M
+Norfolk/M
+nor/H
+Norina/M
+Norine/M
+normalcy/MS
+normality/SM
+normalization/A
+normalizations
+normalization's
+normalized/AU
+normalizes/AU
+normalize/SRDZGB
+normal/SY
+Norma/M
+Normand/M
+Normandy/M
+Norman/SM
+normativeness/M
+normative/YP
+Normie/M
+norm/SMGD
+Normy/M
+Norplant
+Norrie/M
+Norri/SM
+Norristown/M
+Norry/M
+Norse
+Norseman/M
+Norsemen
+Northampton/M
+northbound
+northeastern
+northeaster/YM
+Northeast/SM
+northeastward/S
+northeast/ZSMR
+northerly/S
+norther/MY
+Northerner/M
+northernmost
+northern/RYZS
+Northfield/M
+northing/M
+northland
+North/M
+northmen
+north/MRGZ
+Northrop/M
+Northrup/M
+norths
+Norths
+Northumberland/M
+northward/S
+northwestern
+northwester/YM
+northwest/MRZS
+Northwest/MS
+northwestward/S
+Norton/M
+Norwalk/M
+Norway/M
+Norwegian/S
+Norwich/M
+Norw/M
+nosebag/M
+nosebleed/SM
+nosecone/S
+nosedive/DSG
+nosed/V
+nosegay/MS
+nose/M
+Nosferatu/M
+nos/GDS
+nosh/MSDG
+nosily
+nosiness/MS
+nosing/M
+nostalgia/SM
+nostalgically
+nostalgic/S
+Nostradamus/M
+Nostrand/M
+nostril/SM
+nostrum/SM
+nosy/SRPMT
+notability/SM
+notableness/M
+notable/PS
+notably
+notarial
+notarization/S
+notarize/DSG
+notary/MS
+notate/VGNXSD
+notational/CY
+notation/CMSF
+notative/CF
+notch/MSDG
+not/DRGB
+notebook/MS
+note/CSDFG
+notedness/M
+noted/YP
+notepad/S
+notepaper/MS
+note's
+noteworthiness/SM
+noteworthy/P
+nothingness/SM
+nothing/PS
+noticeable/U
+noticeably
+noticeboard/S
+noticed/U
+notice/MSDG
+notifiable
+notification/M
+notifier/M
+notify/NGXSRDZ
+notional/Y
+notion/MS
+notoriety/S
+notoriousness/M
+notorious/YP
+Notre/M
+Nottingham/M
+notwithstanding
+Nouakchott/M
+nougat/MS
+Noumea/M
+noun/SMK
+nourish/DRSGL
+nourished/U
+nourisher/M
+nourishment/SM
+nous/M
+nouveau
+nouvelle
+novae
+Novak/M
+Nova/M
+nova/MS
+novelette/SM
+Novelia/M
+novelist/SM
+novelization/S
+novelize/GDS
+Novell/SM
+novella/SM
+novel/SM
+novelty/MS
+November/SM
+novena/SM
+novene
+Novgorod/M
+novice/MS
+novitiate/MS
+Nov/M
+Novocaine/M
+Novocain/S
+Novokuznetsk/M
+Novosibirsk/M
+NOW
+nowadays
+noway/S
+Nowell/M
+nowhere/S
+nowise
+now/S
+noxiousness/M
+noxious/PY
+Noyce/M
+Noyes/M
+nozzle/MS
+Np
+NP
+NRA
+nroff/M
+N's
+NS
+n's/CI
+NSF
+n/T
+NT
+nth
+nuance/SDM
+nubbin/SM
+nubby/RT
+Nubia/M
+Nubian/M
+nubile
+nub/MS
+nuclear/K
+nuclease/M
+nucleated/A
+nucleate/DSXNG
+nucleation/M
+nucleic
+nuclei/M
+nucleoli
+nucleolus/M
+nucleon/MS
+nucleotide/MS
+nucleus/M
+nuclide/M
+nude/CRS
+nudely
+nudeness/M
+nudest
+nudge/GSRD
+nudger/M
+nudism/MS
+nudist/MS
+nudity/MS
+nugatory
+Nugent/M
+nugget/SM
+nuisance/MS
+nuke/DSMG
+Nukualofa
+null/DSG
+nullification/M
+nullifier/M
+nullify/RSDXGNZ
+nullity/SM
+nu/M
+numbered/UA
+numberer/M
+numberless
+numberplate/M
+number/RDMGJ
+numbers/A
+Numbers/M
+numbing/Y
+numbness/MS
+numb/SGZTYRDP
+numbskull's
+numerable/IC
+numeracy/SI
+numeral/YMS
+numerate/SDNGX
+numerates/I
+numeration/M
+numerator/MS
+numerical/Y
+numeric/S
+numerological
+numerologist/S
+numerology/MS
+numerousness/M
+numerous/YP
+numinous/S
+numismatic/S
+numismatics/M
+numismatist/MS
+numskull/SM
+Nunavut/M
+nuncio/SM
+Nunez/M
+Nunki/M
+nun/MS
+nunnery/MS
+nuptial/S
+Nuremberg/M
+Nureyev/M
+nursemaid/MS
+nurser/M
+nurseryman/M
+nurserymen
+nursery/MS
+nurse/SRDJGMZ
+nursling/M
+nurturer/M
+nurture/SRDGZM
+nus
+nutate/NGSD
+nutation/M
+nutcracker/M
+nutcrack/RZ
+nuthatch/SM
+nutmeat/SM
+nutmegged
+nutmegging
+nutmeg/MS
+nut/MS
+nutpick/MS
+Nutrasweet/M
+nutria/SM
+nutrient/MS
+nutriment/MS
+nutritional/Y
+nutritionist/MS
+nutrition/SM
+nutritiousness/MS
+nutritious/PY
+nutritive/Y
+nutshell/MS
+nutted
+nuttiness/SM
+nutting
+nutty/TRP
+nuzzle/GZRSD
+NV
+NW
+NWT
+NY
+Nyasa/M
+NYC
+Nydia/M
+Nye/M
+Nyerere/M
+nylon/SM
+nymphet/MS
+nymph/M
+nympholepsy/M
+nymphomaniac/S
+nymphomania/MS
+nymphs
+Nyquist/M
+NYSE
+Nyssa/M
+NZ
+o
+O
+oafishness/S
+oafish/PY
+oaf/MS
+Oahu/M
+Oakland/M
+Oakley/M
+Oakmont/M
+oak/SMN
+oakum/MS
+oakwood
+oar/GSMD
+oarlock/MS
+oarsman/M
+oarsmen
+oarswoman
+oarswomen
+OAS
+oases
+oasis/M
+oatcake/MS
+oater/M
+Oates/M
+oath/M
+oaths
+oatmeal/SM
+oat/SMNR
+Oaxaca/M
+ob
+OB
+Obadiah/M
+Obadias/M
+obbligato/S
+obduracy/S
+obdurateness/S
+obdurate/PDSYG
+Obediah/M
+obedience/EMS
+obedient/EY
+Obed/M
+obeisance/MS
+obeisant/Y
+obelisk/SM
+Oberlin/M
+Oberon/M
+obese
+obesity/MS
+obey/EDRGS
+obeyer/EM
+obfuscate/SRDXGN
+obfuscation/M
+obfuscatory
+Obidiah/M
+Obie/M
+obi/MDGS
+obit/SMR
+obituary/SM
+obj
+objectify/GSDXN
+objectionableness/M
+objectionable/U
+objectionably
+objection/SMB
+objectiveness/MS
+objective/PYS
+objectivity/MS
+objector/SM
+object/SGVMD
+objurgate/GNSDX
+objurgation/M
+oblate/NYPSX
+oblation/M
+obligate/NGSDXY
+obligational
+obligation/M
+obligatorily
+obligatory
+obliged/E
+obliger/M
+obliges/E
+oblige/SRDG
+obligingness/M
+obliging/PY
+oblique/DSYGP
+obliqueness/S
+obliquity/MS
+obliterate/VNGSDX
+obliteration/M
+obliterative/Y
+oblivion/MS
+obliviousness/MS
+oblivious/YP
+oblongness/M
+oblong/SYP
+obloquies
+obloquy/M
+Ob/MD
+obnoxiousness/MS
+obnoxious/YP
+oboe/SM
+oboist/S
+obos
+O'Brien/M
+obs
+obscene/RYT
+obscenity/MS
+obscurantism/MS
+obscurantist/MS
+obscuration
+obscureness/M
+obscure/YTPDSRGL
+obscurity/MS
+obsequies
+obsequiousness/S
+obsequious/YP
+obsequy
+observability/M
+observable/SU
+observably
+observance/MS
+observantly
+observants
+observant/U
+observational/Y
+observation/MS
+observatory/MS
+observed/U
+observer/M
+observe/ZGDSRB
+observing/Y
+obsess/GVDS
+obsessional
+obsession/MS
+obsessiveness/S
+obsessive/PYS
+obsidian/SM
+obsolesce/GSD
+obsolescence/S
+obsolescent/Y
+obsolete/GPDSY
+obsoleteness/M
+obstacle/SM
+obstetrical
+obstetrician/SM
+obstetric/S
+obstetrics/M
+obstinacy/SM
+obstinateness/M
+obstinate/PY
+obstreperousness/SM
+obstreperous/PY
+obstructed/U
+obstructer/M
+obstructionism/SM
+obstructionist/MS
+obstruction/SM
+obstructiveness/MS
+obstructive/PSY
+obstruct/RDVGS
+obtainable/U
+obtainably
+obtain/LSGDRB
+obtainment/S
+obtrude/DSRG
+obtruder/M
+obtrusion/S
+obtrusiveness/MSU
+obtrusive/UPY
+obtuseness/S
+obtuse/PRTY
+obverse/YS
+obviate/XGNDS
+obviousness/SM
+obvious/YP
+Oby/M
+ocarina/MS
+O'Casey
+Occam/M
+occasional/Y
+occasion/MDSJG
+Occidental/S
+occidental/SY
+occident/M
+Occident/SM
+occipital/Y
+occlude/GSD
+occlusion/MS
+occlusive/S
+occulter/M
+occultism/SM
+occult/SRDYG
+occupancy/SM
+occupant/MS
+occupational/Y
+occupation/SAM
+occupied/AU
+occupier/M
+occupies/A
+occupy/RSDZG
+occur/AS
+occurred/A
+occurrence/SM
+occurring/A
+oceanfront/MS
+oceangoing
+Oceania/M
+oceanic
+ocean/MS
+oceanographer/SM
+oceanographic
+oceanography/SM
+oceanology/MS
+oceanside
+Oceanside/M
+Oceanus/M
+ocelot/SM
+ocher/DMGS
+Ochoa/M
+o'clock
+O'Clock
+O'Connell/M
+O'Connor/M
+Oconomowoc/M
+OCR
+octagonal/Y
+octagon/SM
+octahedral
+octahedron/M
+octal/S
+octane/MS
+octant/M
+octave/MS
+Octavia/M
+Octavian/M
+Octavio/M
+Octavius/M
+octavo/MS
+octennial
+octet/SM
+octile
+octillion/M
+Oct/M
+October/MS
+octogenarian/MS
+octopus/SM
+octoroon/M
+ocular/S
+oculist/SM
+OD
+odalisque/SM
+oddball/SM
+oddity/MS
+oddment/MS
+oddness/MS
+odd/TRYSPL
+Odele/M
+Odelia/M
+Odelinda/M
+Odella/M
+Odelle/M
+Odell/M
+O'Dell/M
+ode/MDRS
+Ode/MR
+Oderberg/MS
+Oder/M
+Odessa/M
+Odets/M
+Odetta/M
+Odette/M
+Odey/M
+Odie/M
+Odilia/M
+Odille/M
+Odin/M
+odiousness/MS
+odious/PY
+Odis/M
+odium/MS
+Odo/M
+odometer/SM
+Odom/M
+O'Donnell/M
+odor/DMS
+odoriferous
+odorless
+odorous/YP
+ODs
+O'Dwyer/M
+Ody/M
+Odysseus/M
+Odyssey/M
+odyssey/S
+OE
+OED
+oedipal
+Oedipal/Y
+Oedipus/M
+OEM/M
+OEMS
+oenology/MS
+oenophile/S
+o'er
+O'Er
+Oersted/M
+oesophagi
+oeuvre/SM
+Ofelia/M
+Ofella/M
+offal/MS
+offbeat/MS
+offcuts
+Offenbach/M
+offender/M
+offend/SZGDR
+offense/MSV
+offensively/I
+offensiveness/MSI
+offensive/YSP
+offerer/M
+offering/M
+offer/RDJGZ
+offertory/SM
+offhand/D
+offhandedness/S
+offhanded/YP
+officeholder/SM
+officemate/S
+officer/GMD
+officership/S
+office/SRMZ
+officialdom/SM
+officialism/SM
+officially/U
+official/PSYM
+officiant/SM
+officiate/XSDNG
+officiation/M
+officiator/MS
+officio
+officiousness/MS
+officious/YP
+offing/M
+offish
+offload/GDS
+offprint/GSDM
+offramp
+offset/SM
+offsetting
+offshoot/MS
+offshore
+offside/RS
+offspring/M
+offstage/S
+off/SZGDRJ
+offtrack
+Ofilia/M
+of/K
+often/RT
+oftentimes
+oft/NRT
+ofttimes
+Ogbomosho/M
+Ogdan/M
+Ogden/M
+Ogdon/M
+Ogilvy/M
+ogive/M
+Oglethorpe/M
+ogle/ZGDSR
+ogreish
+ogre/MS
+ogress/S
+oh
+OH
+O'Hara
+O'Hare/M
+O'Higgins
+Ohioan/S
+Ohio/M
+ohmic
+ohmmeter/MS
+ohm/SM
+oho/S
+ohs
+OHSA/M
+oilcloth/M
+oilcloths
+oiler/M
+oilfield/MS
+oiliness/SM
+oilman/M
+oil/MDRSZG
+oilmen
+oilseed/SM
+oilskin/MS
+oily/TPR
+oink/GDS
+ointment/SM
+Oise/M
+OJ
+Ojibwa/SM
+Okamoto/M
+okapi/SM
+Okayama/M
+okay/M
+Okeechobee/M
+O'Keeffe
+Okefenokee
+Okhotsk/M
+Okinawa/M
+Okinawan/S
+Oklahoma/M
+Oklahoman/SM
+Okla/M
+OK/MDG
+okra/MS
+OKs
+Oktoberfest
+Olaf/M
+Olag/M
+Ola/M
+Olav/M
+Oldenburg/M
+olden/DG
+Oldfield/M
+oldie/MS
+oldish
+oldness/S
+Oldsmobile/M
+oldster/SM
+Olduvai/M
+old/XTNRPS
+olé
+oleaginous
+oleander/SM
+O'Leary/M
+olefin/M
+Oleg/M
+Ole/MV
+Olenek/M
+Olenka/M
+Olen/M
+Olenolin/M
+oleomargarine/SM
+oleo/S
+oles
+olfactory
+Olga/M
+Olia/M
+oligarchic
+oligarchical
+oligarch/M
+oligarchs
+oligarchy/SM
+Oligocene
+oligopolistic
+oligopoly/MS
+Olimpia/M
+Olin/M
+olive/MSR
+Olive/MZR
+Oliver/M
+Olivero/M
+Olivette/M
+Olivetti/M
+Olivia/M
+Olivier/M
+Olivie/RM
+Oliviero/M
+Oliy/M
+Ollie/M
+Olly/M
+Olmec
+Olmsted/M
+Olsen/M
+Olson/M
+Olva/M
+Olvan/M
+Olwen/M
+Olympe/M
+Olympiad/MS
+Olympian/S
+Olympia/SM
+Olympic/S
+Olympie/M
+Olympus/M
+Omaha/SM
+Oman/M
+Omar/M
+ombudsman/M
+ombudsmen
+Omdurman/M
+omega/MS
+omelet/SM
+omelette's
+omen/DMG
+Omero/M
+omicron/MS
+ominousness/SM
+ominous/YP
+omission/MS
+omit/S
+omitted
+omitting
+omnibus/MS
+omni/M
+omnipotence/SM
+Omnipotent
+omnipotent/SY
+omnipresence/MS
+omnipresent/Y
+omniscience/SM
+omniscient/YS
+omnivore/MS
+omnivorousness/MS
+omnivorous/PY
+oms
+Omsk/M
+om/XN
+ON
+onanism/M
+Onassis/M
+oncer/M
+once/SR
+oncogene/S
+oncologist/S
+oncology/SM
+oncoming/S
+Ondrea/M
+Oneal/M
+Onega/M
+Onegin/M
+Oneida/SM
+O'Neil
+O'Neill
+oneness/MS
+one/NPMSX
+oner/M
+onerousness/SM
+onerous/YP
+oneself
+onetime
+oneupmanship
+Onfre/M
+Onfroi/M
+ongoing/S
+Onida/M
+onion/GDM
+onionskin/MS
+onlooker/MS
+onlooking
+only/TP
+Onofredo/M
+Ono/M
+onomatopoeia/SM
+onomatopoeic
+onomatopoetic
+Onondaga/MS
+onrush/GMS
+on/RY
+ons
+Onsager/M
+onset/SM
+onsetting
+onshore
+onside
+onslaught/MS
+Ontarian/S
+Ontario/M
+Ont/M
+onto
+ontogeny/SM
+ontological/Y
+ontology/SM
+onus/SM
+onward/S
+onyx/MS
+oodles
+ooh/GD
+oohs
+oolitic
+Oona/M
+OOo/M
+oops/S
+Oort/M
+ooze/GDS
+oozy/RT
+opacity/SM
+opalescence/S
+opalescent/Y
+Opalina/M
+Opaline/M
+Opal/M
+opal/SM
+opaque/GTPYRSD
+opaqueness/SM
+opcode/MS
+OPEC
+Opel/M
+opencast
+opened/AU
+opener/M
+openhandedness/SM
+openhanded/P
+openhearted
+opening/M
+openness/S
+OpenOffice.org/M
+opens/A
+openwork/MS
+open/YRDJGZTP
+operable/I
+operandi
+operand/SM
+operant/YS
+opera/SM
+operate/XNGVDS
+operatically
+operatic/S
+operationalization/S
+operationalize/D
+operational/Y
+operation/M
+operative/IP
+operatively
+operativeness/MI
+operatives
+operator/SM
+operetta/MS
+ope/S
+Ophelia/M
+Ophelie/M
+Ophiuchus/M
+ophthalmic/S
+ophthalmologist/SM
+ophthalmology/MS
+opiate/GMSD
+opine/XGNSD
+opinionatedness/M
+opinionated/PY
+opinion/M
+opioid
+opium/MS
+opossum/SM
+opp
+Oppenheimer/M
+opponent/MS
+opportune/IY
+opportunism/SM
+opportunistic
+opportunistically
+opportunist/SM
+opportunity/MS
+oppose/BRSDG
+opposed/U
+opposer/M
+oppositeness/M
+opposite/SXYNP
+oppositional
+opposition/M
+oppress/DSGV
+oppression/MS
+oppressiveness/MS
+oppressive/YP
+oppressor/MS
+opprobrious/Y
+opprobrium/SM
+Oprah/M
+ops
+opt/DSG
+opthalmic
+opthalmologic
+opthalmology
+optical/Y
+optician/SM
+optic/S
+optics/M
+optima
+optimality
+optimal/Y
+optimise's
+optimism/SM
+optimistic
+optimistically
+optimist/SM
+optimization/SM
+optimize/DRSZG
+optimized/U
+optimizer/M
+optimizes/U
+optimum/SM
+optionality/M
+optional/YS
+option/GDMS
+optoelectronic
+optometric
+optometrist/MS
+optometry/SM
+opulence/SM
+opulent/Y
+opus/SM
+op/XGDN
+OR
+oracle/GMSD
+oracular
+Oralee/M
+Oralia/M
+Oralie/M
+Oralla/M
+Oralle/M
+oral/YS
+Ora/M
+orangeade/MS
+Orange/M
+orange/MS
+orangery/SM
+orangutan/MS
+Oranjestad/M
+Oran/M
+orate/SDGNX
+oration/M
+oratorical/Y
+oratorio/MS
+orator/MS
+oratory/MS
+Orazio/M
+Orbadiah/M
+orbicular
+orbiculares
+orbital/MYS
+orbit/MRDGZS
+orb/SMDG
+orchard/SM
+orchestral/Y
+orchestra/MS
+orchestrate/GNSDX
+orchestrater's
+orchestration/M
+orchestrator/M
+orchid/SM
+ordainer/M
+ordainment/MS
+ordain/SGLDR
+ordeal/SM
+order/AESGD
+ordered/U
+orderer
+ordering/S
+orderless
+orderliness/SE
+orderly/PS
+order's/E
+ordinal/S
+ordinance/MS
+ordinarily
+ordinariness/S
+ordinary/RSPT
+ordinated
+ordinate/I
+ordinates
+ordinate's
+ordinating
+ordination/SM
+ordnance/SM
+Ordovician
+ordure/MS
+oregano/SM
+Oreg/M
+Oregonian/S
+Oregon/M
+Orelee/M
+Orelia/M
+Orelie/M
+Orella/M
+Orelle/M
+Orel/M
+Oren/M
+Ore/NM
+ore/NSM
+Oreo
+Orestes
+organdie's
+organdy/MS
+organelle/MS
+organically/I
+organic/S
+organismic
+organism/MS
+organist/MS
+organizable/UMS
+organizational/MYS
+organization/MEAS
+organize/AGZDRS
+organized/UE
+organizer/MA
+organizes/E
+organizing/E
+organ/MS
+organometallic
+organza/SM
+orgasm/GSMD
+orgasmic
+orgiastic
+orgy/SM
+Oriana/M
+oriel/MS
+orientable
+Oriental/S
+oriental/SY
+orientated/A
+orientate/ESDXGN
+orientates/A
+orientation/AMES
+orienteering/M
+orienter
+orient/GADES
+orient's
+Orient/SM
+orifice/MS
+orig
+origami/MS
+originality/SM
+originally
+original/US
+originate/VGNXSD
+origination/M
+originative/Y
+originator/SM
+origin/MS
+Orin/M
+Orinoco/M
+oriole/SM
+Orion/M
+orison/SM
+Oriya/M
+Orizaba/M
+Orkney/M
+Orland/M
+Orlando/M
+Orlan/M
+Orleans
+Orlick/M
+Orlon/SM
+Orly/M
+ormolu/SM
+or/MY
+ornamental/SY
+ornamentation/SM
+ornament/GSDM
+ornateness/SM
+ornate/YP
+orneriness/SM
+ornery/PRT
+ornithological
+ornithologist/SM
+ornithology/MS
+orographic/M
+orography/M
+Orono/M
+orotund
+orotundity/MS
+orphanage/MS
+orphanhood/M
+orphan/SGDM
+Orpheus/M
+Orphic
+Orran/M
+Orren/M
+Orrin/M
+orris/SM
+Orr/MN
+ors
+Orsa/M
+Orsola/M
+Orson/M
+Ortega/M
+Ortensia/M
+orthodontia/S
+orthodontic/S
+orthodontics/M
+orthodontist/MS
+orthodoxies
+orthodoxly/U
+Orthodox/S
+orthodoxy's
+orthodox/YS
+orthodoxy/U
+orthogonality/M
+orthogonalization/M
+orthogonalized
+orthogonal/Y
+orthographic
+orthographically
+orthography/MS
+orthonormal
+orthopedic/S
+orthopedics/M
+orthopedist/SM
+orthophosphate/MS
+orthorhombic
+Ortiz/M
+Orton/M
+Orval/M
+Orville/M
+Orv/M
+Orwellian
+Orwell/M
+o's
+Osage/SM
+Osaka/M
+Osbert/M
+Osborne/M
+Osborn/M
+Osbourne/M
+Osbourn/M
+Oscar/SM
+Osceola/M
+oscillate/SDXNG
+oscillation/M
+oscillator/SM
+oscillatory
+oscilloscope/SM
+osculate/XDSNG
+osculation/M
+Osgood/M
+OSHA
+Oshawa/M
+O'Shea/M
+Oshkosh/M
+osier/MS
+Osiris/M
+Oslo/M
+Os/M
+OS/M
+Osman/M
+osmium/MS
+Osmond/M
+osmoses
+osmosis/M
+osmotic
+Osmund/M
+osprey/SM
+osseous/Y
+Ossie/M
+ossification/M
+ossify/NGSDX
+ostensible
+ostensibly
+ostentation/MS
+ostentatiousness/M
+ostentatious/PY
+osteoarthritides
+osteoarthritis/M
+osteology/M
+osteopathic
+osteopath/M
+osteopaths
+osteopathy/MS
+osteoporoses
+osteoporosis/M
+ostracise's
+ostracism/MS
+ostracize/GSD
+Ostrander/M
+ostrich/MS
+Ostrogoth/M
+Ostwald/M
+O'Sullivan/M
+Osvaldo/M
+Oswald/M
+Oswell/M
+OT
+OTB
+OTC
+Otes
+Otha/M
+Othelia/M
+Othella/M
+Othello/M
+otherness/M
+other/SMP
+otherwise
+otherworldly/P
+otherworld/Y
+Othilia/M
+Othilie/M
+Otho/M
+otiose
+Otis/M
+OTOH
+Ottawa/MS
+otter/DMGS
+Ottilie/M
+Otto/M
+Ottoman
+ottoman/MS
+Ouagadougou/M
+oubliette/SM
+ouch/SDG
+oughtn't
+ought/SGD
+Ouija/MS
+ounce/MS
+our/S
+ourself
+ourselves
+ouster/M
+oust/RDGZS
+outage/MS
+outargue/GDS
+outback/MRS
+outbalance/GDS
+outbidding
+outbid/S
+outboard/S
+outboast/GSD
+outbound/S
+outbreak/SMG
+outbroke
+outbroken
+outbuilding/SM
+outburst/MGS
+outcast/GSM
+outclass/SDG
+outcome/SM
+outcropped
+outcropping/S
+outcrop/SM
+outcry/MSDG
+outdated/P
+outdid
+outdistance/GSD
+outdoes
+outdo/G
+outdone
+outdoor/S
+outdoorsy
+outdraw/GS
+outdrawn
+outdrew
+outermost
+outerwear/M
+outface/SDG
+outfall/MS
+outfielder/M
+outfield/RMSZ
+outfight/SG
+outfit/MS
+outfitted
+outfitter/MS
+outfitting
+outflank/SGD
+outflow/SMDG
+outfought
+outfox/GSD
+outgeneraled
+outgoes
+outgo/GJ
+outgoing/P
+outgrew
+outgrip
+outgrow/GSH
+outgrown
+outgrowth/M
+outgrowths
+outguess/SDG
+outhit/S
+outhitting
+outhouse/SM
+outing/M
+outlaid
+outlander/M
+outlandishness/MS
+outlandish/PY
+outland/ZR
+outlast/GSD
+outlawry/M
+outlaw/SDMG
+outlay/GSM
+outlet/SM
+outliers
+outline/SDGM
+outlive/GSD
+outlook/MDGS
+outlying
+outmaneuver/GSD
+outmatch/SDG
+outmigration
+outmoded
+outness/M
+outnumber/GDS
+outpaced
+outpatient/SM
+outperform/DGS
+out/PJZGSDR
+outplacement/S
+outplay/GDS
+outpoint/GDS
+outpost/SM
+outpouring/M
+outpour/MJG
+outproduce/GSD
+output/SM
+outputted
+outputting
+outrace/GSD
+outrage/GSDM
+outrageousness/M
+outrageous/YP
+outran
+outrank/GSD
+outré
+outreach/SDG
+outrider/MS
+outrigger/SM
+outright/Y
+outrunning
+outrun/S
+outscore/GDS
+outsell/GS
+outset/MS
+outsetting
+outshine/SG
+outshone
+outshout/GDS
+outsider/PM
+outside/ZSR
+outsize/S
+outskirt/SM
+outsmart/SDG
+outsold
+outsource/SDJG
+outspend/SG
+outspent
+outspoke
+outspokenness/SM
+outspoken/YP
+outspread/SG
+outstanding/Y
+outstate/NX
+outstation/M
+outstay/SDG
+outstretch/GSD
+outstripped
+outstripping
+outstrip/S
+outtake/S
+outvote/GSD
+outwardness/M
+outward/SYP
+outwear/SG
+outweigh/GD
+outweighs
+outwit/S
+outwitted
+outwitting
+outwore
+outwork/SMDG
+outworn
+ouzo/SM
+oval/MYPS
+ovalness/M
+ova/M
+ovarian
+ovary/SM
+ovate/SDGNX
+ovation/GMD
+ovenbird/SM
+oven/MS
+overabundance/MS
+overabundant
+overachieve/SRDGZ
+overact/DGVS
+overage/S
+overaggressive
+overallocation
+overall/SM
+overambitious
+overanxious
+overarching
+overarm/GSD
+overate
+overattentive
+overawe/GDS
+overbalance/DSG
+overbear/GS
+overbearingness/M
+overbearing/YP
+overbidding
+overbid/S
+overbite/MS
+overblown
+overboard
+overbold
+overbook/SDG
+overbore
+overborne
+overbought
+overbuild/GS
+overbuilt
+overburdening/Y
+overburden/SDG
+overbuy/GS
+overcame
+overcapacity/M
+overcapitalize/DSG
+overcareful
+overcast/GS
+overcasting/M
+overcautious
+overcerebral
+overcharge/DSG
+overcloud/DSG
+overcoating/M
+overcoat/SMG
+overcomer/M
+overcome/RSG
+overcommitment/S
+overcompensate/XGNDS
+overcompensation/M
+overcomplexity/M
+overcomplicated
+overconfidence/MS
+overconfident/Y
+overconscientious
+overconsumption/M
+overcook/SDG
+overcooled
+overcorrection
+overcritical
+overcrowd/DGS
+overcurious
+overdecorate/SDG
+overdependent
+overdetermined
+overdevelop/SDG
+overdid
+overdoes
+overdo/G
+overdone
+overdose/DSMG
+overdraft/SM
+overdraw/GS
+overdrawn
+overdress/GDS
+overdrew
+overdrive/GSM
+overdriven
+overdrove
+overdubbed
+overdubbing
+overdub/S
+overdue
+overeagerness/M
+overeager/PY
+overeater/M
+overeat/GNRS
+overeducated
+overemotional
+overemphases
+overemphasis/M
+overemphasize/GZDSR
+overenthusiastic
+overestimate/DSXGN
+overestimation/M
+overexcite/DSG
+overexercise/SDG
+overexert/GDS
+overexertion/SM
+overexploitation
+overexploited
+overexpose/GDS
+overexposure/SM
+overextend/DSG
+overextension
+overfall/M
+overfed
+overfeed/GS
+overfill/GDS
+overfishing
+overflew
+overflight/SM
+overflow/DGS
+overflown
+overfly/GS
+overfond
+overfull
+overgeneralize/GDS
+overgenerous
+overgraze/SDG
+overgrew
+overground
+overgrow/GSH
+overgrown
+overgrowth/M
+overgrowths
+overhand/DGS
+overhang/GS
+overhasty
+overhaul/GRDJS
+overhead/S
+overheard
+overhearer/M
+overhear/SRG
+overheat/SGD
+overhung
+overincredulous
+overindulgence/SM
+overindulgent
+overindulge/SDG
+overinflated
+overjoy/SGD
+overkill/SDMG
+overladed
+overladen
+overlaid
+overlain
+overland/S
+overlap/MS
+overlapped
+overlapping
+overlarge
+overlay/GS
+overleaf
+overlie
+overload/SDG
+overlong
+overlook/DSG
+overlord/DMSG
+overloud
+overly/GRS
+overmanning
+overmaster/GSD
+overmatching
+overmodest
+overmuch/S
+overnice
+overnight/SDRGZ
+overoptimism/SM
+overoptimistic
+overpaid
+overparticular
+overpass/GMSD
+overpay/LSG
+overpayment/M
+overplay/SGD
+overpopulate/DSNGX
+overpopulation/M
+overpopulous
+overpower/GSD
+overpowering/Y
+overpraise/DSG
+overprecise
+overpressure
+overprice/SDG
+overprint/DGS
+overproduce/SDG
+overproduction/S
+overprotect/GVDS
+overprotection/M
+overqualified
+overran
+overrate/DSG
+overreach/DSRG
+overreaction/SM
+overreact/SGD
+overred
+overrefined
+overrepresented
+overridden
+overrider/M
+override/RSG
+overripe
+overrode
+overrule/GDS
+overrunning
+overrun/S
+oversample/DG
+oversaturate
+oversaw
+oversea/S
+overseeing
+overseen
+overseer/M
+oversee/ZRS
+oversell/SG
+oversensitiveness/S
+oversensitive/P
+oversensitivity
+oversexed
+overshadow/GSD
+overshoe/SM
+overshoot/SG
+overshot/S
+oversight/SM
+oversimple
+oversimplification/M
+oversimplify/GXNDS
+oversize/GS
+oversleep/GS
+overslept
+oversoftness/M
+oversoft/P
+oversold
+overspecialization/MS
+overspecialize/GSD
+overspend/SG
+overspent
+overspill/DMSG
+overspread/SG
+overstaffed
+overstatement/SM
+overstate/SDLG
+overstay/GSD
+overstepped
+overstepping
+overstep/S
+overstimulate/DSG
+overstock/SGD
+overstraining
+overstressed
+overstretch/D
+overstrict
+overstrike/GS
+overstrung
+overstuffed
+oversubscribe/SDG
+oversubtle
+oversupply/MDSG
+oversuspicious
+overtaken
+overtake/RSZG
+overtax/DSG
+overthrew
+overthrow/GS
+overthrown
+overtightened
+overtime/MGDS
+overtire/DSG
+overtone/MS
+overtook
+overt/PY
+overture/DSMG
+overturn/SDG
+overuse/DSG
+overvalue/GSD
+overview/MS
+overweening
+overweight/GSD
+overwhelm/GDS
+overwhelming/Y
+overwinter/SDG
+overwork/GSD
+overwrap
+overwrite/SG
+overwritten
+overwrote
+overwrought
+over/YGS
+overzealousness/M
+overzealous/P
+Ovid/M
+oviduct/SM
+oviform
+oviparous
+ovoid/S
+ovular
+ovulate/GNXDS
+ovulatory
+ovule/MS
+ovum/MS
+ow/DYG
+Owen/MS
+owe/S
+owlet/SM
+owl/GSMDR
+owlishness/M
+owlish/PY
+owned/U
+own/EGDS
+ownership/MS
+owner/SM
+oxalate/M
+oxalic
+oxaloacetic
+oxblood/S
+oxbow/SM
+oxcart/MS
+oxen/M
+oxford/MS
+Oxford/MS
+oxidant/SM
+oxidate/NVX
+oxidation/M
+oxidative/Y
+oxide/SM
+oxidization/MS
+oxidized/U
+oxidize/JDRSGZ
+oxidizer/M
+oxidizes/A
+ox/MNS
+Oxnard
+Oxonian
+oxtail/M
+Oxus/M
+oxyacetylene/MS
+oxygenate/XSDMGN
+oxygenation/M
+oxygen/MS
+oxyhydroxides
+oxymora
+oxymoron/M
+oyster/GSDM
+oystering/M
+oz
+Ozark/SM
+Oz/M
+ozone/SM
+Ozymandias/M
+Ozzie/M
+Ozzy/M
+P
+PA
+Pablo/M
+Pablum/M
+pablum/S
+Pabst/M
+pabulum/SM
+PAC
+pace/DRSMZG
+Pace/M
+pacemaker/SM
+pacer/M
+pacesetter/MS
+pacesetting
+Pacheco/M
+pachyderm/MS
+pachysandra/MS
+pacific
+pacifically
+pacification/M
+Pacific/M
+pacifier/M
+pacifism/MS
+pacifistic
+pacifist/MS
+pacify/NRSDGXZ
+package/ARSDG
+packaged/U
+packager/S
+package's
+packages/U
+packaging/SM
+Packard/SM
+packed/AU
+packer/MUS
+packet/MSDG
+pack/GZSJDRMB
+packhorse/M
+packinghouse/S
+packing/M
+packsaddle/SM
+Packston/M
+packs/UA
+Packwood/M
+Paco/M
+Pacorro/M
+pact/SM
+Padang/M
+padded/U
+Paddie/M
+padding/SM
+paddle/MZGRSD
+paddler/M
+paddock/SDMG
+Paddy/M
+paddy/SM
+Padget/M
+Padgett/M
+Padilla/M
+padlock/SGDM
+pad/MS
+Padraic/M
+Padraig/M
+padre/MS
+Padrewski/M
+Padriac/M
+paean/MS
+paediatrician/MS
+paediatrics/M
+paedophilia's
+paella/SM
+paeony/M
+Paganini/M
+paganism/MS
+pagan/SM
+pageantry/SM
+pageant/SM
+pageboy/SM
+paged/U
+pageful
+Page/M
+page/MZGDRS
+pager/M
+paginate/DSNGX
+Paglia/M
+pagoda/MS
+Pahlavi/M
+paid/AU
+Paige/M
+pailful/SM
+Pail/M
+pail/SM
+Paine/M
+painfuller
+painfullest
+painfulness/MS
+painful/YP
+pain/GSDM
+painkiller/MS
+painkilling
+painlessness/S
+painless/YP
+painstaking/SY
+paint/ADRZGS
+paintbox/M
+paintbrush/SM
+painted/U
+painterly/P
+painter/YM
+painting/SM
+paint's
+paintwork
+paired/UA
+pair/JSDMG
+pairs/A
+pairwise
+paisley/MS
+pajama/MDS
+Pakistani/S
+Pakistan/M
+palace/MS
+paladin/MS
+palaeolithic
+palaeontologists
+palaeontology/M
+palanquin/MS
+palatability/M
+palatableness/M
+palatable/P
+palatalization/MS
+palatalize/SDG
+palatal/YS
+palate/BMS
+palatial/Y
+palatinate/SM
+Palatine
+palatine/S
+palaver/GSDM
+paleface/SM
+Palembang/M
+paleness/S
+Paleocene
+Paleogene
+paleographer/SM
+paleography/SM
+paleolithic
+Paleolithic
+paleontologist/S
+paleontology/MS
+Paleozoic
+Palermo/M
+pale/SPY
+Palestine/M
+Palestinian/S
+Palestrina/M
+palette/MS
+Paley/M
+palfrey/MS
+palimony/S
+palimpsest/MS
+palindrome/MS
+palindromic
+paling/M
+palisade/MGSD
+Palisades/M
+palish
+Palladio/M
+palladium/SM
+pallbearer/SM
+palletized
+pallet/SMGD
+pall/GSMD
+palliate/SDVNGX
+palliation/M
+palliative/SY
+pallidness/MS
+pallid/PY
+Pall/M
+pallor/MS
+palmate
+palmer/M
+Palmer/M
+Palmerston/M
+palmetto/MS
+palm/GSMDR
+palmist/MS
+palmistry/MS
+Palm/MR
+Palmolive/M
+palmtop/S
+Palmyra/M
+palmy/RT
+Palo/M
+Paloma/M
+Palomar/M
+palomino/MS
+palpable
+palpably
+palpate/SDNGX
+palpation/M
+palpitate/NGXSD
+palpitation/M
+pal/SJMDRYTG
+palsy/GSDM
+paltriness/SM
+paltry/TRP
+paludal
+Pa/M
+Pamela/M
+Pamelina/M
+Pamella/M
+pa/MH
+Pamirs
+Pam/M
+Pammie/M
+Pammi/M
+Pammy/M
+pampas/M
+pamperer/M
+pamper/RDSG
+Pampers
+pamphleteer/DMSG
+pamphlet/SM
+panacea/MS
+panache/MS
+Panama/MS
+Panamanian/S
+panama/S
+pancake/MGSD
+Panchito/M
+Pancho/M
+panchromatic
+pancreas/MS
+pancreatic
+panda/SM
+pandemic/S
+pandemonium/SM
+pander/ZGRDS
+Pandora/M
+panegyric/SM
+pane/KMS
+paneling/M
+panelist/MS
+panelization
+panelized
+panel/JSGDM
+Pangaea/M
+pang/GDMS
+pangolin/M
+panhandle/RSDGMZ
+panicked
+panicking
+panicky/RT
+panic/SM
+panier's
+panjandrum/M
+Pankhurst/M
+Pan/M
+Panmunjom/M
+panned
+pannier/SM
+panning
+panoply/MSD
+panorama/MS
+panoramic
+panpipes
+Pansie/M
+pan/SMD
+Pansy/M
+pansy/SM
+Pantagruel/M
+Pantaloon/M
+pantaloons
+pant/GDS
+pantheism/MS
+pantheistic
+pantheist/S
+pantheon/MS
+panther/SM
+pantie/SM
+pantiled
+pantograph/M
+pantomime/SDGM
+pantomimic
+pantomimist/SM
+pantry/SM
+pantsuit/SM
+pantyhose
+pantyliner
+pantywaist/SM
+Panza/M
+Paola/M
+Paoli/M
+Paolina/M
+Paolo/M
+papacy/SM
+Papagena/M
+Papageno/M
+papal/Y
+papa/MS
+paparazzi
+papaw/SM
+papaya/MS
+paperback/GDMS
+paperboard/MS
+paperboy/SM
+paperer/M
+papergirl/SM
+paper/GJMRDZ
+paperhanger/SM
+paperhanging/SM
+paperiness/M
+paperless
+paperweight/MS
+paperwork/SM
+papery/P
+papillae
+papilla/M
+papillary
+papist/MS
+papoose/SM
+Pappas/M
+papped
+papping
+pappy/RST
+paprika/MS
+pap/SZMNR
+papyri
+papyrus/M
+Paquito/M
+parable/MGSD
+parabola/MS
+parabolic
+paraboloidal/M
+paraboloid/MS
+Paracelsus/M
+paracetamol/M
+parachuter/M
+parachute/RSDMG
+parachutist/MS
+Paraclete/M
+parader/M
+parade/RSDMZG
+paradigmatic
+paradigm/SM
+paradisaic
+paradisaical
+Paradise/M
+paradise/MS
+paradoxic
+paradoxicalness/M
+paradoxical/YP
+paradox/MS
+paraffin/GSMD
+paragon/SGDM
+paragrapher/M
+paragraph/MRDG
+paragraphs
+Paraguayan/S
+Paraguay/M
+parakeet/MS
+paralegal/S
+paralinguistic
+parallax/SM
+parallel/DSG
+paralleled/U
+parallelepiped/MS
+parallelism/SM
+parallelization/MS
+parallelize/ZGDSR
+parallelogram/MS
+paralysis/M
+paralytically
+paralytic/S
+paralyzedly/S
+paralyzed/Y
+paralyzer/M
+paralyze/ZGDRS
+paralyzingly/S
+paralyzing/Y
+paramagnetic
+paramagnet/M
+Paramaribo/M
+paramecia
+paramecium/M
+paramedical/S
+paramedic/MS
+parameterization/SM
+parameterize/BSDG
+parameterized/U
+parameterless
+parameter/SM
+parametric
+parametrically
+parametrization
+parametrize/DS
+paramilitary/S
+paramount/S
+paramour/MS
+para/MS
+Paramus/M
+Paraná
+paranoiac/S
+paranoia/SM
+paranoid/S
+paranormal/SY
+parapet/SMD
+paraphernalia
+paraphrase/GMSRD
+paraphraser/M
+paraplegia/MS
+paraplegic/S
+paraprofessional/SM
+parapsychologist/S
+parapsychology/MS
+paraquat/S
+parasite/SM
+parasitically
+parasitic/S
+parasitism/SM
+parasitologist/M
+parasitology/M
+parasol/SM
+parasympathetic/S
+parathion/SM
+parathyroid/S
+paratrooper/M
+paratroop/RSZ
+paratyphoid/S
+parboil/DSG
+parceled/U
+parceling/M
+parcel/SGMD
+Parcheesi/M
+parch/GSDL
+parchment/SM
+PARC/M
+pardonableness/M
+pardonable/U
+pardonably/U
+pardoner/M
+pardon/ZBGRDS
+paregoric/SM
+parentage/MS
+parental/Y
+parenteral
+parentheses
+parenthesis/M
+parenthesize/GSD
+parenthetic
+parenthetical/Y
+parenthood/MS
+parent/MDGJS
+pare/S
+paresis/M
+pares/S
+Pareto/M
+parfait/SM
+pariah/M
+pariahs
+parietal/S
+parimutuel/S
+paring/M
+parishioner/SM
+parish/MS
+Parisian/SM
+Paris/M
+parity/ESM
+parka/MS
+Parke/M
+Parker/M
+Parkersburg/M
+park/GJZDRMS
+Parkhouse/M
+parking/M
+Parkinson/M
+parkish
+parkland/M
+parklike
+Parkman
+Park/RMS
+parkway/MS
+parlance/SM
+parlay/DGS
+parley/MDSG
+parliamentarian/SM
+parliamentary/U
+parliament/MS
+Parliament/MS
+parlor/SM
+parlous
+Parmesan/S
+parmigiana
+Parnassus/SM
+Parnell/M
+parochialism/SM
+parochiality
+parochial/Y
+parodied/U
+parodist/SM
+parody/SDGM
+parolee/MS
+parole/MSDG
+paroxysmal
+paroxysm/MS
+parquetry/SM
+parquet/SMDG
+parrakeet's
+parred
+parricidal
+parricide/MS
+parring
+Parrish/M
+Parr/M
+Parrnell/M
+parrot/GMDS
+parrotlike
+parry/GSD
+Parry/M
+parse
+parsec/SM
+parsed/U
+Parsee's
+parser/M
+Parsifal/M
+parsimonious/Y
+parsimony/SM
+pars/JDSRGZ
+parsley/MS
+parsnip/MS
+parsonage/MS
+parson/MS
+Parsons/M
+partaken
+partaker/M
+partake/ZGSR
+part/CDGS
+parterre/MS
+parter/S
+parthenogeneses
+parthenogenesis/M
+Parthenon/M
+Parthia/M
+partiality/MS
+partial/SY
+participant/MS
+participate/NGVDSX
+participation/M
+participator/S
+participatory
+participial/Y
+participle/MS
+particleboard/S
+particle/MS
+particolored
+particularistic
+particularity/SM
+particularization/MS
+particularize/GSD
+particular/SY
+particulate/S
+parting/MS
+partisanship/SM
+partisan/SM
+partition/AMRDGS
+partitioned/U
+partitioner/M
+partitive/S
+partizan's
+partly
+partner/DMGS
+partnership/SM
+partook
+partridge/MS
+part's
+parturition/SM
+partway
+party/RSDMG
+parvenu/SM
+par/ZGSJBMDR
+Pasadena/M
+PASCAL
+Pascale/M
+Pascal/M
+pascal/SM
+paschal/S
+pasha/MS
+Paso/M
+Pasquale/M
+pas/S
+passably
+passage/MGSD
+passageway/MS
+Passaic/M
+passband
+passbook/MS
+passel/MS
+passé/M
+passenger/MYS
+passerby
+passer/M
+passersby
+passim
+passing/Y
+passionated
+passionate/EYP
+passionateness/EM
+passionates
+passionating
+passioned
+passionflower/MS
+passioning
+passionless
+passion/SEM
+Passion/SM
+passivated
+passiveness/S
+passive/SYP
+passivity/S
+pass/JGVBZDSR
+passkey/SM
+passmark
+passover
+Passover/MS
+passport/SM
+password/SDM
+pasta/MS
+pasteboard/SM
+pasted/UA
+pastel/MS
+paste/MS
+Pasternak/M
+pastern/SM
+pasteup
+pasteurization/MS
+pasteurized/U
+pasteurizer/M
+pasteurize/RSDGZ
+Pasteur/M
+pastiche/MS
+pastille/SM
+pastime/SM
+pastiness/SM
+pastoralization/M
+pastoral/SPY
+pastorate/MS
+pastor/GSDM
+past/PGMDRS
+pastrami/MS
+pastry/SM
+past's/A
+pasts/A
+pasturage/SM
+pasture/MGSRD
+pasturer/M
+pasty/PTRS
+Patagonia/M
+Patagonian/S
+patch/EGRSD
+patcher/EM
+patchily
+patchiness/S
+patch's
+patchwork/RMSZ
+patchy/PRT
+patellae
+patella/MS
+Patel/M
+Pate/M
+paten/M
+Paten/M
+patentee/SM
+patent/ZGMRDYSB
+paterfamilias/SM
+pater/M
+paternalism/MS
+paternalist
+paternalistic
+paternal/Y
+paternity/SM
+paternoster/SM
+Paterson/M
+pate/SM
+pathetic
+pathetically
+pathfinder/MS
+pathless/P
+path/M
+pathname/SM
+pathogenesis/M
+pathogenic
+pathogen/SM
+pathologic
+pathological/Y
+pathologist/MS
+pathology/SM
+pathos/SM
+paths
+pathway/MS
+Patience/M
+patience/SM
+patient/MRYTS
+patient's/I
+patients/I
+patina/SM
+patine
+Patin/M
+patio/MS
+Pat/MN
+pat/MNDRS
+Patna/M
+patois/M
+Paton/M
+patresfamilias
+patriarchal
+patriarchate/MS
+patriarch/M
+patriarchs
+patriarchy/MS
+Patrica/M
+Patrice/M
+Patricia/M
+patrician/MS
+patricide/MS
+Patricio/M
+Patrick/M
+Patric/M
+patrimonial
+patrimony/SM
+patriotically
+patriotic/U
+patriotism/SM
+patriot/SM
+patristic/S
+Patrizia/M
+Patrizio/M
+Patrizius/M
+patrolled
+patrolling
+patrolman/M
+patrolmen
+patrol/MS
+patrolwoman
+patrolwomen
+patronage/MS
+patroness/S
+patronization
+patronized/U
+patronize/GZRSDJ
+patronizer/M
+patronizes/A
+patronizing's/U
+patronizing/YM
+patronymically
+patronymic/S
+patron/YMS
+patroon/MS
+patsy/SM
+Patsy/SM
+patted
+Patten/M
+patten/MS
+patterer/M
+pattern/GSDM
+patternless
+patter/RDSGJ
+Patterson/M
+Pattie/M
+Patti/M
+patting
+Pattin/M
+Patton/M
+Patty/M
+patty/SM
+paucity/SM
+Paula/M
+Paule/M
+Pauletta/M
+Paulette/M
+Paulie/M
+Pauli/M
+Paulina/M
+Pauline
+Pauling/M
+Paulita/M
+Paul/MG
+Paulo/M
+Paulsen/M
+Paulson/M
+Paulus/M
+Pauly/M
+paunch/GMSD
+paunchiness/M
+paunchy/RTP
+pauperism/SM
+pauperize/SDG
+pauper/SGDM
+pause/DSG
+Pavarotti
+paved/UA
+pave/GDRSJL
+Pavel/M
+pavement/SGDM
+paver/M
+paves/A
+Pavia/M
+pavilion/SMDG
+paving/A
+paving's
+Pavla/M
+Pavlova/MS
+Pavlovian
+Pavlov/M
+pawl/SM
+paw/MDSG
+pawnbroker/SM
+pawnbroking/S
+Pawnee/SM
+pawner/M
+pawn/GSDRM
+pawnshop/MS
+pawpaw's
+Pawtucket/M
+paxes
+Paxon/M
+Paxton/M
+payable/S
+pay/AGSLB
+payback/S
+paycheck/SM
+payday/MS
+payed
+payee/SM
+payer/SM
+payload/SM
+paymaster/SM
+payment/ASM
+Payne/SM
+payoff/MS
+payola/MS
+payout/S
+payroll/MS
+payslip/S
+Payson/M
+Payton/M
+Paz/M
+Pb/M
+PBS
+PBX
+PCB
+PC/M
+PCP
+PCs
+pct
+pd
+PD
+Pd/M
+PDP
+PDQ
+PDT
+PE
+Peabody/M
+peaceableness/M
+peaceable/P
+peaceably
+peacefuller
+peacefullest
+peacefulness/S
+peaceful/PY
+peace/GMDS
+peacekeeping/S
+Peace/M
+peacemaker/MS
+peacemaking/MS
+peacetime/MS
+peach/GSDM
+Peachtree/M
+peachy/RT
+peacock/SGMD
+Peadar/M
+peafowl/SM
+peahen/MS
+peaked/P
+peakiness/M
+peak/SGDM
+peaky/P
+pealed/A
+Peale/M
+peal/MDSG
+peals/A
+pea/MS
+peanut/SM
+Pearce/M
+Pearla/M
+Pearle/M
+pearler/M
+Pearlie/M
+Pearline/M
+Pearl/M
+pearl/SGRDM
+pearly/TRS
+Pearson/M
+pear/SYM
+peartrees
+Peary/M
+peasanthood
+peasantry/SM
+peasant/SM
+peashooter/MS
+peats/A
+peat/SM
+peaty/TR
+pebble/MGSD
+pebbling/M
+pebbly/TR
+Pebrook/M
+pecan/SM
+peccadilloes
+peccadillo/M
+peccary/MS
+Pechora/M
+pecker/M
+peck/GZSDRM
+Peckinpah/M
+Peck/M
+Pecos/M
+pectic
+pectin/SM
+pectoral/S
+peculate/NGDSX
+peculator/S
+peculiarity/MS
+peculiar/SY
+pecuniary
+pedagogical/Y
+pedagogic/S
+pedagogics/M
+pedagogue/SDGM
+pedagogy/MS
+pedal/SGRDM
+pedantic
+pedantically
+pedantry/MS
+pedant/SM
+peddler/M
+peddle/ZGRSD
+pederast/SM
+pederasty/SM
+Peder/M
+pedestal/GDMS
+pedestrianization
+pedestrianize/GSD
+pedestrian/MS
+pediatrician/SM
+pediatric/S
+pedicab/SM
+pedicure/DSMG
+pedicurist/SM
+pedigree/DSM
+pediment/DMS
+pedlar's
+pedometer/MS
+pedophile/S
+pedophilia
+Pedro/M
+peduncle/MS
+peeing
+peekaboo/SM
+peek/GSD
+peeler/M
+peeling/M
+Peel/M
+peel/SJGZDR
+peen/GSDM
+peeper/M
+peephole/SM
+peep/SGZDR
+peepshow/MS
+peepy
+peerage/MS
+peer/DMG
+peeress/MS
+peerlessness/M
+peerless/PY
+peeve/GZMDS
+peevers/M
+peevishness/SM
+peevish/YP
+peewee/S
+pee/ZDRS
+Pegasus/MS
+pegboard/SM
+Pegeen/M
+pegged
+Peggie/M
+Peggi/M
+pegging
+Peggy/M
+Peg/M
+peg/MS
+peignoir/SM
+Pei/M
+Peiping/M
+Peirce/M
+pejoration/SM
+pejorative/SY
+peke/MS
+Pekinese's
+pekingese
+Pekingese/SM
+Peking/SM
+pekoe/SM
+pelagic
+Pelee/M
+Pele/M
+pelf/SM
+Pelham/M
+pelican/SM
+pellagra/SM
+pellet/SGMD
+pellucid
+Peloponnese/M
+pelter/M
+pelt/GSDR
+pelvic/S
+pelvis/SM
+Pembroke/M
+pemmican/SM
+penalization/SM
+penalized/U
+penalize/SDG
+penalty/MS
+penal/Y
+Pena/M
+penance/SDMG
+pence/M
+penchant/MS
+pencil/SGJMD
+pendant/SM
+pend/DCGS
+pendent/CS
+Penderecki/M
+Pendleton/M
+pendulous
+pendulum/MS
+Penelopa/M
+Penelope/M
+penetrability/SM
+penetrable
+penetrate/SDVGNX
+penetrating/Y
+penetration/M
+penetrativeness/M
+penetrative/PY
+penetrator/MS
+penguin/MS
+penicillin/SM
+penile
+peninsular
+peninsula/SM
+penis/MS
+penitence/MS
+penitential/YS
+penitentiary/MS
+penitent/SY
+penknife/M
+penknives
+penlight/MS
+pen/M
+Pen/M
+penman/M
+penmanship/MS
+penmen
+Penna
+pennant/SM
+penned
+Penney/M
+Pennie/M
+penniless
+Penni/M
+penning
+Pennington/M
+pennis
+Penn/M
+pennon/SM
+Pennsylvania/M
+Pennsylvanian/S
+Penny/M
+penny/SM
+pennyweight/SM
+pennyworth/M
+penologist/MS
+penology/MS
+Penrod/M
+Pensacola/M
+pensioner/M
+pension/ZGMRDBS
+pensiveness/S
+pensive/PY
+pens/V
+pentacle/MS
+pentagonal/SY
+Pentagon/M
+pentagon/SM
+pentagram/MS
+pentameter/SM
+pent/AS
+Pentateuch/M
+pentathlete/S
+pentathlon/MS
+pentatonic
+pentecostal
+Pentecostalism/S
+Pentecostal/S
+Pentecost/SM
+penthouse/SDGM
+Pentium/M
+penuche/SM
+penultimate/SY
+penumbrae
+penumbra/MS
+penuriousness/MS
+penurious/YP
+penury/SM
+peonage/MS
+peon/MS
+peony/SM
+people/SDMG
+Peoria/M
+Pepe/M
+Pepillo/M
+Pepi/M
+Pepin/M
+Pepita/M
+Pepito/M
+pepped
+peppercorn/MS
+pepperer/M
+peppergrass/M
+peppermint/MS
+pepperoni/S
+pepper/SGRDM
+peppery
+peppiness/SM
+pepping
+peppy/PRT
+Pepsico/M
+PepsiCo/M
+Pepsi/M
+pepsin/SM
+pep/SM
+peptic/S
+peptidase/SM
+peptide/SM
+peptizing
+Pepys/M
+Pequot/M
+peradventure/S
+perambulate/DSNGX
+perambulation/M
+perambulator/MS
+percale/MS
+perceivably
+perceive/DRSZGB
+perceived/U
+perceiver/M
+percentage/MS
+percentile/SM
+percent/MS
+perceptible
+perceptibly
+perceptional
+perception/MS
+perceptiveness/MS
+perceptive/YP
+perceptual/Y
+percept/VMS
+Perceval/M
+perchance
+perch/GSDM
+perchlorate/M
+perchlorination
+percipience/MS
+percipient/S
+Percival/M
+percolate/NGSDX
+percolation/M
+percolator/MS
+percuss/DSGV
+percussionist/MS
+percussion/SAM
+percussiveness/M
+percussive/PY
+percutaneous/Y
+Percy/M
+perdition/MS
+perdurable
+peregrinate/XSDNG
+peregrination/M
+peregrine/S
+Perelman/M
+peremptorily
+peremptory/P
+perennial/SY
+pères
+perestroika/S
+Perez/M
+perfecta/S
+perfect/DRYSTGVP
+perfecter/M
+perfectibility/MS
+perfectible
+perfectionism/MS
+perfectionist/MS
+perfection/MS
+perfectiveness/M
+perfective/PY
+perfectness/MS
+perfidiousness/M
+perfidious/YP
+perfidy/MS
+perforated/U
+perforate/XSDGN
+perforation/M
+perforce
+performance/MS
+performed/U
+performer/M
+perform/SDRZGB
+perfumer/M
+perfumery/SM
+perfume/ZMGSRD
+perfunctorily
+perfunctoriness/M
+perfunctory/P
+perfused
+perfusion/M
+Pergamon/M
+pergola/SM
+perhaps/S
+Peria/M
+pericardia
+pericardium/M
+Perice/M
+Periclean
+Pericles/M
+perigee/SM
+perihelia
+perihelion/M
+peril/GSDM
+Perilla/M
+perilousness/M
+perilous/PY
+Peri/M
+perimeter/MS
+perinatal
+perinea
+perineum/M
+periodic
+periodical/YMS
+periodicity/MS
+period/MS
+periodontal/Y
+periodontics/M
+periodontist/S
+peripatetic/S
+peripheral/SY
+periphery/SM
+periphrases
+periphrasis/M
+periphrastic
+periscope/SDMG
+perishable/SM
+perish/BZGSRD
+perishing/Y
+peristalses
+peristalsis/M
+peristaltic
+peristyle/MS
+peritoneal
+peritoneum/SM
+peritonitis/MS
+periwigged
+periwigging
+periwig/MS
+periwinkle/SM
+perjurer/M
+perjure/SRDZG
+perjury/MS
+per/K
+perk/GDS
+perkily
+perkiness/S
+Perkin/SM
+perky/TRP
+Perla/M
+Perle/M
+Perl/M
+permafrost/MS
+permalloy/M
+Permalloy/M
+permanence/SM
+permanency/MS
+permanentness/M
+permanent/YSP
+permeability/SM
+permeableness/M
+permeable/P
+permeate/NGVDSX
+Permian
+permissibility/M
+permissibleness/M
+permissible/P
+permissibly
+permission/SM
+permissiveness/MS
+permissive/YP
+permit/SM
+permitted
+permitting
+Perm/M
+perm/MDGS
+permutation/MS
+permute/SDG
+Pernell/M
+perniciousness/MS
+pernicious/PY
+Pernod/M
+Peron/M
+peroration/SM
+Perot/M
+peroxidase/M
+peroxide/MGDS
+perpend/DG
+perpendicularity/SM
+perpendicular/SY
+perpetrate/NGXSD
+perpetration/M
+perpetrator/SM
+perpetual/SY
+perpetuate/NGSDX
+perpetuation/M
+perpetuity/MS
+perplex/DSG
+perplexed/Y
+perplexity/MS
+perquisite/SM
+Perren/M
+Perri/M
+Perrine/M
+Perry/MR
+persecute/XVNGSD
+persecution/M
+persecutor/MS
+persecutory
+Perseid/M
+Persephone/M
+Perseus/M
+perseverance/MS
+persevere/GSD
+persevering/Y
+Pershing/M
+Persia/M
+Persian/S
+persiflage/MS
+persimmon/SM
+Persis/M
+persist/DRSG
+persistence/SM
+persistent/Y
+persnickety
+personableness/M
+personable/P
+personae
+personage/SM
+personality/SM
+personalization/CMS
+personalize/CSDG
+personalized/U
+personalty/MS
+personal/YS
+persona/M
+person/BMS
+personification/M
+personifier/M
+personify/XNGDRS
+personnel/SM
+person's/U
+persons/U
+perspective/YMS
+perspex
+perspicaciousness/M
+perspicacious/PY
+perspicacity/S
+perspicuity/SM
+perspicuousness/M
+perspicuous/YP
+perspiration/MS
+perspire/DSG
+persuaded/U
+persuader/M
+persuade/ZGDRSB
+persuasion/SM
+persuasively
+persuasiveness/MS
+persuasive/U
+pertain/GSD
+Perth/M
+pertinaciousness/M
+pertinacious/YP
+pertinacity/MS
+pertinence/S
+pertinent/YS
+pertness/MS
+perturbation/MS
+perturbed/U
+perturb/GDS
+pertussis/SM
+pert/YRTSP
+peruke/SM
+Peru/M
+perusal/SM
+peruser/M
+peruse/RSDZG
+Peruvian/S
+pervade/SDG
+pervasion/M
+pervasiveness/MS
+pervasive/PY
+perverseness/SM
+perverse/PXYNV
+perversion/M
+perversity/MS
+pervert/DRSG
+perverted/YP
+perverter/M
+perviousness
+peseta/SM
+Peshawar/M
+peskily
+peskiness/S
+pesky/RTP
+peso/MS
+pessimal/Y
+pessimism/SM
+pessimistic
+pessimistically
+pessimist/SM
+pester/DG
+pesticide/MS
+pestiferous
+pestilence/SM
+pestilential/Y
+pestilent/Y
+pestle/SDMG
+pesto/S
+pest/RZSM
+PET
+Pétain/M
+petal/SDM
+Peta/M
+petard/MS
+petcock/SM
+Pete/M
+peter/GD
+Peter/M
+Petersburg/M
+Petersen/M
+Peters/N
+Peterson/M
+Peterus/M
+Petey/M
+pethidine/M
+petiole/SM
+petiteness/M
+petite/XNPS
+petitioner/M
+petition/GZMRD
+petition's/A
+petitions/A
+petits
+Petkiewicz/M
+Pet/MRZ
+Petra/M
+Petrarch/M
+petrel/SM
+petri
+petrifaction/SM
+petrify/NDSG
+Petrina/M
+Petr/M
+petrochemical/SM
+petrodollar/MS
+petroglyph/M
+petrolatum/MS
+petroleum/MS
+petrolled
+petrolling
+petrol/MS
+petrologist/MS
+petrology/MS
+Petronella/M
+Petronia/M
+Petronilla/M
+Petronille/M
+pet/SMRZ
+petted
+petter/MS
+Pettibone/M
+petticoat/SMD
+pettifogged
+pettifogger/SM
+pettifogging
+pettifog/S
+pettily
+pettiness/S
+petting
+pettis
+pettishness/M
+pettish/YP
+Petty/M
+petty/PRST
+petulance/MS
+petulant/Y
+Petunia/M
+petunia/SM
+Peugeot/M
+Pewaukee/M
+pewee/MS
+pewit/MS
+pew/SM
+pewter/SRM
+peyote/SM
+Peyter/M
+Peyton/M
+pf
+Pfc
+PFC
+pfennig/SM
+Pfizer/M
+pg
+PG
+Phaedra/M
+Phaethon/M
+phaeton/MS
+phage/M
+phagocyte/SM
+Phaidra/M
+phalanger/MS
+phalanges
+phalanx/SM
+phalli
+phallic
+phallus/M
+Phanerozoic
+phantasmagoria/SM
+phantasmal
+phantasm/SM
+phantasy's
+phantom/MS
+pharaoh
+Pharaoh/M
+pharaohs
+Pharaohs
+pharisaic
+Pharisaic
+Pharisaical
+pharisee/S
+Pharisee/SM
+pharmaceutical/SY
+pharmaceutic/S
+pharmaceutics/M
+pharmacist/SM
+pharmacological/Y
+pharmacologist/SM
+pharmacology/SM
+pharmacopoeia/SM
+pharmacy/SM
+pharyngeal/S
+pharynges
+pharyngitides
+pharyngitis/M
+pharynx/M
+phase/DSRGZM
+phaseout/S
+PhD
+pheasant/SM
+Phebe/M
+Phedra/M
+Phekda/M
+Phelia/M
+Phelps/M
+phenacetin/MS
+phenobarbital/SM
+phenolic
+phenol/MS
+phenolphthalein/M
+phenomenal/Y
+phenomena/SM
+phenomenological/Y
+phenomenology/MS
+phenomenon/SM
+phenotype/MS
+phenylalanine/M
+phenyl/M
+pheromone/MS
+phew/S
+phialled
+phialling
+phial/MS
+Phidias/M
+Philadelphia/M
+philanderer/M
+philander/SRDGZ
+philanthropic
+philanthropically
+philanthropist/MS
+philanthropy/SM
+philatelic
+philatelist/MS
+philately/SM
+Philbert/M
+Philco/M
+philharmonic/S
+Philipa/M
+Philip/M
+Philippa/M
+Philippe/M
+Philippians/M
+philippic/SM
+Philippine/SM
+Philis/M
+philistine/S
+Philistine/SM
+philistinism/S
+Phillida/M
+Phillie/M
+Phillipa/M
+Phillipe/M
+Phillip/MS
+Phillipp/M
+Phillis/M
+Philly/SM
+Phil/MY
+philodendron/MS
+philological/Y
+philologist/MS
+philology/MS
+Philomena/M
+philosopher/MS
+philosophic
+philosophical/Y
+philosophized/U
+philosophizer/M
+philosophizes/U
+philosophize/ZDRSG
+philosophy/MS
+philter/SGDM
+philtre/DSMG
+Phineas/M
+Phip/M
+Phipps/M
+phi/SM
+phlebitides
+phlebitis/M
+phlegmatic
+phlegmatically
+phlegm/SM
+phloem/MS
+phlox/M
+pH/M
+Ph/M
+phobia/SM
+phobic/S
+Phobos/M
+Phoebe/M
+phoebe/SM
+Phoenicia/M
+Phoenician/SM
+Phoenix/M
+phoenix/MS
+phone/DSGM
+phoneme/SM
+phonemically
+phonemic/S
+phonemics/M
+phonetically
+phonetician/SM
+phonetic/S
+phonetics/M
+phonically
+phonic/S
+phonics/M
+phoniness/MS
+phonographer/M
+phonographic
+phonograph/RM
+phonographs
+phonologic
+phonological/Y
+phonologist/MS
+phonology/MS
+phonon/M
+phony/PTRSDG
+phooey/S
+phosphatase/M
+phosphate/MS
+phosphide/M
+phosphine/MS
+phosphoresce
+phosphorescence/SM
+phosphorescent/Y
+phosphoric
+phosphor/MS
+phosphorous
+phosphorus/SM
+photocell/MS
+photochemical/Y
+photochemistry/M
+photocopier/M
+photocopy/MRSDZG
+photoelectric
+photoelectrically
+photoelectronic
+photoelectrons
+photoengraver/M
+photoengrave/RSDJZG
+photoengraving/M
+photofinishing/MS
+photogenic
+photogenically
+photograph/AGD
+photographer/SM
+photographic
+photographically
+photograph's
+photographs/A
+photography/MS
+photojournalism/SM
+photojournalist/SM
+photoluminescence/M
+photolysis/M
+photolytic
+photometer/SM
+photometric
+photometrically
+photometry/M
+photomicrograph/M
+photomicrography/M
+photomultiplier/M
+photon/MS
+photorealism
+photosensitive
+photo/SGMD
+photosphere/M
+photostatic
+Photostat/MS
+Photostatted
+Photostatting
+photosyntheses
+photosynthesis/M
+photosynthesize/DSG
+photosynthetic
+phototypesetter
+phototypesetting/M
+phrasal
+phrase/AGDS
+phrasebook
+phrasemaking
+phraseology/MS
+phrase's
+phrasing/SM
+phrenological/Y
+phrenologist/MS
+phrenology/MS
+phylactery/MS
+phylae
+phyla/M
+Phylis/M
+Phyllida/M
+Phyllis/M
+Phyllys/M
+phylogeny/MS
+phylum/M
+Phylys/M
+phys
+physicality/M
+physical/PYS
+physician/SM
+physicist/MS
+physicked
+physicking
+physic/SM
+physiochemical
+physiognomy/SM
+physiography/MS
+physiologic
+physiological/Y
+physiologist/SM
+physiology/MS
+physiotherapist/MS
+physiotherapy/SM
+physique/MSD
+phytoplankton/M
+Piaf/M
+Piaget/M
+Pia/M
+pianism/M
+pianissimo/S
+pianistic
+pianist/SM
+pianoforte/MS
+pianola
+Pianola/M
+piano/SM
+piaster/MS
+piazza/SM
+pibroch/M
+pibrochs
+picador/MS
+picaresque/S
+pica/SM
+Picasso/M
+picayune/S
+Piccadilly/M
+piccalilli/MS
+piccolo/MS
+pickaback's
+pickaxe's
+pickax/GMSD
+pickerel/MS
+Pickering/M
+picker/MG
+picketer/M
+picket/MSRDZG
+Pickett/M
+Pickford/M
+pick/GZSJDR
+pickle/SDMG
+Pickman/M
+pickoff/S
+pickpocket/GSM
+pickup/SM
+Pickwick/M
+picky/RT
+picnicked
+picnicker/MS
+picnicking
+picnic/SM
+picofarad/MS
+picojoule
+picoseconds
+picot/DMGS
+Pict/M
+pictograph/M
+pictographs
+pictorialness/M
+pictorial/PYS
+picture/MGSD
+picturesqueness/SM
+picturesque/PY
+piddle/GSD
+piddly
+pidgin/SM
+piebald/S
+piece/GMDSR
+piecemeal
+piecer/M
+piecewise
+pieceworker/M
+piecework/ZSMR
+piedmont
+Piedmont/M
+pieing
+pie/MS
+Pierce/M
+piercer/M
+pierce/RSDZGJ
+piercing/Y
+Pierette/M
+pier/M
+Pier/M
+Pierre/M
+Pierrette/M
+Pierrot/M
+Pierson/M
+Pieter/M
+Pietra/M
+Pietrek/M
+Pietro/M
+piety/SM
+piezoelectric
+piezoelectricity/M
+piffle/MGSD
+pigeon/DMGS
+pigeonhole/SDGM
+pigged
+piggery/M
+pigging
+piggishness/SM
+piggish/YP
+piggyback/MSDG
+Piggy/M
+piggy/RSMT
+pigheadedness/S
+pigheaded/YP
+piglet/MS
+pigmentation/MS
+pigment/MDSG
+pig/MLS
+Pigmy's
+pigpen/SM
+pigroot
+pigskin/MS
+pigsty/SM
+pigswill/M
+pigtail/SMD
+Pike/M
+pike/MZGDRS
+piker/M
+pikestaff/MS
+pilaf/MS
+pilaster/SM
+Pilate/M
+pilau's
+pilchard/SM
+Pilcomayo/M
+pile/JDSMZG
+pileup/MS
+pilferage/SM
+pilferer/M
+pilfer/ZGSRD
+Pilgrim
+pilgrimage/DSGM
+pilgrim/MS
+piling/M
+pillage/RSDZG
+pillar/DMSG
+pillbox/MS
+pill/GSMD
+pillion/DMGS
+pillory/MSDG
+pillowcase/SM
+pillow/GDMS
+pillowslip/S
+Pillsbury/M
+pilot/DMGS
+pilothouse/SM
+piloting/M
+pimento/MS
+pimiento/SM
+pimpernel/SM
+pimp/GSMYD
+pimple/SDM
+pimplike
+pimply/TRM
+PIN
+pinafore/MS
+piñata/S
+Pinatubo/M
+pinball/MS
+Pincas/M
+pincer/GSD
+Pinchas/M
+pincher/M
+pinch/GRSD
+pincushion/SM
+Pincus/M
+Pindar/M
+pineapple/MS
+pined/A
+Pinehurst/M
+pine/MNGXDS
+pines/A
+pinfeather/SM
+ping/GDRM
+pinheaded/P
+pinhead/SMD
+pinhole/SM
+pining/A
+pinion/DMG
+Pinkerton/M
+pinkeye/MS
+pink/GTYDRMPS
+pinkie/SM
+pinkish/P
+pinkness/S
+pinko/MS
+pinky's
+pinnacle/MGSD
+pinnate
+pinned/U
+pinning/S
+Pinocchio/M
+Pinochet/M
+pinochle/SM
+piñon/S
+pinpoint/SDG
+pinprick/MDSG
+pin's
+pinsetter/SM
+Pinsky/M
+pinstripe/SDM
+pintail/SM
+Pinter/M
+pint/MRS
+pinto/S
+pinup/MS
+pin/US
+pinwheel/DMGS
+pinyin
+Pinyin
+piny/RT
+pioneer/SDMG
+pion/M
+Piotr/M
+piousness/MS
+pious/YP
+pipeline/DSMG
+pipe/MS
+piper/M
+Piper/M
+Pipestone/M
+pipet's
+pipette/MGSD
+pipework
+piping/YM
+pipit/MS
+pip/JSZMGDR
+Pip/MR
+Pippa/M
+pipped
+pipping
+pippin/SM
+Pippo/M
+Pippy/M
+pipsqueak/SM
+piquancy/MS
+piquantness/M
+piquant/PY
+pique/GMDS
+piracy/MS
+Piraeus/M
+Pirandello/M
+piranha/SM
+pirate/MGSD
+piratical/Y
+pirogi
+pirogies
+pirouette/MGSD
+pis
+Pisa/M
+piscatorial
+Pisces/M
+Pisistratus/M
+pismire/SM
+Pissaro/M
+piss/DSRG!
+pistachio/MS
+piste/SM
+pistillate
+pistil/MS
+pistoleers
+pistole/M
+pistol/SMGD
+piston/SM
+pitapat/S
+pitapatted
+pitapatting
+pita/SM
+Pitcairn/M
+pitchblende/SM
+pitcher/M
+pitchfork/GDMS
+pitching/M
+pitchman/M
+pitchmen
+pitch/RSDZG
+pitchstone/M
+piteousness/SM
+piteous/YP
+pitfall/SM
+pithily
+pithiness/SM
+pith/MGDS
+piths
+pithy/RTP
+pitiableness/M
+pitiable/P
+pitiably
+pitier/M
+pitifuller
+pitifullest
+pitifulness/M
+pitiful/PY
+pitilessness/SM
+pitiless/PY
+pitman/M
+pit/MS
+Pitney/M
+piton/SM
+pittance/SM
+pitted
+pitting
+Pittman/M
+Pittsburgh/ZM
+Pittsfield/M
+Pitt/SM
+Pittston/M
+pituitary/SM
+pitying/Y
+pity/ZDSRMG
+Pius/M
+pivotal/Y
+pivot/DMSG
+pivoting/M
+pix/DSG
+pixel/SM
+pixie/MS
+pixiness
+pixmap/SM
+Pizarro/M
+pizazz/S
+pi/ZGDRH
+pizza/SM
+pizzeria/SM
+pizzicati
+pizzicato
+pj's
+PJ's
+pk
+pkg
+pkt
+pkwy
+Pkwy
+pl
+placard/DSMG
+placate/NGVXDRS
+placatory
+placeable/A
+placebo/SM
+placed/EAU
+place/DSRJLGZM
+placeholder/S
+placekick/DGS
+placeless/Y
+placement/AMES
+placental/S
+placenta/SM
+placer/EM
+places/EA
+placidity/SM
+placidness/M
+placid/PY
+placing/AE
+placket/SM
+plagiarism/MS
+plagiarist/MS
+plagiarize/GZDSR
+plagiary/SM
+plagued/U
+plague/MGRSD
+plaguer/M
+plaice/M
+plaid/DMSG
+plainclothes
+plainclothesman
+plainclothesmen
+Plainfield/M
+plainness/MS
+plainsman/M
+plainsmen
+plainsong/SM
+plainspoken
+plain/SPTGRDY
+plaintiff/MS
+plaintiveness/M
+plaintive/YP
+plaint/VMS
+Plainview/M
+plaiting/M
+plait/SRDMG
+planar
+planarity
+Planck/M
+plan/DRMSGZ
+planeload
+planer/M
+plane's
+plane/SCGD
+planetarium/MS
+planetary
+planetesimal/M
+planet/MS
+planetoid/SM
+plangency/S
+plangent
+planking/M
+plank/SJMDG
+plankton/MS
+planned/U
+planner/SM
+planning
+Plano
+planoconcave
+planoconvex
+Plantagenet/M
+plantain/MS
+plantar
+plantation/MS
+planter/MS
+planting/S
+plantlike
+plant's
+plant/SADG
+plaque/MS
+plash/GSDM
+plasma/MS
+plasmid/S
+plasm/M
+plasterboard/MS
+plasterer/M
+plastering/M
+plaster/MDRSZG
+plasterwork/M
+plastically
+plasticine
+Plasticine/M
+plasticity/SM
+plasticize/GDS
+plastic/MYS
+plateau/GDMS
+plateful/S
+platelet/SM
+platen/M
+plater/M
+plate/SM
+platform/SGDM
+Plath/M
+plating/M
+platinize/GSD
+platinum/MS
+platitude/SM
+platitudinous/Y
+plat/JDNRSGXZ
+Plato/M
+platonic
+Platonic
+Platonism/M
+Platonist
+platoon/MDSG
+platted
+Platte/M
+platter/MS
+Platteville/M
+platting
+platypus/MS
+platys
+platy/TR
+plaudit/MS
+plausibility/S
+plausible/P
+plausibly
+Plautus/M
+playability/U
+playable/U
+playacting/M
+playact/SJDG
+playback/MS
+playbill/SM
+Playboy/M
+playboy/SM
+play/DRSEBG
+played/A
+player's/E
+player/SM
+playfellow/S
+playfulness/MS
+playful/PY
+playgirl/SM
+playgoer/MS
+playground/MS
+playgroup/S
+playhouse/SM
+playing/S
+playmate/MS
+playoff/S
+playpen/SM
+playroom/SM
+plays/A
+Playtex/M
+plaything/MS
+playtime/SM
+playwright/SM
+playwriting/M
+plaza/SM
+pleader/MA
+pleading/MY
+plead/ZGJRDS
+pleasanter
+pleasantest
+pleasantness/SMU
+pleasantry/MS
+pleasant/UYP
+pleased/EU
+pleaser/M
+pleases/E
+please/Y
+pleasingness/M
+pleasing/YP
+plea/SM
+pleas/RSDJG
+pleasurableness/M
+pleasurable/P
+pleasurably
+pleasureful
+pleasure/MGBDS
+pleasure's/E
+pleasures/E
+pleater/M
+pleat/RDMGS
+plebeian/SY
+plebe/MS
+plebiscite/SM
+plectra
+plectrum/SM
+pledger/M
+pledge/RSDMG
+Pleiads
+Pleistocene
+plenary/S
+plenipotentiary/S
+plenitude/MS
+plenteousness/M
+plenteous/PY
+plentifulness/M
+plentiful/YP
+plenty/SM
+plenum/M
+pleonasm/MS
+plethora/SM
+pleurae
+pleural
+pleura/M
+pleurisy/SM
+Plexiglas/MS
+plexus/SM
+pliability/MS
+pliableness/M
+pliable/P
+pliancy/MS
+pliantness/M
+pliant/YP
+plication/MA
+plier/MA
+plight/GMDRS
+plimsolls
+plinker/M
+plink/GRDS
+plinth/M
+plinths
+Pliny/M
+Pliocene/S
+PLO
+plodded
+plodder/SM
+plodding/SY
+plod/S
+plopped
+plopping
+plop/SM
+plosive
+plot/SM
+plotted/A
+plotter/MDSG
+plotting
+plover/MS
+plowed/U
+plower/M
+plowman/M
+plowmen
+plow/SGZDRM
+plowshare/MS
+ploy's
+ploy/SCDG
+plucker/M
+pluckily
+pluckiness/SM
+pluck/SGRD
+plucky/TPR
+pluggable
+plugged/UA
+plugging/AU
+plughole
+plug's
+plug/US
+plumage/DSM
+plumbago/M
+plumbed/U
+plumber/M
+plumbing/M
+plumb/JSZGMRD
+plume/SM
+plummer
+plummest
+plummet/DSG
+plummy
+plumper/M
+plumpness/S
+plump/RDNYSTGP
+plum/SMDG
+plumy/TR
+plunder/GDRSZ
+plunger/M
+plunge/RSDZG
+plunker/M
+plunk/ZGSRD
+pluperfect/S
+pluralism/MS
+pluralistic
+pluralist/S
+plurality/SM
+pluralization/MS
+pluralize/GZRSD
+pluralizer/M
+plural/SY
+plushness/MS
+plush/RSYMTP
+plushy/RPT
+plus/S
+plussed
+plussing
+Plutarch/M
+plutocracy/MS
+plutocratic
+plutocrat/SM
+Pluto/M
+plutonium/SM
+pluvial/S
+ply/AZNGRSD
+Plymouth/M
+plywood/MS
+pm
+PM
+Pm/M
+PMS
+pneumatically
+pneumatic/S
+pneumatics/M
+pneumonia/MS
+PO
+poacher/M
+poach/ZGSRD
+Pocahontas/M
+pocketbook/SM
+pocketful/SM
+pocketing/M
+pocketknife/M
+pocketknives
+pocket/MSRDG
+pock/GDMS
+pockmark/MDSG
+Pocono/MS
+podded
+podding
+podge/ZR
+Podgorica/M
+podiatrist/MS
+podiatry/MS
+podium/MS
+pod/SM
+Podunk/M
+Poe/M
+poem/MS
+poesy/GSDM
+poetaster/MS
+poetess/MS
+poetically
+poeticalness
+poetical/U
+poetic/S
+poetics/M
+poet/MS
+poetry/SM
+pogo
+Pogo/M
+pogrom/GMDS
+poignancy/MS
+poignant/Y
+Poincaré/M
+poinciana/SM
+Poindexter/M
+poinsettia/SM
+pointblank
+pointedness/M
+pointed/PY
+pointer/M
+pointillism/SM
+pointillist/SM
+pointing/M
+pointlessness/SM
+pointless/YP
+point/RDMZGS
+pointy/TR
+poise/M
+pois/GDS
+poi/SM
+poisoner/M
+poisoning/M
+poisonous/PY
+poison/RDMZGSJ
+Poisson/M
+poke/DRSZG
+Pokemon/M
+pokerface/D
+poker/M
+poky/SRT
+Poland/M
+Polanski/M
+polarimeter/SM
+polarimetry
+polariscope/M
+Polaris/M
+polarity/MS
+polarization/CMS
+polarized/UC
+polarize/RSDZG
+polarizes/C
+polarizing/C
+polarogram/SM
+polarograph
+polarography/M
+Polaroid/SM
+polar/S
+polecat/SM
+polemical/Y
+polemicist/S
+polemic/S
+polemics/M
+pole/MS
+Pole/MS
+poler/M
+polestar/S
+poleward/S
+pol/GMDRS
+policeman/M
+policemen/M
+police/MSDG
+policewoman/M
+policewomen
+policyholder/MS
+policymaker/S
+policymaking
+policy/SM
+poliomyelitides
+poliomyelitis/M
+polio/SM
+Polish
+polished/U
+polisher/M
+polish/RSDZGJ
+polis/M
+Politburo/M
+politburo/S
+politeness/MS
+polite/PRTY
+politesse/SM
+politically
+political/U
+politician/MS
+politicization/S
+politicize/CSDG
+politicked
+politicking/SM
+politico/SM
+politic/S
+politics/M
+polity/MS
+polka/SDMG
+Polk/M
+pollack/SM
+Pollard/M
+polled/U
+pollen/GDM
+pollinate/XSDGN
+pollination/M
+pollinator/MS
+polliwog/SM
+poll/MDNRSGX
+pollock's
+Pollock/SM
+pollster/MS
+pollutant/MS
+polluted/U
+polluter/M
+pollute/RSDXZVNG
+pollution/M
+Pollux/M
+Pollyanna/M
+Polly/M
+pollywog's
+Pol/MY
+Polo/M
+polo/MS
+polonaise/MS
+polonium/MS
+poltergeist/SM
+poltroon/MS
+polyandrous
+polyandry/MS
+polyatomic
+polybutene/MS
+polycarbonate
+polychemicals
+polychrome
+polyclinic/MS
+polycrystalline
+polyelectrolytes
+polyester/SM
+polyether/S
+polyethylene/SM
+polygamist/MS
+polygamous/Y
+polygamy/MS
+polyglot/S
+polygonal/Y
+polygon/MS
+polygraph/MDG
+polygraphs
+polygynous
+polyhedral
+polyhedron/MS
+Polyhymnia/M
+polyisobutylene
+polyisocyanates
+polymath/M
+polymaths
+polymerase/S
+polymeric
+polymerization/SM
+polymerize/SDG
+polymer/MS
+polymorphic
+polymorphism/MS
+polymorph/M
+polymyositis
+Polynesia/M
+Polynesian/S
+polynomial/YMS
+Polyphemus/M
+polyphonic
+polyphony/MS
+polyphosphate/S
+polyp/MS
+polypropylene/MS
+polystyrene/SM
+polysyllabic
+polysyllable/SM
+polytechnic/MS
+polytheism/SM
+polytheistic
+polytheist/SM
+polythene/M
+polytonal/Y
+polytopes
+polyunsaturated
+polyurethane/SM
+polyvinyl/MS
+Po/M
+pomade/MGSD
+pomander/MS
+pomegranate/SM
+Pomerania/M
+Pomeranian
+pommel/GSMD
+Pomona/M
+Pompadour/M
+pompadour/MDS
+pompano/SM
+Pompeian/S
+Pompeii/M
+Pompey/M
+pompom/SM
+pompon's
+pomposity/MS
+pompousness/S
+pompous/YP
+pomp/SM
+ponce/M
+Ponce/M
+Ponchartrain/M
+poncho/MS
+ponderer/M
+ponderousness/MS
+ponderous/PY
+ponder/ZGRD
+pond/SMDRGZ
+pone/SM
+pongee/MS
+poniard/GSDM
+pons/M
+Pontchartrain/M
+Pontiac/M
+Pontianak/M
+pontiff/MS
+pontifical/YS
+pontificate/XGNDS
+pontoon/SMDG
+pony/DSMG
+ponytail/SM
+pooch/GSDM
+poodle/MS
+poof/MS
+pooh/DG
+Pooh/M
+poohs
+Poole/M
+pool/MDSG
+poolroom/MS
+poolside
+Poona/M
+poop/MDSG
+poorboy
+poorhouse/MS
+poorness/MS
+poor/TYRP
+popcorn/MS
+Popek/MS
+pope/SM
+Pope/SM
+Popeye/M
+popgun/SM
+popinjay/MS
+poplar/SM
+poplin/MS
+Popocatepetl/M
+popover/SM
+poppa/MS
+popped
+Popper/M
+popper/SM
+poppet/M
+popping
+Poppins/M
+poppycock/MS
+Poppy/M
+poppy/SDM
+poppyseed
+Popsicle/MS
+pop/SM
+populace/MS
+popularism
+popularity/UMS
+popularization/SM
+popularize/A
+popularized
+popularizer/MS
+popularizes/U
+popularizing
+popular/YS
+populate/CXNGDS
+populated/UA
+populates/A
+populating/A
+population/MC
+populism/S
+populist/SM
+populousness/MS
+populous/YP
+porcelain/SM
+porch/SM
+porcine
+porcupine/MS
+pore/ZGDRS
+Porfirio/M
+porgy/SM
+poring/Y
+porker/M
+porky/TSR
+pork/ZRMS
+pornographer/SM
+pornographic
+pornographically
+pornography/SM
+porno/S
+porn/S
+porosity/SM
+porousness/MS
+porous/PY
+porphyritic
+porphyry/MS
+porpoise/DSGM
+porridge/MS
+Porrima/M
+porringer/MS
+Porsche/M
+portability/S
+portables
+portable/U
+portably
+port/ABSGZMRD
+portage/ASM
+portaged
+portaging
+portal/SM
+portamento/M
+portcullis/MS
+ported/CE
+Porte/M
+portend/SDG
+portentousness/M
+portentous/PY
+portent/SM
+porterage/M
+porter/DMG
+porterhouse/SM
+Porter/M
+porter's/A
+portfolio/MS
+porthole/SM
+Portia/M
+porticoes
+portico/M
+Portie/M
+portière/SM
+porting/E
+portion/KGSMD
+Portland/M
+portliness/SM
+portly/PTR
+portmanteau/SM
+Port/MR
+Pôrto/M
+portraitist/SM
+portrait/MS
+portraiture/MS
+portrayal/SM
+portrayer/M
+portray/GDRS
+ports/CE
+Portsmouth/M
+Portugal/M
+Portuguese/M
+portulaca/MS
+Porty/M
+posed/CA
+Poseidon/M
+poser/KME
+poses/CA
+poseur/MS
+pose/ZGKDRSE
+posh/DSRGT
+posing/CA
+positifs
+positionable
+positional/KY
+position/KGASMD
+position's/EC
+positions/EC
+positiveness/S
+positive/RSPYT
+positivism/M
+positivist/S
+positivity
+positron/SM
+posit/SCGD
+Posner/M
+posse/M
+possess/AGEDS
+possessed/PY
+possession/AEMS
+possessional
+possessiveness/MS
+possessive/PSMY
+possessor/MS
+possibility/SM
+possible/TRS
+possibly
+poss/S
+possum/MS
+postage/MS
+postal/S
+post/ASDRJG
+postbag/M
+postbox/SM
+postcard/SM
+postcode/SM
+postcondition/S
+postconsonantal
+postdate/DSG
+postdoctoral
+posteriori
+posterior/SY
+posterity/SM
+poster/MS
+postfix/GDS
+postgraduate/SM
+posthaste/S
+posthumousness/M
+posthumous/YP
+posthypnotic
+postilion/MS
+postindustrial
+posting/M
+postlude/MS
+Post/M
+postman/M
+postmarital
+postmark/GSMD
+postmaster/SM
+postmen
+postmeridian
+postmistress/MS
+postmodern
+postmodernist
+postmortem/S
+postnasal
+postnatal
+postoperative/Y
+postorder
+postpaid
+postpartum
+postpone/GLDRS
+postponement/S
+postpositions
+postprandial
+post's
+postscript/SM
+postsecondary
+postulate/XGNSD
+postulation/M
+postural
+posture/MGSRD
+posturer/M
+postvocalic
+postwar
+posy/SM
+potability/SM
+potableness/M
+potable/SP
+potage/M
+potash/MS
+potassium/MS
+potatoes
+potato/M
+potbelly/MSD
+potboiler/M
+potboil/ZR
+pot/CMS
+Potemkin/M
+potency/MS
+potentate/SM
+potentiality/MS
+potential/SY
+potentiating
+potentiometer/SM
+potent/YS
+potful/SM
+pothead/MS
+potherb/MS
+pother/GDMS
+potholder/MS
+pothole/SDMG
+potholing/M
+pothook/SM
+potion/SM
+potlatch/SM
+potluck/MS
+Potomac/M
+potpie/SM
+potpourri/SM
+Potsdam/M
+potsherd/MS
+potshot/S
+pottage/SM
+Pottawatomie/M
+potted
+Potter/M
+potter/RDMSG
+pottery/MS
+potting
+Potts/M
+potty/SRT
+pouch/SDMG
+Poughkeepsie/M
+Poul/M
+poulterer/MS
+poultice/DSMG
+poultry/MS
+pounce/SDG
+poundage/MS
+pounder/MS
+pound/KRDGS
+Pound/M
+pour/DSG
+pourer's
+Poussin/MS
+pouter/M
+pout/GZDRS
+poverty/MS
+POW
+powderpuff
+powder/RDGMS
+powdery
+Powell/M
+powerboat/MS
+powerfulness/M
+powerful/YP
+power/GMD
+powerhouse/MS
+powerlessness/SM
+powerless/YP
+Powers
+Powhatan/M
+pow/RZ
+powwow/GDMS
+pox/GMDS
+Poznan/M
+pp
+PP
+ppm
+ppr
+PPS
+pr
+PR
+practicability/S
+practicable/P
+practicably
+practicality/SM
+practicalness/M
+practical/YPS
+practice/BDRSMG
+practiced/U
+practicer/M
+practicum/SM
+practitioner/SM
+Pradesh/M
+Prado/M
+Praetorian
+praetorian/S
+praetor/MS
+pragmatical/Y
+pragmatic/S
+pragmatics/M
+pragmatism/MS
+pragmatist/MS
+Prague/M
+Praia
+prairie/MS
+praise/ESDG
+praiser/S
+praise's
+praiseworthiness/MS
+praiseworthy/P
+praising/Y
+Prakrit/M
+praline/MS
+pram/MS
+prancer/M
+prance/ZGSRD
+prancing/Y
+prank/SMDG
+prankster/SM
+praseodymium/SM
+Pratchett/M
+prate/DSRGZ
+prater/M
+pratfall/MS
+prating/Y
+prattle/DRSGZ
+prattler/M
+prattling/Y
+Pratt/M
+Prattville/M
+Pravda/M
+prawn/MDSG
+praxes
+praxis/M
+Praxiteles/M
+pray/DRGZS
+prayerbook
+prayerfulness/M
+prayerful/YP
+prayer/M
+PRC
+preach/DRSGLZJ
+preacher/M
+preaching/Y
+preachment/MS
+preachy/RT
+preadolescence/S
+Preakness/M
+preallocate/XGNDS
+preallocation/M
+preallocator/S
+preamble/MGDS
+preamp
+preamplifier/M
+prearrange/LSDG
+prearrangement/SM
+preassign/SDG
+preauthorize
+prebendary/M
+Precambrian
+precancel/DGS
+precancerous
+precariousness/MS
+precarious/PY
+precautionary
+precaution/SGDM
+precede/DSG
+precedence/SM
+precedented/U
+precedent/SDM
+preceptive/Y
+preceptor/MS
+precept/SMV
+precess/DSG
+precession/M
+precinct/MS
+preciosity/MS
+preciousness/S
+precious/PYS
+precipice/MS
+precipitable
+precipitant/S
+precipitateness/M
+precipitate/YNGVPDSX
+precipitation/M
+precipitousness/M
+precipitous/YP
+preciseness/SM
+precise/XYTRSPN
+precision/M
+précis/MDG
+preclude/GDS
+preclusion/S
+precociousness/MS
+precocious/YP
+precocity/SM
+precode/D
+precognition/SM
+precognitive
+precollege/M
+precolonial
+precomputed
+preconceive/GSD
+preconception/SM
+precondition/GMDS
+preconscious
+precook/GDS
+precursor/SM
+precursory
+precut
+predate/NGDSX
+predation/CMS
+predator/SM
+predatory
+predecease/SDG
+predecessor/MS
+predeclared
+predecline
+predefine/GSD
+predefinition/SM
+predesignate/GDS
+predestination/SM
+predestine/SDG
+predetermination/MS
+predeterminer/M
+predetermine/ZGSRD
+predicable/S
+predicament/SM
+predicate/VGNXSD
+predication/M
+predicator
+predictability/UMS
+predictable/U
+predictably/U
+predict/BSDGV
+predicted/U
+prediction/MS
+predictive/Y
+predictor/MS
+predigest/GDS
+predilect
+predilection/SM
+predispose/SDG
+predisposition/MS
+predoctoral
+predominance/SM
+predominant/Y
+predominate/YSDGN
+predomination/M
+preemie/MS
+preeminence/SM
+preeminent/Y
+preemployment/M
+preempt/GVSD
+preemption/SM
+preemptive/Y
+preemptor/M
+preener/M
+preen/SRDG
+preexist/DSG
+preexistence/SM
+preexistent
+prefabbed
+prefabbing
+prefab/MS
+prefabricate/XNGDS
+prefabrication/M
+preface/DRSGM
+prefacer/M
+prefatory
+prefect/MS
+prefecture/MS
+preferableness/M
+preferable/P
+preferably
+prefer/BL
+preference/MS
+preferential/Y
+preferment/SM
+preferred
+preferring
+prefiguration/M
+prefigure/SDG
+prefix/MDSG
+preflight/SGDM
+preform/DSG
+pref/RZ
+pregnancy/SM
+pregnant/Y
+preheat/GDS
+prehensile
+prehistoric
+prehistorical/Y
+prehistory/SM
+preindustrial
+preinitialize/SDG
+preinterview/M
+preisolated
+prejudge/DRSG
+prejudger/M
+prejudgment/SM
+prejudiced/U
+prejudice/MSDG
+prejudicial/PY
+prekindergarten/MS
+prelacy/MS
+prelate/SM
+preliminarily
+preliminary/S
+preliterate/S
+preloaded
+prelude/GMDRS
+preluder/M
+premarital/Y
+premarket
+prematureness/M
+premature/SPY
+prematurity/M
+premedical
+premeditated/Y
+premeditate/XDSGNV
+premeditation/M
+premed/S
+premenstrual
+premiere/MS
+premier/GSDM
+premiership/SM
+Preminger/M
+premise/GMDS
+premiss's
+premium/MS
+premix/GDS
+premolar/S
+premonition/SM
+premonitory
+prenatal/Y
+Pren/M
+Prenticed/M
+Prentice/MGD
+Prenticing/M
+Prentiss/M
+Prent/M
+prenuptial
+preoccupation/MS
+preoccupy/DSG
+preoperative
+preordain/DSLG
+prepackage/GSD
+prepaid
+preparation/SM
+preparative/SYM
+preparatory
+preparedly
+preparedness/USM
+prepared/UP
+prepare/ZDRSG
+prepay/GLS
+prepayment/SM
+prepender/S
+prepends
+preplanned
+preponderance/SM
+preponderant/Y
+preponderate/DSYGN
+prepositional/Y
+preposition/SDMG
+prepossess/GSD
+prepossessing/U
+prepossession/MS
+preposterousness/M
+preposterous/PY
+prepped
+prepping
+preppy/RST
+preprepared
+preprint/SGDM
+preprocessed
+preprocessing
+preprocessor/S
+preproduction
+preprogrammed
+prep/SM
+prepubescence/S
+prepubescent/S
+prepublication/M
+prepuce/SM
+prequel/S
+preradiation
+prerecord/DGS
+preregister/DSG
+preregistration/MS
+prerequisite/SM
+prerogative/SDM
+Pres
+presage/GMDRS
+presager/M
+presbyopia/MS
+presbyterian
+Presbyterianism/S
+Presbyterian/S
+presbyter/MS
+presbytery/MS
+preschool/RSZ
+prescience/SM
+prescient/Y
+Prescott/M
+prescribed/U
+prescriber/M
+prescribe/RSDG
+prescription/SM
+prescriptive/Y
+prescript/SVM
+preselect/SGD
+presence/SM
+presentableness/M
+presentable/P
+presentably/A
+presentational/A
+presentation/AMS
+presented/A
+presenter/A
+presentiment/MS
+presentment/SM
+presents/A
+present/SLBDRYZGP
+preservationist/S
+preservation/SM
+preservative/SM
+preserve/DRSBZG
+preserved/U
+preserver/M
+preset/S
+presetting
+preshrank
+preshrink/SG
+preshrunk
+preside/DRSG
+presidency/MS
+presidential/Y
+president/SM
+presider/M
+presidia
+presidium/M
+Presley/M
+presoaks
+presort/GDS
+pres/S
+press/ACDSG
+pressed/U
+presser/MS
+pressingly/C
+pressing/YS
+pressman/M
+pressmen
+pressure/DSMG
+pressurization/MS
+pressurize/DSRGZ
+pressurized/U
+prestidigitate/NX
+prestidigitation/M
+prestidigitatorial
+prestidigitator/M
+prestige/MS
+prestigious/PY
+Preston/M
+presto/S
+presumably
+presume/BGDRS
+presumer/M
+presuming/Y
+presumption/MS
+presumptive/Y
+presumptuousness/SM
+presumptuous/YP
+presuppose/GDS
+presupposition/S
+pretax
+preteen/S
+pretended/Y
+pretender/M
+pretending/U
+pretend/SDRZG
+pretense/MNVSX
+pretension/GDM
+pretentiousness/S
+pretentious/UYP
+preterite's
+preterit/SM
+preternatural/Y
+pretest/SDG
+pretext/SMDG
+Pretoria/M
+pretreated
+pretreatment/S
+pretrial
+prettify/SDG
+prettily
+prettiness/SM
+pretty/TGPDRS
+pretzel/SM
+prevailing/Y
+prevail/SGD
+prevalence/MS
+prevalent/SY
+prevaricate/DSXNG
+prevaricator/MS
+preventable/U
+preventably
+preventative/S
+prevent/BSDRGV
+preventer/M
+prevention/MS
+preventiveness/M
+preventive/SPY
+preview/ZGSDRM
+previous/Y
+prevision/SGMD
+prewar
+prexes
+preyer's
+prey/SMDG
+Priam/M
+priapic
+Pribilof/M
+price/AGSD
+priced/U
+priceless
+Price/M
+pricer/MS
+price's
+pricey
+pricier
+priciest
+pricker/M
+pricking/M
+prickle/GMDS
+prickliness/S
+prickly/RTP
+prick/RDSYZG
+prideful/Y
+pride/GMDS
+prier/M
+priestess/MS
+priesthood/SM
+Priestley/M
+priestliness/SM
+priestly/PTR
+priest/SMYDG
+prigged
+prigging
+priggishness/S
+priggish/PYM
+prig/SM
+primacy/MS
+primal
+primarily
+primary/MS
+primate/MS
+primed/U
+primely/M
+primeness/M
+prime/PYS
+primer/M
+Prime's
+primeval/Y
+priming/M
+primitiveness/SM
+primitive/YPS
+primitivism/M
+primmed
+primmer
+primmest
+primming
+primness/MS
+primogenitor/MS
+primogeniture/MS
+primordial/YS
+primp/DGS
+primrose/MGSD
+prim/SPJGZYDR
+princedom/MS
+princeliness/SM
+princely/PRT
+Prince/M
+prince/SMY
+princess/MS
+Princeton/M
+principality/MS
+principal/SY
+Principe/M
+Principia/M
+principled/U
+principle/SDMG
+printable/U
+printably
+print/AGDRS
+printed/U
+printer/AM
+printers
+printing/SM
+printmaker/M
+printmake/ZGR
+printmaking/M
+printout/S
+Prinz/M
+prioress/MS
+priori
+prioritize/DSRGZJ
+priority/MS
+prior/YS
+priory/SM
+Pris
+Prisca/M
+Priscella/M
+Priscilla/M
+prised
+prise/GMAS
+prismatic
+prism/MS
+prison/DRMSGZ
+prisoner/M
+Prissie/M
+prissily
+prissiness/SM
+prissy/RSPT
+pristine/Y
+prithee/S
+privacy/MS
+privateer/SMDG
+privateness/M
+private/NVYTRSXP
+privation/MCS
+privative/Y
+privatization/S
+privatize/GSD
+privet/SM
+privileged/U
+privilege/SDMG
+privily
+privy/SRMT
+prized/A
+prize/DSRGZM
+prizefighter/M
+prizefighting/M
+prizefight/SRMGJZ
+prizewinner/S
+prizewinning
+Pr/MN
+PRO
+proactive
+probabilist
+probabilistic
+probabilistically
+probability/SM
+probable/S
+probably
+probated/A
+probate/NVMX
+probates/A
+probating/A
+probational
+probationary/S
+probationer/M
+probation/MRZ
+probation's/A
+probative/A
+prober/M
+probity/SM
+problematical/UY
+problematic/S
+problem/SM
+proboscis/MS
+prob/RBJ
+procaine/MS
+procedural/SY
+procedure/MS
+proceeder/M
+proceeding/M
+proceed/JRDSG
+process/BSDMG
+processed/UA
+processes/A
+processional/YS
+procession/GD
+processor/MS
+proclamation/MS
+proclivity/MS
+proconsular
+procrastinate/XNGDS
+procrastination/M
+procrastinator/MS
+procreational
+procreatory
+procrustean
+Procrustean
+Procrustes/M
+proctor/GSDM
+proctorial
+procurable/U
+procure/L
+procurement/MS
+Procyon/M
+prodded
+prodding
+prodigality/S
+prodigal/SY
+prodigiousness/M
+prodigious/PY
+prodigy/MS
+prod/S
+produce/AZGDRS
+producer/AM
+producible/A
+production/ASM
+productively/UA
+productiveness/MS
+productive/PY
+productivities
+productivity/A
+productivity's
+productize/GZRSD
+product/V
+Prof
+profanation/S
+profaneness/MS
+profane/YPDRSG
+profanity/MS
+professed/Y
+professionalism/SM
+professionalize/GSD
+professional/USY
+profession/SM
+professorial/Y
+professorship/SM
+professor/SM
+proffer/GSD
+proficiency/SM
+proficient/YS
+profitability/MS
+profitableness/MU
+profitable/UP
+profitably/U
+profiteer/GSMD
+profiterole/MS
+profit/GZDRB
+profitless
+profligacy/S
+profligate/YS
+proforma/S
+profoundity
+profoundness/SM
+profound/PTYR
+prof/S
+profundity/MS
+profuseness/MS
+profuse/YP
+progenitor/SM
+progeny/M
+progesterone/SM
+prognathous
+prognoses
+prognosis/M
+prognosticate/NGVXDS
+prognostication/M
+prognosticator/S
+prognostic/S
+program/CSA
+programed
+programing
+programmability
+programmable/S
+programmed/CA
+programmer/ASM
+programming/CA
+programmings
+progression/SM
+progressiveness/SM
+progressive/SPY
+progressivism
+progress/MSDVG
+prohibiter/M
+prohibitionist/MS
+prohibition/MS
+Prohibition/MS
+prohibitiveness/M
+prohibitive/PY
+prohibitory
+prohibit/VGSRD
+projected/AU
+projectile/MS
+projectionist/MS
+projection/MS
+projective/Y
+project/MDVGS
+projector/SM
+Prokofieff/M
+Prokofiev/M
+prolegomena
+proletarianization/M
+proletarianized
+proletarian/S
+proletariat/SM
+proliferate/GNVDSX
+proliferation/M
+prolifically
+prolific/P
+prolixity/MS
+prolix/Y
+prologize
+prologue/MGSD
+prologuize
+prolongate/NGSDX
+prolongation/M
+prolonger/M
+prolong/G
+promenade/GZMSRD
+promenader/M
+Promethean
+Prometheus/M
+promethium/SM
+prominence/MS
+prominent/Y
+promiscuity/MS
+promiscuousness/M
+promiscuous/PY
+promise/GD
+promising/UY
+promissory
+promontory/MS
+promote/GVZBDR
+promoter/M
+promotiveness/M
+promotive/P
+prompted/U
+prompter/M
+promptitude/SM
+promptness/MS
+prompt/SGJTZPYDR
+pro/MS
+promulgate/NGSDX
+promulgation/M
+promulgator/MS
+pron
+proneness/MS
+prone/PY
+pronghorn/SM
+prong/SGMD
+pronominalization
+pronominalize
+pronounceable/U
+pronouncedly
+pronounced/U
+pronounce/GLSRD
+pronouncement/SM
+pronouncer/M
+pronto
+pronunciation/SM
+proofed/A
+proofer
+proofing/M
+proofreader/M
+proofread/GZSR
+proof/SEAM
+propaganda/SM
+propagandistic
+propagandist/SM
+propagandize/DSG
+propagated/U
+propagate/SDVNGX
+propagation/M
+propagator/MS
+propellant/MS
+propelled
+propeller/MS
+propelling
+propel/S
+propensity/MS
+properness/M
+proper/PYRT
+propertied/U
+property/SDM
+prophecy/SM
+prophesier/M
+prophesy/GRSDZ
+prophetess/S
+prophetic
+prophetical/Y
+prophet/SM
+prophylactic/S
+prophylaxes
+prophylaxis/M
+propinquity/MS
+propionate/M
+propitiate/GNXSD
+propitiatory
+propitiousness/M
+propitious/YP
+proponent/MS
+proportionality/M
+proportional/SY
+proportionate/YGESD
+proportioner/M
+proportion/ESGDM
+proportionment/M
+proposal/SM
+propped
+propping
+proprietary/S
+proprietorial
+proprietorship/SM
+proprietor/SM
+proprietress/MS
+propriety/MS
+proprioception
+proprioceptive
+prop/SZ
+propulsion/MS
+propulsive
+propylene/M
+prorogation/SM
+prorogue
+prosaic
+prosaically
+proscenium/MS
+prosciutti
+prosciutto/SM
+proscription/SM
+proscriptive
+pros/DSRG
+prosecute/SDBXNG
+prosecution/M
+prosecutor/MS
+proselyte/SDGM
+proselytism/MS
+proselytize/ZGDSR
+prose/M
+proser/M
+Proserpine/M
+prosodic/S
+prosody/MS
+prospect/DMSVG
+prospection/SM
+prospectiveness/M
+prospective/SYP
+prospector/MS
+prospectus/SM
+prosper/GSD
+prosperity/MS
+prosperousness/M
+prosperous/PY
+prostate
+prostheses
+prosthesis/M
+prosthetic/S
+prosthetics/M
+prostitute/DSXNGM
+prostitution/M
+prostrate/SDXNG
+prostration/M
+prosy/RT
+protactinium/MS
+protagonist/SM
+Protagoras/M
+protean/S
+protease/M
+protect/DVGS
+protected/UY
+protectionism/MS
+protectionist/MS
+protection/MS
+protectiveness/S
+protective/YPS
+protectorate/SM
+protector/MS
+protégées
+protégé/SM
+protein/MS
+proteolysis/M
+proteolytic
+Proterozoic/M
+protestantism
+Protestantism/MS
+protestant/S
+Protestant/SM
+protestation/MS
+protest/G
+protesting/Y
+Proteus/M
+protocol/DMGS
+protoplasmic
+protoplasm/MS
+prototype/SDGM
+prototypic
+prototypical/Y
+protozoa
+protozoan/MS
+protozoic
+protozoon's
+protract/DG
+protrude/SDG
+protrusile
+protrusion/MS
+protrusive/PY
+protuberance/S
+protuberant
+Proudhon/M
+proud/TRY
+Proust/M
+provabilities
+provability's
+provability/U
+provableness/M
+provable/P
+provably
+prov/DRGZB
+proved/U
+proven/U
+prove/ESDAG
+provenance/SM
+Provençal
+Provencals
+Provence/M
+provender/SDG
+provenience/SM
+provenly
+proverb/DG
+proverbial/Y
+Proverbs/M
+prover/M
+provide/DRSBGZ
+provided/U
+providence/SM
+Providence/SM
+providential/Y
+provident/Y
+provider/M
+province/SM
+provincialism/SM
+provincial/SY
+provisional/YS
+provisioner/M
+provision/R
+proviso/MS
+provocateur/S
+provocativeness/SM
+provocative/P
+provoked/U
+provoke/GZDRS
+provoking/Y
+provolone/SM
+Provo/M
+provost/MS
+prowess/SM
+prowler/M
+prowl/RDSZG
+prow/TRMS
+proximal/Y
+proximateness/M
+proximate/PY
+proximity/MS
+Proxmire/M
+proxy/SM
+Prozac
+prude/MS
+Prudence/M
+prudence/SM
+Prudential/M
+prudential/SY
+prudent/Y
+prudery/MS
+Prudi/M
+prudishness/SM
+prudish/YP
+Prudy/M
+Prue/M
+Pruitt/M
+Pru/M
+prune/DSRGZM
+pruner/M
+prurience/MS
+prurient/Y
+Prussia/M
+Prussian/S
+prussic
+Prut/M
+Pryce/M
+pry/DRSGTZ
+pryer's
+prying/Y
+P's
+PS
+p's/A
+psalmist/SM
+psalm/SGDM
+Psalms/M
+psalter
+Psalter/SM
+psaltery/MS
+psephologist/M
+pseudonymous
+pseudonym/SM
+pseudopod
+pseudo/S
+pseudoscience/S
+pshaw/SDG
+psi/S
+psittacoses
+psittacosis/M
+psoriases
+psoriasis/M
+psst/S
+PST
+psychedelically
+psychedelic/S
+psyche/M
+Psyche/M
+psychiatric
+psychiatrist/SM
+psychiatry/MS
+psychical/Y
+psychic/MS
+psychoacoustic/S
+psychoacoustics/M
+psychoactive
+psychoanalysis/M
+psychoanalyst/S
+psychoanalytic
+psychoanalytical
+psychoanalyze/SDG
+psychobabble/S
+psychobiology/M
+psychocultural
+psychodrama/MS
+psychogenic
+psychokinesis/M
+psycholinguistic/S
+psycholinguistics/M
+psycholinguists
+psychological/Y
+psychologist/MS
+psychology/MS
+psychometric/S
+psychometrics/M
+psychometry/M
+psychoneuroses
+psychoneurosis/M
+psychopathic/S
+psychopath/M
+psychopathology/M
+psychopaths
+psychopathy/SM
+psychophysical/Y
+psychophysic/S
+psychophysics/M
+psychophysiology/M
+psychosis/M
+psycho/SM
+psychosocial/Y
+psychosomatic/S
+psychosomatics/M
+psychos/S
+psychotherapeutic/S
+psychotherapist/MS
+psychotherapy/SM
+psychotically
+psychotic/S
+psychotropic/S
+psychs
+psych/SDG
+PT
+PTA
+Ptah/M
+ptarmigan/MS
+pt/C
+pterodactyl/SM
+Pt/M
+PTO
+Ptolemaic
+Ptolemaists
+Ptolemy/MS
+ptomaine/MS
+Pu
+pubbed
+pubbing
+pubertal
+puberty/MS
+pubes
+pubescence/S
+pubescent
+pubic
+pubis/M
+publican/AMS
+publication/AMS
+publicist/SM
+publicity/SM
+publicized/U
+publicize/SDG
+publicness/M
+publics/A
+public/YSP
+publishable/U
+published/UA
+publisher/ASM
+publishes/A
+publishing/M
+publish/JDRSBZG
+pub/MS
+Puccini/M
+puce/SM
+pucker/DG
+Puckett/M
+puck/GZSDRM
+puckishness/S
+puckish/YP
+Puck/M
+pudding/MS
+puddle/JMGRSD
+puddler/M
+puddling/M
+puddly
+pudenda
+pudendum/M
+pudginess/SM
+pudgy/PRT
+Puebla/M
+Pueblo/MS
+pueblo/SM
+puerile/Y
+puerility/SM
+puerperal
+puers
+Puerto/M
+puffball/SM
+puffer/M
+puffery/M
+puffiness/S
+puffin/SM
+Puff/M
+puff/SGZDRM
+puffy/PRT
+Puget/M
+pugged
+pugging
+Pugh/M
+pugilism/SM
+pugilistic
+pugilist/S
+pug/MS
+pugnaciousness/MS
+pugnacious/YP
+pugnacity/SM
+puissant/Y
+puke/GDS
+pukka
+Pulaski/SM
+pulchritude/SM
+pulchritudinous/M
+pule/GDS
+Pulitzer/SM
+pullback/S
+pull/DRGZSJ
+pullet/SM
+pulley/SM
+Pullman/MS
+pullout/S
+pullover/SM
+pulmonary
+pulpiness/S
+pulpit/MS
+pulp/MDRGS
+pulpwood/MS
+pulpy/PTR
+pulsar/MS
+pulsate/NGSDX
+pulsation/M
+pulse/ADSG
+pulser
+pulse's
+pulverable
+pulverization/MS
+pulverized/U
+pulverize/GZSRD
+pulverizer/M
+pulverizes/UA
+puma/SM
+pumice/SDMG
+pummel/SDG
+pumpernickel/SM
+pump/GZSMDR
+pumping/M
+pumpkin/MS
+punchbowl/M
+punched/U
+puncheon/MS
+puncher/M
+punch/GRSDJBZ
+punchline/S
+Punch/M
+punchy/RT
+punctilio/SM
+punctiliousness/SM
+punctilious/PY
+punctualities
+punctuality/UM
+punctualness/M
+punctual/PY
+punctuate/SDXNG
+punctuational
+punctuation/M
+puncture/SDMG
+punditry/S
+pundit/SM
+pungency/MS
+pungent/Y
+Punic
+puniness/MS
+punished/U
+punisher/M
+punishment/MS
+punish/RSDGBL
+punitiveness/M
+punitive/YP
+Punjabi/M
+Punjab/M
+punk/TRMS
+punky/PRS
+pun/MS
+punned
+punning
+punster/SM
+punter/M
+punt/GZMDRS
+puny/PTR
+pupae
+pupal
+pupa/M
+pupate/NGSD
+pupillage/M
+pupil/SM
+pup/MS
+pupped
+puppeteer/SM
+puppetry/MS
+puppet/SM
+pupping
+puppy/GSDM
+puppyish
+purblind
+Purcell/M
+purchasable
+purchase/GASD
+purchaser/MS
+purdah/M
+purdahs
+Purdue/M
+purebred/S
+puree/DSM
+pureeing
+pureness/MS
+pure/PYTGDR
+purgation/M
+purgative/MS
+purgatorial
+purgatory/SM
+purge/GZDSR
+purger/M
+purify/GSRDNXZ
+Purim/SM
+Purina/M
+purine/SM
+purism/MS
+puristic
+purist/MS
+puritanic
+puritanical/Y
+Puritanism/MS
+puritanism/S
+puritan/SM
+Puritan/SM
+purity/SM
+purlieu/SM
+purl/MDGS
+purloin/DRGS
+purloiner/M
+purple/MTGRSD
+purplish
+purport/DRSZG
+purported/Y
+purposefulness/S
+purposeful/YP
+purposelessness/M
+purposeless/PY
+purpose/SDVGYM
+purposiveness/M
+purposive/YP
+purr/DSG
+purring/Y
+purse/DSRGZM
+purser/M
+pursuance/MS
+pursuant
+pursuer/M
+pursue/ZGRSD
+pursuit/MS
+purulence/MS
+purulent
+Purus
+purveyance/MS
+purvey/DGS
+purveyor/MS
+purview/SM
+Pusan/M
+Pusey/M
+pushbutton/S
+pushcart/SM
+pushchair/SM
+pushdown
+push/DSRBGZ
+pusher/M
+pushily
+pushiness/MS
+Pushkin/M
+pushover/SM
+Pushtu/M
+pushy/PRT
+pusillanimity/MS
+pusillanimous/Y
+pus/SM
+puss/S
+pussycat/S
+pussyfoot/DSG
+pussy/TRSM
+pustular
+pustule/MS
+putative/Y
+Putin/M
+put/IS
+Putnam/M
+Putnem/M
+putout/S
+putrefaction/SM
+putrefactive
+putrefy/DSG
+putrescence/MS
+putrescent
+putridity/M
+putridness/M
+putrid/YP
+putsch/S
+putted/I
+puttee/MS
+putter/RDMGZ
+putting/I
+putt/SGZMDR
+puttying/M
+putty/SDMG
+puzzle/JRSDZLG
+puzzlement/MS
+puzzler/M
+PVC
+pvt
+Pvt/M
+PW
+PX
+p/XTGJ
+Pygmalion/M
+pygmy/SM
+Pygmy/SM
+Pyhrric/M
+pyknotic
+Pyle/M
+pylon/SM
+pylori
+pyloric
+pylorus/M
+Pym/M
+Pynchon/M
+Pyongyang/M
+pyorrhea/SM
+Pyotr/M
+pyramidal/Y
+pyramid/GMDS
+pyre/MS
+Pyrenees
+Pyrex/SM
+pyridine/M
+pyrimidine/SM
+pyrite/MS
+pyroelectric
+pyroelectricity/SM
+pyrolysis/M
+pyrolyze/RSM
+pyromaniac/SM
+pyromania/MS
+pyrometer/MS
+pyrometry/M
+pyrophosphate/M
+pyrotechnical
+pyrotechnic/S
+pyrotechnics/M
+pyroxene/M
+pyroxenite/M
+Pyrrhic
+Pythagoras/M
+Pythagorean/S
+Pythias
+Python/M
+python/MS
+pyx/MDSG
+q
+Q
+QA
+Qaddafi/M
+Qantas/M
+Qatar/M
+QB
+QC
+QED
+Qingdao
+Qiqihar/M
+QM
+Qom/M
+qr
+q's
+Q's
+qt
+qty
+qua
+Quaalude/M
+quackery/MS
+quackish
+quack/SDG
+quadded
+quadding
+quadrangle/MS
+quadrangular/M
+quadrant/MS
+quadraphonic/S
+quadrapole
+quadratical/Y
+quadratic/SM
+quadrature/MS
+quadrennial/SY
+quadrennium/MS
+quadric
+quadriceps/SM
+quadrilateral/S
+quadrille/XMGNSD
+quadrillion/MH
+quadripartite/NY
+quadriplegia/SM
+quadriplegic/SM
+quadrivia
+quadrivium/M
+quadrupedal
+quadruped/MS
+quadruple/GSD
+quadruplet/SM
+quadruplicate/GDS
+quadruply/NX
+quadrupole
+quad/SM
+quadword/MS
+quaffer/M
+quaff/SRDG
+quagmire/DSMG
+quahog/MS
+quail/GSDM
+quaintness/MS
+quaint/PTYR
+quake/GZDSR
+Quakeress/M
+Quakerism/S
+Quaker/SM
+quaky/RT
+qualification/ME
+qualified/UY
+qualifier/SM
+qualify/EGXSDN
+qualitative/Y
+quality/MS
+qualmish
+qualm/SM
+quandary/MS
+quangos
+quanta/M
+Quantico/M
+quantifiable/U
+quantified/U
+quantifier/M
+quantify/GNSRDZX
+quantile/S
+quantitativeness/M
+quantitative/PY
+quantity/MS
+quantization/MS
+quantizer/M
+quantize/ZGDRS
+quantum/M
+quarantine/DSGM
+quark/SM
+quarreler/M
+quarrellings
+quarrelsomeness/MS
+quarrelsome/PY
+quarrel/SZDRMG
+quarrier/M
+quarryman/M
+quarrymen
+quarry/RSDGM
+quarterback/SGMD
+quarterdeck/MS
+quarterer/M
+quarterfinal/MS
+quartering/M
+quarterly/S
+quartermaster/MS
+quarter/MDRYG
+quarterstaff/M
+quarterstaves
+quartet/SM
+quartic/S
+quartile/SM
+quarto/SM
+quart/RMSZ
+quartzite/M
+quartz/SM
+quasar/SM
+quash/GSD
+quasi
+quasilinear
+Quasimodo/M
+Quaternary
+quaternary/S
+quaternion/SM
+quatrain/SM
+quaver/GDS
+quavering/Y
+quavery
+Quayle/M
+quayside/M
+quay/SM
+queasily
+queasiness/SM
+queasy/TRP
+Quebec/M
+Quechua/M
+Queenie/M
+queenly/RT
+queen/SGMDY
+Queensland/M
+Queen/SM
+queerness/S
+queer/STGRDYP
+queller/M
+quell/SRDG
+Que/M
+quenchable/U
+quenched/U
+quencher/M
+quench/GZRSDB
+quenchless
+Quentin/M
+Quent/M
+Querida/M
+quern/M
+querulousness/S
+querulous/YP
+query/MGRSD
+quested/A
+quester/AS
+quester's
+quest/FSIM
+questing
+questionableness/M
+questionable/P
+questionably/U
+questioned/UA
+questioner/M
+questioning/UY
+questionnaire/MS
+question/SMRDGBZJ
+quests/A
+Quetzalcoatl/M
+queued/C
+queue/GZMDSR
+queuer/M
+queues/C
+queuing/C
+Quezon/M
+quibble/GZRSD
+quibbler/M
+quiche/SM
+quicken/RDG
+quickie/MS
+quicklime/SM
+quickness/MS
+quick/RNYTXPS
+quicksand/MS
+quicksilver/GDMS
+quickstep/SM
+quid/SM
+quiesce/D
+quiescence/MS
+quiescent/YP
+quieted/E
+quieten/SGD
+quieter/E
+quieter's
+quieting/E
+quietly/E
+quietness/MS
+quiets/E
+quietude/IEMS
+quietus/MS
+quiet/UTGPSDRY
+Quillan/M
+quill/GSDM
+Quill/M
+quilter/M
+quilting/M
+quilt/SZJGRDM
+quincentenary/M
+quince/SM
+Quincey/M
+quincy/M
+Quincy/M
+quinine/MS
+Quinlan/M
+Quinn/M
+quinquennial/Y
+quinsy/SM
+Quinta/M
+Quintana/M
+quintessence/SM
+quintessential/Y
+quintet/SM
+quintic
+quintile/SM
+Quintilian/M
+Quintilla/M
+quintillion/MH
+quintillionth/M
+Quintina/M
+Quintin/M
+Quint/M
+quint/MS
+Quinton/M
+quintuple/SDG
+quintuplet/MS
+Quintus/M
+quip/MS
+quipped
+quipper
+quipping
+quipster/SM
+quired/AI
+quire/MDSG
+quires/AI
+Quirinal/M
+quiring/IA
+quirkiness/SM
+quirk/SGMD
+quirky/PTR
+quirt/SDMG
+Quisling/M
+quisling/SM
+quitclaim/GDMS
+quit/DGS
+quite/SADG
+Quito/M
+quittance/SM
+quitter/SM
+quitting
+quiver/GDS
+quivering/Y
+quivery
+Quixote/M
+quixotic
+quixotically
+Quixotism/M
+quiz/M
+quizzed
+quizzer/SM
+quizzes
+quizzical/Y
+quizzing
+quo/H
+quoin/SGMD
+quoit/GSDM
+quondam
+quonset
+Quonset
+quorate/I
+quorum/MS
+quotability/S
+quota/MS
+quotation/SM
+quoter/M
+quote/UGSD
+quot/GDRB
+quotidian/S
+quotient/SM
+qwerty
+qwertys
+Rabat/M
+rabbet/GSMD
+Rabbi/M
+rabbi/MS
+rabbinate/MS
+rabbinic
+rabbinical/Y
+rabbiter/M
+rabbit/MRDSG
+rabble/GMRSD
+rabbler/M
+Rabelaisian
+Rabelais/M
+rabidness/SM
+rabid/YP
+rabies
+Rabi/M
+Rabin/M
+rabis
+Rab/M
+raccoon/SM
+racecourse/MS
+racegoers
+racehorse/SM
+raceme/MS
+race/MZGDRSJ
+racer/M
+racetrack/SMR
+raceway/SM
+Rachael/M
+Rachele/M
+Rachelle/M
+Rachel/M
+Rachmaninoff/M
+racialism/MS
+racialist/MS
+racial/Y
+racily
+Racine/M
+raciness/MS
+racism/S
+racist/MS
+racketeer/MDSJG
+racket/SMDG
+rackety
+rack/GDRMS
+raconteur/SM
+racoon's
+racquetball/S
+racquet's
+racy/RTP
+radarscope/MS
+radar/SM
+Radcliffe/M
+radded
+radder
+raddest
+Raddie/M
+radding
+Raddy/M
+radial/SY
+radiance/SM
+radian/SM
+radiant/YS
+radiate/XSDYVNG
+radiation/M
+radiative/Y
+radiator/MS
+radicalism/MS
+radicalization/S
+radicalize/GSD
+radicalness/M
+radical/SPY
+radices's
+radii/M
+radioactive/Y
+radioactivity/MS
+radioastronomical
+radioastronomy
+radiocarbon/MS
+radiochemical/Y
+radiochemistry/M
+radiogalaxy/S
+radiogram/SM
+radiographer/MS
+radiographic
+radiography/MS
+radioisotope/SM
+radiologic
+radiological/Y
+radiologist/MS
+radiology/MS
+radioman/M
+radiomen
+radiometer/SM
+radiometric
+radiometry/MS
+radionics
+radionuclide/M
+radiopasteurization
+radiophone/MS
+radiophysics
+radioscopy/SM
+radio/SMDG
+radiosonde/SM
+radiosterilization
+radiosterilized
+radiotelegraph
+radiotelegraphs
+radiotelegraphy/MS
+radiotelephone/SM
+radiotherapist/SM
+radiotherapy/SM
+radish/MS
+radium/MS
+radius/M
+radix/SM
+Rad/M
+radon/SM
+rad/S
+Raeann/M
+Rae/M
+RAF
+Rafaela/M
+Rafaelia/M
+Rafaelita/M
+Rafaellle/M
+Rafaello/M
+Rafael/M
+Rafa/M
+Rafe/M
+Raffaello/M
+Raffarty/M
+Rafferty/M
+raffia/SM
+raffishness/SM
+raffish/PY
+raffle/MSDG
+Raff/M
+Rafi/M
+Raf/M
+rafter/DM
+raft/GZSMDR
+raga/MS
+ragamuffin/MS
+ragbag/SM
+rage/MS
+raggedness/SM
+ragged/PRYT
+raggedy/TR
+ragging
+rag/GSMD
+raging/Y
+raglan/MS
+Ragnar/M
+Ragnarök
+ragout/SMDG
+ragtag/MS
+ragtime/MS
+ragweed/MS
+ragwort/M
+Rahal/M
+rah/DG
+Rahel/M
+rahs
+raider/M
+raid/MDRSGZ
+railbird/S
+rail/CDGS
+railer/SM
+railhead/SM
+railing/MS
+raillery/MS
+railroader/M
+railroading/M
+railroad/SZRDMGJ
+rail's
+railwaymen
+railway/MS
+raiment/SM
+Raimondo/M
+Raimund/M
+Raimundo/M
+Raina/M
+rainbow/MS
+raincloud/S
+raincoat/SM
+raindrop/SM
+Raine/MR
+Rainer/M
+rainfall/SM
+rainforest's
+rain/GSDM
+Rainier/M
+rainless
+rainmaker/SM
+rainmaking/MS
+rainproof/GSD
+rainstorm/SM
+rainwater/MS
+rainy/RT
+raise/DSRGZ
+raiser/M
+raising/M
+raisin/MS
+rajah/M
+rajahs
+Rajive/M
+raj/M
+Rakel/M
+rake/MGDRS
+raker/M
+rakishness/MS
+rakish/PY
+Raleigh/M
+Ralf/M
+Ralina/M
+rally/GSD
+Ralph/M
+Ralston/M
+Ra/M
+Ramada/M
+Ramadan/SM
+Ramakrishna/M
+Rama/M
+Raman/M
+Ramayana/M
+ramble/JRSDGZ
+rambler/M
+rambling/Y
+Rambo/M
+rambunctiousness/S
+rambunctious/PY
+ramekin/SM
+ramie/MS
+ramification/M
+ramify/XNGSD
+Ramirez/M
+Ramiro/M
+ramjet/SM
+Ram/M
+rammed
+ramming
+Ramo/MS
+Ramona/M
+Ramonda/M
+Ramon/M
+rampage/SDG
+rampancy/S
+rampant/Y
+rampart/SGMD
+ramp/GMDS
+ramrodded
+ramrodding
+ramrod/MS
+RAM/S
+Ramsay/M
+Ramses/M
+Ramsey/M
+ramshackle
+ram/SM
+rams/S
+ran/A
+Rana/M
+Rancell/M
+Rance/M
+rancher/M
+rancho/SM
+ranch/ZRSDMJG
+rancidity/MS
+rancidness/SM
+rancid/P
+rancorous/Y
+rancor/SM
+Randall/M
+Randal/M
+Randa/M
+Randee/M
+Randell/M
+Randene/M
+Randie/M
+Randi/M
+randiness/S
+Rand/M
+rand/MDGS
+Randolf/M
+Randolph/M
+randomization/SM
+randomize/SRDG
+randomness/SM
+random/PYS
+Randy/M
+randy/PRST
+Ranee/M
+ranee/SM
+ranged/C
+rangeland/S
+ranger/M
+ranges/C
+range/SM
+rang/GZDR
+ranginess/S
+ranging/C
+Rangoon/M
+rangy/RPT
+Rania/M
+Ranice/M
+Ranier/M
+Rani/MR
+Ranique/M
+rani's
+ranked/U
+ranker/M
+rank/GZTYDRMPJS
+Rankine/M
+ranking/M
+Rankin/M
+rankle/SDG
+rankness/MS
+Ranna/M
+ransacker/M
+ransack/GRDS
+Ransell/M
+ransomer/M
+Ransom/M
+ransom/ZGMRDS
+ranter/M
+rant/GZDRJS
+ranting/Y
+Raoul/M
+rapaciousness/MS
+rapacious/YP
+rapacity/MS
+rapeseed/M
+rape/SM
+Raphaela/M
+Raphael/M
+rapidity/MS
+rapidness/S
+rapid/YRPST
+rapier/SM
+rapine/SM
+rapist/MS
+rap/MDRSZG
+rapped
+rappelled
+rappelling
+rappel/S
+rapper/SM
+rapping/M
+rapporteur/SM
+rapport/SM
+rapprochement/SM
+rapscallion/MS
+raptness/S
+rapture/MGSD
+rapturousness/M
+rapturous/YP
+rapt/YP
+Rapunzel/M
+Raquela/M
+Raquel/M
+rarebit/MS
+rarefaction/MS
+rarefy/GSD
+rareness/MS
+rare/YTPGDRS
+rarity/SM
+Rasalgethi/M
+Rasalhague/M
+rascal/SMY
+rasher/M
+rashness/S
+rash/PZTYSR
+Rasia/M
+Rasla/M
+Rasmussen/M
+raspberry/SM
+rasper/M
+rasping/Y
+rasp/SGJMDR
+Rasputin/M
+raspy/RT
+Rastaban/M
+Rastafarian/M
+raster/MS
+Rastus/M
+ratchet/MDSG
+rateable
+rated/U
+rate/KNGSD
+ratepayer/SM
+rater/M
+rate's
+Ratfor/M
+rather
+Rather/M
+rathskeller/SM
+ratifier/M
+ratify/ZSRDGXN
+rating/M
+ratiocinate/VNGSDX
+ratiocination/M
+ratio/MS
+rationale/SM
+rationalism/SM
+rationalistic
+rationalist/S
+rationality/MS
+rationalization/SM
+rationalizer/M
+rationalize/ZGSRD
+rationalness/M
+rational/YPS
+ration/DSMG
+Ratliff/M
+ratlike
+ratline/SM
+rat/MDRSJZGB
+rattail
+rattan/MS
+ratted
+ratter/MS
+ratting
+rattlebrain/DMS
+rattle/RSDJGZ
+rattlesnake/MS
+rattletrap/MS
+rattling/Y
+rattly/TR
+rattrap/SM
+ratty/RT
+raucousness/SM
+raucous/YP
+Raul/M
+raunchily
+raunchiness/S
+raunchy/RTP
+ravage/GZRSD
+ravager/M
+raveling/S
+Ravel/M
+ravel/UGDS
+raven/JGMRDS
+Raven/M
+ravenous/YP
+raver/M
+rave/ZGDRSJ
+Ravid/M
+Ravi/M
+ravine/SDGM
+ravioli/SM
+ravisher/M
+ravishing/Y
+ravish/LSRDZG
+ravishment/SM
+Raviv/M
+Rawalpindi/M
+rawboned
+rawhide/SDMG
+Rawley/M
+Rawlings/M
+Rawlins/M
+Rawlinson/M
+rawness/SM
+raw/PSRYT
+Rawson/M
+Rayburn/M
+Raychel/M
+Raye/M
+ray/GSMD
+Rayleigh/M
+Ray/M
+Raymond/M
+Raymondville/M
+Raymund/M
+Raymundo/M
+Rayna/M
+Raynard/M
+Raynell/M
+Rayner/M
+Raynor/M
+rayon/SM
+Rayshell/M
+Raytheon/M
+raze/DRSG
+razer/M
+razorback/SM
+razorblades
+razor/MDGS
+razz/GDS
+razzmatazz/S
+Rb
+RBI/S
+RC
+RCA
+rcpt
+RCS
+rd
+RD
+RDA
+Rd/M
+reabbreviate
+reachability
+reachable/U
+reachably
+reached/U
+reacher/M
+reach/GRB
+reacquisition
+reactant/SM
+reacted/U
+reaction
+reactionary/SM
+reactivity
+readability/MS
+readable/P
+readably
+readdress/G
+Reade/M
+reader/M
+readership/MS
+Read/GM
+readied
+readies
+readily
+readinesses
+readiness/UM
+reading/M
+Reading/M
+read/JGZBR
+readopt/G
+readout/MS
+reads/A
+readying
+ready/TUPR
+Reagan/M
+Reagen/M
+realisms
+realism's
+realism/U
+realistically/U
+realistic/U
+realist/SM
+reality/USM
+realizability/MS
+realizableness/M
+realizable/SMP
+realizably/S
+realization/MS
+realized/U
+realize/JRSDBZG
+realizer/M
+realizes/U
+realizing/MY
+realm/M
+realness/S
+realpolitik/SM
+real/RSTP
+realtor's
+Realtor/S
+realty/SM
+Rea/M
+reamer/M
+ream/MDRGZ
+Reamonn/M
+reanimate
+reaper/M
+reappraise/G
+reap/SGZ
+rear/DRMSG
+rearguard/MS
+rearmost
+rearrange/L
+rearward/S
+reasonableness/SMU
+reasonable/UP
+reasonably/U
+Reasoner/M
+reasoner/SM
+reasoning/MS
+reasonless
+reasons
+reason/UBDMG
+reassess/GL
+reassuringly/U
+reattach/GSL
+reawakening/M
+Reba/M
+rebate/M
+Rebbecca/M
+Rebeca/M
+Rebecca's
+Rebecka/M
+Rebekah/M
+Rebeka/M
+Rebekkah/M
+rebeller
+rebellion/SM
+rebelliousness/MS
+rebellious/YP
+rebel/MS
+Rebe/M
+rebid
+rebidding
+rebind/G
+rebirth
+reboil/G
+rebook
+reboot/ZR
+rebound/G
+rebroadcast/MG
+rebuke/RSDG
+rebuking/Y
+rebus
+rebuttal/SM
+rebutting
+rec
+recalcitrance/SM
+recalcitrant/S
+recalibrate/N
+recantation/S
+recant/G
+recap
+recappable
+recapping
+recast/G
+recd
+rec'd
+recede
+receipt/SGDM
+receivable/S
+received/U
+receiver/M
+receivership/SM
+receive/ZGRSDB
+recency/M
+recension/M
+recentness/SM
+recent/YPT
+receptacle/SM
+receptionist/MS
+reception/MS
+receptiveness/S
+receptive/YP
+receptivity/S
+receptor/MS
+recessional/S
+recessionary
+recessiveness/M
+recessive/YPS
+recess/SDMVG
+rechargeable
+recheck/G
+recherché
+recherches
+recidivism/MS
+recidivist/MS
+Recife/M
+recipe/MS
+recipiency
+recipient/MS
+reciprocal/SY
+reciprocate/NGXVDS
+reciprocation/M
+reciprocity/MS
+recitalist/S
+recital/MS
+recitative/MS
+reciter/M
+recite/ZR
+recked
+recking
+recklessness/S
+reckless/PY
+reckoner/M
+reckoning/M
+reckon/SGRDJ
+reclaim/B
+reclamation/SM
+recliner/M
+recline/RSDZG
+recluse/MVNS
+reclusion/M
+recode/G
+recognizability
+recognizable/U
+recognizably
+recognize/BZGSRD
+recognizedly/S
+recognized/U
+recognizer/M
+recognizingly/S
+recognizing/UY
+recoilless
+recoinage
+recolor/GD
+recombinant
+recombine
+recommended/U
+recompense/GDS
+recompute/B
+reconciled/U
+reconciler/M
+reconcile/SRDGB
+reconditeness/M
+recondite/YP
+reconfigurability
+reconfigure/R
+reconnaissance/MS
+reconnect/R
+reconnoiter/GSD
+reconquer/G
+reconsecrate
+reconstitute
+reconstructed/U
+Reconstruction/M
+reconsult/G
+recontact/G
+recontaminate/N
+recontribute
+recook/G
+recopy/G
+recorded/AU
+records/A
+record/ZGJ
+recourse
+recoverability
+recoverable/U
+recover/B
+recovery/MS
+recreant/S
+recreational
+recriminate/GNVXDS
+recrimination/M
+recriminatory
+recross/G
+recrudesce/GDS
+recrudescence/MS
+recrudescent
+recruiter/M
+recruitment/MS
+recruit/ZSGDRML
+recrystallize
+rectal/Y
+rectangle/SM
+rectangular/Y
+recta's
+rectifiable
+rectification/M
+rectifier/M
+rectify/DRSGXZN
+rectilinear/Y
+rectitude/MS
+recto/MS
+rector/SM
+rectory/MS
+rectum/SM
+recumbent/Y
+recuperate/VGNSDX
+recuperation/M
+recur
+recurrence/MS
+recurrent
+recurse/NX
+recursion/M
+recusant/M
+recuse
+recyclable/S
+recycle/BZ
+redact/DGS
+redaction/SM
+redactor/MS
+redbird/SM
+redbreast/SM
+redbrick/M
+redbud/M
+redcap/MS
+redcoat/SM
+redcurrant/M
+redden/DGS
+redder
+reddest
+redding
+reddish/P
+Redd/M
+redeclaration
+redecorate
+redeemable/U
+redeem/BRZ
+redeemed/U
+redeemer/M
+Redeemer/M
+redemptioner/M
+redemption/RMS
+redemptive
+redeposit/M
+redetermination
+Redford/M
+Redgrave/M
+redhead/DRMS
+Redhook/M
+redial/G
+redirect/G
+redirection
+redlining/S
+Redmond/M
+redneck/SMD
+redness/MS
+redo/G
+redolence/MS
+redolent
+Redondo/M
+redouble/S
+redoubtably
+redound/GDS
+red/PYS
+redshift/S
+redskin/SM
+Redstone/M
+reduced/U
+reducer/M
+reduce/RSDGZ
+reducibility/M
+reducible
+reducibly
+reductionism/M
+reductionist/S
+reduction/SM
+reduct/V
+redundancy/SM
+redundant/Y
+redwood/SM
+redye
+redyeing
+Reeba/M
+Reebok/M
+Reece/M
+reecho/G
+reed/GMDR
+reediness/SM
+reeding/M
+Reed/M
+Reedville/M
+reedy/PTR
+reefer/M
+reef/GZSDRM
+reeker/M
+reek/GSR
+reeler/M
+reel's
+reel/USDG
+Ree/MDS
+Reena/M
+reenforcement
+reentrant
+Reese/M
+reestimate/M
+Reeta/M
+Reeva/M
+reeve/G
+Reeves
+reexamine
+refection/SM
+refectory/SM
+refer/B
+refereed/U
+refereeing
+referee/MSD
+reference/CGSRD
+referenced/U
+reference's
+referencing/U
+referendum/MS
+referentiality
+referential/YM
+referent/SM
+referral/SM
+referred
+referrer/S
+referring
+reffed
+reffing
+refile
+refinance
+refined/U
+refine/LZ
+refinement/MS
+refinish/G
+refit
+reflectance/M
+reflected/U
+reflectional
+reflection/SM
+reflectiveness/M
+reflective/YP
+reflectivity/M
+reflector/MS
+reflect/SDGV
+reflexion/MS
+reflexiveness/M
+reflexive/PSY
+reflexivity/M
+reflex/YV
+reflooring
+refluent
+reflux/G
+refocus/G
+refold/G
+reforestation
+reforge/G
+reformatory/SM
+reform/B
+reformed/U
+reformer/M
+reformism/M
+reformist/S
+refract/DGVS
+refractiveness/M
+refractive/PY
+refractometer/MS
+refractoriness/M
+refractory/PS
+refrain/DGS
+refreshed/U
+refreshing/Y
+refresh/LB
+refreshment/MS
+refrigerant/MS
+refrigerated/U
+refrigerate/XDSGN
+refrigeration/M
+refrigerator/MS
+refrozen
+refry/GS
+refugee/MS
+refuge/SDGM
+Refugio/M
+refulgence/SM
+refulgent
+refund/B
+refunder/M
+refurbish/L
+refurbishment/S
+refusal/SM
+refuse/R
+refuser/M
+refutation/MS
+refute/GZRSDB
+refuter/M
+ref/ZS
+reg
+regale/L
+regalement/S
+regal/GYRD
+regalia/M
+Regan/M
+regard/EGDS
+regardless/PY
+regather/G
+regatta/MS
+regency/MS
+regeneracy/MS
+regenerately
+regenerateness/M
+regenerate/U
+Regen/M
+reggae/SM
+Reggie/M
+Reggi/MS
+Reggy/M
+regicide/SM
+regime/MS
+regimen/MS
+regimental/S
+regimentation/MS
+regiment/SDMG
+Reginae
+Reginald/M
+Regina/M
+Reginauld/M
+Regine/M
+regionalism/MS
+regional/SY
+region/SM
+Regis/M
+register's
+register/UDSG
+registrable
+registrant/SM
+registrar/SM
+registration/AM
+registrations
+registry/MS
+Reg/MN
+regnant
+Regor/M
+regress/DSGV
+regression/MS
+regressiveness/M
+regressive/PY
+regressors
+regretfulness/M
+regretful/PY
+regret/S
+regrettable
+regrettably
+regretted
+regretting
+reground
+regroup/G
+regrow/G
+regularity/MS
+regularization/MS
+regularize/SDG
+regular/YS
+regulate/CSDXNG
+regulated/U
+regulation/M
+regulative
+regulator/SM
+regulatory
+Regulus/M
+regurgitate/XGNSD
+regurgitation/M
+rehabbed
+rehabbing
+rehabilitate/SDXVGN
+rehabilitation/M
+rehab/S
+rehang/G
+rehear/GJ
+rehearsal/SM
+rehearse
+rehearsed/U
+rehearser/M
+rehears/R
+reheat/G
+reheating/M
+Rehnquist
+rehydrate
+Reichenberg/M
+Reich/M
+Reichstags
+Reichstag's
+Reidar/M
+Reider/M
+Reid/MR
+reign/MDSG
+Reiko/M
+Reilly/M
+reimburse/GSDBL
+reimbursement/MS
+Reinald/M
+Reinaldo/MS
+Reina/M
+reindeer/M
+Reine/M
+reinforced/U
+reinforce/GSRDL
+reinforcement/MS
+reinforcer/M
+rein/GDM
+Reinhard/M
+Reinhardt/M
+Reinhold/M
+Reinold/M
+reinstate/L
+reinstatement/MS
+reinsurance
+Reinwald/M
+reissue
+REIT
+reiterative/SP
+rejecter/M
+rejecting/Y
+rejection/SM
+rejector/MS
+reject/RDVGS
+rejigger
+rejoice/RSDJG
+rejoicing/Y
+rejoinder/SM
+rejuvenate/NGSDX
+rejuvenatory
+relapse
+relatedly
+relatedness/MS
+related/U
+relater/M
+relate/XVNGSZ
+relational/Y
+relation/M
+relationship/MS
+relativeness/M
+relative/SPY
+relativism/M
+relativistic
+relativistically
+relativist/MS
+relativity/MS
+relator's
+relaxant/SM
+relaxation/MS
+relaxedness/M
+relaxed/YP
+relax/GZD
+relaxing/Y
+relay/GDM
+relearn/G
+releasable/U
+release/B
+released/U
+relenting/U
+relentlessness/SM
+relentless/PY
+relent/SDG
+relevance/SM
+relevancy/MS
+relevant/Y
+reliability/UMS
+reliables
+reliable/U
+reliably/U
+reliance/MS
+reliant/Y
+relicense/R
+relic/MS
+relict/C
+relict's
+relief/M
+relievedly
+relieved/U
+reliever/M
+relieve/RSDZG
+religionists
+religion/SM
+religiosity/M
+religiousness/MS
+religious/PY
+relink/G
+relinquish/GSDL
+relinquishment/SM
+reliquary/MS
+relish/GSD
+relive/GB
+reload/GR
+relocate/B
+reluctance/MS
+reluctant/Y
+rel/V
+rely/DG
+rem
+Re/M
+remade/S
+remainder/SGMD
+remain/GD
+remake/M
+remand/DGS
+remap
+remapping
+remarkableness/S
+remarkable/U
+remarkably
+remark/BG
+remarked/U
+Remarque/M
+rematch/G
+Rembrandt/M
+remeasure/D
+remediableness/M
+remediable/P
+remedy/SDMG
+remembered/U
+rememberer/M
+remember/GR
+remembrance/MRS
+remembrancer/M
+Remington/M
+reminisce/GSD
+reminiscence/SM
+reminiscent/Y
+remissness/MS
+remiss/YP
+remit/S
+remittance/MS
+remitted
+remitting/U
+Rem/M
+remnant/MS
+remodel/G
+remolding
+remonstrant/MS
+remonstrate/SDXVNG
+remonstration/M
+remonstrative/Y
+remorsefulness/M
+remorseful/PY
+remorselessness/MS
+remorseless/YP
+remorse/SM
+remoteness/MS
+remote/RPTY
+remoulds
+removal/MS
+REM/S
+remunerated/U
+remunerate/VNGXSD
+remuneration/M
+remunerativeness/M
+remunerative/YP
+Remus/M
+Remy/M
+Renado/M
+Renae/M
+renaissance/S
+Renaissance/SM
+renal
+Renaldo/M
+Rena/M
+Renard/M
+Renascence/SM
+Renata/M
+Renate/M
+Renato/M
+renaturation
+Renaud/M
+Renault/MS
+rend
+renderer/M
+render/GJRD
+rendering/M
+rendezvous/DSMG
+rendition/GSDM
+rend/RGZS
+Renee/M
+renegade/SDMG
+renege/GZRSD
+reneger/M
+Renelle/M
+Renell/M
+Rene/M
+renewal/MS
+renew/BG
+renewer/M
+Renie/M
+rennet/MS
+Rennie/M
+rennin/SM
+Renoir/M
+Reno/M
+renounce/LGRSD
+renouncement/MS
+renouncer/M
+renovate/NGXSD
+renovation/M
+renovator/SM
+renown/SGDM
+Rensselaer/M
+rentaller
+rental/SM
+renter/M
+rent/GZMDRS
+renumber/G
+renumeration
+renunciate/VNX
+renunciation/M
+Renville/M
+reoccupy/G
+reopen/G
+reorganized/U
+repack/G
+repairable/U
+repair/BZGR
+repairer/M
+repairman/M
+repairmen
+repairs/E
+repaper
+reparable
+reparation/SM
+reparteeing
+repartee/MDS
+repartition/Z
+repast/G
+repatriate/SDXNG
+repave
+repealer/M
+repeal/GR
+repeatability/M
+repeatable/U
+repeatably
+repeated/Y
+repeater/M
+repeat/RDJBZG
+repelled
+repellent/SY
+repelling/Y
+repel/S
+repentance/SM
+repentant/SY
+repent/RDG
+repertoire/SM
+repertory/SM
+repetition
+repetitiousness/S
+repetitious/YP
+repetitiveness/MS
+repetitive/PY
+repine/R
+repiner/M
+replace/RL
+replay/GM
+replenish/LRSDG
+replenishment/S
+repleteness/MS
+replete/SDPXGN
+repletion/M
+replica/SM
+replicate/SDVG
+replicator/S
+replug
+reply/X
+Rep/M
+repopulate
+reported/Y
+reportorial/Y
+reposeful
+repose/M
+repository/MS
+reprehend/GDS
+reprehensibility/MS
+reprehensibleness/M
+reprehensible/P
+reprehensibly
+reprehension/MS
+representable/U
+representational/Y
+representativeness/M
+Representative/S
+representative/SYMP
+representativity
+represented/U
+represent/GB
+repression/SM
+repressiveness/M
+repressive/YP
+repress/V
+reprieve/GDS
+reprimand/SGMD
+reprint/M
+reprisal/MS
+reproacher/M
+reproachfulness/M
+reproachful/YP
+reproach/GRSDB
+reproaching/Y
+reprobate/N
+reprocess/G
+reproducibility/MS
+reproducible/S
+reproducibly
+reproductive/S
+reproof/G
+reprove/R
+reproving/Y
+rep/S
+reptile/SM
+reptilian/S
+Republicanism/S
+republicanism/SM
+Republican/S
+republic/M
+republish/G
+repudiate/XGNSD
+repudiation/M
+repudiator/S
+repugnance/MS
+repugnant/Y
+repulse/VNX
+repulsion/M
+repulsiveness/MS
+repulsive/PY
+reputability/SM
+reputably/E
+reputation/SM
+reputed/Y
+repute/ESB
+reputing
+requested/U
+request/G
+Requiem/MS
+requiem/SM
+require/LR
+requirement/MS
+requisiteness/M
+requisite/PNXS
+requisitioner/M
+requisition/GDRM
+requital/MS
+requited/U
+requiter/M
+requite/RZ
+reread/G
+rerecord/G
+rerouteing
+rerunning
+res/C
+rescale
+rescind/SDRG
+rescission/SM
+rescue/GZRSD
+reseal/BG
+research/MB
+reselect/G
+resemblant
+resemble/DSG
+resend/G
+resent/DSLG
+resentfulness/SM
+resentful/PY
+resentment/MS
+reserpine/MS
+reservation/MS
+reservednesses
+reservedness/UM
+reserved/UYP
+reservist/SM
+reservoir/MS
+reset/RDG
+resettle/L
+reshipping
+reshow/G
+reshuffle/M
+reside/G
+residence/MS
+residency/SM
+residential/Y
+resident/SM
+resider/M
+residua
+residual/YS
+residuary
+residue/SM
+residuum/M
+resignation/MS
+resigned/YP
+resilience/MS
+resiliency/S
+resilient/Y
+resin/D
+resinlike
+resinous
+resiny
+resistance/SM
+Resistance/SM
+resistantly
+resistants
+resistant/U
+resisted/U
+resistible
+resistibly
+resisting/U
+resistiveness/M
+resistive/PY
+resistivity/M
+resistless
+resistor/MS
+resist/RDZVGS
+resize/G
+resold
+resole/G
+resoluble
+resoluteness/MS
+resolute/PYTRV
+resolvability/M
+resolvable/U
+resolved/U
+resolvent
+resonance/SM
+resonant/YS
+resonate/DSG
+resonator/MS
+resorption/MS
+resort/R
+resound/G
+resourcefulness/SM
+resourceful/PY
+resp
+respectability/SM
+respectable/SP
+respectably
+respect/BSDRMZGV
+respected/E
+respectful/EY
+respectfulness/SM
+respecting/E
+respectiveness/M
+respective/PY
+respect's/E
+respects/E
+respell/G
+respiration/MS
+respirator/SM
+respiratory/M
+resplendence/MS
+resplendent/Y
+respondent/MS
+respond/SDRZG
+responser/M
+response/RSXMV
+responsibility/MS
+responsibleness/M
+responsible/P
+responsibly
+responsiveness/MSU
+responsive/YPU
+respray/G
+restart/B
+restate/L
+restaurant/SM
+restaurateur/SM
+rest/DRSGVM
+rested/U
+rester/M
+restfuller
+restfullest
+restfulness/MS
+restful/YP
+restitution/SM
+restiveness/SM
+restive/PY
+restlessness/MS
+restless/YP
+restorability
+Restoration/M
+restoration/MS
+restorative/PYS
+restorer/M
+restore/Z
+restrained/UY
+restraint/MS
+restrict/DVGS
+restricted/YU
+restriction/SM
+restrictively
+restrictiveness/MS
+restrictives
+restrictive/U
+restroom/SM
+restructurability
+restructure
+rest's/U
+rests/U
+restudy/M
+restyle
+resubstitute
+resultant/YS
+result/SGMD
+resume/SDBG
+resumption/MS
+resurface
+resurgence/MS
+resurgent
+resurrect/GSD
+resurrection/SM
+resurvey/G
+resuscitate/XSDVNG
+resuscitation/M
+resuscitator/MS
+retail/Z
+retainer/M
+retain/LZGSRD
+retake
+retaliate/VNGXSD
+retaliation/M
+retaliatory
+Reta/M
+retardant/SM
+retardation/SM
+retarder/M
+retard/ZGRDS
+retch/SDG
+retention/SM
+retentiveness/S
+retentive/YP
+retentivity/M
+retest/G
+Retha/M
+rethought
+reticence/S
+reticent/Y
+reticle/SM
+reticular
+reticulate/GNYXSD
+reticulation/M
+reticule/MS
+reticulum/M
+retinal/S
+retina/SM
+retinue/MS
+retiredness/M
+retiree/MS
+retire/L
+retirement/SM
+retiring/YP
+retort/GD
+retract/DG
+retractile
+retrench/L
+retrenchment/MS
+retributed
+retribution/MS
+retributive
+retrieval/SM
+retriever/M
+retrieve/ZGDRSB
+retroactive/Y
+retrofire/GMSD
+retrofit/S
+retrofitted
+retrofitting
+retroflection
+retroflex/D
+retroflexion/M
+retrogradations
+retrograde/GYDS
+retrogression/MS
+retrogressive/Y
+retrogress/SDVG
+retrorocket/MS
+retro/SM
+retrospection/MS
+retrospective/SY
+retrospect/SVGMD
+retrovirus/S
+retrovision
+retry/G
+retsina/SM
+returnable/S
+returned/U
+returnee/SM
+retype
+Reube/M
+Reuben/M
+Reub/NM
+Reunion/M
+reuse/B
+Reuters
+Reuther/M
+reutilization
+Reuven/M
+Reva/M
+revanchist
+revealed/U
+revealingly
+revealing/U
+reveal/JBG
+reveille/MS
+revelation/MS
+Revelation/MS
+revelatory
+revelry/MS
+revel/SJRDGZ
+revenge/MGSRD
+revenger/M
+revenuer/M
+revenue/ZR
+reverberant
+reverberate/XVNGSD
+reverberation/M
+revere/GSD
+Revere/M
+reverencer/M
+reverence/SRDGM
+Reverend
+reverend/SM
+reverential/Y
+reverent/Y
+reverie/SM
+reversal/MS
+reverser/M
+reverse/Y
+reversibility/M
+reversible/S
+reversibly
+reversioner/M
+reversion/R
+revers/M
+reverter/M
+revertible
+revert/RDVGS
+revet/L
+revetment/SM
+review/G
+revile/GZSDL
+revilement/MS
+reviler/M
+revise/BRZ
+revised/U
+revisionary
+revisionism/SM
+revisionist/SM
+revitalize/ZR
+revivalism/MS
+revivalist/MS
+revival/SM
+reviver/M
+revive/RSDG
+revivification/M
+revivify/X
+Revkah/M
+Revlon/M
+Rev/M
+revocable
+revoke/GZRSD
+revolter/M
+revolt/GRD
+revolting/Y
+revolutionariness/M
+revolutionary/MSP
+revolutionist/MS
+revolutionize/GDSRZ
+revolutionizer/M
+revolution/SM
+revolve/BSRDZJG
+revolver/M
+revue/MS
+revulsion/MS
+revved
+revving
+rev/ZM
+rewarded/U
+rewarding/Y
+rewarm/G
+reweave
+rewedding
+reweigh/G
+rewind/BGR
+rewire/G
+rework/G
+rexes
+Rex/M
+Reyes
+Reykjavik/M
+re/YM
+Rey/M
+Reynaldo/M
+Reyna/M
+Reynard/M
+Reynold/SM
+rezone
+Rf
+RF
+RFC
+RFD
+R/G
+rhapsodic
+rhapsodical
+rhapsodize/GSD
+rhapsody/SM
+Rhea/M
+rhea/SM
+Rheba/M
+Rhee/M
+Rheims/M
+Rheinholdt/M
+Rhenish
+rhenium/MS
+rheology/M
+rheostat/MS
+rhesus/S
+Rheta/M
+rhetorical/YP
+rhetorician/MS
+rhetoric/MS
+Rhetta/M
+Rhett/M
+rheumatically
+rheumatic/S
+rheumatics/M
+rheumatism/SM
+rheumatoid
+rheum/MS
+rheumy/RT
+Rhiamon/M
+Rhianna/M
+Rhiannon/M
+Rhianon/M
+Rhinelander/M
+Rhineland/RM
+Rhine/M
+rhinestone/SM
+rhinitides
+rhinitis/M
+rhinoceros/MS
+rhino/MS
+rhinotracheitis
+rhizome/MS
+Rh/M
+Rhoda/M
+Rhodes
+Rhodesia/M
+Rhodesian/S
+Rhodia/M
+Rhodie/M
+rhodium/MS
+rhododendron/SM
+rhodolite/M
+rhodonite/M
+Rhody/M
+rhombic
+rhomboidal
+rhomboid/SM
+rhombus/SM
+rho/MS
+Rhona/M
+Rhonda/M
+Rhone
+rhubarb/MS
+rhyme/DSRGZM
+rhymester/MS
+Rhys/M
+rhythmical/Y
+rhythmic/S
+rhythmics/M
+rhythm/MS
+RI
+rial/MS
+Riane/M
+Riannon/M
+Rianon/M
+ribaldry/MS
+ribald/S
+ribbed
+Ribbentrop/M
+ribber/S
+ribbing/M
+ribbon/DMSG
+ribcage
+rib/MS
+riboflavin/MS
+ribonucleic
+ribosomal
+ribosome/MS
+Rica/M
+Rican/SM
+Ricard/M
+Ricardo/M
+Ricca/M
+Riccardo/M
+rice/DRSMZG
+Rice/M
+ricer/M
+Richard/MS
+Richardo/M
+Richardson/M
+Richart/M
+Richelieu/M
+richen/DG
+Richey/M
+Richfield/M
+Richie/M
+Richland/M
+Rich/M
+Richmond/M
+Richmound/M
+richness/MS
+Richter/M
+Richthofen/M
+Richy/M
+rich/YNSRPT
+Rici/M
+Rickard/M
+Rickenbacker/M
+Rickenbaugh/M
+Rickert/M
+rickets/M
+rickety/RT
+Rickey/M
+rick/GSDM
+Rickie/M
+Ricki/M
+Rick/M
+Rickover/M
+rickrack/MS
+rickshaw/SM
+Ricky/M
+Ric/M
+ricochet/GSD
+Rico/M
+Ricoriki/M
+ricotta/MS
+riddance/SM
+ridden
+ridding
+riddle/GMRSD
+Riddle/M
+ride/CZSGR
+Ride/M
+rider/CM
+riderless
+ridership/S
+ridge/DSGM
+Ridgefield/M
+ridgepole/SM
+Ridgway/M
+ridgy/RT
+ridicule/MGDRS
+ridiculer/M
+ridiculousness/MS
+ridiculous/PY
+riding/M
+rid/ZGRJSB
+Riemann/M
+Riesling/SM
+rife/RT
+riff/GSDM
+riffle/SDG
+riffraff/SM
+rifled/U
+rifle/GZMDSR
+rifleman/M
+riflemen
+rifler/M
+rifling/M
+rift/GSMD
+Riga/M
+rigamarole's
+rigatoni/M
+Rigel/M
+rigged
+rigger/SM
+rigging/MS
+Riggs/M
+righteousnesses/U
+righteousness/MS
+righteous/PYU
+rightfulness/MS
+rightful/PY
+rightism/SM
+rightist/S
+rightmost
+rightness/MS
+Right/S
+right/SGTPYRDN
+rightsize/SDG
+rights/M
+rightward/S
+rigidify/S
+rigidity/S
+rigidness/S
+rigid/YP
+rigmarole/MS
+rig/MS
+Rigoberto/M
+Rigoletto/M
+rigor/MS
+rigorousness/S
+rigorous/YP
+Riki/M
+Rikki/M
+Rik/M
+rile/DSG
+Riley/M
+Rilke/M
+rill/GSMD
+Rimbaud/M
+rime/MS
+rimer/M
+rim/GSMDR
+rimless
+rimmed
+rimming
+Rinaldo/M
+Rina/M
+rind/MDGS
+Rinehart/M
+ringer/M
+ring/GZJDRM
+ringing/Y
+ringleader/MS
+ringlet/SM
+ringlike
+Ringling/M
+Ring/M
+ringmaster/MS
+Ringo/M
+ringside/ZMRS
+ringworm/SM
+rink/GDRMS
+rinse/DSRG
+Riobard/M
+Rio/MS
+Riordan/M
+rioter/M
+riotousness/M
+riotous/PY
+riot/SMDRGZJ
+RIP
+riparian/S
+ripcord/SM
+ripened/U
+ripenesses
+ripeness/UM
+ripen/RDG
+ripe/PSY
+riper/U
+ripest/U
+Ripley/M
+Rip/M
+rip/NDRSXTG
+ripoff/S
+riposte/SDMG
+ripped
+ripper/SM
+ripping
+rippler/M
+ripple/RSDGM
+ripply/TR
+ripsaw/GDMS
+riptide/SM
+Risa/M
+RISC
+risen
+riser/M
+rise/RSJZG
+risibility/SM
+risible/S
+rising/M
+risker/M
+risk/GSDRM
+riskily
+riskiness/MS
+risky/RTP
+risotto/SM
+risqué
+rissole/M
+Ritalin
+Rita/M
+Ritchie/M
+rite/DSM
+Ritter/M
+ritualism/SM
+ritualistic
+ritualistically
+ritualized
+ritual/MSY
+Ritz/M
+ritzy/TR
+rivaled/U
+Rivalee/M
+rivalry/MS
+rival/SGDM
+Riva/MS
+rive/CSGRD
+Rivera/M
+riverbank/SM
+riverbed/S
+riverboat/S
+river/CM
+riverfront
+riverine
+Rivers
+Riverside/M
+riverside/S
+Riverview/M
+riveter/M
+rivet/GZSRDM
+riveting/Y
+Riviera/MS
+Rivi/M
+Rivkah/M
+rivulet/SM
+Rivy/M
+riv/ZGNDR
+Riyadh/M
+riyal/SM
+rm
+RMS
+RN
+RNA
+Rn/M
+roach/GSDM
+Roach/M
+roadbed/MS
+roadblock/SMDG
+roadhouse/SM
+roadie/S
+roadkill/S
+road/MIS
+roadrunner/MS
+roadshow/S
+roadside/S
+roadsigns
+roadster/SM
+roadsweepers
+roadway/SM
+roadwork/SM
+roadworthy
+roam/DRGZS
+Roana/M
+Roanna/M
+Roanne/M
+Roanoke/M
+roan/S
+roar/DRSJGZ
+roarer/M
+roaring/T
+Roarke/M
+roaster/M
+roast/SGJZRD
+robbed
+robber/SM
+Robbert/M
+robbery/SM
+Robbie/M
+Robbi/M
+robbing
+Robbin/MS
+Robb/M
+Robby/M
+Robbyn/M
+robe/ESDG
+Robena/M
+Robenia/M
+Robers/M
+Roberson/M
+Roberta/M
+Robert/MS
+Roberto/M
+Robertson/SM
+robe's
+Robeson/M
+Robespierre/M
+Robina/M
+Robinet/M
+Robinetta/M
+Robinette/M
+Robinett/M
+Robinia/M
+Robin/M
+robin/MS
+Robinson/M
+Robinsonville/M
+Robles/M
+Rob/MZ
+robotic/S
+robotism
+robotize/GDS
+robot/MS
+rob/SDG
+Robson/M
+Robt/M
+robustness/SM
+robust/RYPT
+Roby/M
+Robyn/M
+Rocco/M
+Rocha/M
+Rochambeau/M
+Rochella/M
+Rochelle/M
+Rochell/M
+Roche/M
+Rochester/M
+Rochette/M
+Roch/M
+rockabilly/MS
+rockabye
+Rockaway/MS
+rockbound
+Rockefeller/M
+rocker/M
+rocketry/MS
+rocket/SMDG
+Rockey/M
+rockfall/S
+Rockford/M
+rock/GZDRMS
+Rockie/M
+rockiness/MS
+Rockland/M
+Rock/M
+Rockne/M
+Rockville/M
+Rockwell/M
+Rocky/SM
+rocky/SRTP
+rococo/MS
+Roda/M
+rodded
+Roddenberry/M
+rodder
+Roddie/M
+rodding
+Rodd/M
+Roddy/M
+rodent/MS
+rodeo/SMDG
+Roderich/M
+Roderick/M
+Roderic/M
+Roderigo/M
+rode/S
+Rodger/M
+Rodge/ZMR
+Rodie/M
+Rodi/M
+Rodina/M
+Rodin/M
+Rod/M
+Rodney/M
+Rodolfo/M
+Rodolphe/M
+Rodolph/M
+Rodrick/M
+Rodrigo/M
+Rodriguez/M
+Rodrique/M
+Rodriquez/M
+rod/SGMD
+roebuck/SM
+Roentgen's
+roentgen/SM
+roe/SM
+ROFL
+Rogelio/M
+roger/GSD
+Rogerio/M
+Roger/M
+Roget/M
+Rog/MRZ
+rogued/K
+rogue/GMDS
+roguery/MS
+rogues/K
+roguing/K
+roguishness/SM
+roguish/PY
+roil/SGD
+Roi/SM
+roisterer/M
+roister/SZGRD
+Rojas/M
+Roland/M
+Rolando/M
+Roldan/M
+role/MS
+Roley/M
+Rolfe/M
+Rolf/M
+Rolland/M
+rollback/SM
+rolled/A
+Rollerblade/S
+rollerskating
+roller/SM
+rollick/DGS
+rollicking/Y
+Rollie/M
+rolling/S
+Rollin/SM
+Rollo/M
+rollover/S
+roll/UDSG
+Rolodex
+Rolph/M
+Rolvaag/M
+ROM
+romaine/MS
+Romain/M
+Roma/M
+romancer/M
+romance/RSDZMG
+Romanesque/S
+Romania/M
+Romanian/SM
+Romano/MS
+Romanov/M
+roman/S
+Romansh/M
+Romans/M
+Roman/SM
+romantically/U
+romanticism/MS
+Romanticism/S
+romanticist/S
+romanticize/SDG
+romantic/MS
+Romany/SM
+Romeo/MS
+romeo/S
+Romero/M
+Rome/SM
+Rommel/M
+Romney/M
+Romola/M
+Romona/M
+Romonda/M
+romper/M
+romp/GSZDR
+Rom/SM
+Romulus/M
+Romy/M
+Ronalda/M
+Ronald/M
+Rona/M
+Ronda/M
+rondo/SM
+Ronica/M
+Ron/M
+Ronna/M
+Ronnica/M
+Ronnie/M
+Ronni/M
+Ronny/M
+Ronstadt/M
+Rontgen
+Roobbie/M
+rood/MS
+roof/DRMJGZS
+roofer/M
+roofgarden
+roofing/M
+roofless
+rooftop/S
+rookery/MS
+rook/GDMS
+rookie/SRMT
+roomer/M
+roomette/SM
+roomful/MS
+roominess/MS
+roommate/SM
+room/MDRGZS
+roomy/TPSR
+Rooney/M
+Rooseveltian
+Roosevelt/M
+rooster/M
+roost/SGZRDM
+rooted/P
+rooter/M
+rootlessness/M
+rootless/P
+rootlet/SM
+Root/M
+root/MGDRZS
+rootstock/M
+rope/DRSMZG
+roper/M
+roping/M
+Roquefort/MS
+Roquemore/M
+Rora/M
+Rorie/M
+Rori/M
+Rorke/M
+Rorschach
+Rory/M
+Rosabella/M
+Rosabelle/M
+Rosabel/M
+Rosaleen/M
+Rosales/M
+Rosalia/M
+Rosalie/M
+Rosalinda/M
+Rosalinde/M
+Rosalind/M
+Rosaline/M
+Rosalynd/M
+Rosalyn/M
+Rosa/M
+Rosamond/M
+Rosamund/M
+Rosana/M
+Rosanna/M
+Rosanne/M
+Rosario/M
+rosary/SM
+Roscoe/M
+Rosco/M
+Roseanna/M
+Roseanne/M
+Roseann/M
+roseate/Y
+Roseau
+rosebud/MS
+rosebush/SM
+Rosecrans/M
+Roseland/M
+Roselia/M
+Roseline/M
+Roselin/M
+Rosella/M
+Roselle/M
+Rose/M
+Rosemaria/M
+Rosemarie/M
+Rosemary/M
+rosemary/MS
+rose/MGDS
+Rosemonde/M
+Rosenberg/M
+Rosenblum/M
+Rosendo/M
+Rosene/M
+Rosen/M
+Rosenthal/M
+Rosenzweig/M
+Rosetta/M
+Rosette/M
+rosette/SDMG
+rosewater
+rosewood/SM
+Roshelle/M
+Rosicrucian/M
+Rosie/M
+rosily
+Rosina/M
+rosiness/MS
+rosin/SMDG
+Rosita/M
+Roslyn/M
+Rosmunda/M
+Ros/N
+Ross
+Rossetti/M
+Rossie/M
+Rossi/M
+Rossini/M
+Rossy/M
+Rostand/M
+roster/DMGS
+Rostov/M
+rostra's
+rostrum/SM
+Roswell/M
+Rosy/M
+rosy/RTP
+rota/MS
+Rotarian/SM
+rotary/S
+rotated/U
+rotate/VGNXSD
+rotational/Y
+rotation/M
+rotative/Y
+rotator/SM
+rotatory
+ROTC
+rote/MS
+rotgut/MS
+Roth/M
+Rothschild/M
+rotisserie/MS
+rotogravure/SM
+rotor/MS
+rototill/RZ
+rot/SDG
+rotted
+rottenness/S
+rotten/RYSTP
+Rotterdam/M
+rotter/M
+rotting
+rotunda/SM
+rotundity/S
+rotundness/S
+rotund/SDYPG
+Rouault/M
+roué/MS
+rouge/GMDS
+roughage/SM
+roughen/DG
+rougher/M
+roughhouse/GDSM
+roughish
+roughneck/MDSG
+roughness/MS
+roughs
+roughshod
+rough/XPYRDNGT
+roulette/MGDS
+roundabout/PSM
+roundedness/M
+rounded/P
+roundelay/SM
+roundels
+rounder/M
+roundhead/D
+roundheadedness/M
+roundheaded/P
+roundhouse/SM
+roundish
+roundness/MS
+roundoff
+roundup/MS
+roundworm/MS
+round/YRDSGPZT
+Rourke/M
+rouse/DSRG
+rouser/M
+Rousseau/M
+roustabout/SM
+roust/SGD
+route/ASRDZGJ
+router/M
+route's
+rout/GZJMDRS
+routine/SYM
+routing/M
+routinize/GSD
+Rouvin/M
+rover/M
+Rover/M
+rove/ZGJDRS
+roving/M
+Rowan/M
+rowboat/SM
+rowdily
+rowdiness/MS
+rowdyism/MS
+rowdy/PTSR
+rowel/DMSG
+Rowe/M
+Rowena/M
+rowen/M
+Rowen/M
+rower/M
+Rowland/M
+Rowley/M
+Row/MN
+Rowney/M
+row/SJZMGNDR
+Roxana/M
+Roxane/M
+Roxanna/M
+Roxanne/M
+Roxie/M
+Roxi/M
+Roxine/M
+Roxy/M
+royalist/SM
+Royall/M
+Royal/M
+royal/SY
+royalty/MS
+Royce/M
+Roy/M
+Rozalie/M
+Rozalin/M
+Rozamond/M
+Rozanna/M
+Rozanne/M
+Rozele/M
+Rozella/M
+Rozelle/M
+Roze/M
+Rozina/M
+Roz/M
+RP
+rpm
+RPM
+rps
+RR
+Rriocard/M
+rs
+r's
+R's
+RSFSR
+RSI
+RSV
+RSVP
+RSX
+rt
+rte
+Rte
+RTFM
+r/TGVJ
+Rubaiyat/M
+rubato/MS
+rubbed
+rubberize/GSD
+rubberneck/DRMGSZ
+rubber/SDMG
+rubbery/TR
+rubbing/M
+rubbish/DSMG
+rubbishy
+rubble/GMSD
+rubdown/MS
+rubella/MS
+Rube/M
+Ruben/MS
+rube/SM
+Rubetta/M
+Rubia/M
+Rubicon/SM
+rubicund
+rubidium/SM
+Rubie/M
+Rubik/M
+Rubi/M
+Rubina/M
+Rubin/M
+Rubinstein/M
+ruble/MS
+rubout
+rubric/MS
+rub/S
+Ruby/M
+ruby/MTGDSR
+Ruchbah/M
+ruck/M
+rucksack/SM
+ruckus/SM
+ruction/SM
+rudderless
+rudder/MS
+Ruddie/M
+ruddiness/MS
+Rudd/M
+Ruddy/M
+ruddy/PTGRSD
+rudeness/MS
+rude/PYTR
+Rudie/M
+Rudiger/M
+rudimentariness/M
+rudimentary/P
+rudiment/SM
+Rudolf/M
+Rudolfo/M
+Rudolph/M
+Rudyard/M
+Rudy/M
+ruefulness/S
+rueful/PY
+rue/GDS
+Rufe/M
+ruff/GSYDM
+ruffian/GSMDY
+ruffled/U
+ruffler/M
+ruffle/RSDG
+ruffly/TR
+Rufus/M
+Rugby's
+rugby/SM
+ruggedness/S
+rugged/PYRT
+Ruggiero/M
+rugging
+rug/MS
+Ruhr/M
+ruination/MS
+ruiner/M
+ruin/MGSDR
+ruinousness/M
+ruinous/YP
+Ruiz/M
+rulebook/S
+ruled/U
+rule/MZGJDRS
+ruler/GMD
+ruling/M
+Rumanian's
+Rumania's
+rumba/GDMS
+rumble/JRSDG
+rumbler/M
+rumbustious
+rumen/M
+Rumford/M
+Ru/MH
+ruminant/YMS
+ruminate/VNGXSD
+ruminative/Y
+rummage/GRSD
+rummager/M
+Rummel/M
+rummer
+rummest
+rummy/TRSM
+rumored/U
+rumorer/M
+rumormonger/SGMD
+rumor/ZMRDSG
+Rumpelstiltskin/M
+rump/GMYDS
+rumple/SDG
+rumply/TR
+rumpus/SM
+rum/XSMN
+runabout/SM
+runaround/S
+run/AS
+runaway/S
+rundown/SM
+rune/MS
+Runge/M
+rung/MS
+runic
+runlet/SM
+runnable
+runnel/SM
+runner/MS
+running/S
+Runnymede/M
+runny/RT
+runoff/MS
+runtime
+runtiness/M
+runt/MS
+runty/RPT
+runway/MS
+Runyon/M
+rupee/MS
+Ruperta/M
+Rupert/M
+Ruperto/M
+rupiah/M
+rupiahs
+Ruppert/M
+Ruprecht/M
+rupture/GMSD
+rurality/M
+rural/Y
+Rurik/M
+ruse/MS
+Rushdie/M
+rush/DSRGZ
+rusher/M
+rushes/I
+rushing/M
+Rush/M
+Rushmore/M
+rushy/RT
+Ruskin/M
+rusk/MS
+Russell/M
+Russel/M
+russet/MDS
+russetting
+Russia/M
+Russian/SM
+Russo/M
+Russ/S
+Rustbelt/M
+rustically
+rusticate/GSD
+rustication/M
+rusticity/S
+rustic/S
+Rustie/M
+rustiness/MS
+Rustin/M
+rustler/M
+rustle/RSDGZ
+rust/MSDG
+rustproof/DGS
+Rusty/M
+rusty/XNRTP
+rutabaga/SM
+Rutger/SM
+Ruthanne/M
+Ruthann/M
+Ruthe/M
+ruthenium/MS
+rutherfordium/SM
+Rutherford/M
+Ruthie/M
+Ruthi/M
+ruthlessness/MS
+ruthless/YP
+Ruth/M
+Ruthy/M
+Rutland/M
+Rutledge/M
+rut/MS
+rutted
+Rutter/M
+Ruttger/M
+rutting
+rutty/RT
+Ruy/M
+RV
+RVs
+Rwandan/S
+Rwanda/SM
+Rwy/M
+Rx/M
+Ryan/M
+Ryann/M
+Rycca/M
+Rydberg/M
+Ryder/M
+rye/MS
+Ryley/M
+Ry/M
+Ryon/M
+Ryukyu/M
+Ryun/M
+S
+SA
+Saab/M
+Saar/M
+Saba/M
+sabbath
+Sabbath/M
+Sabbaths
+sabbatical/S
+sabered/U
+saber/GSMD
+Sabik/M
+Sabina/M
+Sabine/M
+Sabin/M
+sable/GMDS
+sabotage/DSMG
+saboteur/SM
+sabot/MS
+Sabra/M
+sabra/MS
+Sabrina/M
+SAC
+Sacajawea/M
+saccharides
+saccharine
+saccharin/MS
+Sacco/M
+sacerdotal
+Sacha/M
+sachem/MS
+sachet/SM
+Sachs/M
+sackcloth/M
+sackcloths
+sacker/M
+sackful/MS
+sack/GJDRMS
+sacking/M
+sacral
+sacra/L
+sacramental/S
+sacrament/DMGS
+Sacramento/M
+sacredness/S
+sacred/PY
+sacrificer/M
+sacrifice/RSDZMG
+sacrificial/Y
+sacrilege/MS
+sacrilegious/Y
+sacristan/SM
+sacristy/MS
+sacroiliac/S
+sacrosanctness/MS
+sacrosanct/P
+sacrum/M
+sac/SM
+Sada/M
+Sadat/M
+Saddam/M
+sadden/DSG
+sadder
+saddest
+saddlebag/SM
+saddler/M
+saddle's
+saddle/UGDS
+Sadducee/M
+Sadella/M
+Sade/M
+sades
+Sadie/M
+sadism/MS
+sadistic
+sadistically
+sadist/MS
+sadness/SM
+sadomasochism/MS
+sadomasochistic
+sadomasochist/S
+sad/PY
+Sadr/M
+Sadye/M
+safari/GMDS
+safeguard/MDSG
+safekeeping/MS
+safeness/MS
+safeness's/U
+safes
+safety/SDMG
+safe/URPTY
+safflower/SM
+saffron/MS
+sagaciousness/M
+sagacious/YP
+sagacity/MS
+saga/MS
+Sagan/M
+sagebrush/SM
+sage/MYPS
+sagged
+sagger
+sagging
+saggy/RT
+Saginaw/M
+Sagittarius/MS
+sago/MS
+sag/TSR
+saguaro/SM
+Sahara/M
+Saharan/M
+Sahel
+sahib/MS
+Saidee/M
+saids
+said/U
+Saigon/M
+sailboard/DGS
+sailboat/SRMZG
+sailcloth/M
+sailcloths
+sailer/M
+sailfish/SM
+sail/GJMDRS
+sailing/M
+sailor/YMS
+sailplane/SDMG
+sainthood/MS
+saintlike
+saintliness/MS
+saintly/RTP
+saint/YDMGS
+Saiph/M
+saith
+saiths
+Sakai/M
+sake/MRS
+saker/M
+Sakhalin/M
+Sakharov/M
+Saki/M
+saki's
+salaam/GMDS
+salable/U
+salaciousness/MS
+salacious/YP
+salacity/MS
+Saladin/M
+Salado/M
+salad/SM
+Salaidh/M
+salamander/MS
+salami/MS
+salary/SDMG
+Salas/M
+Salazar/M
+saleability/M
+sale/ABMS
+Saleem/M
+Salem/M
+Salerno/M
+salesclerk/SM
+salesgirl/SM
+saleslady/S
+salesman/M
+salesmanship/SM
+salesmen
+salespeople/M
+salesperson/MS
+salesroom/M
+saleswoman
+saleswomen
+salience/MS
+saliency
+salient/SY
+Salim/M
+Salina/MS
+saline/S
+salinger
+Salinger/M
+salinity/MS
+Salisbury/M
+Salish/M
+saliva/MS
+salivary
+salivate/XNGSD
+salivation/M
+Salk/M
+Sallee/M
+Salle/M
+Sallie/M
+Salli/M
+sallowness/MS
+sallow/TGRDSP
+Sallust/M
+Sallyanne/M
+Sallyann/M
+sally/GSDM
+Sally/M
+salmonellae
+salmonella/M
+Salmon/M
+salmon/SM
+Sal/MY
+Saloma/M
+Salome/M
+Salomi/M
+Salomo/M
+Salomone/M
+Salomon/M
+Salonika/M
+salon/SM
+saloonkeeper
+saloon/MS
+salsa/MS
+salsify/M
+SALT
+saltcellar/SM
+salted/UC
+salter/M
+salt/GZTPMDRS
+saltine/MS
+saltiness/SM
+saltness/M
+Salton/M
+saltpeter/SM
+salts/C
+saltshaker/S
+saltwater
+salty/RSPT
+salubriousness/M
+salubrious/YP
+salubrity/M
+salutariness/M
+salutary/P
+salutation/SM
+salutatory/S
+saluter/M
+salute/RSDG
+Salvadoran/S
+Salvadorian/S
+Salvador/M
+salvageable
+salvage/MGRSD
+salvager/M
+salvation/MS
+Salvatore/M
+salve/GZMDSR
+salver/M
+Salvidor/M
+salvo/GMDS
+Salween/M
+Salyut/M
+Salz/M
+SAM
+Samantha/M
+Samara/M
+Samaria/M
+Samaritan/MS
+samarium/MS
+Samarkand/M
+samba/GSDM
+sameness/MS
+same/SP
+Sam/M
+Sammie/M
+Sammy/M
+Samoa
+Samoan/S
+Samoset/M
+samovar/SM
+Samoyed/M
+sampan/MS
+sampler/M
+sample/RSDJGMZ
+sampling/M
+Sampson/M
+Samsonite/M
+Samson/M
+Samuele/M
+Samuel/SM
+Samuelson/M
+samurai/M
+San'a
+Sana/M
+sanatorium/MS
+Sanborn/M
+Sanchez/M
+Sancho/M
+sanctification/M
+sanctifier/M
+sanctify/RSDGNX
+sanctimoniousness/MS
+sanctimonious/PY
+sanctimony/MS
+sanctioned/U
+sanction/SMDG
+sanctity/SM
+sanctuary/MS
+sanctum/SM
+sandal/MDGS
+sandalwood/SM
+sandbagged
+sandbagging
+sandbag/MS
+sandbank/SM
+sandbar/S
+sandblaster/M
+sandblast/GZSMRD
+sandbox/MS
+Sandburg/M
+sandcastle/S
+Sande/M
+Sanderling/M
+sander/M
+Sander/M
+Sanderson/M
+sandhill
+sandhog/SM
+Sandia/M
+Sandie/M
+Sandi/M
+sandiness/S
+Sandinista
+sandlot/SM
+sandlotter/S
+sandman/M
+sandmen
+Sand/MRZ
+Sandor/M
+Sandoval/M
+sandpaper/DMGS
+sandpile
+sandpiper/MS
+sandpit/M
+Sandra/M
+Sandro/M
+sand/SMDRGZ
+sandstone/MS
+sandstorm/SM
+Sandusky/M
+sandwich/SDMG
+Sandye/M
+Sandy/M
+sandy/PRT
+saned
+sane/IRYTP
+saneness/MS
+saneness's/I
+sanes
+Sanford/M
+Sanforized
+Sanger/M
+sangfroid/S
+sangria/SM
+Sang/RM
+sang/S
+sanguinary
+sanguined
+sanguine/F
+sanguinely
+sanguineness/M
+sanguineous/F
+sanguines
+sanguining
+Sanhedrin/M
+saning
+sanitarian/S
+sanitarium/SM
+sanitary/S
+sanitate/NX
+sanitation/M
+sanitizer/M
+sanitize/RSDZG
+sanity/SIM
+sank
+Sankara/M
+San/M
+sans
+sanserif
+Sanskritic
+Sanskritize/M
+Sanskrit/M
+Sansone/M
+Sanson/M
+Santa/M
+Santana/M
+Santayana/M
+Santeria
+Santiago/M
+Santo/MS
+sapience/MS
+sapient
+sapless
+sapling/SM
+sap/MS
+sapped
+sapper/SM
+Sapphira/M
+Sapphire/M
+sapphire/MS
+Sappho/M
+sappiness/SM
+sapping
+Sapporo/M
+sappy/RPT
+saprophyte/MS
+saprophytic
+sapsucker/SM
+sapwood/SM
+Saraann/M
+Saracen/MS
+Saragossa/M
+Sarah/M
+Sarajane/M
+Sarajevo/M
+Sara/M
+Saran/M
+saran/SM
+sarape's
+Sarasota/M
+Saratoga/M
+Saratov/M
+Sarawak/M
+sarcasm/MS
+sarcastic
+sarcastically
+sarcoma/MS
+sarcophagi
+sarcophagus/M
+sardine/SDMG
+Sardinia/M
+sardonic
+sardonically
+Saree/M
+Sarena/M
+Sarene/M
+Sarette/M
+Sargasso/M
+Sarge/M
+Sargent/M
+sarge/SM
+Sargon/M
+Sari/M
+sari/MS
+Sarina/M
+Sarine/M
+Sarita/M
+Sarnoff/M
+sarong/MS
+Saroyan/M
+sarsaparilla/MS
+Sarto/M
+sartorial/Y
+sartorius/M
+Sartre/M
+Sascha/M
+SASE
+Sasha/M
+sashay/GDS
+Sashenka/M
+sash/GMDS
+Saskatchewan/M
+Saskatoon/M
+Sask/M
+sassafras/MS
+sass/GDSM
+Sassoon/M
+sassy/TRS
+SAT
+satanic
+satanical/Y
+Satanism/M
+satanism/S
+Satanist/M
+satanist/S
+Satan/M
+satchel/SM
+sat/DG
+sateen/MS
+satellite/GMSD
+sate/S
+satiable/I
+satiate/GNXSD
+satiation/M
+satiety/MS
+satin/MDSG
+satinwood/MS
+satiny
+satire/SM
+satiric
+satirical/Y
+satirist/SM
+satirize/DSG
+satirizes/U
+satisfaction/ESM
+satisfactorily/U
+satisfactoriness/MU
+satisfactory/UP
+satisfiability/U
+satisfiable/U
+satisfied/UE
+satisfier/M
+satisfies/E
+satisfy/GZDRS
+satisfying/EU
+satisfyingly
+Sat/M
+satori/SM
+satrap/SM
+saturated/CUA
+saturater/M
+saturates/A
+saturate/XDRSNG
+saturation/M
+Saturday/MS
+saturnalia
+Saturnalia/M
+saturnine/Y
+Saturn/M
+Satyanarayanan/M
+satyriases
+satyriasis/M
+satyric
+satyr/MS
+sauce/DSRGZM
+saucepan/SM
+saucer/M
+saucily
+sauciness/S
+saucy/TRP
+Saudi/S
+Saud/M
+Saudra/M
+sauerkraut/SM
+Saukville/M
+Saul/M
+Sault/M
+sauna/DMSG
+Sauncho/M
+Saunder/SM
+Saunderson/M
+Saundra/M
+saunter/DRSG
+saurian/S
+sauropod/SM
+sausage/MS
+Saussure/M
+sauté/DGS
+Sauternes/M
+Sauveur/M
+savage/GTZYPRSD
+Savage/M
+savageness/SM
+savagery/MS
+Savannah/M
+savanna/MS
+savant/SM
+saved/U
+saveloy/M
+saver/M
+save/ZGJDRSB
+Savina/M
+Savior/M
+savior/SM
+Saviour/M
+Savonarola/M
+savored/U
+savorer/M
+savorier
+savoriest
+savoriness/S
+savoringly/S
+savoring/Y
+savor/SMRDGZ
+savory/UMPS
+Savoyard/M
+Savoy/M
+savoy/SM
+savvy/GTRSD
+sawbones/M
+sawbuck/SM
+sawdust/MDSG
+sawer/M
+sawfly/SM
+sawhorse/MS
+Saw/M
+sawmill/SM
+saw/SMDRG
+sawtooth
+Sawyere/M
+Sawyer/M
+sawyer/MS
+Saxe/M
+saxifrage/SM
+Sax/M
+sax/MS
+Saxon/SM
+Saxony/M
+saxophone/MS
+saxophonist/SM
+Saxton/M
+Sayer/M
+sayer/SM
+sayest
+saying/MS
+Sayre/MS
+says/M
+say/USG
+Say/ZMR
+SBA
+Sb/M
+SC
+scabbard/SGDM
+scabbed
+scabbiness/SM
+scabbing
+scabby/RTP
+scabies/M
+scabrousness/M
+scabrous/YP
+scab/SM
+scad/SM
+scaffolding/M
+scaffold/JGDMS
+scalability
+Scala/M
+scalar/SM
+scalawag/SM
+scald/GJRDS
+scaled/AU
+scale/JGZMBDSR
+scaleless
+scalene
+scaler/M
+scales/A
+scaliness/MS
+scaling/A
+scallion/MS
+scalloper/M
+scallop/GSMDR
+scalloping/M
+scalpel/SM
+scalper/M
+scalp/GZRDMS
+scalping/M
+scaly/TPR
+scammed
+scamming
+scamper/GD
+scampi/M
+scamp/RDMGZS
+scam/SM
+Scan
+scan/AS
+scandal/GMDS
+scandalized/U
+scandalize/GDS
+scandalmonger/SM
+scandalousness/M
+scandalous/YP
+Scandinavia/M
+Scandinavian/S
+scandium/MS
+scanned/A
+scanner/SM
+scanning/A
+scansion/SM
+scant/CDRSG
+scantest
+scantily
+scantiness/MS
+scantly
+scantness/MS
+scanty/TPRS
+scapegoat/SGDM
+scapegrace/MS
+scape/M
+scapulae
+scapula/M
+scapular/S
+scarab/SM
+Scaramouch/M
+Scarborough/M
+scarceness/SM
+scarce/RTYP
+scarcity/MS
+scar/DRMSG
+scarecrow/MS
+scaremongering/M
+scaremonger/SGM
+scarer/M
+scare/S
+scarface
+Scarface/M
+scarf/SDGM
+scarification/M
+scarify/DRSNGX
+scarily
+scariness/S
+scarlatina/MS
+Scarlatti/M
+Scarlet/M
+scarlet/MDSG
+Scarlett/M
+scarp/SDMG
+scarred
+scarring
+scarves/M
+scary/PTR
+scathe/DG
+scathed/U
+scathing/Y
+scatological
+scatology/SM
+scat/S
+scatted
+scatterbrain/MDS
+scatter/DRJZSG
+scatterer/M
+scattergun
+scattering/YM
+scatting
+scavenge/GDRSZ
+scavenger/M
+SCCS
+scenario/SM
+scenarist/MS
+scene/GMDS
+scenery/SM
+scenically
+scenic/S
+scented/U
+scent/GDMS
+scentless
+scent's/C
+scents/C
+scepter/DMSG
+scepters/U
+sceptically
+sch
+Schaefer/M
+Schaeffer/M
+Schafer/M
+Schaffner/M
+Schantz/M
+Schapiro/M
+Scheat/M
+Schedar/M
+schedule/ADSRG
+scheduled/U
+scheduler/MS
+schedule's
+Scheherazade/M
+Scheherezade/M
+Schelling/M
+schema/M
+schemata
+schematically
+schematic/S
+scheme/JSRDGMZ
+schemer/M
+schemta
+Schenectady/M
+scherzo/MS
+Schick/M
+Schiller/M
+schilling/SM
+schismatic/S
+schism/SM
+schist/SM
+schizoid/S
+schizomycetes
+schizophrenia/SM
+schizophrenically
+schizophrenic/S
+schizo/S
+schlemiel/MS
+schlepped
+schlepping
+schlep/S
+Schlesinger/M
+Schliemann/M
+Schlitz/M
+schlock/SM
+schlocky/TR
+Schloss/M
+schmaltz/MS
+schmaltzy/TR
+Schmidt/M
+Schmitt/M
+schmoes
+schmo/M
+schmooze/GSD
+schmuck/MS
+Schnabel/M
+schnapps/M
+schnauzer/MS
+Schneider/M
+schnitzel/MS
+schnook/SM
+schnoz/S
+schnozzle/MS
+Schoenberg/M
+Schofield/M
+scholarship/MS
+scholar/SYM
+scholastically
+scholastic/S
+schoolbag/SM
+schoolbook/SM
+schoolboy/MS
+schoolchild/M
+schoolchildren
+schooldays
+schooled/U
+schoolfellow/S
+schoolfriend
+schoolgirlish
+schoolgirl/MS
+schoolhouse/MS
+schooling/M
+schoolmarmish
+schoolmarm/MS
+schoolmaster/SGDM
+schoolmate/MS
+schoolmistress/MS
+schoolroom/SM
+schoolteacher/MS
+schoolwork/SM
+schoolyard/SM
+school/ZGMRDJS
+schooner/SM
+Schopenhauer/M
+Schottky/M
+Schrieffer/M
+Schrödinger/M
+Schroeder/M
+Schroedinger/M
+Schubert/M
+Schultz/M
+Schulz/M
+Schumacher/M
+Schuman/M
+Schumann/M
+schussboomer/S
+schuss/SDMG
+Schuster/M
+Schuyler/M
+Schuylkill/M
+Schwab/M
+Schwartzkopf/M
+Schwartz/M
+Schwarzenegger/M
+schwa/SM
+Schweitzer/M
+Schweppes/M
+Schwinger/M
+Schwinn/M
+sci
+sciatica/SM
+sciatic/S
+science/FMS
+scientifically/U
+scientific/U
+scientist/SM
+Scientology/M
+scimitar/SM
+scintilla/MS
+scintillate/GNDSX
+scintillation/M
+scintillator/SM
+scion/SM
+Scipio/M
+scissor/SGD
+scleroses
+sclerosis/M
+sclerotic/S
+Sc/M
+scoffer/M
+scofflaw/MS
+scoff/RDGZS
+scolder/M
+scold/GSJRD
+scolioses
+scoliosis/M
+scollop's
+sconce/SDGM
+scone/SM
+scooper/M
+scoop/SRDMG
+scooter/M
+scoot/SRDGZ
+scope/DSGM
+Scopes/M
+scops
+scorbutic
+scorcher/M
+scorching/Y
+scorch/ZGRSD
+scoreboard/MS
+scorecard/MS
+scored/M
+scorekeeper/SM
+scoreless
+scoreline
+score/ZMDSRJG
+scorner/M
+scornfulness/M
+scornful/PY
+scorn/SGZMRD
+scorpion/SM
+Scorpio/SM
+Scorpius/M
+Scorsese/M
+Scotchgard/M
+Scotchman/M
+Scotchmen
+scotch/MSDG
+scotchs
+Scotch/S
+Scotchwoman
+Scotchwomen
+Scotia/M
+Scotian/M
+Scotland/M
+Scot/MS
+Scotsman/M
+Scotsmen
+Scotswoman
+Scotswomen
+Scottie/SM
+Scotti/M
+Scottish
+Scott/M
+Scottsdale/M
+Scotty's
+scoundrel/YMS
+scourer/M
+scourge/MGRSD
+scourger/M
+scouring/M
+scour/SRDGZ
+scouter/M
+scouting/M
+scoutmaster/SM
+Scout's
+scout/SRDMJG
+scow/DMGS
+scowler/M
+scowl/SRDG
+scrabble/DRSZG
+scrabbler/M
+Scrabble/SM
+scragged
+scragging
+scraggly/TR
+scraggy/TR
+scrag/SM
+scrambler/MS
+scrambler's/U
+scramble/UDSRG
+scrammed
+scramming
+scram/S
+Scranton/M
+scrapbook/SM
+scraper/M
+scrape/S
+scrapheap/SM
+scrapped
+scrapper/SM
+scrapping
+scrappy/RT
+scrap/SGZJRDM
+scrapyard/S
+scratched/U
+scratcher/M
+scratches/M
+scratchily
+scratchiness/S
+scratch/JDRSZG
+scratchy/TRP
+scrawler/M
+scrawl/GRDS
+scrawly/RT
+scrawniness/MS
+scrawny/TRP
+screamer/M
+screaming/Y
+scream/ZGSRD
+screecher/M
+screech/GMDRS
+screechy/TR
+screed/MS
+scree/DSM
+screened/U
+screening/M
+screenplay/MS
+screen/RDMJSG
+screenwriter/MS
+screwball/SM
+screwdriver/SM
+screwer/M
+screw/GUSD
+screwiness/S
+screw's
+screwup
+screwworm/MS
+screwy/RTP
+Scriabin/M
+scribal
+scribble/JZDRSG
+scribbler/M
+scribe/CDRSGIK
+scriber/MKIC
+scribe's
+Scribner/MS
+scrimmager/M
+scrimmage/RSDMG
+scrimp/DGS
+scrimshaw/GSDM
+scrim/SM
+Scripps/M
+scrip/SM
+scripted/U
+script/FGMDS
+scriptural/Y
+scripture/MS
+Scripture/MS
+scriptwriter/SM
+scriptwriting/M
+scrivener/M
+scriven/ZR
+scrod/M
+scrofula/MS
+scrofulous
+scrollbar/SM
+scroll/GMDSB
+Scrooge/MS
+scrooge/SDMG
+scrota
+scrotal
+scrotum/M
+scrounge/ZGDRS
+scroungy/TR
+scrubbed
+scrubber/MS
+scrubbing
+scrubby/TR
+scrub/S
+scruffily
+scruffiness/S
+scruff/SM
+scruffy/PRT
+Scruggs/M
+scrummage/MG
+scrum/MS
+scrumptious/Y
+scrunch/DSG
+scrunchy/S
+scruple/SDMG
+scrupulosity/SM
+scrupulousness's
+scrupulousness/US
+scrupulous/UPY
+scrutable/I
+scrutinized/U
+scrutinizer/M
+scrutinize/RSDGZ
+scrutinizingly/S
+scrutinizing/UY
+scrutiny/MS
+SCSI
+scuba/SDMG
+scudded
+scudding
+Scud/M
+scud/S
+scuff/GSD
+scuffle/SDG
+sculler/M
+scullery/MS
+Sculley/M
+scullion/MS
+scull/SRDMGZ
+sculptor/MS
+sculptress/MS
+sculpt/SDG
+sculptural/Y
+sculpture/SDGM
+scumbag/S
+scummed
+scumming
+scum/MS
+scummy/TR
+scupper/SDMG
+scurf/MS
+scurfy/TR
+scurrility/MS
+scurrilousness/MS
+scurrilous/PY
+scurry/GJSD
+scurvily
+scurviness/M
+scurvy/SRTP
+scutcheon/SM
+scuttlebutt/MS
+scuttle/MGSD
+scuzzy/RT
+Scylla/M
+scythe/SDGM
+Scythia/M
+SD
+SDI
+SE
+seabed/S
+seabird/S
+seaboard/MS
+Seaborg/M
+seaborne
+Seabrook/M
+seacoast/MS
+seafare/JRZG
+seafarer/M
+seafood/MS
+seafront/MS
+Seagate/M
+seagoing
+Seagram/M
+seagull/S
+seahorse/S
+sealant/MS
+sealed/AU
+sealer/M
+seal/MDRSGZ
+sealskin/SM
+seals/UA
+seamail
+seamanship/SM
+seaman/YM
+seamer/M
+seaminess/M
+seamlessness/M
+seamless/PY
+seam/MNDRGS
+seams/I
+seamstress/MS
+Seamus/M
+sea/MYS
+seamy/TRP
+Seana/M
+séance/SM
+Sean/M
+seaplane/SM
+seaport/SM
+seaquake/M
+Seaquarium/M
+searcher/AM
+searching/YS
+searchlight/SM
+search/RSDAGZ
+sear/DRSJGT
+searing/Y
+Sears/M
+seascape/SM
+seashell/MS
+seashore/SM
+seasickness/SM
+seasick/P
+seaside/SM
+seasonableness/M
+seasonable/UP
+seasonably/U
+seasonality
+seasonal/Y
+seasoned/U
+seasoner/M
+seasoning/M
+season/JRDYMBZSG
+seatbelt
+seated/A
+seater/M
+seating/SM
+SEATO
+seat's
+Seattle/M
+seat/UDSG
+seawall/S
+seaward/S
+seawater/S
+seaway/MS
+seaweed/SM
+seaworthinesses
+seaworthiness/MU
+seaworthy/TRP
+sebaceous
+Sebastian/M
+Sebastiano/M
+Sebastien/M
+seborrhea/SM
+SEC
+secant/SM
+secede/GRSD
+secessionist/MS
+secession/MS
+secludedness/M
+secluded/YP
+seclude/GSD
+seclusion/SM
+seclusive
+Seconal
+secondarily
+secondary/PS
+seconder/M
+secondhand
+second/RDYZGSL
+secrecy/MS
+secretarial
+secretariat/MS
+secretaryship/MS
+secretary/SM
+secrete/XNS
+secretion/M
+secretiveness/S
+secretive/PY
+secretory
+secret/TVGRDYS
+sec/S
+sectarianism/MS
+sectarian/S
+sectary/MS
+sectionalism/MS
+sectionalized
+sectional/SY
+section/ASEM
+sectioned
+sectioning
+sect/ISM
+sectoral
+sectored
+sector/EMS
+sectoring
+sects/E
+secularism/MS
+secularist/MS
+secularity/M
+secularization/MS
+secularized/U
+secularize/GSD
+secular/SY
+secured/U
+securely/I
+secure/PGTYRSDJ
+security/MSI
+secy
+sec'y
+sedan/SM
+sedateness/SM
+sedate/PXVNGTYRSD
+sedation/M
+sedative/S
+sedentary
+Seder/SM
+sedge/SM
+Sedgwick/M
+sedgy/RT
+sedimentary
+sedimentation/SM
+sediment/SGDM
+sedition/SM
+seditiousness/M
+seditious/PY
+seducer/M
+seduce/RSDGZ
+seduction/MS
+seductiveness/MS
+seductive/YP
+seductress/SM
+sedulous/Y
+Seebeck/M
+seed/ADSG
+seedbed/MS
+seedcase/SM
+seeded/U
+seeder/MS
+seediness/MS
+seeding/S
+seedless
+seedling/SM
+seedpod/S
+seed's
+seedy/TPR
+seeings
+seeing's
+seeing/U
+seeker/M
+seek/GZSR
+seeking/Y
+Seeley/M
+See/M
+seem/GJSYD
+seeming/Y
+seemliness's
+seemliness/US
+seemly/UTPR
+seen/U
+seepage/MS
+seep/GSD
+seer/SM
+seersucker/MS
+sees
+seesaw/DMSG
+seethe/SDGJ
+see/U
+segmental/Y
+segmentation/SM
+segmented/U
+segment/SGDM
+Segovia/M
+segregant
+segregated/U
+segregate/XCNGSD
+segregation/CM
+segregationist/SM
+segregative
+Segre/M
+segue/DS
+segueing
+Segundo/M
+Se/H
+Seidel/M
+seigneur/MS
+seignior/SM
+Seiko/M
+seine/GZMDSR
+Seine/M
+seiner/M
+Seinfeld/M
+seismic
+seismically
+seismographer/M
+seismographic
+seismographs
+seismography/SM
+seismograph/ZMR
+seismologic
+seismological
+seismologist/MS
+seismology/SM
+seismometer/S
+seize/BJGZDSR
+seizer/M
+seizing/M
+seizin/MS
+seizor/MS
+seizure/MS
+Seka/M
+Sela/M
+Selassie/M
+Selby/M
+seldom
+selected/UAC
+selectional
+selection/MS
+selectiveness/M
+selective/YP
+selectivity/MS
+selectman/M
+selectmen
+selectness/SM
+selector/SM
+select/PDSVGB
+Selectric/M
+selects/A
+Selena/M
+selenate/M
+Selene/M
+selenite/M
+selenium/MS
+selenographer/SM
+selenography/MS
+Selestina/M
+Seleucid/M
+Seleucus/M
+self/GPDMS
+selfishness/SU
+selfish/PUY
+selflessness/MS
+selfless/YP
+selfness/M
+Selfridge/M
+selfsameness/M
+selfsame/P
+Selia/M
+Selie/M
+Selig/M
+Selim/M
+Selina/M
+Selinda/M
+Seline/M
+Seljuk/M
+Selkirk/M
+Sella/M
+sell/AZGSR
+seller/AM
+Sellers/M
+Selle/ZM
+sellout/MS
+Selma/M
+seltzer/S
+selvage/MGSD
+selves/M
+Selznick/M
+semantical/Y
+semanticist/SM
+semantic/S
+semantics/M
+semaphore/GMSD
+Semarang/M
+semblance/ASME
+semen/SM
+semester/SM
+semiannual/Y
+semiarid
+semiautomated
+semiautomatic/S
+semicircle/SM
+semicircular
+semicolon/MS
+semiconductor/SM
+semiconscious
+semidefinite
+semidetached
+semidrying/M
+semifinalist/MS
+semifinal/MS
+semilogarithmic
+semimonthly/S
+seminal/Y
+seminarian/MS
+seminar/SM
+seminary/MS
+Seminole/SM
+semiofficial
+semioticians
+semiotic/S
+semiotics/M
+semipermanent/Y
+semipermeable
+semiprecious
+semiprivate
+semiprofessional/YS
+semipublic
+semiquantitative/Y
+Semiramis/M
+semiretired
+semisecret
+semiskilled
+semi/SM
+semisolid/S
+semistructured
+semisweet
+Semite/SM
+Semitic/MS
+semitic/S
+semitone/SM
+semitrailer/SM
+semitrance
+semitransparent
+semitropical
+semivowel/MS
+semiweekly/S
+semiyearly
+semolina/SM
+sempiternal
+sempstress/SM
+Semtex
+sen
+Sen
+Sena/M
+senate/MS
+Senate/MS
+senatorial
+senator/MS
+Sendai/M
+sender/M
+sends/A
+send/SRGZ
+Seneca/MS
+Senegalese
+Senegal/M
+senescence/SM
+senescent
+senile/SY
+senility/MS
+seniority/SM
+senior/MS
+Senior/S
+Sennacherib/M
+senna/MS
+Sennett/M
+Señora/M
+senora/S
+senorita/S
+senor/MS
+sensately/I
+sensate/YNX
+sensationalism/MS
+sensationalist/S
+sensationalize/GSD
+sensational/Y
+sensation/M
+sens/DSG
+senselessness/SM
+senseless/PY
+sense/M
+sensibility/ISM
+sensibleness/MS
+sensible/PRST
+sensibly/I
+sensitiveness/MS
+sensitiveness's/I
+sensitives
+sensitive/YIP
+sensitivity/ISM
+sensitization/CSM
+sensitized/U
+sensitizers
+sensitize/SDCG
+sensor/MS
+sensory
+sensualist/MS
+sensuality/MS
+sensual/YF
+sensuousness/S
+sensuous/PY
+Sensurround/M
+sentence/SDMG
+sentential/Y
+sententious/Y
+sentience/ISM
+sentient/YS
+sentimentalism/SM
+sentimentalist/SM
+sentimentality/SM
+sentimentalization/SM
+sentimentalize/RSDZG
+sentimentalizes/U
+sentimental/Y
+sentiment/MS
+sentinel/GDMS
+sentry/SM
+sent/UFEA
+Seoul/M
+sepal/SM
+separability/MSI
+separableness/MI
+separable/PI
+separably/I
+separateness/MS
+separates/M
+separate/YNGVDSXP
+separation/M
+separatism/SM
+separatist/SM
+separator/SM
+Sephardi/M
+Sephira/M
+sepia/MS
+Sepoy/M
+sepses
+sepsis/M
+septa/M
+septate/N
+September/MS
+septennial/Y
+septet/MS
+septicemia/SM
+septicemic
+septic/S
+septillion/M
+sept/M
+Sept/M
+septuagenarian/MS
+Septuagint/MS
+septum/M
+sepulcher/MGSD
+sepulchers/UA
+sepulchral/Y
+seq
+sequel/MS
+sequenced/A
+sequence/DRSJZMG
+sequencer/M
+sequence's/F
+sequences/F
+sequent/F
+sequentiality/FM
+sequentialize/DSG
+sequential/YF
+sequester/SDG
+sequestrate/XGNDS
+sequestration/M
+sequin/SDMG
+sequitur
+Sequoia/M
+sequoia/MS
+Sequoya/M
+Serafin/M
+seraglio/SM
+serape/S
+seraphic
+seraphically
+seraphim's
+seraph/M
+seraphs
+sera's
+Serbia/M
+Serbian/S
+Serb/MS
+Serbo/M
+serenade/MGDRS
+serenader/M
+Serena/M
+serendipitous/Y
+serendipity/MS
+serene/GTYRSDP
+Serene/M
+sereneness/SM
+Serengeti/M
+serenity/MS
+sere/TGDRS
+serfdom/MS
+serf/MS
+Sergeant/M
+sergeant/SM
+serge/DSGM
+Sergei/M
+Serge/M
+Sergent/M
+Sergio/M
+serialization/MS
+serialize/GSD
+serial/MYS
+series/M
+serif/SMD
+serigraph/M
+serigraphs
+seriousness/SM
+serious/PY
+sermonize/GSD
+sermon/SGDM
+serological/Y
+serology/MS
+serons
+serous
+Serpens/M
+serpent/GSDM
+serpentine/GYS
+Serra/M
+Serrano/M
+serrate/GNXSD
+serration/M
+serried
+serum/MS
+servant/SDMG
+serve/AGCFDSR
+served/U
+server/MCF
+servers
+serviceability/SM
+serviceableness/M
+serviceable/P
+serviced/U
+serviceman/M
+servicemen
+service/MGSRD
+service's/E
+services/E
+servicewoman
+servicewomen
+serviette/MS
+servilely
+servileness/M
+serviles
+servile/U
+servility/SM
+serving/SM
+servitor/SM
+servitude/MS
+servomechanism/MS
+servomotor/MS
+servo/S
+sesame/MS
+sesquicentennial/S
+sessile
+session/SM
+setback/S
+Seth/M
+Set/M
+Seton/M
+set's
+setscrew/SM
+set/SIA
+settable/A
+sett/BJGZSMR
+settee/MS
+setter/M
+setting/AS
+setting's
+settle/AUDSG
+settlement/ASM
+settler/MS
+settling/S
+setup/MS
+Seumas/M
+Seurat/M
+Seuss/M
+Sevastopol/M
+sevenfold
+sevenpence
+seven/SMH
+seventeen/HMS
+seventeenths
+sevenths
+seventieths
+seventy/MSH
+severalfold
+severalty/M
+several/YS
+severance/SM
+severed/E
+severeness/SM
+severe/PY
+severing/E
+severity/MS
+Severn/M
+severs/E
+sever/SGTRD
+Severus/M
+Seville/M
+sewage/MS
+Seward/M
+sewerage/SM
+sewer/GSMD
+sewing/SM
+sewn
+sew/SAGD
+sexagenarian/MS
+sex/GMDS
+sexily
+sexiness/MS
+sexism/SM
+sexist/SM
+sexless
+sexologist/SM
+sexology/MS
+sexpot/SM
+Sextans/M
+sextant/SM
+sextet/SM
+sextillion/M
+Sexton/M
+sexton/MS
+sextuple/MDG
+sextuplet/MS
+sexuality/MS
+sexualized
+sexual/Y
+sexy/RTP
+Seychelles
+Seyfert
+Seymour/M
+sf
+SF
+Sgt
+shabbily
+shabbiness/SM
+shabby/RTP
+shack/GMDS
+shackler/M
+shackle's
+Shackleton/M
+shackle/UGDS
+shad/DRJGSM
+shaded/U
+shadeless
+shade/SM
+shadily
+shadiness/MS
+shading/M
+shadowbox/SDG
+shadower/M
+shadow/GSDRM
+shadowiness/M
+Shadow/M
+shadowy/TRP
+shady/TRP
+Shae/M
+Shafer/M
+Shaffer/M
+shafting/M
+shaft/SDMG
+shagged
+shagginess/SM
+shagging
+shaggy/TPR
+shag/MS
+shah/M
+shahs
+Shaina/M
+Shaine/M
+shakable/U
+shakably/U
+shakeable
+shakedown/S
+shaken/U
+shakeout/SM
+shaker/M
+Shaker/S
+Shakespearean/S
+Shakespeare/M
+Shakespearian
+shake/SRGZB
+shakeup/S
+shakily
+shakiness/S
+shaking/M
+shaky/TPR
+shale/SM
+shall
+shallot/SM
+shallowness/SM
+shallow/STPGDRY
+Shalna/M
+Shalne/M
+shalom
+Shalom/M
+shalt
+shamanic
+shaman/SM
+shamble/DSG
+shambles/M
+shamefaced/Y
+shamefulness/S
+shameful/YP
+shamelessness/SM
+shameless/PY
+shame/SM
+sham/MDSG
+shammed
+shammer
+shamming
+shammy's
+shampoo/DRSMZG
+shampooer/M
+shamrock/SM
+Shamus/M
+Shana/M
+Shanan/M
+Shanda/M
+Shandee/M
+Shandeigh/M
+Shandie/M
+Shandra/M
+shandy/M
+Shandy/M
+Shane/M
+Shanghai/GM
+Shanghaiing/M
+shanghai/SDG
+Shanie/M
+Shani/M
+shank/SMDG
+Shannah/M
+Shanna/M
+Shannan/M
+Shannen/M
+Shannon/M
+Shanon/M
+shan't
+Shanta/M
+Shantee/M
+shantis
+Shantung/M
+shantung/MS
+shanty/SM
+shantytown/SM
+shape/AGDSR
+shaped/U
+shapelessness/SM
+shapeless/PY
+shapeliness/S
+shapely/RPT
+shaper/S
+shape's
+Shapiro/M
+sharable/U
+Sharai/M
+Shara/M
+shard/SM
+shareable
+sharecropped
+sharecropper/MS
+sharecropping
+sharecrop/S
+share/DSRGZMB
+shared/U
+shareholder/MS
+shareholding/S
+sharer/M
+shareware/S
+Shari'a
+Sharia/M
+sharia/SM
+Shari/M
+Sharity/M
+shark/SGMD
+sharkskin/SM
+Sharla/M
+Sharleen/M
+Sharlene/M
+Sharline/M
+Sharl/M
+Sharona/M
+Sharon/M
+Sharpe/M
+sharpen/ASGD
+sharpened/U
+sharpener/S
+sharper/M
+sharpie/SM
+Sharp/M
+sharpness/MS
+sharp/SGTZXPYRDN
+sharpshooter/M
+sharpshooting/M
+sharpshoot/JRGZ
+sharpy's
+Sharron/M
+Sharyl/M
+Shasta/M
+shat
+shatter/DSG
+shattering/Y
+shatterproof
+Shaughn/M
+Shaula/M
+Shauna/M
+Shaun/M
+shave/DSRJGZ
+shaved/U
+shaver/M
+Shavian
+shaving/M
+Shavuot/M
+Shawano/M
+shawl/SDMG
+shaw/M
+Shaw/M
+Shawna/M
+Shawnee/SM
+Shawn/M
+Shaylah/M
+Shayla/M
+Shaylyn/M
+Shaylynn/M
+Shay/M
+shay/MS
+Shayna/M
+Shayne/M
+Shcharansky/M
+sh/DRS
+sheaf/MDGS
+Shea/M
+shearer/M
+shear/RDGZS
+sheather/M
+sheathe/UGSD
+sheath/GJMDRS
+sheathing/M
+sheaths
+sheave/SDG
+sheaves/M
+Sheba/M
+shebang/MS
+Shebeli/M
+Sheboygan/M
+she'd
+shedding
+Shedir/M
+sheds
+shed's
+shed/U
+Sheelagh/M
+Sheelah/M
+Sheela/M
+Sheena/M
+sheen/MDGS
+sheeny/TRSM
+sheepdog/SM
+sheepfold/MS
+sheepherder/MS
+sheepishness/SM
+sheepish/YP
+sheep/M
+sheepskin/SM
+Sheeree/M
+sheerness/S
+sheer/PGTYRDS
+sheeting/M
+sheetlike
+sheet/RDMJSG
+Sheetrock
+Sheffielder/M
+Sheffield/RMZ
+Sheffie/M
+Sheff/M
+Sheffy/M
+sheikdom/SM
+sheikh's
+sheik/SM
+Sheilah/M
+Sheila/M
+shekel/MS
+Shelagh/M
+Shela/M
+Shelba/M
+Shelbi/M
+Shelby/M
+Shelden/M
+Sheldon/M
+shelf/MDGS
+Shelia/M
+she'll
+shellacked
+shellacking/MS
+shellac/S
+shelled/U
+Shelley/M
+shellfire/SM
+shellfish/SM
+Shellie/M
+Shelli/M
+Shell/M
+shell/RDMGS
+Shelly/M
+Shel/MY
+shelter/DRMGS
+sheltered/U
+shelterer/M
+Shelton/M
+shelve/JRSDG
+shelver/M
+shelves/M
+shelving/M
+she/M
+Shem/M
+Shena/M
+Shenandoah/M
+shenanigan/SM
+Shenyang/M
+Sheol/M
+Shepard/M
+shepherd/DMSG
+shepherdess/S
+Shepherd/M
+Shep/M
+Sheppard/M
+Shepperd/M
+Sheratan/M
+Sheraton/M
+sherbet/MS
+sherd's
+Sheree/M
+Sheridan/M
+Sherie/M
+sheriff/SM
+Sherill/M
+Sherilyn/M
+Sheri/M
+Sherline/M
+Sherlocke/M
+sherlock/M
+Sherlock/M
+Sher/M
+Sherman/M
+Shermie/M
+Sherm/M
+Shermy/M
+Sherpa/SM
+Sherrie/M
+Sherri/M
+Sherry/M
+sherry/MS
+Sherwin/M
+Sherwood/M
+Sherwynd/M
+Sherye/M
+Sheryl/M
+Shetland/S
+Shevardnadze/M
+shew/GSD
+shewn
+shh
+shiatsu/S
+shibboleth/M
+shibboleths
+shielded/U
+shielder/M
+shield/MDRSG
+Shields/M
+shiftily
+shiftiness/SM
+shiftlessness/S
+shiftless/PY
+shift/RDGZS
+shifty/TRP
+Shi'ite
+Shiite/SM
+Shijiazhuang
+Shikoku/M
+shill/DJSG
+shillelagh/M
+shillelaghs
+shilling/M
+Shillong/M
+Shiloh/M
+shimmed
+shimmer/DGS
+shimmery
+shimming
+shimmy/DSMG
+shim/SM
+Shina/M
+shinbone/SM
+shindig/MS
+shiner/M
+shine/S
+shingle/MDRSG
+shingler/M
+shinguard
+shininess/MS
+shining/Y
+shinned
+shinning
+shinny/GDSM
+shin/SGZDRM
+shinsplints
+Shintoism/S
+Shintoist/MS
+Shinto/MS
+shiny/PRT
+shipboard/MS
+shipborne
+shipbuilder/M
+shipbuild/RGZJ
+shipload/SM
+shipman/M
+shipmate/SM
+shipmen
+shipment/AMS
+shipowner/MS
+shippable
+shipped/A
+shipper/SM
+shipping/MS
+ship's
+shipshape
+ship/SLA
+shipwreck/GSMD
+shipwright/MS
+shipyard/MS
+Shiraz/M
+shire/MS
+shirker/M
+shirk/RDGZS
+Shirlee/M
+Shirleen/M
+Shirlene/M
+Shirley/M
+Shirline/M
+Shirl/M
+Shir/M
+shirr/GJDS
+shirtfront/S
+shirting/M
+shirt/JDMSG
+shirtless
+shirtmake/R
+shirtmaker/M
+shirtsleeve/MS
+shirttail/S
+shirtwaist/SM
+shit/S!
+shitting/!
+shitty/RT!
+Shiva/M
+shiverer/M
+shiver/GDR
+shivery
+shiv/SZRM
+shivved
+shivving
+shlemiel's
+Shmuel/M
+shoal/SRDMGT
+shoat/SM
+shocker/M
+shocking/Y
+Shockley/M
+shockproof
+shock/SGZRD
+shoddily
+shoddiness/SM
+shoddy/RSTP
+shod/U
+shoehorn/GSMD
+shoeing
+shoelace/MS
+shoemaker/M
+shoemake/RZ
+shoe/MS
+shoer's
+shoeshine/MS
+shoestring/MS
+shoetree/MS
+shogunate/SM
+shogun/MS
+Shoji/M
+Sholom/M
+shone
+shoo/DSG
+shoofly
+shook/SM
+shooter/M
+shootout/MS
+shoot/SJRGZ
+shopkeeper/M
+shopkeep/RGZ
+shoplifter/M
+shoplifting/M
+shoplift/SRDGZ
+shop/MS
+shopped/M
+shopper/M
+shoppe/RSDGZJ
+shopping/M
+shoptalk/SM
+shopworn
+shorebird/S
+shore/DSRGMJ
+shoreline/SM
+Shorewood/M
+shoring/M
+shortage/MS
+shortbread/MS
+shortcake/SM
+shortchange/DSG
+shortcoming/MS
+shortcrust
+shortcut/MS
+shortcutting
+shortener/M
+shortening/M
+shorten/RDGJ
+shortfall/SM
+shorthand/DMS
+Shorthorn/M
+shorthorn/MS
+shortie's
+shortish
+shortlist/GD
+Short/M
+shortness/MS
+short/SGTXYRDNP
+shortsightedness/S
+shortsighted/YP
+shortstop/MS
+shortwave/SM
+shorty/SM
+Shoshana/M
+Shoshanna/M
+Shoshone/SM
+Shostakovitch/M
+shotgunned
+shotgunner
+shotgunning
+shotgun/SM
+shot/MS
+shotted
+shotting
+shoulder/GMD
+shouldn't
+should/TZR
+shout/SGZRDM
+shove/DSRG
+shoveler/M
+shovelful/MS
+shovel/MDRSZG
+shover/M
+showbiz
+showbizzes
+showboat/SGDM
+showcase/MGSD
+showdown/MS
+shower/GDM
+showery/TR
+show/GDRZJS
+showgirl/SM
+showily
+showiness/MS
+showing/M
+showman/M
+showmanship/SM
+showmen
+shown
+showoff/S
+showpiece/SM
+showplace/SM
+showroom/MS
+showy/RTP
+shpt
+shrank
+shrapnel/SM
+shredded
+shredder/MS
+shredding
+shred/MS
+Shreveport/M
+shrewdness/SM
+shrewd/RYTP
+shrew/GSMD
+shrewishness/M
+shrewish/PY
+shrieker/M
+shriek/SGDRMZ
+shrift/SM
+shrike/SM
+shrill/DRTGPS
+shrillness/MS
+shrilly
+shrimp/MDGS
+shrine/SDGM
+shrinkage/SM
+shrinker/M
+shrinking/U
+shrink/SRBG
+shrivel/GSD
+shriven
+shrive/RSDG
+Shropshire/M
+shroud/GSMD
+shrubbed
+shrubbery/SM
+shrubbing
+shrubby/TR
+shrub/SM
+shrugged
+shrugging
+shrug/S
+shrunk/N
+shtick/S
+shucker/M
+shuck/SGMRD
+shucks/S
+shudder/DSG
+shuddery
+shuffleboard/MS
+shuffled/A
+shuffle/GDSRZ
+shuffles/A
+shuffling/A
+Shulman/M
+Shu/M
+shunned
+shunning
+shun/S
+shunter/M
+shunt/GSRD
+Shurlocke/M
+Shurlock/M
+Shurwood/M
+shush/SDG
+shutdown/MS
+shuteye/SM
+shutoff/M
+shutout/SM
+shut/S
+shutterbug/S
+shutter/DMGS
+shuttering/M
+shutting
+shuttlecock/MDSG
+shuttle/MGDS
+shy/DRSGTZY
+shyer
+shyest
+Shylockian/M
+Shylock/M
+shyness/SM
+shyster/SM
+Siamese/M
+Siam/M
+Siana/M
+Sianna/M
+Sian's
+Sibbie/M
+Sibby/M
+Sibeal/M
+Sibelius/M
+Sibella/M
+Sibelle/M
+Sibel/M
+Siberia/M
+Siberian/S
+sibilance/M
+sibilancy/M
+sibilant/SY
+Sibilla/M
+Sibley/M
+sibling/SM
+Sib/M
+Sibylla/M
+Sibylle/M
+sibylline
+Sibyl/M
+sibyl/SM
+Siciliana/M
+Sicilian/S
+Sicily/M
+sickbay/M
+sickbed/S
+sickener/M
+sickening/Y
+sicken/JRDG
+sicker/Y
+sick/GXTYNDRSP
+sickie/SM
+sickish/PY
+sickle/SDGM
+sickliness/M
+sickly/TRSDPG
+sickness/MS
+sicko/S
+sickout/S
+sickroom/SM
+sic/S
+sidearm/S
+sideband/MS
+sidebar/MS
+sideboard/SM
+sideburns
+sidecar/MS
+sided/A
+sidedness
+side/ISRM
+sidekick/MS
+sidelight/SM
+sideline/MGDRS
+sidelong
+sideman/M
+sidemen
+sidepiece/S
+sidereal
+sider/FA
+sides/A
+sidesaddle/MS
+sideshow/MS
+sidesplitting
+sidestepped
+sidestepping
+sidestep/S
+sidestroke/GMSD
+sideswipe/GSDM
+sidetrack/SDG
+sidewalk/MS
+sidewall/MS
+sidewards
+sideway/SM
+sidewinder/SM
+siding/SM
+sidle/DSG
+Sid/M
+Sidnee/M
+Sidney/M
+Sidoney/M
+Sidonia/M
+Sidonnie/M
+SIDS
+siege/GMDS
+Siegel/M
+Siegfried/M
+Sieglinda/M
+Siegmund/M
+Siemens/M
+Siena/M
+sienna/SM
+Sierpinski/M
+sierra/SM
+siesta/MS
+sieve/GZMDS
+Siffre/M
+sifted/UA
+sifter/M
+sift/GZJSDR
+Sigfrid/M
+Sigfried/M
+SIGGRAPH/M
+sigh/DRG
+sigher/M
+sighs
+sighted/P
+sighter/M
+sighting/S
+sight/ISM
+sightless/Y
+sightliness/UM
+sightly/TURP
+sightread
+sightseeing/S
+sightsee/RZ
+Sigismond/M
+Sigismondo/M
+Sigismund/M
+Sigismundo/M
+Sig/M
+sigma/SM
+sigmoid
+Sigmund/M
+signal/A
+signaled
+signaler/S
+signaling
+signalization/S
+signalize/GSD
+signally
+signalman/M
+signalmen
+signals
+signal's
+signatory/SM
+signature/MS
+signboard/MS
+signed/FU
+signer/SC
+signet/SGMD
+sign/GARDCS
+significance/IMS
+significantly/I
+significant/YS
+signification/M
+signify/DRSGNX
+signing/S
+Signora/M
+signora/SM
+signore/M
+signori
+signories
+signorina/SM
+signorine
+Signor/M
+signor/SFM
+signpost/DMSG
+sign's
+signs/F
+Sigrid/M
+Sigurd/M
+Sigvard/M
+Sihanouk/M
+Sikhism/MS
+Sikh/MS
+Sikhs
+Sikkimese
+Sikkim/M
+Sikorsky/M
+silage/GMSD
+Silas/M
+Sileas/M
+siled
+Sile/M
+silence/MZGRSD
+silencer/M
+silentness/M
+silent/TSPRY
+Silesia/M
+silhouette/GMSD
+silica/SM
+silicate/SM
+siliceous
+silicide/M
+silicone/SM
+silicon/MS
+silicoses
+silicosis/M
+silken/DG
+silk/GXNDMS
+silkily
+silkiness/SM
+silkscreen/SM
+silkworm/MS
+silky/RSPT
+silliness/SM
+sill/MS
+silly/PRST
+silo/GSM
+siltation/M
+silt/MDGS
+siltstone/M
+silty/RT
+Silurian/S
+Silvain/M
+Silva/M
+Silvana/M
+Silvan/M
+Silvano/M
+Silvanus/M
+silverer/M
+silverfish/MS
+Silverman/M
+silver/RDYMGS
+silversmith/M
+silversmiths
+Silverstein/M
+silverware/SM
+silvery/RTP
+Silvester/M
+Silvia/M
+Silvie/M
+Silvio/M
+Si/M
+SIMD
+Simenon/M
+Simeon/M
+simian/S
+similar/EY
+similarity/EMS
+simile/SM
+similitude/SME
+Simla/M
+simmer/GSD
+Simmonds/M
+Simmons/M
+Simmonsville/M
+Sim/MS
+Simms/M
+Simona/M
+Simone/M
+Simonette/M
+simonize/SDG
+Simon/M
+Simonne/M
+simony/MS
+simpatico
+simper/GDS
+simpleminded/YP
+simpleness/S
+simple/RSDGTP
+simpleton/SM
+simplex/S
+simplicity/MS
+simplified/U
+simplify/ZXRSDNG
+simplistic
+simplistically
+simply
+Simpson/M
+simulacrum/M
+Simula/M
+SIMULA/M
+simulate/XENGSD
+simulation/ME
+simulative
+simulator/SEM
+simulcast/GSD
+simultaneity/SM
+simultaneousness/M
+simultaneous/YP
+Sinai/M
+Sinatra/M
+since
+sincere/IY
+sincereness/M
+sincerer
+sincerest
+sincerity/MIS
+Sinclair/M
+Sinclare/M
+Sindbad/M
+Sindee/M
+Sindhi/M
+sinecure/MS
+sinecurist/M
+sine/SM
+sinew/SGMD
+sinewy
+sinfulness/SM
+sinful/YP
+Singaporean/S
+Singapore/M
+sing/BGJZYDR
+Singborg/M
+singeing
+singer/M
+Singer/M
+singe/S
+singing/Y
+singlehanded/Y
+singleness/SM
+single/PSDG
+Singleton/M
+singleton/SM
+singletree/SM
+singlet/SM
+singsong/GSMD
+singularity/SM
+singularization/M
+singular/SY
+Sinhalese/M
+sinisterness/M
+sinister/YP
+sinistral/Y
+sinkable/U
+sinker/M
+sink/GZSDRB
+sinkhole/SM
+Sinkiang/M
+sinking/M
+sinlessness/M
+sinless/YP
+sin/MAGS
+sinned
+sinner/MS
+sinning
+sinter/DM
+sinuosity/MS
+sinuousities
+sinuousness/M
+sinuous/PY
+sinusitis/SM
+sinus/MS
+sinusoidal/Y
+sinusoid/MS
+Siobhan/M
+Siouxie/M
+Sioux/M
+siphon/DMSG
+siphons/U
+sipped
+sipper/SM
+sipping
+sip/S
+sired/C
+sire/MS
+siren/M
+sires/C
+siring/C
+Sirius/M
+sirloin/MS
+Sir/MS
+sirocco/MS
+sirred
+sirring
+sirup's
+sir/XGMNDS
+sisal/MS
+Sisely/M
+Sisile/M
+sis/S
+Sissie/M
+sissified
+Sissy/M
+sissy/TRSM
+sister/GDYMS
+sisterhood/MS
+sisterliness/MS
+sisterly/P
+sister's/A
+Sistine
+Sisyphean
+Sisyphus/M
+sit/AG
+sitarist/SM
+sitar/SM
+sitcom/SM
+site/DSJM
+sits
+sitter/MS
+sitting/SM
+situate/GNSDX
+situational/Y
+situationist
+situation/M
+situ/S
+situs/M
+Siusan/M
+Siva/M
+Siward/M
+sixfold
+sixgun
+six/MRSH
+sixpence/MS
+sixpenny
+sixshooter
+sixteen/HRSM
+sixteenths
+sixths
+sixth/Y
+sixtieths
+sixty/SMH
+sizableness/M
+sizable/P
+sized/UA
+size/GJDRSBMZ
+sizer/M
+sizes/A
+sizing/M
+sizzler/M
+sizzle/RSDG
+SJ
+Sjaelland/M
+SK
+ska/S
+skateboard/SJGZMDR
+skater/M
+skate/SM
+skat/JMDRGZ
+skedaddle/GSD
+skeet/RMS
+skein/MDGS
+skeletal/Y
+skeleton/MS
+Skell/M
+Skelly/M
+skeptical/Y
+skepticism/MS
+skeptic/SM
+sketchbook/SM
+sketcher/M
+sketchily
+sketchiness/MS
+sketch/MRSDZG
+sketchpad
+sketchy/PRT
+skew/DRSPGZ
+skewer/GDM
+skewing/M
+skewness/M
+skidded
+skidding
+skid/S
+skiff/GMDS
+skiing/M
+skilfully
+skill/DMSG
+skilled/U
+skillet/MS
+skillfulnesses
+skillfulness/MU
+skillful/YUP
+skilling/M
+skimmed
+skimmer/MS
+skimming/SM
+ski/MNJSG
+skimp/GDS
+skimpily
+skimpiness/MS
+skimpy/PRT
+skim/SM
+skincare
+skindive/G
+skinflint/MS
+skinhead/SM
+skinless
+skinned
+Skinner/M
+skinner/SM
+skinniness/MS
+skinning
+skinny/TRSP
+skin/SM
+skintight
+Skip/M
+skipped
+Skipper/M
+skipper/SGDM
+Skippie/M
+skipping
+Skipp/RM
+Skippy/M
+skip/S
+Skipton/M
+skirmisher/M
+skirmish/RSDMZG
+skirter/M
+skirting/M
+skirt/RDMGS
+skit/GSMD
+skitter/SDG
+skittishness/SM
+skittish/YP
+skittle/SM
+skivvy/GSDM
+skoal/SDG
+Skopje/M
+skulduggery/MS
+skulker/M
+skulk/SRDGZ
+skullcap/MS
+skullduggery's
+skull/SDM
+skunk/GMDS
+skycap/MS
+skydiver/SM
+skydiving/MS
+Skye/M
+skyhook
+skyjacker/M
+skyjack/ZSGRDJ
+Skylab/M
+skylarker/M
+skylark/SRDMG
+Skylar/M
+Skyler/M
+skylight/MS
+skyline/MS
+Sky/M
+sky/MDRSGZ
+skyrocket/GDMS
+skyscraper/M
+skyscrape/RZ
+skyward/S
+skywave
+skyway/M
+skywriter/MS
+skywriting/MS
+slabbed
+slabbing
+slab/MS
+slacken/DG
+slacker/M
+slackness/MS
+slack/SPGTZXYRDN
+Slade/M
+slagged
+slagging
+slag/MS
+slain
+slake/DSG
+slaked/U
+slalom/SGMD
+slammed
+slammer/S
+slamming
+slam/S
+slander/MDRZSG
+slanderousness/M
+slanderous/PY
+slang/SMGD
+slangy/TR
+slanting/Y
+slant/SDG
+slantwise
+slapdash/S
+slaphappy/TR
+slap/MS
+slapped
+slapper
+slapping
+slapstick/MS
+slash/GZRSD
+slashing/Y
+slater/M
+Slater/M
+slate/SM
+slather/SMDG
+slating/M
+slat/MDRSGZ
+slatted
+slattern/MYS
+slatting
+slaughterer/M
+slaughterhouse/SM
+slaughter/SJMRDGZ
+slave/DSRGZM
+slaveholder/SM
+slaver/GDM
+slavery/SM
+Slavic/M
+slavishness/SM
+slavish/YP
+Slav/MS
+Slavonic/M
+slaw/MS
+slay/RGZS
+sleaze/S
+sleazily
+sleaziness/SM
+sleazy/RTP
+sledded
+sledder/S
+sledding
+sledgehammer/MDGS
+sledge/SDGM
+sled/SM
+sleekness/S
+sleek/PYRDGTS
+sleeper/M
+sleepily
+sleepiness/SM
+sleeping/M
+sleeplessness/SM
+sleepless/YP
+sleepover/S
+sleep/RMGZS
+sleepwalker/M
+sleepwalk/JGRDZS
+sleepwear/M
+sleepyhead/MS
+sleepy/PTR
+sleet/DMSG
+sleety/TR
+sleeveless
+sleeve/SDGM
+sleeving/M
+sleigh/GMD
+sleighs
+sleight/SM
+sleken/DG
+slenderize/DSG
+slenderness/MS
+slender/RYTP
+slept
+Slesinger/M
+sleuth/GMD
+sleuths
+slew/DGS
+slice/DSRGZM
+sliced/U
+slicer/M
+slicker/M
+slickness/MS
+slick/PSYRDGTZ
+slider/M
+slide/S
+slid/GZDR
+slight/DRYPSTG
+slighter/M
+slighting/Y
+slightness/S
+slime/SM
+sliminess/S
+slimline
+slimmed
+slimmer/S
+slimmest
+slimming/S
+slimness/S
+slim/SPGYD
+slimy/PTR
+sling/GMRS
+slingshot/MS
+slings/U
+slink/GS
+slinky/RT
+slipcase/MS
+slipcover/GMDS
+slipknot/SM
+slippage/SM
+slipped
+slipper/GSMD
+slipperiness/S
+slippery/PRT
+slipping
+slipshod
+slip/SM
+slipstream/MDGS
+slipway/SM
+slither/DSG
+slithery
+slit/SM
+slitted
+slitter/S
+slitting
+sliver/GSDM
+slivery
+Sloane/M
+Sloan/M
+slobber/SDG
+slobbery
+slob/MS
+Slocum/M
+sloe/MS
+sloganeer/MG
+slogan/MS
+slogged
+slogging
+slog/S
+sloop/SM
+slop/DRSGZ
+sloped/U
+slope/S
+slopped
+sloppily
+sloppiness/SM
+slopping
+sloppy/RTP
+slosh/GSDM
+slothfulness/MS
+slothful/PY
+sloth/GDM
+sloths
+slot/MS
+slotted
+slotting
+slouch/DRSZG
+sloucher/M
+slouchy/RT
+slough/GMD
+sloughs
+Slovakia/M
+Slovakian/S
+Slovak/S
+Slovene/S
+Slovenia/M
+Slovenian/S
+slovenliness/SM
+slovenly/TRP
+sloven/YMS
+slowcoaches
+slowdown/MS
+slowish
+slowness/MS
+slow/PGTYDRS
+slowpoke/MS
+SLR
+sludge/SDGM
+sludgy/TR
+slue/MGDS
+sluggard/MS
+slugged
+slugger/SM
+slugging
+sluggishness/SM
+sluggish/YP
+slug/MS
+sluice/SDGM
+slumberer/M
+slumber/MDRGS
+slumberous
+slumlord/MS
+slummed
+slummer
+slumming
+slum/MS
+slummy/TR
+slump/DSG
+slung/U
+slunk
+slur/MS
+slurp/GSD
+slurred
+slurried/M
+slurring
+slurrying/M
+slurry/MGDS
+slushiness/SM
+slush/SDMG
+slushy/RTP
+slut/MS
+sluttish
+slutty/TR
+Sly/M
+slyness/MS
+sly/RTY
+smacker/M
+smack/SMRDGZ
+smallholders
+smallholding/MS
+smallish
+Small/M
+smallness/S
+smallpox/SM
+small/SGTRDP
+smalltalk
+smalltime
+Smallwood/M
+smarmy/RT
+smarten/GD
+smartness/S
+smartypants
+smart/YRDNSGTXP
+smasher/M
+smash/GZRSD
+smashing/Y
+smashup/S
+smattering/SM
+smearer/M
+smear/GRDS
+smeary/TR
+smeller/M
+smelliness/MS
+smell/SBRDG
+smelly/TRP
+smelter/M
+smelt/SRDGZ
+Smetana/M
+smidgen/MS
+smilax/MS
+smile/GMDSR
+smiley/M
+smilies
+smiling/UY
+smirch/SDG
+smirk/GSMD
+Smirnoff/M
+smite/GSR
+smiter/M
+smith/DMG
+smithereens
+Smithfield/M
+Smith/M
+smiths
+Smithsonian/M
+Smithson/M
+Smithtown/M
+smithy/SM
+smitten
+Smitty/M
+Sm/M
+smocking/M
+smock/SGMDJ
+smoggy/TR
+smog/SM
+smoke/GZMDSRBJ
+smokehouse/MS
+smokeless
+smoker/M
+smokescreen/S
+smokestack/MS
+Smokey/M
+smokiness/S
+smoking/M
+smoky/RSPT
+smoldering/Y
+smolder/SGD
+Smolensk/M
+Smollett/M
+smooch/SDG
+smoothen/DG
+smoother/M
+smoothie/SM
+smoothness/MS
+smooths
+smooth/TZGPRDNY
+smörgåsbord/SM
+smote
+smother/GSD
+SMSA/MS
+SMTP
+Smucker/M
+smudge/GSD
+smudginess/M
+smudgy/TRP
+smugged
+smugger
+smuggest
+smugging
+smuggle/JZGSRD
+smuggler/M
+smugness/MS
+smug/YSP
+smut/SM
+Smuts/M
+smutted
+smuttiness/SM
+smutting
+smutty/TRP
+Smyrna/M
+snack/SGMD
+snaffle/GDSM
+snafu/DMSG
+snagged
+snagging
+snag/MS
+snail/GSDM
+Snake
+snakebird/M
+snakebite/MS
+snake/DSGM
+snakelike
+snakeroot/M
+snaky/TR
+snapback/M
+snapdragon/MS
+snapped/U
+snapper/SM
+snappily
+snappiness/SM
+snapping/U
+snappishness/SM
+snappish/PY
+snappy/PTR
+snapshot/MS
+snapshotted
+snapshotting
+snap/US
+snare/DSRGM
+snarer/M
+snarf/JSGD
+snarler/M
+snarling/Y
+snarl/UGSD
+snarly/RT
+snatch/DRSZG
+snatcher/M
+snazzily
+snazzy/TR
+Snead/M
+sneaker/MD
+sneakily
+sneakiness/SM
+sneaking/Y
+sneak/RDGZS
+sneaky/PRT
+Sneed/M
+sneerer/M
+sneer/GMRDJS
+sneering/Y
+sneeze/SRDG
+Snell/M
+snicker/GMRD
+snick/MRZ
+snideness/M
+Snider/M
+snide/YTSRP
+sniffer/M
+sniff/GZSRD
+sniffle/GDRS
+sniffler/M
+sniffles/M
+snifter/MDSG
+snigger's
+sniper/M
+snipe/SM
+snipped
+snipper/SM
+snippet/SM
+snipping
+snippy/RT
+snip/SGDRZ
+snitch/GDS
+snit/SM
+sniveler/M
+snivel/JSZGDR
+Sn/M
+snobbery/SM
+snobbishness/S
+snobbish/YP
+snobby/RT
+snob/MS
+Snodgrass/M
+snood/SGDM
+snooker/GMD
+snook/SMRZ
+snooper/M
+snoop/SRDGZ
+Snoopy/M
+snoopy/RT
+snootily
+snootiness/MS
+snoot/SDMG
+snooty/TRP
+snooze/GSD
+snore/DSRGZ
+snorkel/ZGSRDM
+snorter/M
+snort/GSZRD
+snot/MS
+snotted
+snottily
+snottiness/SM
+snotting
+snotty/TRP
+snout/SGDM
+snowball/SDMG
+snowbank/SM
+Snowbelt/SM
+snowbird/SM
+snowblower/S
+snowboard/GZDRJS
+snowbound
+snowcapped
+snowdrift/MS
+snowdrop/MS
+snowfall/MS
+snowfield/MS
+snowflake/MS
+snow/GDMS
+snowily
+snowiness/MS
+Snow/M
+snowman/M
+snowmen
+snowmobile/GMDRS
+snowplough/M
+snowploughs
+snowplow/SMGD
+snowshed
+snowshoeing
+snowshoe/MRS
+snowshoer/M
+snowstorm/MS
+snowsuit/S
+snowy/RTP
+snubbed
+snubber
+snubbing
+snub/SP
+snuffbox/SM
+snuffer/M
+snuff/GZSYRD
+snuffle/GDSR
+snuffler/M
+snuffly/RT
+snugged
+snugger
+snuggest
+snugging
+snuggle/GDS
+snuggly
+snugness/MS
+snug/SYP
+Snyder/M
+so
+SO
+soaker/M
+soak/GDRSJ
+soapbox/DSMG
+soapiness/S
+soap/MDRGS
+soapstone/MS
+soapsud/S
+soapy/RPT
+soar/DRJSG
+soarer/M
+soaring/Y
+sobbed
+sobbing/Y
+soberer/M
+soberness/SM
+sober/PGTYRD
+sobriety/SIM
+sobriquet/MS
+sob/SZR
+Soc
+soccer/MS
+sociabilities
+sociability/IM
+sociable/S
+sociably/IU
+socialism/SM
+socialistic
+socialist/SM
+socialite/SM
+sociality/M
+socialization/SM
+socialized/U
+socializer/M
+socialize/RSDG
+socially/U
+social/SY
+societal/Y
+society/MS
+socio
+sociobiology/M
+sociocultural/Y
+sociodemographic
+socioeconomically
+socioeconomic/S
+sociolinguistics/M
+sociological/MY
+sociologist/SM
+sociology/SM
+sociometric
+sociometry/M
+sociopath/M
+sociopaths
+socket/SMDG
+sock/GDMS
+Socorro/M
+Socrates/M
+Socratic/S
+soc/S
+soda/SM
+sodded
+sodden/DYPSG
+soddenness/M
+sodding
+Soddy/M
+sodium/MS
+sod/MS
+sodomite/MS
+sodomize/GDS
+Sodom/M
+sodomy/SM
+soever
+sofa/SM
+Sofia/M
+Sofie/M
+softball/MS
+softbound
+softener/M
+soften/ZGRD
+softhearted
+softie's
+softness/MS
+soft/SPXTYNR
+software/MS
+softwood/SM
+softy/SM
+soggily
+sogginess/S
+soggy/RPT
+Soho/M
+soigné
+soiled/U
+soil/SGMD
+soirée/SM
+sojourn/RDZGSM
+solace/GMSRD
+solacer/M
+solaria
+solarium/M
+solar/S
+solder/RDMSZG
+soldier/MDYSG
+soldiery/MS
+sold/RU
+solecism/MS
+soled/FA
+solemness
+solemnify/GSD
+solemnity/MS
+solemnization/SM
+solemnize/GSD
+solemnness/SM
+solemn/PTRY
+solenoid/MS
+soler/F
+soles/IFA
+sole/YSP
+sol/GSMDR
+solicitation/S
+solicited/U
+solicitor/MS
+solicitousness/S
+solicitous/YP
+solicit/SDG
+solicitude/MS
+solidarity/MS
+solidi
+solidification/M
+solidify/NXSDG
+solidity/S
+solidness/SM
+solid/STYRP
+solidus/M
+soliloquies
+soliloquize/DSG
+soliloquy/M
+soling/NM
+solipsism/MS
+solipsist/S
+Solis/M
+solitaire/SM
+solitary/SP
+solitude/SM
+Sollie/M
+Solly/M
+Sol/MY
+solo/DMSG
+soloist/SM
+Solomon/SM
+Solon/M
+Soloviev/M
+solstice/SM
+solubility/IMS
+soluble/SI
+solute/ENAXS
+solute's
+solution/AME
+solvable/UI
+solvating
+solve/ABSRDZG
+solved/EU
+solvency/IMS
+solvent/IS
+solvently
+solvent's
+solver/MEA
+solves/E
+solving/E
+Solzhenitsyn/M
+Somalia/M
+Somalian/S
+Somali/MS
+soma/M
+somatic
+somberness/SM
+somber/PY
+sombre
+sombrero/SM
+somebody'll
+somebody/SM
+someday
+somehow
+someone'll
+someone/SM
+someplace/M
+somersault/DSGM
+Somerset/M
+somerset/S
+somersetted
+somersetting
+Somerville/M
+something/S
+sometime/S
+someway/S
+somewhat/S
+somewhere/S
+some/Z
+sommelier/SM
+Somme/M
+somnambulism/SM
+somnambulist/SM
+somnolence/MS
+somnolent/Y
+Somoza/M
+sonar/SM
+sonata/MS
+sonatina/SM
+Sondheim/M
+Sondra/M
+Sonenberg/M
+songbag
+songbird/SM
+songbook/S
+songfest/MS
+songfulness/M
+songful/YP
+Songhai/M
+Songhua/M
+song/MS
+songster/MS
+songstress/SM
+songwriter/SM
+songwriting
+Sonia/M
+sonic/S
+Sonja/M
+Son/M
+sonnet/MDSG
+Sonnie/M
+Sonni/M
+Sonnnie/M
+Sonny/M
+sonny/SM
+Sonoma/M
+Sonora/M
+sonority/S
+sonorousness/SM
+sonorous/PY
+son/SMY
+Sontag/M
+sonuvabitch
+Sonya/M
+Sony/M
+soonish
+soon/TR
+soothe
+soother/M
+sooth/GZTYSRDMJ
+soothingness/M
+soothing/YP
+sooths
+soothsayer/M
+soothsay/JGZR
+soot/MGDS
+sooty/RT
+SOP
+Sophey/M
+Sophia/SM
+Sophie/M
+Sophi/M
+sophism/SM
+sophister/M
+sophistical
+sophisticatedly
+sophisticated/U
+sophisticate/XNGDS
+sophistication/MU
+sophistic/S
+sophist/RMS
+sophistry/SM
+Sophoclean
+Sophocles/M
+sophomore/SM
+sophomoric
+Sophronia/M
+soporifically
+soporific/SM
+sopped
+sopping/S
+soppy/RT
+soprano/SM
+sop/SM
+Sopwith/M
+sorbet/SM
+Sorbonne/M
+sorcerer/MS
+sorceress/S
+sorcery/MS
+Sorcha/M
+sordidness/SM
+sordid/PY
+sorehead/SM
+soreness/S
+Sorensen/M
+Sorenson/M
+sore/PYTGDRS
+sorghum/MS
+sorority/MS
+sorrel/SM
+Sorrentine/M
+sorrily
+sorriness/SM
+sorrower/M
+sorrowfulness/SM
+sorrowful/YP
+sorrow/GRDMS
+sorry/PTSR
+sorta
+sortable
+sorted/U
+sorter/MS
+sort/FSAGD
+sortieing
+sortie/MSD
+sort's
+sos
+SOS
+Sosa/M
+Sosanna/M
+Soto/M
+sot/SM
+sottish
+soubriquet's
+soufflé/MS
+sough/DG
+soughs
+sought/U
+soulfulness/MS
+soulful/YP
+soulless/Y
+soul/MDS
+sound/AUD
+soundboard/MS
+sounders
+sounder's
+sounder/U
+soundest
+sounding/AY
+soundings
+sounding's
+soundless/Y
+soundly/U
+soundness/UMS
+soundproof/GSD
+soundproofing/M
+sound's
+sounds/A
+soundtrack/MS
+soupçon/SM
+soup/GMDS
+Souphanouvong/M
+soupy/RT
+source/ASDMG
+sourceless
+sourdough
+sourdoughs
+sourish
+sourness/MS
+sourpuss/MS
+sour/TYDRPSG
+Sousa/M
+sousaphone/SM
+sous/DSG
+souse
+sou/SMH
+Southampton/M
+southbound
+southeastern
+southeaster/YM
+Southeast/MS
+southeast/RZMS
+southeastward/S
+southerly/S
+souther/MY
+southerner/M
+Southerner/MS
+southernisms
+southernmost
+southern/PZSYR
+Southey/M
+Southfield/M
+southing/M
+southland/M
+South/M
+southpaw/MS
+south/RDMG
+souths
+Souths
+southward/S
+southwestern
+southwester/YM
+Southwest/MS
+southwest/RMSZ
+southwestward/S
+souvenir/SM
+sou'wester
+sovereignty/MS
+sovereign/YMS
+soviet/MS
+Soviet/S
+sow/ADGS
+sowbelly/M
+sowens/M
+sower/DS
+Soweto/M
+sown/A
+sox's
+soybean/MS
+Soyinka/M
+soy/MS
+Soyuz/M
+Spaatz/M
+spacecraft/MS
+space/DSRGZMJ
+spaceflight/S
+spaceman/M
+spacemen
+spaceport/SM
+spacer/M
+spaceship/MS
+spacesuit/MS
+spacewalk/GSMD
+Spacewar/M
+spacewoman
+spacewomen
+spacey
+spacial
+spacier
+spaciest
+spaciness
+spacing/M
+spaciousness/SM
+spacious/PY
+Spackle
+spade/DSRGM
+spadeful/SM
+spader/M
+spadework/SM
+spadices
+spadix/M
+Spafford/M
+spaghetti/SM
+Spahn/M
+Spain/M
+spake
+Spalding/M
+Spam/M
+spa/MS
+Span
+spandex/MS
+spandrels
+spangle/GMDS
+Spanglish/S
+Spaniard/SM
+spanielled
+spanielling
+spaniel/SM
+Spanish/M
+spanker/M
+spanking/M
+spank/SRDJG
+span/MS
+spanned/U
+spanner/SM
+spanning
+SPARC/M
+SPARCstation/M
+spar/DRMGTS
+spareness/MS
+spare/PSY
+spareribs
+sparer/M
+sparing/UY
+sparker/M
+sparkle/DRSGZ
+sparkler/M
+Sparkman/M
+Sparks
+spark/SGMRD
+sparky/RT
+sparling/SM
+sparred
+sparrer
+sparring/U
+sparrow/MS
+sparseness/S
+sparse/YP
+sparsity/S
+spars/TR
+Spartacus/M
+Sparta/M
+spartan
+Spartan/S
+spasm/GSDM
+spasmodic
+spasmodically
+spastic/S
+spate/SM
+spathe/MS
+spatiality/M
+spatial/Y
+spat/MS
+spatted
+spatter/DGS
+spatterdock/M
+spatting
+spatula/SM
+spavin/DMS
+spawner/M
+spawn/MRDSG
+spay/DGS
+SPCA
+speakable/U
+speakeasy/SM
+speaker/M
+Speaker's
+speakership/M
+speaking/U
+speak/RBGZJS
+spearer/M
+spearfish/SDMG
+spearhead/GSDM
+spearmint/MS
+spear/MRDGS
+Spears
+spec'd
+specialism/MS
+specialist/MS
+specialization/SM
+specialized/U
+specialize/GZDSR
+specializing/U
+special/SRYP
+specialty/MS
+specie/MS
+specif
+specifiability
+specifiable
+specifiably
+specifically
+specification/SM
+specificity/S
+specific/SP
+specified/U
+specifier/SM
+specifies
+specify/AD
+specifying
+specimen/SM
+spec'ing
+speciousness/SM
+specious/YP
+speck/GMDS
+speckle/GMDS
+spec/SM
+spectacle/MSD
+spectacular/SY
+spectator/SM
+specter/DMS
+specter's/A
+spectralness/M
+spectral/YP
+spectra/M
+spectrogram/MS
+spectrographically
+spectrograph/M
+spectrography/M
+spectrometer/MS
+spectrometric
+spectrometry/M
+spectrophotometer/SM
+spectrophotometric
+spectrophotometry/M
+spectroscope/SM
+spectroscopic
+spectroscopically
+spectroscopy/SM
+spectrum/M
+specularity
+specular/Y
+speculate/VNGSDX
+speculation/M
+speculative/Y
+speculator/SM
+sped
+speech/GMDS
+speechlessness/SM
+speechless/YP
+speedboat/GSRM
+speedboating/M
+speeder/M
+speedily
+speediness/SM
+speedometer/MS
+speed/RMJGZS
+speedster/SM
+speedup/MS
+speedway/SM
+speedwell/MS
+speedy/PTR
+speer/M
+speleological
+speleologist/S
+speleology/MS
+spellbinder/M
+spellbind/SRGZ
+spellbound
+spelldown/MS
+spelled/A
+speller/M
+spelling/M
+spell/RDSJGZ
+spells/A
+spelunker/MS
+spelunking/S
+Spencerian
+Spencer/M
+Spence/RM
+spender/M
+spend/SBJRGZ
+spendthrift/MS
+Spenglerian
+Spengler/M
+Spense/MR
+Spenserian
+Spenser/M
+spent/U
+spermatophyte/M
+spermatozoa
+spermatozoon/M
+spermicidal
+spermicide/MS
+sperm/SM
+Sperry/M
+spew/DRGZJS
+spewer/M
+SPF
+sphagnum/SM
+sphere/SDGM
+spherical/Y
+spheric/S
+spherics/M
+spheroidal/Y
+spheroid/SM
+spherule/MS
+sphincter/SM
+Sphinx/M
+sphinx/MS
+Spica/M
+spic/DGM
+spicebush/M
+spice/SM
+spicily
+spiciness/SM
+spicule/MS
+spicy/PTR
+spider/SM
+spiderweb/S
+spiderwort/M
+spidery/TR
+Spiegel/M
+Spielberg/M
+spiel/GDMS
+spier/M
+spiffy/TDRSG
+spigot/MS
+spike/GMDSR
+Spike/M
+spiker/M
+spikiness/SM
+spiky/PTR
+spillage/SM
+Spillane/M
+spillover/SM
+spill/RDSG
+spillway/SM
+spinach/MS
+spinal/YS
+spindle/JGMDRS
+spindly/RT
+spinelessness/M
+spineless/YP
+spine/MS
+spinet/SM
+spininess/M
+spinnability/M
+spinnaker/SM
+spinneret/MS
+spinner/SM
+spinning/SM
+Spinoza/M
+spin/S
+spinsterhood/SM
+spinsterish
+spinster/MS
+spiny/PRT
+spiracle/SM
+spiraea's
+spiral/YDSG
+spire/AIDSGF
+spirea/MS
+spire's
+spiritedness/M
+spirited/PY
+spirit/GMDS
+spiritless
+spirits/I
+spiritualism/SM
+spiritualistic
+spiritualist/SM
+spirituality/SM
+spiritual/SYP
+spirituous
+spirochete/SM
+Spiro/M
+spiry/TR
+spitball/SM
+spite/CSDAG
+spitefuller
+spitefullest
+spitefulness/MS
+spiteful/PY
+spite's/A
+spitfire/SM
+spit/SGD
+spitted
+spitting
+spittle/SM
+spittoon/SM
+Spitz/M
+splashdown/MS
+splasher/M
+splash/GZDRS
+splashily
+splashiness/MS
+splashy/RTP
+splat/SM
+splatted
+splatter/DSG
+splatting
+splayfeet
+splayfoot/MD
+splay/SDG
+spleen/SM
+splendidness/M
+splendid/YRPT
+splendorous
+splendor/SM
+splenetic/S
+splicer/M
+splice/RSDGZJ
+spline/MSD
+splinter/GMD
+splintery
+splint/SGZMDR
+splits/M
+split/SM
+splittable
+splitter/MS
+splitting/S
+splodge/SM
+splotch/MSDG
+splotchy/RT
+splurge/GMDS
+splutterer/M
+splutter/RDSG
+Sp/M
+Spock/M
+spoilables
+spoilage/SM
+spoil/CSZGDR
+spoiled/U
+spoiler/MC
+spoilsport/SM
+Spokane/M
+spoke/DSG
+spoken/U
+spokeshave/MS
+spokesman/M
+spokesmen
+spokespeople
+spokesperson/S
+spokeswoman/M
+spokeswomen
+spoliation/MCS
+spongecake
+sponge/GMZRSD
+sponger/M
+sponginess/S
+spongy/TRP
+sponsor/DGMS
+sponsorship/S
+spontaneity/SM
+spontaneousness/M
+spontaneous/PY
+spoof/SMDG
+spookiness/MS
+spook/SMDG
+spooky/PRT
+spool/SRDMGZ
+spoonbill/SM
+spoonerism/SM
+spoonful/MS
+spoon/GSMD
+spoor/GSMD
+sporadically
+sporadic/Y
+spore/DSGM
+sporran/MS
+sportiness/SM
+sporting/Y
+sportiveness/M
+sportive/PY
+sportscast/RSGZM
+sportsmanlike/U
+sportsman/MY
+sportsmanship/MS
+sportsmen
+sportswear/M
+sportswoman/M
+sportswomen
+sportswriter/S
+sport/VGSRDM
+sporty/PRT
+Sposato/M
+spotlessness/MS
+spotless/YP
+spotlight/GDMS
+spotlit
+spot/MSC
+spotted/U
+spotter/MS
+spottily
+spottiness/SM
+spotting/M
+spotty/RTP
+spousal/MS
+spouse/GMSD
+spouter/M
+spout/SGRD
+sprain/SGD
+sprang/S
+sprat/SM
+sprawl/GSD
+sprayed/UA
+sprayer/M
+spray/GZSRDM
+sprays/A
+spreadeagled
+spreader/M
+spread/RSJGZB
+spreadsheet/S
+spreeing
+spree/MDS
+sprigged
+sprigging
+sprightliness/MS
+sprightly/PRT
+sprig/MS
+springboard/MS
+springbok/MS
+springeing
+springer/M
+Springfield/M
+springily
+springiness/SM
+springing/M
+springlike
+spring/SGZR
+Springsteen/M
+springtime/MS
+springy/TRP
+sprinkle/DRSJZG
+sprinkler/DM
+sprinkling/M
+Sprint/M
+sprint/SGZMDR
+sprite/SM
+spritz/GZDSR
+sprocket/DMGS
+sprocketed/U
+Sproul/M
+sprout/GSD
+spruce/GMTYRSDP
+spruceness/SM
+sprue/M
+sprung/U
+spryness/S
+spry/TRY
+SPSS
+spudded
+spudding
+spud/MS
+Spuds/M
+spume/DSGM
+spumone's
+spumoni/S
+spumy/TR
+spun
+spunk/GSMD
+spunky/SRT
+spurge/MS
+spuriousness/SM
+spurious/PY
+spur/MS
+spurn/RDSG
+spurred
+spurring
+spurt/SGD
+sputa
+Sputnik
+sputnik/MS
+sputter/DRGS
+sputum/M
+spy/DRSGM
+spyglass/MS
+sq
+sqq
+sqrt
+squabbed
+squabber
+squabbest
+squabbing
+squabbler/M
+squabble/ZGDRS
+squab/SM
+squadded
+squadding
+squadron/MDGS
+squad/SM
+squalidness/SM
+squalid/PRYT
+squaller/M
+squall/GMRDS
+squally/RT
+squalor/SM
+squamous/Y
+squander/GSRD
+Squanto
+square/GMTYRSDP
+squareness/SM
+squarer/M
+Squaresville/M
+squarish
+squash/GSRD
+squashiness/M
+squashy/RTP
+squatness/MS
+squat/SPY
+squatted
+squatter/SMDG
+squattest
+squatting
+squawker/M
+squawk/GRDMZS
+squaw/SM
+squeaker/M
+squeakily
+squeakiness/S
+squeak/RDMGZS
+squeaky/RPT
+squealer/M
+squeal/MRDSGZ
+squeamishness/SM
+squeamish/YP
+squeegee/DSM
+squeegeeing
+squeeze/GZSRDB
+squeezer/M
+squelcher/M
+squelch/GDRS
+squelchy/RT
+squibbed
+Squibb/GM
+squibbing
+Squibbing/M
+squib/SM
+squidded
+squidding
+squid/SM
+squiggle/MGDS
+squiggly/RT
+squinter/M
+squint/GTSRD
+squinting/Y
+squirehood
+squire/SDGM
+squirm/SGD
+squirmy/TR
+squirrel/SGYDM
+squirter/M
+squirt/GSRD
+squish/GSD
+squishy/RTP
+Sr
+Srinagar/M
+SRO
+S's
+SS
+SSA
+SSE
+ssh
+s's/KI
+SSS
+SST
+SSW
+ST
+stabbed
+stabber/S
+stabbing/S
+stability/ISM
+stabilizability
+stabilization/CS
+stabilization's
+stabilize/CGSD
+stabilizer/MS
+stableman/M
+stablemate
+stablemen
+stableness/UM
+stable/RSDGMTP
+stabler/U
+stable's/F
+stables/F
+stablest/U
+stabling/M
+stably/U
+stab/YS
+staccato/S
+Stacee/M
+Stace/M
+Stacey/M
+Stacia/M
+Stacie/M
+Staci/M
+stackable
+stacker/M
+stack's
+stack/USDG
+Stacy/M
+stadias
+stadia's
+stadium/MS
+Stael/M
+Stafani/M
+staff/ADSG
+Staffard/M
+staffer/MS
+Stafford/M
+Staffordshire/M
+staffroom
+staff's
+Staford/M
+stag/DRMJSGZ
+stagecoach/MS
+stagecraft/MS
+stagehand/MS
+stager/M
+stage/SM
+stagestruck
+stagflation/SM
+stagged
+staggerer/M
+stagger/GSJDR
+staggering/Y
+staggers/M
+stagging
+staginess/M
+staging/M
+stagnancy/SM
+stagnant/Y
+stagnate/NGDSX
+stagnation/M
+stagy/PTR
+Stahl/M
+staidness/MS
+staid/YRTP
+stained/U
+stainer/M
+stainless/YS
+stain/SGRD
+staircase/SM
+stair/MS
+stairway/SM
+stairwell/MS
+stake/DSGM
+stakeholder/S
+stakeout/SM
+stalactite/SM
+stalag/M
+stalagmite/SM
+stalemate/SDMG
+staleness/MS
+stale/PGYTDSR
+Staley/M
+Stalingrad/M
+Stalinist
+Stalin/SM
+stalker/M
+stalk/MRDSGZJ
+stall/DMSJG
+stalled/I
+stallholders
+stallion/SM
+Stallone/M
+stalls/I
+stalwartness/M
+stalwart/PYS
+Sta/M
+stamen/MS
+Stamford/M
+stamina/SM
+staminate
+stammer/DRSZG
+stammerer/M
+stammering/Y
+stampede/MGDRS
+stampeder/M
+stamped/U
+stamper/M
+stamp/RDSGZJ
+stance/MIS
+stancher/M
+stanch/GDRST
+stanchion/SGMD
+standalone
+standardization/AMS
+standardized/U
+standardize/GZDSR
+standardizer/M
+standardizes/A
+standard/YMS
+standby
+standbys
+standee/MS
+Standford/M
+standing/M
+Standish/M
+standoffish
+standoff/SM
+standout/MS
+standpipe/MS
+standpoint/SM
+stand/SJGZR
+standstill/SM
+Stanfield/M
+Stanford/M
+Stanislas/M
+Stanislaus/M
+Stanislavsky/M
+Stanislaw/M
+stank/S
+Stanleigh/M
+Stanley/M
+Stanly/M
+stannic
+stannous
+Stanton/M
+Stanwood/M
+Stan/YMS
+stanza/MS
+staph/M
+staphs
+staphylococcal
+staphylococci
+staphylococcus/M
+stapled/U
+stapler/M
+Stapleton/M
+staple/ZRSDGM
+starboard/SDMG
+starchily
+starchiness/MS
+starch/MDSG
+starchy/TRP
+stardom/MS
+star/DRMGZS
+stardust/MS
+stare/S
+starfish/SM
+Stargate/M
+stargaze/ZGDRS
+staring/U
+Starkey/M
+Stark/M
+starkness/MS
+stark/SPGTYRD
+Starla/M
+Starlene/M
+starless
+starlet/MS
+starlight/MS
+starling/MS
+Starlin/M
+starlit
+Star/M
+starred
+starring
+Starr/M
+starry/TR
+starship
+starstruck
+start/ASGDR
+starter/MS
+startle/GDS
+startling/PY
+startup/SM
+starvation/MS
+starveling/M
+starver/M
+starve/RSDG
+stash/GSD
+stasis/M
+stat/DRSGV
+statecraft/MS
+stated/U
+statehood/MS
+statehouse/S
+Statehouse's
+state/IGASD
+statelessness/MS
+stateless/P
+stateliness/MS
+stately/PRT
+statement/MSA
+Staten/M
+stater/M
+stateroom/SM
+stateside
+state's/K
+states/K
+statesmanlike
+statesman/MY
+statesmanship/SM
+statesmen
+stateswoman
+stateswomen
+statewide
+statical/Y
+static/S
+statics/M
+stationarity
+stationary/S
+stationer/M
+stationery/MS
+stationmaster/M
+station/SZGMDR
+statistical/Y
+statistician/MS
+statistic/MS
+Statler/M
+stator/SM
+statuary/SM
+statue/MSD
+statuesque/YP
+statuette/MS
+stature/MS
+status/SM
+statute/SM
+statutorily
+statutory/P
+Stauffer/M
+staunchness/S
+staunch/PDRSYTG
+stave/DGM
+Stavro/MS
+stay/DRGZS
+stayer/M
+std
+STD
+stdio
+steadfastness/MS
+steadfast/PY
+steadily/U
+steadiness's
+steadiness/US
+steading/M
+stead/SGDM
+steady/DRSUTGP
+steakhouse/SM
+steak/SM
+stealer/M
+stealing/M
+steal/SRHG
+stealthily
+stealthiness/MS
+stealth/M
+stealths
+stealthy/PTR
+steamboat/MS
+steamer/MDG
+steamfitter/S
+steamfitting/S
+steamily
+steaminess/SM
+steamroller/DMG
+steamroll/GZRDS
+steam/SGZRDMJ
+steamship/SM
+steamy/RSTP
+Stearne/M
+Stearn/SM
+steed/SM
+Steele/M
+steeliness/SM
+steelmaker/M
+steel/SDMGZ
+steelworker/M
+steelwork/ZSMR
+steelyard/MS
+steely/TPRS
+Steen/M
+steepen/GD
+steeper/M
+steeplebush/M
+steeplechase/GMSD
+steeplejack/MS
+steeple/MS
+steepness/S
+steep/SYRNDPGTX
+steerage/MS
+steerer/M
+steer/SGBRDJ
+steersman/M
+steersmen
+steeves
+Stefa/M
+Stefania/M
+Stefanie/M
+Stefan/M
+Stefano/M
+Steffane/M
+Steffen/M
+Steffie/M
+Steffi/M
+stegosauri
+stegosaurus/S
+Steinbeck/SM
+Steinberg/M
+Steinem/M
+Steiner/M
+Steinmetz/M
+Stein/RM
+stein/SGZMRD
+Steinway/M
+Stella/M
+stellar
+stellated
+Ste/M
+stemless
+stemmed/U
+stemming
+stem/MS
+stemware/MS
+stench/GMDS
+stenciler/M
+stencil/GDRMSZ
+stencillings
+Stendhal/M
+Stendler/M
+Stengel/M
+stenographer/SM
+stenographic
+stenography/SM
+steno/SM
+stenotype/M
+stentorian
+stepbrother/MS
+stepchild/M
+stepchildren
+stepdaughter/MS
+stepfather/SM
+Stepha/M
+Stephana/M
+Stephanie/M
+Stephani/M
+Stephan/M
+Stephannie/M
+Stephanus/M
+Stephenie/M
+Stephen/MS
+Stephenson/M
+Stephie/M
+Stephi/M
+Stephine/M
+stepladder/SM
+step/MIS
+stepmother/SM
+stepparent/SM
+stepper/M
+steppe/RSDGMZ
+steppingstone/S
+stepsister/SM
+stepson/SM
+stepwise
+stereographic
+stereography/M
+stereo/GSDM
+stereophonic
+stereoscope/MS
+stereoscopic
+stereoscopically
+stereoscopy/M
+stereotype/GMZDRS
+stereotypic
+stereotypical/Y
+sterile
+sterility/SM
+sterilization/SM
+sterilized/U
+sterilize/RSDGZ
+sterilizes/A
+Sterling/M
+sterling/MPYS
+sterlingness/M
+sternal
+Sternberg/M
+Sterne/M
+Stern/M
+sternness/S
+Sterno
+stern/SYRDPGT
+sternum/SM
+steroidal
+steroid/MS
+stertorous
+Stesha/M
+stethoscope/SM
+stet/MS
+stetson/MS
+Stetson/SM
+stetted
+stetting
+Steuben/M
+Stevana/M
+stevedore/GMSD
+Steve/M
+Stevena/M
+Steven/MS
+Stevenson/M
+Stevie/M
+Stevy/M
+steward/DMSG
+stewardess/SM
+Steward/M
+stewardship/MS
+Stewart/M
+stew/GDMS
+st/GBJ
+sticker/M
+stickily
+stickiness/SM
+stickleback/MS
+stickle/GZDR
+stickler/M
+stick/MRDSGZ
+stickpin/SM
+stickup/SM
+sticky/GPTDRS
+Stieglitz/M
+stiffen/JZRDG
+stiff/GTXPSYRND
+stiffness/MS
+stifle/GJRSD
+stifler/M
+stifling/Y
+stigma/MS
+stigmata
+stigmatic/S
+stigmatization/C
+stigmatizations
+stigmatization's
+stigmatize/DSG
+stigmatized/U
+stile/GMDS
+stiletto/MDSG
+stillbirth/M
+stillbirths
+stillborn/S
+stiller/MI
+stillest
+Stillman/M
+Stillmann/M
+stillness/MS
+still/RDIGS
+Stillwell/M
+stilted/PY
+stilt/GDMS
+Stilton/MS
+Stimson/M
+stimulant/MS
+stimulated/U
+stimulate/SDVGNX
+stimulation/M
+stimulative/S
+stimulator/M
+stimulatory
+stimuli/M
+stimulus/MS
+Stine/M
+stinger/M
+sting/GZR
+stingily
+stinginess/MS
+stinging/Y
+stingray/MS
+stingy/RTP
+stinkbug/S
+stinker/M
+stink/GZRJS
+stinking/Y
+stinkpot/M
+Stinky/M
+stinky/RT
+stinter/M
+stinting/U
+stint/JGRDMS
+stipendiary
+stipend/MS
+stipple/JDRSG
+stippler/M
+stipulate/XNGSD
+stipulation/M
+Stirling/M
+stirred/U
+stirrer/SM
+stirring/YS
+stirrup/SM
+stir/S
+stitch/ASDG
+stitcher/M
+stitchery/S
+stitching/MS
+stitch's
+St/M
+stoat/SM
+stochastic
+stochastically
+stochasticity
+stockade/SDMG
+stockbreeder/SM
+stockbroker/MS
+stockbroking/S
+stocker/SM
+Stockhausen/M
+stockholder/SM
+Stockholm/M
+stockily
+stockiness/SM
+stockinet's
+stockinette/S
+stocking/MDS
+stockist/MS
+stockpile/GRSD
+stockpiler/M
+stockpot/MS
+stockroom/MS
+stock's
+stock/SGAD
+stocktaking/MS
+Stockton/M
+stockyard/SM
+stocky/PRT
+Stoddard/M
+stodge/M
+stodgily
+stodginess/S
+stodgy/TRP
+stogy/SM
+stoical/Y
+stoichiometric
+stoichiometry/M
+stoicism/SM
+Stoicism/SM
+stoic/MS
+Stoic/MS
+stoke/DSRGZ
+stoker/M
+stokes/M
+Stokes/M
+STOL
+stole/MDS
+stolen
+stolidity/S
+stolidness/S
+stolid/PTYR
+stolon/SM
+stomachache/MS
+stomacher/M
+stomach/RSDMZG
+stomachs
+stomp/DSG
+stonecutter/SM
+stone/DSRGM
+Stonehenge/M
+stoneless
+Stone/M
+stonemason/MS
+stoner/M
+stonewall/GDS
+stoneware/MS
+stonewashed
+stonework/SM
+stonewort/M
+stonily
+stoniness/MS
+stony/TPR
+stood
+stooge/SDGM
+stool/SDMG
+stoop/SDG
+stopcock/MS
+stopgap/SM
+stoplight/SM
+stopover/MS
+stoppable/U
+stoppage/MS
+Stoppard/M
+stopped/U
+stopper/GMDS
+stopping/M
+stopple/GDSM
+stop's
+stops/M
+stop/US
+stopwatch/SM
+storage/SM
+store/ADSRG
+storefront/SM
+storehouse/MS
+storekeeper/M
+storekeep/ZR
+storeroom/SM
+store's
+stork/SM
+stormbound
+stormer/M
+Stormie/M
+stormily
+Stormi/M
+storminess/S
+Storm/M
+storm/SRDMGZ
+stormtroopers
+Stormy/M
+stormy/PTR
+storyboard/MDSG
+storybook/MS
+story/GSDM
+storyline
+storyteller/SM
+storytelling/MS
+Stouffer/M
+stoup/SM
+stouten/DG
+stouthearted
+Stout/M
+stoutness/MS
+stout/STYRNP
+stove/DSRGM
+stovepipe/SM
+stover/M
+stowage/SM
+stowaway/MS
+Stowe/M
+stow/GDS
+Strabo/M
+straddler/M
+straddle/ZDRSG
+Stradivari/SM
+Stradivarius/M
+strafe/GRSD
+strafer/M
+straggle/GDRSZ
+straggly/RT
+straightaway/S
+straightedge/MS
+straightener/M
+straighten/ZGDR
+straightforwardness/MS
+straightforward/SYP
+straightjacket's
+straightness/MS
+straight/RNDYSTXGP
+straightway/S
+strain/ASGZDR
+strained/UF
+strainer/MA
+straining/F
+strains/F
+straiten/DG
+straitjacket/GDMS
+straitlaced
+straitness/M
+strait/XTPSMGYDNR
+stranded/P
+strand/SDRG
+strangeness/SM
+strange/PYZTR
+stranger/GMD
+stranglehold/MS
+strangle/JDRSZG
+strangles/M
+strangulate/NGSDX
+strangulation/M
+strapless/S
+strapped/U
+strapping/S
+strap's
+strap/US
+Strasbourg/M
+stratagem/SM
+strata/MS
+strategical/Y
+strategic/S
+strategics/M
+strategist/SM
+strategy/SM
+Stratford/M
+strati
+stratification/M
+stratified/U
+stratify/NSDGX
+stratigraphic
+stratigraphical
+stratigraphy/M
+stratosphere/SM
+stratospheric
+stratospherically
+stratum/M
+stratus/M
+Strauss
+Stravinsky/M
+strawberry/SM
+strawflower/SM
+straw/SMDG
+strayer/M
+stray/GSRDM
+streak/DRMSGZ
+streaker/M
+streaky/TR
+streamed/U
+streamer/M
+stream/GZSMDR
+streaming/M
+streamline/SRDGM
+streetcar/MS
+streetlight/SM
+street/SMZ
+streetwalker/MS
+streetwise
+Streisand/M
+strengthen/AGDS
+strengthener/MS
+strength/NMX
+strengths
+strenuousness/SM
+strenuous/PY
+strep/MS
+streptococcal
+streptococci
+streptococcus/M
+streptomycin/SM
+stress/DSMG
+stressed/U
+stressful/YP
+stretchability/M
+stretchable/U
+stretch/BDRSZG
+stretcher/DMG
+stretchy/TRP
+strew/GDHS
+strewn
+striae
+stria/M
+striate/DSXGN
+striated/U
+striation/M
+stricken
+Strickland/M
+strict/AF
+stricter
+strictest
+strictly
+strictness/S
+stricture/SM
+stridden
+stridency/S
+strident/Y
+strider/M
+stride/RSGM
+strife/SM
+strikebreaker/M
+strikebreaking/M
+strikebreak/ZGR
+strikeout/S
+striker/M
+strike/RSGZJ
+striking/Y
+Strindberg/M
+stringed
+stringency/S
+stringent/Y
+stringer/MS
+stringiness/SM
+stringing/M
+string's
+string/SAG
+stringy/RTP
+striper/M
+stripe/SM
+strip/GRDMS
+stripling/M
+stripped/U
+stripper/MS
+stripping
+stripteaser/M
+striptease/SRDGZM
+stripy/RT
+strive/JRSG
+striven
+striver/M
+strobe/SDGM
+stroboscope/SM
+stroboscopic
+strode
+stroke/ZRSDGM
+stroking/M
+stroller/M
+stroll/GZSDR
+Stromberg/M
+Stromboli/M
+Strom/M
+strongbow
+strongbox/MS
+Strongheart/M
+stronghold/SM
+strongish
+Strong/M
+strongman/M
+strongmen
+strongroom/MS
+strong/YRT
+strontium/SM
+strophe/MS
+strophic
+stropped
+stropping
+strop/SM
+strove
+struck
+structuralism/M
+structuralist/SM
+structural/Y
+structured/AU
+structureless
+structures/A
+structure/SRDMG
+structuring/A
+strudel/MS
+struggle/GDRS
+struggler/M
+strummed
+strumming
+strumpet/GSDM
+strum/S
+strung/UA
+strut/S
+strutted
+strutter/M
+strutting
+strychnine/MS
+Stuart/MS
+stubbed/M
+stubbing
+Stubblefield/MS
+stubble/SM
+stubbly/RT
+stubbornness/SM
+stubborn/SGTYRDP
+stubby/SRT
+stub/MS
+stuccoes
+stucco/GDM
+stuck/U
+studbook/SM
+studded
+studding/SM
+Studebaker/M
+studentship/MS
+student/SM
+studiedness/M
+studied/PY
+studier/SM
+studio/MS
+studiousness/SM
+studious/PY
+stud/MS
+study/AGDS
+stuffily
+stuffiness/SM
+stuffing/M
+stuff/JGSRD
+stuffy/TRP
+stultify/NXGSD
+Stu/M
+stumble/GZDSR
+stumbling/Y
+stumpage/M
+stumper/M
+stump/RDMSG
+stumpy/RT
+stung
+stunk
+stunned
+stunner/M
+stunning/Y
+stun/S
+stunted/P
+stunt/GSDM
+stupefaction/SM
+stupefy/DSG
+stupendousness/M
+stupendous/PY
+stupidity/SM
+stupidness/M
+stupid/PTYRS
+stupor/MS
+sturdily
+sturdiness/SM
+sturdy/SRPT
+sturgeon/SM
+Sturm/M
+stutter/DRSZG
+Stuttgart/M
+Stuyvesant/M
+sty/DSGM
+Stygian
+styled/A
+style/GZMDSR
+styles/A
+styli
+styling/A
+stylishness/S
+stylish/PY
+stylistically
+stylistic/S
+stylist/MS
+stylites
+stylization/MS
+stylize/DSG
+stylos
+stylus/SM
+stymieing
+stymie/SD
+stymy's
+styptic/S
+styrene/MS
+Styrofoam/S
+Styx/M
+suable
+Suarez/M
+suasion/EMS
+suaveness/S
+suave/PRYT
+suavity/SM
+subaltern/SM
+subarctic/S
+subareas
+Subaru/M
+subassembly/M
+subatomic/S
+subbasement/SM
+subbed
+subbing
+subbranch/S
+subcaste/M
+subcategorizing
+subcategory/SM
+subchain
+subclassifications
+subclass/MS
+subclauses
+subcommand/S
+subcommittee/SM
+subcompact/S
+subcomponent/MS
+subcomputation/MS
+subconcept
+subconsciousness/SM
+subconscious/PSY
+subconstituent
+subcontinental
+subcontinent/MS
+subcontractor/SM
+subcontract/SMDG
+subcultural
+subculture/GMDS
+subcutaneous/Y
+subdirectory/S
+subdistrict/M
+subdivide/SRDG
+subdivision/SM
+subdued/Y
+subdue/GRSD
+subduer/M
+subexpression/MS
+subfamily/SM
+subfield/MS
+subfile/SM
+subfreezing
+subgoal/SM
+subgraph
+subgraphs
+subgroup/SGM
+subharmonic/S
+subheading/M
+subhead/MGJS
+subhuman/S
+subindex/M
+subinterval/MS
+subj
+subject/GVDMS
+subjection/SM
+subjectiveness/M
+subjective/PSY
+subjectivist/S
+subjectivity/SM
+subjoin/DSG
+subjugate/NGXSD
+subjugation/M
+subjunctive/S
+sublayer
+sublease/DSMG
+sublet/S
+subletting
+sublimate/GNSDX
+sublimation/M
+sublime/GRSDTYP
+sublimeness/M
+sublimer/M
+subliminal/Y
+sublimity/SM
+sublist/SM
+subliterary
+sublunary
+submachine
+submarginal
+submarine/MZGSRD
+submariner/M
+submerge/DSG
+submergence/SM
+submerse/XNGDS
+submersible/S
+submersion/M
+submicroscopic
+submission/SAM
+submissiveness/MS
+submissive/PY
+submit/SA
+submittable
+submittal
+submitted/A
+submitter/S
+submitting/A
+submode/S
+submodule/MS
+sub/MS
+subnational
+subnet/SM
+subnetwork/SM
+subnormal/SY
+suboptimal
+suborbital
+suborder/MS
+subordinately/I
+subordinates/I
+subordinate/YVNGXPSD
+subordination/IMS
+subordinator
+subornation/SM
+suborn/GSD
+subpage
+subparagraph/M
+subpart/MS
+subplot/MS
+subpoena/GSDM
+subpopulation/MS
+subproblem/SM
+subprocess/SM
+subprofessional/S
+subprogram/SM
+subproject
+subproof/SM
+subquestion/MS
+subrange/SM
+subregional/Y
+subregion/MS
+subrogation/M
+subroutine/SM
+subsample/MS
+subschema/MS
+subscribe/ASDG
+subscriber/SM
+subscripted/U
+subscription/MS
+subscript/SGD
+subsection/SM
+subsegment/SM
+subsentence
+subsequence/MS
+subsequent/SYP
+subservience/SM
+subservient/SY
+subset/MS
+subsidence/MS
+subside/SDG
+subsidiarity
+subsidiary/MS
+subsidization/MS
+subsidized/U
+subsidizer/M
+subsidize/ZRSDG
+subsidy/MS
+subsistence/MS
+subsistent
+subsist/SGD
+subsocietal
+subsoil/DRMSG
+subsonic
+subspace/MS
+subspecies/M
+substance/MS
+substandard
+substantially/IU
+substantialness/M
+substantial/PYS
+substantiated/U
+substantiate/VGNSDX
+substantiation/MFS
+substantiveness/M
+substantive/PSYM
+substantivity
+substation/MS
+substerilization
+substitutability
+substituted/U
+substitute/NGVBXDRS
+substitutionary
+substitution/M
+substitutive/Y
+substrata
+substrate/MS
+substratum/M
+substring/S
+substructure/SM
+subsume/SDG
+subsurface/S
+subsystem/MS
+subtable/S
+subtask/SM
+subteen/SM
+subtenancy/MS
+subtenant/SM
+subtend/DS
+subterfuge/SM
+subterranean/SY
+subtest
+subtext/SM
+subtitle/DSMG
+subtleness/M
+subtle/RPT
+subtlety/MS
+subtly/U
+subtopic/SM
+subtotal/GSDM
+subtracter/M
+subtraction/MS
+subtract/SRDZVG
+subtrahend/SM
+subtree/SM
+subtropical
+subtropic/S
+subtype/MS
+subunit/SM
+suburbanite/MS
+suburbanization/MS
+suburbanized
+suburbanizing
+suburban/S
+suburbia/SM
+suburb/MS
+subvention/MS
+subversion/SM
+subversiveness/MS
+subversive/SPY
+subverter/M
+subvert/SGDR
+subway/MDGS
+subzero
+succeeder/M
+succeed/GDRS
+successfulness/M
+successful/UY
+succession/SM
+successiveness/M
+successive/YP
+success/MSV
+successor/MS
+successorship
+succinctness/SM
+succinct/RYPT
+succored/U
+succorer/M
+succor/SGZRDM
+succotash/SM
+succubus/M
+succulence/SM
+succulency/MS
+succulent/S
+succumb/SDG
+such
+suchlike
+sucker/DMG
+suck/GZSDRB
+suckle/SDJG
+suckling/M
+Sucre/M
+sucrose/MS
+suction/SMGD
+Sudanese/M
+Sudanic/M
+Sudan/M
+suddenness/SM
+sudden/YPS
+Sudetenland/M
+sud/S
+suds/DSRG
+sudsy/TR
+sued/DG
+suede/SM
+Suellen/M
+Sue/M
+suer/M
+suet/MS
+Suetonius/M
+suety
+sue/ZGDRS
+Suez/M
+sufferance/SM
+sufferer/M
+suffering/M
+suffer/SJRDGZ
+suffice/GRSD
+sufficiency/SIM
+sufficient/IY
+suffixation/S
+suffixed/U
+suffix/GMRSD
+suffocate/XSDVGN
+suffocating/Y
+Suffolk/M
+suffragan/S
+suffrage/MS
+suffragette/MS
+suffragist/SM
+suffuse/VNGSDX
+suffusion/M
+Sufi/M
+Sufism/M
+sugarcane/S
+sugarcoat/GDS
+sugarless
+sugarplum/MS
+sugar/SJGMD
+sugary/TR
+suggest/DRZGVS
+suggester/M
+suggestibility/SM
+suggestible
+suggestion/MS
+suggestiveness/MS
+suggestive/PY
+sugillate
+Suharto/M
+suicidal/Y
+suicide/GSDM
+Sui/M
+suitability/SU
+suitableness/S
+suitable/P
+suitably/U
+suitcase/MS
+suited/U
+suite/SM
+suiting/M
+suit/MDGZBJS
+suitor/SM
+Sukarno/M
+Sukey/M
+Suki/M
+sukiyaki/SM
+Sukkoth's
+Sukkot/S
+Sula/M
+Sulawesi/M
+Suleiman/M
+sulfaquinoxaline
+sulfa/S
+sulfate/MSDG
+sulfide/S
+sulfite/M
+sulfonamide/SM
+sulfur/DMSG
+sulfuric
+sulfurousness/M
+sulfurous/YP
+sulk/GDS
+sulkily
+sulkiness/S
+sulky/RSPT
+Sulla/M
+sullenness/MS
+sullen/TYRP
+sullied/U
+Sullivan/M
+sully/GSD
+Sully/M
+sulphate/SM
+sulphide/MS
+sulphuric
+sultana/SM
+sultanate/MS
+sultan/SM
+sultrily
+sultriness/SM
+sultry/PRT
+Sulzberger/M
+sumach's
+sumac/SM
+Sumatra/M
+Sumatran/S
+sumer/F
+Sumeria/M
+Sumerian/M
+summability/M
+summable
+summand/MS
+summarily
+summarization/MS
+summarized/U
+summarize/GSRDZ
+summarizer/M
+summary/MS
+summation/FMS
+summed
+Summerdale/M
+summerhouse/MS
+summer/SGDM
+Summer/SM
+summertime/MS
+summery/TR
+summing
+summit/GMDS
+summitry/MS
+summoner/M
+summon/JSRDGZ
+summons/MSDG
+sum/MRS
+Sumner/M
+sumo/SM
+sump/SM
+sumptuousness/SM
+sumptuous/PY
+Sumter/M
+Sun
+sunbaked
+sunbathe
+sunbather/M
+sunbathing/M
+sunbaths
+sunbath/ZRSDG
+sunbeam/MS
+Sunbelt/M
+sunblock/S
+sunbonnet/MS
+sunburn/GSMD
+sunburst/MS
+suncream
+sundae/MS
+Sundanese/M
+Sundas
+Sunday/MS
+sunder/SDG
+sundial/MS
+sundowner/M
+sundown/MRDSZG
+sundries
+sundry/S
+sunfish/SM
+sunflower/MS
+sunglass/MS
+Sung/M
+sung/U
+sunk/SN
+sunlamp/S
+sunless
+sunlight/MS
+sunlit
+sun/MS
+sunned
+Sunni/MS
+sunniness/SM
+sunning
+Sunnite/SM
+Sunny/M
+sunny/RSTP
+Sunnyvale/M
+sunrise/GMS
+sunroof/S
+sunscreen/S
+sunset/MS
+sunsetting
+sunshade/MS
+Sunshine/M
+sunshine/MS
+sunshiny
+sunspot/SM
+sunstroke/MS
+suntanned
+suntanning
+suntan/SM
+sunup/MS
+superabundance/MS
+superabundant
+superannuate/GNXSD
+superannuation/M
+superbness/M
+superb/YRPT
+supercargoes
+supercargo/M
+supercharger/M
+supercharge/SRDZG
+superciliousness/SM
+supercilious/PY
+supercity/S
+superclass/M
+supercomputer/MS
+supercomputing
+superconcept
+superconducting
+superconductivity/SM
+superconductor/SM
+supercooled
+supercooling
+supercritical
+superdense
+super/DG
+superego/SM
+supererogation/MS
+supererogatory
+superficiality/S
+superficial/SPY
+superfine
+superfix/M
+superfluity/MS
+superfluousness/S
+superfluous/YP
+superheat/D
+superheroes
+superhero/SM
+superhighway/MS
+superhumanness/M
+superhuman/YP
+superimpose/SDG
+superimposition/MS
+superintendence/S
+superintendency/SM
+superintendent/SM
+superintend/GSD
+superiority/MS
+Superior/M
+superior/SMY
+superlativeness/M
+superlative/PYS
+superlunary
+supermachine
+superman/M
+Superman/M
+supermarket/SM
+supermen
+supermodel
+supermom/S
+supernal
+supernatant
+supernaturalism/M
+supernaturalness/M
+supernatural/SPY
+supernormal/Y
+supernovae
+supernova/MS
+supernumerary/S
+superordinate
+superpose/BSDG
+superposition/MS
+superpower/MS
+superpredicate
+supersaturate/XNGDS
+supersaturation/M
+superscribe/GSD
+superscript/DGS
+superscription/SM
+superseder/M
+supersede/SRDG
+supersensitiveness/M
+supersensitive/P
+superset/MS
+supersonically
+supersonic/S
+supersonics/M
+superstar/SM
+superstition/SM
+superstitious/YP
+superstore/S
+superstructural
+superstructure/SM
+supertanker/SM
+supertitle/MSDG
+superuser/MS
+supervene/GSD
+supervention/S
+supervised/U
+supervise/SDGNX
+supervision/M
+supervisor/SM
+supervisory
+superwoman/M
+superwomen
+supineness/M
+supine/PSY
+supper/DMG
+supplanter/M
+supplant/SGRD
+supplemental/S
+supplementary/S
+supplementation/S
+supplementer/M
+supplement/SMDRG
+suppleness/SM
+supple/SPLY
+suppliant/S
+supplicant/MS
+supplicate/NGXSD
+supplication/M
+supplier/AM
+suppl/RDGT
+supply/MAZGSRD
+supportability/M
+supportable/UI
+supported/U
+supporter/M
+supporting/Y
+supportive/Y
+support/ZGVSBDR
+supposed/Y
+suppose/SRDBJG
+supposition/MS
+suppository/MS
+suppressant/S
+suppressed/U
+suppressible/I
+suppression/SM
+suppressive/P
+suppressor/S
+suppress/VGSD
+suppurate/NGXSD
+suppuration/M
+supp/YDRGZ
+supra
+supranational
+supranationalism/M
+suprasegmental
+supremacist/SM
+supremacy/SM
+supremal
+supremeness/M
+supreme/PSRTY
+supremo/M
+sup/RSZ
+supt
+Supt/M
+Surabaya/M
+Surat/M
+surcease/DSMG
+surcharge/MGSD
+surcingle/MGSD
+surd/M
+sured/I
+surefire
+surefooted
+surely
+sureness/MS
+sureness's/U
+sure/PU
+surer/I
+surest
+surety/SM
+surfaced/UA
+surface/GSRDPZM
+surfacer/AMS
+surfaces/A
+surfacing/A
+surfactant/SM
+surfboard/MDSG
+surfeit/SDRMG
+surfer/M
+surfing/M
+surf/SJDRGMZ
+surged/A
+surge/GYMDS
+surgeon/MS
+surgery/MS
+surges/A
+surgical/Y
+Suriname
+Surinamese
+Surinam's
+surliness/SM
+surly/TPR
+surmiser/M
+surmise/SRDG
+surmountable/IU
+surmount/DBSG
+surname/GSDM
+surpassed/U
+surpass/GDS
+surpassing/Y
+surplice/SM
+surplus/MS
+surplussed
+surplussing
+surprised/U
+surprise/MGDRSJ
+surpriser/M
+surprising/YU
+surrealism/MS
+surrealistic
+surrealistically
+surrealist/S
+surreality
+surreal/S
+surrender/DRSG
+surrenderer/M
+surreptitiousness/S
+surreptitious/PY
+surrey/SM
+surrogacy/S
+surrogate/SDMNG
+surrogation/M
+surrounding/M
+surround/JGSD
+surtax/SDGM
+surveillance/SM
+surveillant
+surveyed/A
+surveying/M
+survey/JDSG
+surveyor/MS
+surveys/A
+survivability/M
+survivable/U
+survivalist/S
+survival/MS
+survive/SRDBG
+survivor/MS
+survivorship/M
+Surya/M
+Sus
+Susana/M
+Susanetta/M
+Susan/M
+Susannah/M
+Susanna/M
+Susanne/M
+Susann/M
+susceptibilities
+susceptibility/IM
+susceptible/I
+Susette/M
+sushi/SM
+Susie/M
+Susi/M
+suspected/U
+suspecter/M
+suspect/GSDR
+suspecting/U
+suspend/DRZGS
+suspended/UA
+suspender/M
+suspenseful
+suspense/MXNVS
+suspension/AM
+suspensive/Y
+suspensor/M
+suspicion/GSMD
+suspiciousness/M
+suspicious/YP
+Susquehanna/M
+Sussex/M
+sustainability
+sustainable/U
+sustain/DRGLBS
+sustainer/M
+sustainment/M
+sustenance/MS
+Susy/M
+Sutherland/M
+Sutherlan/M
+sutler/MS
+Sutton/M
+suture/GMSD
+SUV
+Suva/M
+Suwanee/M
+Suzanna/M
+Suzanne/M
+Suzann/M
+suzerain/SM
+suzerainty/MS
+Suzette/M
+Suzhou/M
+Suzie/M
+Suzi/M
+Suzuki/M
+Suzy/M
+Svalbard/M
+svelte/RPTY
+Svend/M
+Svengali
+Sven/M
+Sverdlovsk/M
+Svetlana/M
+SW
+swabbed
+swabbing
+swabby/S
+Swabian/SM
+swab/MS
+swaddle/SDG
+swagged
+swagger/GSDR
+swagging
+swag/GMS
+Swahili/MS
+swain/SM
+SWAK
+swallower/M
+swallow/GDRS
+swallowtail/SM
+swam
+swami/SM
+swamper/M
+swampland/MS
+swamp/SRDMG
+swampy/RPT
+Swanee/M
+swankily
+swankiness/MS
+swank/RDSGT
+swanky/PTRS
+swanlike
+swan/MS
+swanned
+swanning
+Swansea/M
+Swanson/M
+swappable/U
+swapped
+swapper/SM
+swapping
+swap/S
+sward/MSGD
+swarmer/M
+swarm/GSRDM
+swarthiness/M
+Swarthmore/M
+swarthy/RTP
+swart/P
+Swartz/M
+swashbuckler/SM
+swashbuckling/S
+swash/GSRD
+swastika/SM
+SWAT
+swatch/MS
+swathe
+swather/M
+swaths
+swath/SRDMGJ
+swat/S
+swatted
+swatter/MDSG
+swatting
+swayback/SD
+sway/DRGS
+swayer/M
+Swaziland/M
+Swazi/SM
+swearer/M
+swear/SGZR
+swearword/SM
+sweatband/MS
+sweater/M
+sweatily
+sweatiness/M
+sweatpants
+sweat/SGZRM
+sweatshirt/S
+sweatshop/MS
+sweaty/TRP
+Swedenborg/M
+Sweden/M
+swede/SM
+Swede/SM
+Swedish
+Swed/MN
+Sweeney/SM
+sweeper/M
+sweepingness/M
+sweeping/PY
+sweep/SBRJGZ
+sweeps/M
+sweepstakes
+sweepstake's
+sweetbread/SM
+sweetbrier/SM
+sweetcorn
+sweetened/U
+sweetener/M
+sweetening/M
+sweeten/ZDRGJ
+sweetheart/MS
+sweetie/MS
+sweeting/M
+sweetish/Y
+Sweet/M
+sweetmeat/MS
+sweetness/MS
+sweetshop
+sweet/TXSYRNPG
+swellhead/DS
+swelling/M
+swell/SJRDGT
+swelter/DJGS
+sweltering/Y
+Swen/M
+Swenson/M
+swept
+sweptback
+swerve/GSD
+swerving/U
+swifter/M
+swift/GTYRDPS
+Swift/M
+swiftness/MS
+swigged
+swigging
+swig/SM
+swill/SDG
+swimmer/MS
+swimming/MYS
+swim/S
+swimsuit/MS
+Swinburne/M
+swindle/GZRSD
+swindler/M
+swineherd/MS
+swine/SM
+swingeing
+swinger/M
+swinging/Y
+swing/SGRZJB
+swingy/R
+swinishness/M
+swinish/PY
+Swink/M
+swipe/DSG
+swirling/Y
+swirl/SGRD
+swirly/TR
+swish/GSRD
+swishy/R
+swiss
+Swiss/S
+switchback/GDMS
+switchblade/SM
+switchboard/MS
+switcher/M
+switch/GBZMRSDJ
+switchgear
+switchman/M
+switchmen/M
+switchover/M
+Switzerland/M
+Switzer/M
+Switz/MR
+swivel/GMDS
+swizzle/RDGM
+swob's
+swollen
+swoon/GSRD
+swooning/Y
+swoop/RDSG
+swoosh/GSD
+swop's
+sword/DMSG
+swordfish/SM
+swordplayer/M
+swordplay/RMS
+swordsman/M
+swordsmanship/SM
+swordsmen
+swordtail/M
+swore
+sworn
+swot/S
+swum
+swung
+s/XJBG
+sybarite/MS
+sybaritic
+Sybila/M
+Sybilla/M
+Sybille/M
+Sybil/M
+Sybyl/M
+sycamore/SM
+sycophancy/S
+sycophantic
+sycophantically
+sycophant/SYM
+Sydelle/M
+Sydel/M
+Syd/M
+Sydney/M
+Sykes/M
+Sylas/M
+syllabicate/GNDSX
+syllabication/M
+syllabicity
+syllabic/S
+syllabification/M
+syllabify/GSDXN
+syllabi's
+syllable/SDMG
+syllabub/M
+syllabus/MS
+syllabuses
+syllogism/MS
+syllogistic
+Sylow/M
+sylphic
+sylphlike
+sylph/M
+sylphs
+Sylvania/M
+Sylvan/M
+sylvan/S
+Sylvester/M
+Sylvia/M
+Sylvie/M
+Syman/M
+symbiont/M
+symbioses
+symbiosis/M
+symbiotic
+symbol/GMDS
+symbolical/Y
+symbolics/M
+symbolic/SM
+symbolism/MS
+symbolist/MS
+symbolization/MAS
+symbolized/U
+symbolize/GZRSD
+symbolizes/A
+Symington/M
+symmetric
+symmetrically/U
+symmetricalness/M
+symmetrical/PY
+symmetrization/M
+symmetrizing
+symmetry/MS
+Symon/M
+sympathetically/U
+sympathetic/S
+sympathized/U
+sympathizer/M
+sympathize/SRDJGZ
+sympathizing/MYUS
+sympathy/MS
+symphonic
+symphonists
+symphony/MS
+symposium/MS
+symptomatic
+symptomatically
+symptomatology/M
+symptom/MS
+syn
+synagogal
+synagogue/SM
+synapse/SDGM
+synaptic
+synchronism/M
+synchronization's
+synchronization/SA
+synchronize/AGCDS
+synchronized/U
+synchronizer/MS
+synchronousness/M
+synchronous/YP
+synchrony
+synchrotron/M
+syncopate/VNGXSD
+syncopation/M
+syncope/MS
+sync/SGD
+syndicalist
+syndicate/XSDGNM
+syndic/SM
+syndrome/SM
+synergism/SM
+synergistic
+synergy/MS
+synfuel/S
+Synge/M
+synod/SM
+synonymic
+synonymous/Y
+synonym/SM
+synonymy/MS
+synopses
+synopsis/M
+synopsized
+synopsizes
+synopsizing
+synoptic/S
+syntactical/Y
+syntactics/M
+syntactic/SY
+syntax/MS
+syntheses
+synthesis/M
+synthesized/U
+synthesize/GZSRD
+synthesizer/M
+synthesizes/A
+synthetically
+synthetic/S
+syphilis/MS
+syphilitic/S
+syphilized
+syphilizing
+Syracuse/M
+Syriac/M
+Syria/M
+Syrian/SM
+syringe/GMSD
+syrup/DMSG
+syrupy
+sys
+systematical/Y
+systematics/M
+systematic/SP
+systematization/SM
+systematized/U
+systematizer/M
+systematize/ZDRSG
+systematizing/U
+systemically
+systemic/S
+systemization/SM
+system/MS
+systole/MS
+systolic
+Szilard/M
+Szymborska/M
+TA
+Tabasco/MS
+Tabatha/M
+Tabbatha/M
+tabbed
+Tabbie/M
+Tabbi/M
+tabbing
+Tabbitha/M
+Tabb/M
+tabbouleh
+tabboulehs
+tabby/GSD
+Tabby/M
+Taber/M
+Tabernacle/S
+tabernacle/SDGM
+Tabina/M
+Tabitha/M
+tabla/MS
+tableau/M
+tableaux
+tablecloth/M
+tablecloths
+table/GMSD
+tableland/SM
+tablespoonful/MS
+tablespoon/SM
+tablet/MDGS
+tabletop/MS
+tableware/SM
+tabling/M
+tabloid/MS
+Tab/MR
+taboo/GSMD
+Tabor/M
+tabor/MDGS
+Tabriz/SM
+tab/SM
+tabula
+tabular/Y
+tabulate/XNGDS
+tabulation/M
+tabulator/MS
+tachometer/SM
+tachometry
+tachycardia/MS
+tachyon/SM
+tacitness/MS
+taciturnity/MS
+taciturn/Y
+Tacitus/M
+tacit/YP
+tacker/M
+tack/GZRDMS
+tackiness/MS
+tackler/M
+tackle/RSDMZG
+tackling/M
+tacky/RSTP
+Tacoma/M
+taco/MS
+tact/FSM
+tactfulness/S
+tactful/YP
+tactical/Y
+tactician/MS
+tactic/SM
+tactile/Y
+tactility/S
+tactlessness/SM
+tactless/PY
+tactual/Y
+Taddeo/M
+Taddeusz/M
+Tadd/M
+Tadeas/M
+Tadeo/M
+Tades
+Tadio/M
+Tad/M
+tadpole/MS
+tad/SM
+Tadzhikistan's
+Tadzhikstan/M
+Taegu/M
+Taejon/M
+taffeta/MS
+taffrail/SM
+Taffy/M
+taffy/SM
+Taft/M
+Tagalog/SM
+tagged/U
+tagger/S
+tagging
+Tagore/M
+tag/SM
+Tagus/M
+Tahitian/S
+Tahiti/M
+Tahoe/M
+Taichung/M
+taiga/MS
+tailback/MS
+tail/CMRDGAS
+tailcoat/S
+tailer/AM
+tailgate/MGRSD
+tailgater/M
+tailing/MS
+taillessness/M
+tailless/P
+taillight/MS
+tailor/DMJSGB
+Tailor/M
+tailpipe/SM
+tailspin/MS
+tailwind/SM
+Tainan/M
+Taine/M
+taint/DGS
+tainted/U
+Taipei/M
+Taite/M
+Tait/M
+Taiwanese
+Taiwan/M
+Taiyuan/M
+Tajikistan
+takeaway/S
+taken/A
+takeoff/SM
+takeout/S
+takeover/SM
+taker/M
+take/RSHZGJ
+takes/IA
+taking/IA
+Taklamakan/M
+Talbert/M
+Talbot/M
+talcked
+talcking
+talc/SM
+talcum/S
+talebearer/SM
+talented/M
+talentless
+talent/SMD
+taler/M
+tale/RSMN
+tali
+Talia/M
+Taliesin/M
+talion/M
+talismanic
+talisman/SM
+talkativeness/MS
+talkative/YP
+talker/M
+talk/GZSRD
+talkie/M
+talky/RST
+Talladega/M
+Tallahassee/M
+Tallahatchie/M
+Tallahoosa/M
+tallboy/MS
+Tallchief/M
+Talley/M
+Talleyrand/M
+Tallia/M
+Tallie/M
+Tallinn/M
+tallish
+tallness/MS
+Tallou/M
+tallow/DMSG
+tallowy
+tall/TPR
+Tallulah/M
+tally/GRSDZ
+tallyho/DMSG
+Tally/M
+Talmudic
+Talmudist/MS
+Talmud/MS
+talon/SMD
+talus/MS
+Talyah/M
+Talya/M
+Ta/M
+tamable/M
+tamale/SM
+tamarack/SM
+Tamarah/M
+Tamara/M
+tamarind/MS
+Tamar/M
+Tamarra/M
+Tamas
+tambourine/MS
+tamed/U
+Tameka/M
+tameness/S
+Tamera/M
+Tamerlane/M
+tame/SYP
+Tamika/M
+Tamiko/M
+Tamil/MS
+Tami/M
+Tam/M
+Tamma/M
+Tammany/M
+Tammara/M
+tam/MDRSTZGB
+Tammie/M
+Tammi/M
+Tammy/M
+Tampa/M
+Tampax/M
+tampered/U
+tamperer/M
+tamper/ZGRD
+tampon/DMSG
+tamp/SGZRD
+Tamqrah/M
+Tamra/M
+tanager/MS
+Tanaka/M
+Tana/M
+Tananarive/M
+tanbark/SM
+Tancred/M
+tandem/SM
+Tandie/M
+Tandi/M
+tandoori/S
+Tandy/M
+Taney/M
+T'ang
+Tanganyika/M
+tangelo/SM
+tangency/M
+tangential/Y
+tangent/SM
+tangerine/MS
+tang/GSYDM
+tangibility/MIS
+tangible/IPS
+tangibleness's/I
+tangibleness/SM
+tangibly/I
+Tangier/M
+tangle's
+tangle/UDSG
+tango/MDSG
+Tangshan/M
+tangy/RST
+Tanhya/M
+Tania/M
+Tani/M
+Tanisha/M
+Tanitansy/M
+tankard/MS
+tanker/M
+tankful/MS
+tank/GZSRDM
+Tan/M
+tan/MS
+tanned/U
+Tannenbaum/M
+Tanner/M
+tanner/SM
+tannery/MS
+tannest
+Tanney/M
+Tannhäuser/M
+Tannie/M
+tanning/SM
+tannin/SM
+Tann/RM
+Tanny/M
+Tansy/M
+tansy/SM
+tantalization/SM
+tantalized/U
+tantalize/GZSRD
+tantalizingly/S
+tantalizingness/S
+tantalizing/YP
+tantalum/MS
+Tantalus/M
+tantamount
+tantra/S
+tantrum/SM
+Tanya/M
+Tanzania/M
+Tanzanian/S
+taoism
+Taoism/MS
+Taoist/MS
+taoist/S
+Tao/M
+tao/S
+Tapdance/M
+taped/U
+tapeline/S
+taperer/M
+taper/GRD
+tape/SM
+tapestry/GMSD
+tapeworm/MS
+tapioca/MS
+tapir/MS
+tap/MSDRJZG
+tapped/U
+tapper/MS
+tappet/MS
+tapping/M
+taproom/MS
+taproot/SM
+taps/M
+Tarah/M
+Tara/M
+tarantella/MS
+tarantula/MS
+Tarawa/M
+Tarazed/M
+Tarbell/M
+tardily
+tardiness/S
+tardy/TPRS
+tare/MS
+target/GSMD
+tar/GSMD
+tariff/DMSG
+Tarim/M
+Tarkington/M
+tarmacked
+tarmacking
+tarmac/S
+tarnished/U
+tarnish/GDS
+tarn/MS
+taro/MS
+tarot/MS
+tarpapered
+tarpaulin/MS
+tarp/MS
+tarpon/MS
+tarragon/SM
+Tarrah/M
+Tarra/M
+Tarrance/M
+tarred/M
+tarring/M
+tarry/TGRSD
+Tarrytown/M
+tarsal/S
+tarsi
+tarsus/M
+tartan/MS
+tartaric
+Tartar's
+tartar/SM
+Tartary/M
+tartness/MS
+tart/PMYRDGTS
+Tartuffe/M
+Taryn/M
+Tarzan/M
+Tasha/M
+Tashkent/M
+Tasia/M
+task/GSDM
+taskmaster/SM
+taskmistress/MS
+Tasmania/M
+Tasmanian/S
+tassellings
+tassel/MDGS
+Tass/M
+tasted/EU
+tastefulness/SME
+tasteful/PEY
+taste/GZMJSRD
+tastelessness/SM
+tasteless/YP
+taster/M
+taste's/E
+tastes/E
+tastily
+tastiness/MS
+tasting/E
+tasty/RTP
+tatami/MS
+Tatar/SM
+Tate/M
+tater/M
+Tatiana/M
+Tatiania/M
+tat/SRZ
+tatted
+tatterdemalion/SM
+tattered/M
+tatter/GDS
+tatting/SM
+tattler/M
+tattle/RSDZG
+tattletale/SM
+tattooer/M
+tattooist/MS
+tattoo/ZRDMGS
+tatty/R
+Tatum/M
+taught/AU
+taunter/M
+taunting/Y
+taunt/ZGRDS
+taupe/SM
+Taurus/SM
+tau/SM
+tauten/GD
+tautness/S
+tautological/Y
+tautologous
+tautology/SM
+taut/PGTXYRDNS
+taverner/M
+tavern/RMS
+tawdrily
+tawdriness/SM
+tawdry/SRTP
+Tawney/M
+Tawnya/M
+tawny/RSMPT
+Tawsha/M
+taxable/S
+taxably
+taxation/MS
+taxed/U
+taxicab/MS
+taxidermist/SM
+taxidermy/MS
+taxi/MDGS
+taximeter/SM
+taxing/Y
+taxiway/MS
+taxonomic
+taxonomically
+taxonomist/SM
+taxonomy/SM
+taxpayer/MS
+taxpaying/M
+tax/ZGJMDRSB
+Taylor/SM
+Tb
+TB
+TBA
+Tbilisi/M
+tbs
+tbsp
+Tchaikovsky/M
+Tc/M
+TCP
+TD
+TDD
+Te
+teabag/S
+teacake/MS
+teacart/M
+teachable/P
+teach/AGS
+teacher/MS
+teaching/SM
+teacloth
+teacupful/MS
+teacup/MS
+Teador/M
+teahouse/SM
+teakettle/SM
+teak/SM
+teakwood/M
+tealeaves
+teal/MS
+tea/MDGS
+teammate/MS
+team/MRDGS
+teamster/MS
+teamwork/SM
+teapot/MS
+tearaway
+teardrop/MS
+tearer/M
+tearfulness/M
+tearful/YP
+teargas/S
+teargassed
+teargassing
+tearjerker/S
+tearoom/MS
+tear/RDMSG
+teary/RT
+Teasdale/M
+tease/KS
+teasel/DGSM
+teaser/M
+teashop/SM
+teasing/Y
+teaspoonful/MS
+teaspoon/MS
+teas/SRDGZ
+teatime/MS
+teat/MDS
+tech/D
+technetium/SM
+technicality/MS
+technicalness/M
+technical/YSP
+technician/MS
+Technicolor/MS
+Technion/M
+technique/SM
+technocracy/MS
+technocratic
+technocrat/S
+technological/Y
+technologist/MS
+technology/MS
+technophobia
+technophobic
+techs
+tectonically
+tectonic/S
+tectonics/M
+Tecumseh/M
+Tedda/M
+Teddie/M
+Teddi/M
+Tedd/M
+Teddy/M
+teddy/SM
+Tedie/M
+Tedi/M
+tediousness/SM
+tedious/YP
+tedium/MS
+Ted/M
+Tedman/M
+Tedmund/M
+Tedra/M
+tee/DRSMH
+teeing
+teem/GSD
+teemingness/M
+teeming/PY
+teenager/M
+teenage/RZ
+Teena/M
+teen/SR
+teenybopper/SM
+teeny/RT
+teepee's
+teeshirt/S
+teeter/GDS
+teethe
+teether/M
+teething/M
+teethmarks
+teeth/RSDJMG
+teetotaler/M
+teetotalism/MS
+teetotal/SRDGZ
+TEFL
+Teflon/MS
+Tegucigalpa/M
+Teheran's
+Tehran
+TEirtza/M
+tektite/SM
+Tektronix/M
+telecast/SRGZ
+telecommunicate/NX
+telecommunication/M
+telecommute/SRDZGJ
+telecoms
+teleconference/GMJSD
+Teledyne/M
+Telefunken/M
+telegenic
+telegrammed
+telegramming
+telegram/MS
+telegraphic
+telegraphically
+telegraphist/MS
+telegraph/MRDGZ
+telegraphs
+telegraphy/MS
+telekineses
+telekinesis/M
+telekinetic
+Telemachus/M
+Telemann/M
+telemarketer/S
+telemarketing/S
+telemeter/DMSG
+telemetric
+telemetry/MS
+teleological/Y
+teleology/M
+telepathic
+telepathically
+telepathy/SM
+telephone/SRDGMZ
+telephonic
+telephonist/SM
+telephony/MS
+telephotography/MS
+telephoto/S
+teleprinter/MS
+teleprocessing/S
+teleprompter
+TelePrompter/M
+TelePrompTer/S
+telescope/GSDM
+telescopic
+telescopically
+teletext/S
+telethon/MS
+teletype/SM
+Teletype/SM
+teletypewriter/SM
+televangelism/S
+televangelist/S
+televise/SDXNG
+television/M
+televisor/MS
+televisual
+telex/GSDM
+Telex/M
+tell/AGS
+Teller/M
+teller/SDMG
+telling/YS
+Tell/MR
+telltale/MS
+tellurium/SM
+telly/SM
+Telnet/M
+TELNET/M
+telnet/S
+telomeric
+tel/SY
+Telugu/M
+temblor/SM
+temerity/MS
+Tempe/M
+temperamental/Y
+temperament/SM
+temperance/IMS
+tempera/SLM
+temperately/I
+temperateness's/I
+temperateness/SM
+temperate/SDGPY
+temperature/MS
+tempered/UE
+temper/GRDM
+tempering/E
+temper's/E
+tempers/E
+tempest/DMSG
+tempestuousness/SM
+tempestuous/PY
+template/FS
+template's
+Temple/M
+Templeman/M
+temple/SDM
+Templeton/M
+Temp/M
+tempoes
+tempo/MS
+temporal/YS
+temporarily
+temporarinesses
+temporariness/FM
+temporary/SFP
+temporize/GJZRSD
+temporizer/M
+temporizings/U
+temporizing/YM
+temp/SGZTMRD
+temptation/MS
+tempted
+tempter/S
+tempt/FS
+tempting/YS
+temptress/MS
+tempura/SM
+tenabilities
+tenability/UM
+tenableness/M
+tenable/P
+tenably
+tenaciousness/S
+tenacious/YP
+tenacity/S
+tenancy/MS
+tenanted/U
+tenant/MDSG
+tenantry/MS
+tench/M
+tended/UE
+tendency/MS
+tendentiousness/SM
+tendentious/PY
+tendered
+tenderer
+tenderest
+tenderfoot/MS
+tender/FS
+tenderheartedness/MS
+tenderhearted/YP
+tendering
+tenderizer/M
+tenderize/SRDGZ
+tenderloin/SM
+tenderly
+tenderness/SM
+tending/E
+tendinitis/S
+tend/ISFRDG
+tendon/MS
+tendril/SM
+tends/E
+tenebrous
+tenement/MS
+tenet/SM
+Tenex/M
+TENEX/M
+tenfold/S
+ten/MHB
+Tenneco/M
+tenner
+Tennessean/S
+Tennessee/M
+Tenney/M
+tennis/SM
+Tenn/M
+Tennyson/M
+Tenochtitlan/M
+tenon/GSMD
+tenor/MS
+tenpin/SM
+tense/IPYTNVR
+tenseness's/I
+tenseness/SM
+tensile
+tensional/I
+tension/GMRDS
+tensionless
+tensions/E
+tension's/I
+tensity/IMS
+tensorial
+tensor/MS
+tenspot
+tens/SRDVGT
+tentacle/MSD
+tentativeness/S
+tentative/SPY
+tented/UF
+tenterhook/MS
+tenter/M
+tent/FSIM
+tenths
+tenth/SY
+tenting/F
+tenuity/S
+tenuousness/SM
+tenuous/YP
+tenure/SDM
+Teodoor/M
+Teodora/M
+Teodorico/M
+Teodor/M
+Teodoro/M
+tepee/MS
+tepidity/S
+tepidness/S
+tepid/YP
+tequila/SM
+Tera/M
+teratogenic
+teratology/MS
+terbium/SM
+tercel/M
+tercentenary/S
+tercentennial/S
+Terence/M
+Terencio/M
+Teresa/M
+Terese/M
+Tereshkova/M
+Teresina/M
+Teresita/M
+Teressa/M
+Teriann/M
+Teri/M
+Terkel/M
+termagant/SM
+termcap
+termer/M
+terminable/CPI
+terminableness/IMC
+terminal/SYM
+terminate/CXNV
+terminated/U
+terminates
+terminating
+termination/MC
+terminative/YC
+terminator/SM
+termini
+terminological/Y
+terminology/MS
+terminus/M
+termite/SM
+term/MYRDGS
+ternary/S
+tern/GIDS
+tern's
+terpsichorean
+Terpsichore/M
+terrace/MGSD
+terracing/M
+terracotta
+terrain/MS
+Terra/M
+terramycin
+Terrance/M
+Terran/M
+terrapin/MS
+terrarium/MS
+terrazzo/SM
+Terrell/M
+Terrel/M
+Terre/M
+Terrence/M
+terrestrial/YMS
+terribleness/SM
+terrible/P
+terribly
+Terrie/M
+terrier/M
+terrifically
+terrific/Y
+terrify/GDS
+terrifying/Y
+Terrijo/M
+Terrill/M
+Terri/M
+terrine/M
+territoriality/M
+Territorial/SM
+territorial/SY
+Territory's
+territory/SM
+terrorism/MS
+terroristic
+terrorist/MS
+terrorized/U
+terrorizer/M
+terrorize/RSDZG
+terror/MS
+terr/S
+terrycloth
+Terrye/M
+Terry/M
+terry/ZMRS
+terseness/SM
+terse/RTYP
+Tersina/M
+tertian
+Tertiary
+tertiary/S
+Terza/M
+TESL
+Tesla/M
+TESOL
+Tessa/M
+tessellate/XDSNG
+tessellation/M
+tesseral
+Tessie/M
+Tessi/M
+Tess/M
+Tessy/M
+testability/M
+testable/U
+testamentary
+testament/SM
+testate/IS
+testator/MS
+testatrices
+testatrix
+testbed/S
+testcard
+tested/AKU
+tester/MFCKS
+testes/M
+testicle/SM
+testicular
+testifier/M
+testify/GZDRS
+testily
+testimonial/SM
+testimony/SM
+testiness/S
+testing/S
+testis/M
+testosterone/SM
+test/RDBFZGSC
+tests/AK
+test's/AKF
+testy/RTP
+tetanus/MS
+tetchy/TR
+tether/DMSG
+tethered/U
+Tethys/M
+Tetons
+tetrachloride/M
+tetracycline/SM
+tetrafluoride
+tetragonal/Y
+tetrahalides
+tetrahedral/Y
+tetrahedron/SM
+tetrameron
+tetrameter/SM
+tetra/MS
+tetrasodium
+tetravalent
+Teutonic
+Teuton/SM
+Texaco/M
+Texan/S
+Texas/MS
+Tex/M
+TeX/M
+textbook/SM
+text/FSM
+textile/SM
+Textron/M
+textual/FY
+textural/Y
+textured/U
+texture/MGSD
+T/G
+Thacher/M
+Thackeray/M
+Thaddeus/M
+Thaddus/M
+Thadeus/M
+Thad/M
+Thailand/M
+Thaine/M
+Thain/M
+Thai/S
+thalami
+thalamus/M
+Thales/M
+Thalia/M
+thalidomide/MS
+thallium/SM
+thallophyte/M
+Thames
+than
+Thane/M
+thane/SM
+Thanh/M
+thanker/M
+thankfuller
+thankfullest
+thankfulness/SM
+thankful/YP
+thanklessness/SM
+thankless/PY
+thanksgiving/MS
+Thanksgiving/S
+thank/SRDG
+Thant/M
+Thar/M
+Thatcher/M
+thatching/M
+thatch/JMDRSZG
+Thatch/MR
+that'd
+that'll
+that/MS
+thaumaturge/M
+thaw/DGS
+Thaxter/M
+Thayer/M
+Thayne/M
+THC
+the
+Theadora/M
+Thea/M
+theatergoer/MS
+theatergoing/MS
+theater/SM
+theatricality/SM
+theatrical/YS
+theatric/S
+theatrics/M
+Thebault/M
+Thebes
+Theda/M
+Thedrick/M
+Thedric/M
+thee/DS
+theeing
+theft/MS
+Theiler/M
+their/MS
+theism/SM
+theistic
+theist/SM
+Thekla/M
+Thelma/M
+themas
+thematically
+thematics
+thematic/U
+theme/MS
+them/GD
+Themistocles/M
+themselves
+thence
+thenceforth
+thenceforward/S
+Theobald/M
+theocracy/SM
+theocratic
+Theocritus/M
+theodolite/MS
+Theodora/M
+Theodore/M
+Theodoric/M
+Theodor/M
+Theodosia/M
+Theodosian
+Theodosius/M
+theologian/SM
+theological/Y
+theologists
+theology/MS
+Theo/M
+theorem/MS
+theoretical/Y
+theoretician/MS
+theoretic/S
+theoretics/M
+theorist/SM
+theorization/SM
+theorize/ZGDRS
+theory/MS
+theosophic
+theosophical
+theosophist/MS
+Theosophy
+theosophy/SM
+therapeutically
+therapeutic/S
+therapeutics/M
+therapist/MS
+therapy/MS
+Theravada/M
+thereabout/S
+thereafter
+thereat
+thereby
+there'd
+therefor
+therefore
+therefrom
+therein
+there'll
+there/MS
+thereof
+thereon
+Theresa/M
+Therese/M
+Theresina/M
+Theresita/M
+Theressa/M
+thereto
+theretofore
+thereunder
+thereunto
+thereupon
+therewith
+Therine/M
+thermal/YS
+thermionic/S
+thermionics/M
+thermistor/MS
+therm/MS
+thermocouple/MS
+thermodynamical/Y
+thermodynamic/S
+thermodynamics/M
+thermoelastic
+thermoelectric
+thermoformed
+thermoforming
+thermogravimetric
+thermoluminescence/M
+thermometer/MS
+thermometric
+thermometry/M
+thermonuclear
+thermopile/M
+thermoplastic/S
+thermopower
+thermo/S
+thermosetting
+thermos/S
+Thermos/SM
+thermostable
+thermostatically
+thermostatic/S
+thermostatics/M
+thermostat/SM
+thermostatted
+thermostatting
+Theron/M
+thesauri
+thesaurus/MS
+these/S
+Theseus/M
+thesis/M
+thespian/S
+Thespian/S
+Thespis/M
+Thessalonian
+Thessaloníki/M
+Thessaly/M
+theta/MS
+thew/SM
+they
+they'd
+they'll
+they're
+they've
+th/GNJX
+Thia/M
+thiamine/MS
+Thibaud/M
+Thibaut/M
+thickener/M
+thickening/M
+thicken/RDJZG
+thicket/SMD
+thickheaded/M
+thickish
+thickness/MS
+thickset/S
+thick/TXPSRNY
+thief/M
+Thiensville/M
+Thieu/M
+thievery/MS
+thieve/SDJG
+thievishness/M
+thievish/P
+thighbone/SM
+thigh/DM
+thighs
+thimble/DSMG
+thimbleful/MS
+Thimbu/M
+Thimphu
+thine
+thingamabob/MS
+thingamajig/SM
+thing/MP
+thinkableness/M
+thinkable/U
+thinkably/U
+think/AGRS
+thinker/MS
+thinkingly/U
+thinking/SMYP
+thinned
+thinner/MS
+thinness/MS
+thinnest
+thinning
+thinnish
+thin/STPYR
+thiocyanate/M
+thiouracil/M
+third/DYGS
+thirster/M
+thirst/GSMDR
+thirstily
+thirstiness/S
+thirsty/TPR
+thirteen/MHS
+thirteenths
+thirtieths
+thirty/HMS
+this
+this'll
+thistledown/MS
+thistle/SM
+thither
+Th/M
+tho
+thole/GMSD
+Thomasa/M
+Thomasina/M
+Thomasine/M
+Thomasin/M
+Thoma/SM
+Thomism/M
+Thomistic
+Thom/M
+Thompson/M
+Thomson/M
+thong/SMD
+thoracic
+thorax/MS
+Thorazine
+Thoreau/M
+thoriate/D
+Thorin/M
+thorium/MS
+Thor/M
+Thornburg/M
+Thorndike/M
+Thornie/M
+thorniness/S
+Thorn/M
+thorn/SMDG
+Thornton/M
+Thorny/M
+thorny/PTR
+thoroughbred/S
+thoroughfare/MS
+thoroughgoing
+thoroughness/SM
+thorough/PTYR
+Thorpe/M
+Thorstein/M
+Thorsten/M
+Thorvald/M
+those
+Thoth/M
+thou/DSG
+though
+thoughtfully
+thoughtfulness/S
+thoughtful/U
+thoughtlessness/MS
+thoughtless/YP
+thought/MS
+thousandfold
+thousand/SHM
+thousandths
+Thrace/M
+Thracian/M
+thralldom/S
+thrall/GSMD
+thrash/DSRZGJ
+thrasher/M
+thrashing/M
+threadbare/P
+threader/M
+threading/A
+threadlike
+thread/MZDRGS
+thready/RT
+threatener/M
+threaten/GJRD
+threatening/Y
+threat/MDNSXG
+threefold
+three/MS
+threepence/M
+threepenny
+threescore/S
+threesome/SM
+threnody/SM
+thresh/DSRZG
+thresher/M
+threshold/MDGS
+threw
+thrice
+thriftily
+thriftiness/S
+thriftless
+thrift/SM
+thrifty/PTR
+thriller/M
+thrilling/Y
+thrill/ZMGDRS
+thriver/M
+thrive/RSDJG
+thriving/Y
+throatily
+throatiness/MS
+throat/MDSG
+throaty/PRT
+throbbed
+throbbing
+throb/S
+throeing
+throe/SDM
+thrombi
+thromboses
+thrombosis/M
+thrombotic
+thrombus/M
+Throneberry/M
+throne/CGSD
+throne's
+throng/GDSM
+throttle/DRSZMG
+throttler/M
+throughout
+throughput/SM
+throughway's
+through/Y
+throwaway/SM
+throwback/MS
+thrower/M
+thrown
+throwout
+throw/SZGR
+thrummed
+thrumming
+thrum/S
+thrush/MS
+thruster/M
+thrust/ZGSR
+Thruway/MS
+thruway/SM
+Thunderbird/M
+Thu
+Thucydides/M
+thudded
+thudding
+thud/MS
+thuggee/M
+thuggery/SM
+thuggish
+thug/MS
+Thule/M
+thulium/SM
+thumbnail/MS
+thumbscrew/SM
+thumb/SMDG
+thumbtack/GMDS
+thump/RDMSG
+thunderbolt/MS
+thunderclap/SM
+thundercloud/SM
+thunderer/M
+thunderhead/SM
+thundering/Y
+thunderous/Y
+thundershower/MS
+thunderstorm/MS
+thunderstruck
+thundery
+thunder/ZGJDRMS
+thunk
+Thurber/M
+Thurman/M
+Thur/MS
+Thursday/SM
+Thurstan/M
+Thurston/M
+thus/Y
+thwack/DRSZG
+thwacker/M
+thwarter/M
+thwart/GSDRY
+thy
+thyme/SM
+thymine/MS
+thymus/SM
+thyratron/M
+thyristor/MS
+thyroglobulin
+thyroidal
+thyroid/S
+thyronine
+thyrotoxic
+thyrotrophic
+thyrotrophin
+thyrotropic
+thyrotropin/M
+thyroxine/M
+thyself
+Tia/M
+Tianjin
+tiara/MS
+Tiberius/M
+Tiber/M
+Tibetan/S
+Tibet/M
+tibiae
+tibial
+tibia/M
+Tibold/M
+Tiburon/M
+ticker/M
+ticket/SGMD
+tick/GZJRDMS
+ticking/M
+tickler/M
+tickle/RSDZG
+ticklishness/MS
+ticklish/PY
+ticktacktoe/S
+ticktock/SMDG
+tic/MS
+Ticonderoga/M
+tidal/Y
+tidbit/MS
+tiddlywinks/M
+tide/GJDS
+tideland/MS
+tidewater/SM
+tideway/SM
+tidily/U
+tidiness/USM
+tidying/M
+tidy/UGDSRPT
+tie/AUDS
+tieback/MS
+Tiebold/M
+Tiebout/M
+tiebreaker/SM
+Tieck/M
+Tiena/M
+Tienanmen/M
+Tientsin's
+tier/DGM
+Tierney/M
+Tiertza/M
+Tiffanie/M
+Tiffani/M
+tiffany/M
+Tiffany/M
+tiff/GDMS
+Tiffie/M
+Tiffi/M
+Tiff/M
+Tiffy/M
+tigerish
+tiger/SM
+tightener/M
+tighten/JZGDR
+tightfisted
+tightness/MS
+tightrope/SM
+tight/STXPRNY
+tightwad/MS
+tigress/SM
+Tigris/M
+Tijuana/M
+tike's
+Tilda/M
+tilde/MS
+Tildie/M
+Tildi/M
+Tildy/M
+tile/DRSJMZG
+tiled/UE
+Tiler/M
+tiles/U
+tiling/M
+tillable
+tillage/SM
+till/EGSZDR
+tiller/GDM
+tiller's/E
+Tillich/M
+Tillie/M
+Tillman/M
+Tilly/M
+tilth/M
+tilt/RDSGZ
+Ti/M
+timber/DMSG
+timbering/M
+timberland/SM
+timberline/S
+timbrel/SM
+timbre/MS
+Timbuktu/M
+ti/MDRZ
+timebase
+time/DRSJMYZG
+timekeeper/MS
+timekeeping/SM
+timelessness/S
+timeless/PY
+timeliness/SMU
+timely/UTRP
+timeout/S
+timepiece/MS
+timer/M
+timescale/S
+timeserver/MS
+timeserving/S
+timeshare/SDG
+timespan
+timestamped
+timestamps
+timetable/GMSD
+timeworn
+Timex/M
+timezone/S
+timidity/SM
+timidness/MS
+timid/RYTP
+Timi/M
+timing/M
+Timmie/M
+Timmi/M
+Tim/MS
+Timmy/M
+Timofei/M
+Timon/M
+timorousness/MS
+timorous/YP
+Timoteo/M
+Timothea/M
+Timothee/M
+Timotheus/M
+Timothy/M
+timothy/MS
+timpani
+timpanist/S
+Timur/M
+Tina/M
+tincture/SDMG
+tinderbox/MS
+tinder/MS
+Tine/M
+tine/SM
+tinfoil/MS
+tingeing
+tinge/S
+ting/GYDM
+tingle/SDG
+tingling/Y
+tingly/TR
+Ting/M
+tinily
+tininess/MS
+tinker/SRDMZG
+Tinkertoy
+tinkle/SDG
+tinkling/M
+tinkly
+tin/MDGS
+tinned
+tinner/M
+tinnily
+tinniness/SM
+tinning/M
+tinnitus/MS
+tinny/RSTP
+tinplate/S
+tinsel/GMDYS
+Tinseltown/M
+tinsmith/M
+tinsmiths
+tinter/M
+tintinnabulation/MS
+Tintoretto/M
+tint/SGMRDB
+tintype/SM
+tinware/MS
+tiny/RPT
+Tioga/M
+Tiphanie/M
+Tiphani/M
+Tiphany/M
+tipi's
+tip/MS
+tipoff
+Tippecanoe/M
+tipped
+Tipperary/M
+tipper/MS
+tippet/MS
+tipping
+tippler/M
+tipple/ZGRSD
+tippy/R
+tipsily
+tipsiness/SM
+tipster/SM
+tipsy/TPR
+tiptoeing
+tiptoe/SD
+tiptop/S
+tirade/SM
+Tirana's
+Tirane
+tired/AYP
+tireder
+tiredest
+tiredness/S
+tirelessness/SM
+tireless/PY
+tire/MGDSJ
+tires/A
+Tiresias/M
+tiresomeness/S
+tiresome/PY
+tiring/AU
+Tirolean/S
+Tirol/M
+tiro's
+Tirrell/M
+tis
+Tisha/M
+Tish/M
+tissue/MGSD
+titanate/M
+Titania/M
+titanic
+titanically
+Titanic/M
+titanium/SM
+titan/SM
+Titan/SM
+titbit's
+titer/M
+tither/M
+tithe/SRDGZM
+tithing/M
+Titian/M
+titian/S
+Titicaca/M
+titillate/XSDVNG
+titillating/Y
+titillation/M
+titivate/NGDSX
+titivation/M
+titled/AU
+title/GMSRD
+titleholder/SM
+titling/A
+titmice
+titmouse/M
+tit/MRZS
+Tito/SM
+titrate/SDGN
+titration/M
+titted
+titter/GDS
+titting
+tittle/SDMG
+titular/SY
+Titus/M
+tizzy/SM
+TKO
+Tlaloc/M
+TLC
+Tlingit/M
+Tl/M
+TM
+Tm/M
+tn
+TN
+tnpk
+TNT
+toad/SM
+toadstool/SM
+toady/GSDM
+toadyism/M
+toaster/M
+toastmaster/MS
+toastmistress/S
+toast/SZGRDM
+toasty/TRS
+tobacconist/SM
+tobacco/SM
+tobaggon/SM
+Tobago/M
+Tobe/M
+Tobey/M
+Tobiah/M
+Tobias/M
+Tobie/M
+Tobi/M
+Tobin/M
+Tobit/M
+toboggan/MRDSZG
+Tobye/M
+Toby/M
+Tocantins/M
+toccata/M
+Tocqueville
+tocsin/MS
+to/D
+today'll
+today/SM
+Toddie/M
+toddler/M
+toddle/ZGSRD
+Todd/M
+Toddy/M
+toddy/SM
+Tod/M
+toecap/SM
+toeclip/S
+TOEFL
+toehold/MS
+toeing
+toe/MS
+toenail/DMGS
+toffee/SM
+tofu/S
+toga/SMD
+toge
+togetherness/MS
+together/P
+togged
+togging
+toggle/SDMG
+Togolese/M
+Togo/M
+tog/SMG
+Toiboid/M
+toilet/GMDS
+toiletry/MS
+toilette/SM
+toil/SGZMRD
+toilsomeness/M
+toilsome/PY
+Toinette/M
+Tojo/M
+tokamak
+Tokay/M
+toke/GDS
+tokenism/SM
+tokenized
+token/SMDG
+Tokugawa/M
+Tokyoite/MS
+Tokyo/M
+Toland/M
+told/AU
+Toledo/SM
+tole/MGDS
+tolerability/IM
+tolerable/I
+tolerably/I
+tolerance/SIM
+tolerant/IY
+tolerate/XVNGSD
+toleration/M
+Tolkien
+tollbooth/M
+tollbooths
+toll/DGS
+Tolley/M
+tollgate/MS
+tollhouse/M
+tollway/S
+Tolstoy/M
+toluene/MS
+Tolyatti/M
+tomahawk/SGMD
+Tomasina/M
+Tomasine/M
+Toma/SM
+Tomaso/M
+tomatoes
+tomato/M
+Tombaugh/M
+tomb/GSDM
+Tombigbee/M
+tomblike
+tombola/M
+tomboyish
+tomboy/MS
+tombstone/MS
+tomcat/SM
+tomcatted
+tomcatting
+Tome/M
+tome/SM
+tomfoolery/MS
+tomfool/M
+Tomi/M
+Tomkin/M
+Tomlin/M
+Tom/M
+tommed
+Tommie/M
+Tommi/M
+tomming
+tommy/M
+Tommy/M
+tomographic
+tomography/MS
+tomorrow/MS
+Tompkins/M
+Tomsk/M
+tom/SM
+tomtit/SM
+tonality/MS
+tonal/Y
+tonearm/S
+tone/ISRDZG
+tonelessness/M
+toneless/YP
+toner/IM
+tone's
+Tonga/M
+Tongan/SM
+tong/GRDS
+tongueless
+tongue/SDMG
+tonguing/M
+Tonia/M
+tonic/SM
+Tonie/M
+tonight/MS
+Toni/M
+Tonio/M
+tonk/MS
+tonnage/SM
+tonne/MS
+Tonnie/M
+tonsillectomy/MS
+tonsillitis/SM
+tonsil/SM
+ton/SKM
+tonsorial
+tonsure/SDGM
+Tonto/M
+Tonya/M
+Tonye/M
+Tony/M
+tony/RT
+toodle
+too/H
+took/A
+tool/AGDS
+toolbox/SM
+tooler/SM
+tooling/M
+toolkit/SM
+toolmaker/M
+toolmake/ZRG
+toolmaking/M
+tool's
+toolsmith
+Toomey/M
+tooter/M
+toot/GRDZS
+toothache/SM
+toothbrush/MSG
+tooth/DMG
+toothily
+toothless
+toothmarks
+toothpaste/SM
+toothpick/MS
+tooths
+toothsome
+toothy/TR
+tootle/SRDG
+tootsie
+Tootsie/M
+toots/M
+tootsy/MS
+topaz/MS
+topcoat/MS
+topdressing/S
+Topeka/M
+toper/M
+topflight
+topgallant/M
+topiary/S
+topicality/MS
+topical/Y
+topic/MS
+topknot/MS
+topless
+topmast/MS
+topmost
+topnotch/R
+topocentric
+topographer/SM
+topographic
+topographical/Y
+topography/MS
+topological/Y
+topologist/MS
+topology/MS
+topped
+topper/MS
+topping/MS
+topple/GSD
+topsail/MS
+topside/SRM
+top/SMDRG
+topsoil/GDMS
+topspin/MS
+Topsy/M
+toque/MS
+Torah/M
+Torahs
+torchbearer/SM
+torchlight/S
+torch/SDMG
+toreador/SM
+Tore/M
+tore/S
+Torey/M
+Torie/M
+tori/M
+Tori/M
+Torin/M
+torment/GSD
+tormenting/Y
+tormentor/MS
+torn
+tornadoes
+tornado/M
+toroidal/Y
+toroid/MS
+Toronto/M
+torpedoes
+torpedo/GMD
+torpidity/S
+torpid/SY
+torpor/MS
+Torquemada/M
+torque/MZGSRD
+Torrance/M
+Torre/MS
+torrence
+Torrence/M
+Torrens/M
+torrential
+torrent/MS
+Torrey/M
+Torricelli/M
+torridity/SM
+torridness/SM
+torrid/RYTP
+Torrie/M
+Torrin/M
+Torr/XM
+Torry/M
+torsional/Y
+torsion/IAM
+torsions
+torsi's
+tor/SLM
+torso/SM
+tors/S
+tort/ASFE
+tortellini/MS
+torte/MS
+torten
+tortilla/MS
+tortoiseshell/SM
+tortoise/SM
+Tortola/M
+tortoni/MS
+tort's
+Tortuga/M
+tortuousness/MS
+tortuous/PY
+torture/ZGSRD
+torturous
+torus/MS
+Tory/SM
+Tosca/M
+Toscanini/M
+Toshiba/M
+toss/SRDGZ
+tossup/MS
+totaler/M
+totalistic
+totalitarianism/SM
+totalitarian/S
+totality/MS
+totalizator/S
+totalizing
+total/ZGSRDYM
+totemic
+totem/MS
+toter/M
+tote/S
+toting/M
+tot/MDRSG
+Toto/M
+totted
+totterer/M
+tottering/Y
+totter/ZGRDS
+totting
+toucan/MS
+touchable/U
+touch/ASDG
+touchdown/SM
+touché
+touched/U
+toucher/M
+touchily
+touchiness/SM
+touching/SY
+touchline/M
+touchscreen
+touchstone/SM
+touchy/TPR
+toughen/DRZG
+toughener/M
+toughness/SM
+toughs
+tough/TXGRDNYP
+Toulouse/M
+toupee/SM
+toured/CF
+tourer/M
+tour/GZSRDM
+touring/F
+tourism/SM
+touristic
+tourist/SM
+touristy
+tourmaline/SM
+tournament/MS
+tourney/GDMS
+tourniquet/MS
+tour's/CF
+tours/CF
+tousle/GSD
+touter/M
+tout/SGRD
+Tova/M
+Tove/M
+towardliness/M
+towardly/P
+towards
+toward/YU
+towboat/MS
+tow/DRSZG
+towelette/S
+towel/GJDMS
+toweling/M
+tower/GMD
+towering/Y
+towhead/MSD
+towhee/SM
+towline/MS
+towner/M
+Townes
+Towney/M
+townhouse/S
+Townie/M
+townie/S
+Townley/M
+Town/M
+Townsend/M
+townsfolk
+township/MS
+townsman/M
+townsmen
+townspeople/M
+town/SRM
+townswoman/M
+townswomen
+Towny/M
+towpath/M
+towpaths
+towrope/MS
+Towsley/M
+toxemia/MS
+toxicity/MS
+toxicological
+toxicologist/SM
+toxicology/MS
+toxic/S
+toxin/MS
+toyer/M
+toymaker
+toy/MDRSG
+Toynbee/M
+Toyoda/M
+Toyota/M
+toyshop
+tr
+traceability/M
+traceableness/M
+traceable/P
+trace/ASDG
+traceback/MS
+traced/U
+Tracee/M
+traceless/Y
+Trace/M
+tracepoint/SM
+tracer/MS
+tracery/MDS
+trace's
+Tracey/M
+tracheae
+tracheal/M
+trachea/M
+tracheotomy/SM
+Tracie/M
+Traci/M
+tracing/SM
+trackage
+trackball/S
+trackbed
+tracked/U
+tracker/M
+trackless
+tracksuit/SM
+track/SZGMRD
+tractability/SI
+tractable/I
+tractably/I
+tract/ABS
+Tractarians
+traction/KSCEMAF
+tractive/KFE
+tractor/FKMASC
+tract's
+tracts/CEFK
+Tracy/M
+trademark/GSMD
+trader/M
+tradesman/M
+tradesmen
+tradespeople
+tradespersons
+trade/SRDGZM
+tradeswoman/M
+tradeswomen
+traditionalism/MS
+traditionalistic
+traditionalist/MS
+traditionalized
+traditionally
+traditional/U
+tradition/SM
+traduce/DRSGZ
+Trafalgar/M
+trafficked
+trafficker/MS
+trafficking/S
+traffic/SM
+tragedian/SM
+tragedienne/MS
+tragedy/MS
+tragically
+tragicomedy/SM
+tragicomic
+tragic/S
+trailblazer/MS
+trailblazing/S
+trailer/GDM
+trails/F
+trailside
+trail/SZGJRD
+trainable
+train/ASDG
+trained/U
+trainee/MS
+traineeships
+trainer/MS
+training/SM
+trainman/M
+trainmen
+trainspotter/S
+traipse/DSG
+trait/MS
+traitorous/Y
+traitor/SM
+Trajan/M
+trajectory/MS
+trammed
+trammeled/U
+trammel/GSD
+tramming
+tram/MS
+trample/DGRSZ
+trampler/M
+trampoline/GMSD
+tramp/RDSZG
+tramway/M
+trance/MGSD
+tranche/SM
+Tran/M
+tranquility/S
+tranquilized/U
+tranquilize/JGZDSR
+tranquilizer/M
+tranquilizes/A
+tranquilizing/YM
+tranquillize/GRSDZ
+tranquillizer/M
+tranquilness/M
+tranquil/PTRY
+transact/GSD
+transactional
+transaction/MS
+transactor/SM
+transalpine
+transaminase
+transatlantic
+Transcaucasia/M
+transceiver/SM
+transcendence/MS
+transcendentalism/SM
+transcendentalist/SM
+transcendental/YS
+transcendent/Y
+transcend/SDG
+transconductance
+transcontinental
+transcribe/DSRGZ
+transcriber/M
+transcription/SM
+transcript/SM
+transcultural
+transducer/SM
+transduction/M
+transect/DSG
+transept/SM
+transferability/M
+transferal/MS
+transfer/BSMD
+transferee/M
+transference/SM
+transferor/MS
+transferral/SM
+transferred
+transferrer/SM
+transferring
+transfiguration/SM
+transfigure/SDG
+transfinite/Y
+transfix/SDG
+transformational
+transformation/MS
+transform/DRZBSG
+transformed/U
+transformer/M
+transfuse/XSDGNB
+transfusion/M
+transgression/SM
+transgressor/S
+transgress/VGSD
+trans/I
+transience/SM
+transiency/S
+transient/YS
+transistorize/GDS
+transistor/SM
+Transite/M
+transitional/Y
+transition/MDGS
+transitivenesses
+transitiveness/IM
+transitive/PIY
+transitivity/MS
+transitoriness/M
+transitory/P
+transit/SGVMD
+transl
+translatability/M
+translatable/U
+translated/AU
+translate/VGNXSDB
+translational
+translation/M
+translator/SM
+transliterate/XNGSD
+translucence/SM
+translucency/MS
+translucent/Y
+transmigrate/XNGSD
+transmissible
+transmission/MSA
+transmissive
+transmit/AS
+transmittable
+transmittal/SM
+transmittance/MS
+transmitted/A
+transmitter/SM
+transmitting/A
+transmogrification/M
+transmogrify/GXDSN
+transmutation/SM
+transmute/GBSD
+transnational/S
+transoceanic
+transom/SM
+transonic
+transpacific
+transparency/MS
+transparentness/M
+transparent/YP
+transpiration/SM
+transpire/GSD
+transplantation/S
+transplant/GRDBS
+transpolar
+transponder/MS
+transportability
+transportable/U
+transportation/SM
+transport/BGZSDR
+transpose/BGSD
+transposed/U
+transposition/SM
+Transputer/M
+transsexualism/MS
+transsexual/SM
+transship/LS
+transshipment/SM
+transshipped
+transshipping
+transubstantiation/MS
+Transvaal/M
+transversal/YM
+transverse/GYDS
+transvestism/SM
+transvestite/SM
+transvestitism
+Transylvania/M
+trapdoor/S
+trapeze/DSGM
+trapezium/MS
+trapezoidal
+trapezoid/MS
+trap/MS
+trappable/U
+trapped
+trapper/SM
+trapping/S
+Trappist/MS
+trapshooting/SM
+trashcan/SM
+trashiness/SM
+trash/SRDMG
+trashy/TRP
+Trastevere/M
+trauma/MS
+traumatic
+traumatically
+traumatize/SDG
+travail/SMDG
+traveled/U
+traveler/M
+travelog's
+travelogue/S
+travel/SDRGZJ
+Traver/MS
+traversal/SM
+traverse/GBDRS
+traverser/M
+travertine/M
+travesty/SDGM
+Travis/M
+Travus/M
+trawler/M
+trawl/RDMSZG
+tray/SM
+treacherousness/SM
+treacherous/PY
+treachery/SM
+treacle/DSGM
+treacly
+treader/M
+treadle/GDSM
+treadmill/MS
+tread/SAGD
+Treadwell/M
+treas
+treason/BMS
+treasonous
+treasure/DRSZMG
+treasurer/M
+treasurership
+treasury/SM
+Treasury/SM
+treatable
+treated/U
+treater/S
+treatise/MS
+treatment/MS
+treat's
+treat/SAGDR
+treaty/MS
+treble/SDG
+Treblinka/M
+treeing
+treeless
+treelike
+tree/MDS
+treetop/SM
+trefoil/SM
+Trefor/M
+trekked
+trekker/MS
+Trekkie/M
+trekking
+trek/MS
+trellis/GDSM
+Tremaine/M
+Tremain/M
+trematode/SM
+Tremayne/M
+tremble/JDRSG
+trembler/M
+trembles/M
+trembly
+tremendousness/M
+tremendous/YP
+tremolo/MS
+tremor/MS
+tremulousness/SM
+tremulous/YP
+trenchancy/MS
+trenchant/Y
+trencherman/M
+trenchermen
+trencher/SM
+trench/GASD
+trench's
+trendily
+trendiness/S
+trend/SDMG
+trendy/PTRS
+Trenna/M
+Trent/M
+Trenton/M
+trepanned
+trepidation/MS
+Tresa/M
+Trescha/M
+trespasser/M
+trespass/ZRSDG
+Tressa/M
+tressed/E
+tresses/E
+tressing/E
+tress/MSDG
+trestle/MS
+Trevar/M
+Trevelyan/M
+Trever/M
+Trevino/M
+Trevor/M
+Trev/RM
+Trey/M
+trey/MS
+triableness/M
+triable/P
+triadic
+triad/MS
+triage/SDMG
+trial/ASM
+trialization
+trialled
+trialling
+triamcinolone
+triangle/SM
+triangulable
+triangularization/S
+triangular/Y
+triangulate/YGNXSD
+triangulation/M
+Triangulum/M
+Trianon/M
+Triassic
+triathlon/S
+triatomic
+tribalism/MS
+tribal/Y
+tribe/MS
+tribesman/M
+tribesmen
+tribeswoman
+tribeswomen
+tribulate/NX
+tribulation/M
+tribunal/MS
+tribune/SM
+tributary/MS
+tribute/EGSF
+tribute's
+trice/GSDM
+tricentennial/S
+triceps/SM
+triceratops/M
+trichinae
+trichina/M
+trichinoses
+trichinosis/M
+trichloroacetic
+trichloroethane
+trichotomy/M
+trichromatic
+Tricia/M
+trickery/MS
+trick/GMSRD
+trickily
+trickiness/SM
+trickle/DSG
+trickster/MS
+tricky/RPT
+tricolor/SMD
+tricycle/SDMG
+trident/SM
+tridiagonal
+tried/UA
+triennial/SY
+trier/AS
+trier's
+tries/A
+Trieste/M
+triffid/S
+trifle/MZGJSRD
+trifler/M
+trifluoride/M
+trifocals
+trigged
+trigger/GSDM
+triggest
+trigging
+triglyceride/MS
+trigonal/Y
+trigonometric
+trigonometrical
+trigonometry/MS
+trigram/S
+trig/S
+trihedral
+trike/GMSD
+trilateral/S
+trilby/SM
+trilingual
+trillion/SMH
+trillionth/M
+trillionths
+trillium/SM
+trill/RDMGS
+trilobite/MS
+trilogy/MS
+trimaran/MS
+Trimble/M
+trimer/M
+trimester/MS
+trimmed/U
+trimmer/MS
+trimmest
+trimming/MS
+trimness/S
+trimodal
+trimonthly
+trim/PSYR
+Trimurti/M
+Trina/M
+Trinidad/M
+trinitarian/S
+trinitrotoluene/SM
+trinity/MS
+Trinity/MS
+trinketer/M
+trinket/MRDSG
+triode/MS
+trio/SM
+trioxide/M
+tripartite/N
+tripartition/M
+tripe/MS
+triphenylarsine
+triphenylphosphine
+triphenylstibine
+triphosphopyridine
+triple/GSD
+triplet/SM
+triplex/S
+triplicate/SDG
+triplication/M
+triply/GDSN
+Trip/M
+tripodal
+tripod/MS
+tripoli/M
+Tripoli/M
+tripolyphosphate
+tripos/SM
+tripped
+Trippe/M
+tripper/MS
+tripping/Y
+Tripp/M
+trip/SMY
+triptych/M
+triptychs
+tripwire/MS
+trireme/SM
+Tris
+trisect/GSD
+trisection/S
+trisector
+Trisha/M
+Trish/M
+trisodium
+Trista/M
+Tristam/M
+Tristan/M
+tristate
+trisyllable/M
+tritely/F
+triteness/SF
+trite/SRPTY
+tritium/MS
+triton/M
+Triton/M
+triumphal
+triumphalism
+triumphant/Y
+triumph/GMD
+triumphs
+triumvirate/MS
+triumvir/MS
+triune
+trivalent
+trivet/SM
+trivia
+triviality/MS
+trivialization/MS
+trivialize/DSG
+trivial/Y
+trivium/M
+Trixie/M
+Trixi/M
+Trix/M
+Trixy/M
+Trobriand/M
+trochaic/S
+trochee/SM
+trod/AU
+trodden/UA
+trodes
+troff/MR
+troglodyte/MS
+troika/SM
+Trojan/MS
+troll/DMSG
+trolled/F
+trolleybus/S
+trolley/SGMD
+trolling/F
+trollish
+Trollope/M
+trollop/GSMD
+trolly's
+trombone/MS
+trombonist/SM
+tromp/DSG
+Trondheim/M
+trooper/M
+troopship/SM
+troop/SRDMZG
+trope/SM
+Tropez/M
+trophic
+trophy/MGDS
+tropical/SY
+tropic/MS
+tropism/SM
+tropocollagen
+troposphere/MS
+tropospheric
+troth/GDM
+troths
+trot/S
+Trotsky/M
+trotted
+trotter/SM
+trotting
+troubadour/SM
+troubled/U
+trouble/GDRSM
+troublemaker/MS
+troubler/M
+troubleshooter/M
+troubleshoot/SRDZG
+troubleshot
+troublesomeness/M
+troublesome/YP
+trough/M
+troughs
+trounce/GZDRS
+trouncer/M
+troupe/MZGSRD
+trouper/M
+trouser/DMGS
+trousseau/M
+trousseaux
+Troutman/M
+trout/SM
+trove/SM
+troweler/M
+trowel/SMDRGZ
+trow/SGD
+Troyes
+Troy/M
+troy/S
+Trstram/M
+truancy/MS
+truant/SMDG
+truce/SDGM
+Truckee/M
+trucker/M
+trucking/M
+truckle/GDS
+truckload/MS
+truck/SZGMRDJ
+truculence/SM
+truculent/Y
+Truda/M
+Trudeau/M
+Trude/M
+Trudey/M
+trudge/SRDG
+Trudie/M
+Trudi/M
+Trudy/M
+true/DRSPTG
+truelove/MS
+Trueman/M
+trueness/M
+truer/U
+truest/U
+truffle/MS
+truism/SM
+Trujillo/M
+Trula/M
+truly/U
+Trumaine/M
+Truman/M
+Trumann/M
+Trumbull/M
+trump/DMSG
+trumpery/SM
+trumpeter/M
+trumpet/MDRZGS
+Trump/M
+truncate/NGDSX
+truncation/M
+truncheon/MDSG
+trundle/GZDSR
+trundler/M
+trunk/GSMD
+trunnion/SM
+trusser/M
+trussing/M
+truss/SRDG
+trusted/EU
+trusteeing
+trustee/MDS
+trusteeship/SM
+truster/M
+trustful/EY
+trustfulness/SM
+trustiness/M
+trusting/Y
+trust/RDMSG
+trusts/E
+trustworthier
+trustworthiest
+trustworthiness/MS
+trustworthy/UP
+trusty/PTMSR
+Truth
+truthfulness/US
+truthful/UYP
+truths/U
+truth/UM
+TRW
+trying/Y
+try/JGDRSZ
+tryout/MS
+trypsin/M
+tryst/GDMS
+ts
+T's
+tsarevich
+tsarina's
+tsarism/M
+tsarist
+tsetse/S
+Tsimshian/M
+Tsiolkovsky/M
+Tsitsihar/M
+tsp
+tsunami/MS
+Tsunematsu/M
+Tswana/M
+TTL
+tty/M
+ttys
+Tuamotu/M
+Tuareg/M
+tubae
+tubal
+tuba/SM
+tubbed
+tubbing
+tubby/TR
+tubeless
+tubercle/MS
+tubercular/S
+tuberculin/MS
+tuberculoses
+tuberculosis/M
+tuberculous
+tuber/M
+tuberose/SM
+tuberous
+tube/SM
+tubing/M
+tub/JMDRSZG
+Tubman/M
+tubular/Y
+tubule/SM
+tucker/GDM
+Tucker/M
+tuck/GZSRD
+Tuckie/M
+Tuck/RM
+Tucky/M
+Tucson/M
+Tucuman/M
+Tudor/MS
+Tue/S
+Tuesday/SM
+tufter/M
+tuft/GZSMRD
+tufting/M
+tugboat/MS
+tugged
+tugging
+tug/S
+tuition/ISM
+Tulane/M
+tularemia/S
+tulip/SM
+tulle/SM
+Tulley/M
+Tull/M
+Tully/M
+Tulsa/M
+tum
+tumbledown
+tumbler/M
+tumbleweed/MS
+tumble/ZGRSDJ
+tumbrel/SM
+tumescence/S
+tumescent
+tumidity/MS
+tumid/Y
+tummy/SM
+tumor/MDS
+tumorous
+Tums/M
+tumult/SGMD
+tumultuousness/M
+tumultuous/PY
+tumulus/M
+tunableness/M
+tunable/P
+tuna/SM
+tundra/SM
+tun/DRJZGBS
+tune/CSDG
+tunefulness/MS
+tuneful/YP
+tuneless/Y
+tuner/M
+tune's
+tuneup/S
+tung
+tungstate/M
+tungsten/SM
+Tunguska/M
+Tungus/M
+tunic/MS
+tuning/A
+tuning's
+Tunisia/M
+Tunisian/S
+Tunis/M
+tunned
+tunneler/M
+tunnel/MRDSJGZ
+tunning
+tunny/SM
+tupelo/M
+Tupi/M
+tuple/SM
+tuppence/M
+Tupperware
+Tupungato/M
+turban/SDM
+turbid
+turbidity/SM
+turbinate/SD
+turbine/SM
+turbocharged
+turbocharger/SM
+turbofan/MS
+turbojet/MS
+turboprop/MS
+turbo/SM
+turbot/MS
+turbulence/SM
+turbulent/Y
+turd/MS
+tureen/MS
+turf/DGSM
+turfy/RT
+Turgenev/M
+turgidity/SM
+turgidness/M
+turgid/PY
+Turing/M
+Turin/M
+Turkestan/M
+Turkey/M
+turkey/SM
+Turkic/SM
+Turkish
+Turkmenistan/M
+turk/S
+Turk/SM
+turmeric/MS
+turmoil/SDMG
+turnabout/SM
+turnaround/MS
+turn/AZGRDBS
+turnbuckle/SM
+turncoat/SM
+turned/U
+turner/M
+Turner/M
+turning/MS
+turnip/SMDG
+turnkey/MS
+turnoff/MS
+turnout/MS
+turnover/SM
+turnpike/MS
+turnround/MS
+turnstile/SM
+turnstone/M
+turntable/SM
+turpentine/GMSD
+Turpin/M
+turpitude/SM
+turquoise/SM
+turret/SMD
+turtleback/MS
+turtledove/MS
+turtleneck/SDM
+turtle/SDMG
+turves's
+turvy
+Tuscaloosa/M
+Tuscan
+Tuscany/M
+Tuscarora/M
+Tuscon/M
+tush/SDG
+Tuskegee/M
+tusker/M
+tusk/GZRDMS
+tussle/GSD
+tussock/MS
+tussocky
+Tussuad/M
+Tutankhamen/M
+tutelage/MS
+tutelary/S
+Tut/M
+tutored/U
+tutorial/MS
+tutor/MDGS
+tutorship/S
+tut/S
+Tutsi
+tutted
+tutting
+tutti/S
+Tuttle/M
+tutu/SM
+Tuvalu
+tuxedo/SDM
+tux/S
+TVA
+TV/M
+TVs
+twaddle/GZMRSD
+twaddler/M
+Twain/M
+twain/S
+TWA/M
+twang/MDSG
+twangy/TR
+twas
+tweak/SGRD
+tweediness/M
+Tweedledee/M
+Tweedledum/M
+Tweed/M
+twee/DP
+tweed/SM
+tweedy/PTR
+tween
+tweeter/M
+tweet/ZSGRD
+tweezer/M
+tweeze/ZGRD
+twelfth
+twelfths
+twelvemonth/M
+twelvemonths
+twelve/MS
+twentieths
+twenty/MSH
+twerp/MS
+twice/R
+twiddle/GRSD
+twiddler/M
+twiddly/RT
+twigged
+twigging
+twiggy/RT
+twig/SM
+Twila/M
+twilight/MS
+twilit
+twill/SGD
+twiner/M
+twine/SM
+twinge/SDMG
+Twinkie
+twinkler/M
+twinkle/RSDG
+twinkling/M
+twinkly
+twinned
+twinning
+twin/RDMGZS
+twirler/M
+twirling/Y
+twirl/SZGRD
+twirly/TR
+twisted/U
+twister/M
+twists/U
+twist/SZGRD
+twisty
+twitch/GRSD
+twitchy/TR
+twit/S
+twitted
+twitterer/M
+twitter/SGRD
+twittery
+twitting
+twixt
+twofer/MS
+twofold/S
+two/MS
+twopence/SM
+twopenny/S
+twosome/MS
+twp
+Twp
+TWX
+Twyla/M
+TX
+t/XTJBG
+Tybalt/M
+Tybie/M
+Tybi/M
+tycoon/MS
+tyeing
+Tye/M
+tying/UA
+tyke/SM
+Tylenol/M
+Tyler/M
+Ty/M
+Tymon/M
+Tymothy/M
+tympani
+tympanist/SM
+tympanum/SM
+Tynan/M
+Tyndale/M
+Tyndall/M
+Tyne/M
+typeahead
+typecast/SG
+typed/AU
+typedef/S
+typeface/MS
+typeless
+type/MGDRSJ
+types/A
+typescript/SM
+typeset/S
+typesetter/MS
+typesetting/SM
+typewriter/M
+typewrite/SRJZG
+typewriting/M
+typewritten
+typewrote
+typhoid/SM
+Typhon/M
+typhoon/SM
+typhus/SM
+typicality/MS
+typically
+typicalness/M
+typical/U
+typification/M
+typify/SDNXG
+typing/A
+typist/MS
+typographer/SM
+typographic
+typographical/Y
+typography/MS
+typological/Y
+typology/MS
+typo/MS
+tyrannic
+tyrannicalness/M
+tyrannical/PY
+tyrannicide/M
+tyrannizer/M
+tyrannize/ZGJRSD
+tyrannizing/YM
+tyrannosaur/MS
+tyrannosaurus/S
+tyrannous
+tyranny/MS
+tyrant/MS
+Tyree/M
+tyreo
+Tyrolean/S
+Tyrol's
+Tyrone/M
+tyrosine/M
+tyro/SM
+Tyrus/M
+Tyson/M
+tzarina's
+tzar's
+Tzeltal/M
+u
+U
+UAR
+UART
+UAW
+Ubangi/M
+ubiquitous/YP
+ubiquity/S
+Ucayali/M
+Uccello/M
+UCLA/M
+Udale/M
+Udall/M
+udder/SM
+Udell/M
+Ufa/M
+ufologist/S
+ufology/MS
+UFO/S
+Uganda/M
+Ugandan/S
+ugh
+ughs
+uglification
+ugliness/MS
+uglis
+ugly/PTGSRD
+Ugo/M
+uh
+UHF
+Uighur
+Ujungpandang/M
+UK
+ukase/SM
+Ukraine/M
+Ukrainian/S
+ukulele/SM
+UL
+Ula/M
+Ulberto/M
+ulcerate/NGVXDS
+ulceration/M
+ulcer/MDGS
+ulcerous
+Ulick/M
+Ulises/M
+Ulla/M
+Ullman/M
+ulnae
+ulna/M
+ulnar
+Ulrica/M
+Ulrich/M
+Ulrick/M
+Ulric/M
+Ulrika/M
+Ulrikaumeko/M
+Ulrike/M
+Ulster/M
+ulster/MS
+ult
+ulterior/Y
+ultimas
+ultimate/DSYPG
+ultimateness/M
+ultimatum/MS
+ultimo
+ultracentrifugally
+ultracentrifugation
+ultracentrifuge/M
+ultraconservative/S
+ultrafast
+ultrahigh
+ultralight/S
+ultramarine/SM
+ultramodern
+ultramontane
+ultra/S
+ultrashort
+ultrasonically
+ultrasonic/S
+ultrasonics/M
+ultrasound/SM
+ultrastructure/M
+Ultrasuede
+ultraviolet/SM
+Ultrix/M
+ULTRIX/M
+ululate/DSXGN
+ululation/M
+Ulyanovsk/M
+Ulysses/M
+um
+umbel/MS
+umber/GMDS
+Umberto/M
+umbilical/S
+umbilici
+umbilicus/M
+umbrage/MGSD
+umbrageous
+umbra/MS
+umbrella/GDMS
+Umbriel/M
+Umeko/M
+umiak/MS
+umlaut/GMDS
+umpire/MGSD
+ump/MDSG
+umpteen/H
+UN
+unabated/Y
+unabridged/S
+unacceptability
+unacceptable
+unaccepted
+unaccommodating
+unaccountability
+unaccustomed/Y
+unadapted
+unadulterated/Y
+unadventurous
+unalienability
+unalterableness/M
+unalterable/P
+unalterably
+Una/M
+unambiguity
+unambiguous
+unambitious
+unamused
+unanimity/SM
+unanimous/Y
+unanticipated/Y
+unapologetic
+unapologizing/M
+unappeasable
+unappeasably
+unappreciative
+unary
+unassailableness/M
+unassailable/P
+unassertive
+unassumingness/M
+unassuming/PY
+unauthorized/PY
+unavailing/PY
+unaware/SPY
+unbalanced/P
+unbar
+unbarring
+unbecoming/P
+unbeknown
+unbelieving/Y
+unbiased/P
+unbid
+unbind/G
+unblessed
+unblinking/Y
+unbodied
+unbolt/G
+unbreakability
+unbred
+unbroken
+unbuckle
+unbudging/Y
+unburnt
+uncap
+uncapping
+uncatalogued
+uncauterized/MS
+unceasing/Y
+uncelebrated
+uncertain/P
+unchallengeable
+unchangingness/M
+unchanging/PY
+uncharacteristic
+uncharismatic
+unchastity
+unchristian
+uncial/S
+uncivilized/Y
+unclassified
+uncle/MSD
+unclouded/Y
+uncodable
+uncollected
+uncoloredness/M
+uncolored/PY
+uncombable
+uncommunicative
+uncompetitive
+uncomplicated
+uncomprehending/Y
+uncompromisable
+unconcerned/P
+unconcern/M
+unconfirmed
+unconfused
+unconscionableness/M
+unconscionable/P
+unconscionably
+unconstitutional
+unconsumed
+uncontentious
+uncontrollability
+unconvertible
+uncool
+uncooperative
+uncork/G
+uncouple/G
+uncouthness/M
+uncouth/YP
+uncreate/V
+uncritical
+uncross/GB
+uncrowded
+unction/IM
+unctions
+unctuousness/MS
+unctuous/PY
+uncustomary
+uncut
+undated/I
+undaunted/Y
+undeceive
+undecided/S
+undedicated
+undefinability
+undefinedness/M
+undefined/P
+undelete
+undeliverability
+undeniableness/M
+undeniable/P
+undeniably
+undependable
+underachiever/M
+underachieve/SRDGZ
+underact/GDS
+underadjusting
+underage/S
+underarm/DGS
+underbedding
+underbelly/MS
+underbidding
+underbid/S
+underbracing
+underbrush/MSDG
+undercarriage/MS
+undercharge/GSD
+underclassman
+underclassmen
+underclass/S
+underclothes
+underclothing/MS
+undercoating/M
+undercoat/JMDGS
+underconsumption/M
+undercooked
+undercount/S
+undercover
+undercurrent/SM
+undercut/S
+undercutting
+underdeveloped
+underdevelopment/MS
+underdog/MS
+underdone
+undereducated
+underemphasis
+underemployed
+underemployment/SM
+underenumerated
+underenumeration
+underestimate/NGXSD
+underexploited
+underexpose/SDG
+underexposure/SM
+underfed
+underfeed/SG
+underfloor
+underflow/GDMS
+underfoot
+underfund/DG
+underfur/MS
+undergarment/SM
+undergirding
+undergoes
+undergo/G
+undergone
+undergrad/MS
+undergraduate/MS
+underground/RMS
+undergrowth/M
+undergrowths
+underhand/D
+underhandedness/MS
+underhanded/YP
+underheat
+underinvestment
+underlaid
+underlain/S
+underlay/GS
+underlie
+underline/GSDJ
+underling/MS
+underlip/SM
+underloaded
+underly/GS
+undermanned
+undermentioned
+undermine/SDG
+undermost
+underneath
+underneaths
+undernourished
+undernourishment/SM
+underpaid
+underpants
+underpart/MS
+underpass/SM
+underpay/GSL
+underpayment/SM
+underperformed
+underpinned
+underpinning/MS
+underpin/S
+underplay/SGD
+underpopulated
+underpopulation/M
+underpowered
+underpricing
+underprivileged
+underproduction/MS
+underrate/GSD
+underregistration/M
+underreported
+underreporting
+underrepresentation/M
+underrepresented
+underscore/SDG
+undersealed
+undersea/S
+undersecretary/SM
+undersell/SG
+undersexed
+undershirt/SM
+undershoot/SG
+undershorts
+undershot
+underside/SM
+undersigned/M
+undersign/SGD
+undersized
+undersizes
+undersizing
+underskirt/MS
+undersold
+underspecification
+underspecified
+underspend/G
+understaffed
+understandability/M
+understandably
+understanding/YM
+understand/RGSJB
+understate/GSDL
+understatement/MS
+understocked
+understood
+understrength
+understructure/SM
+understudy/GMSD
+undertaken
+undertaker/M
+undertake/SRGZJ
+undertaking/M
+underthings
+undertone/SM
+undertook
+undertow/MS
+underused
+underusing
+underutilization/M
+underutilized
+undervaluation/S
+undervalue/SDG
+underwater/S
+underway
+underwear/M
+underweight/S
+underwent
+underwhelm/DGS
+underwood/M
+Underwood/M
+underworld/MS
+underwrite/GZSR
+underwriter/M
+underwritten
+underwrote
+under/Y
+undeserving
+undesigned
+undeviating/Y
+undialyzed/SM
+undiplomatic
+undiscerning
+undiscriminating
+undo/GJ
+undoubted/Y
+undramatic
+undramatized/SM
+undress/G
+undrinkability
+undrinkable
+undroppable
+undue
+undulant
+undulate/XDSNG
+undulation/M
+unearthliness/S
+unearthly/P
+unearth/YG
+unease
+uneconomic
+uneducated
+unemployed/S
+unencroachable
+unending/Y
+unendurable/P
+unenergized/MS
+unenforced
+unenterprising
+UNESCO
+unethical
+uneulogized/SM
+unexacting
+unexceptionably
+unexcited
+unexpectedness/MS
+unfading/Y
+unfailingness/M
+unfailing/P
+unfamiliar
+unfashionable
+unfathomably
+unfavored
+unfeeling
+unfeigned/Y
+unfelt
+unfeminine
+unfertile
+unfetchable
+unflagging
+unflappability/S
+unflappable
+unflappably
+unflinching/Y
+unfold/LG
+unfoldment/M
+unforced
+unforgeable
+unfossilized/MS
+unfraternizing/SM
+unfrozen
+unfulfillable
+unfunny
+unfussy
+ungainliness/MS
+ungainly/PRT
+Ungava/M
+ungenerous
+ungentle
+unglamorous
+ungrammaticality
+ungrudging
+unguent/MS
+ungulate/MS
+unharmonious
+unharness/G
+unhistorical
+unholy/TP
+unhook/DG
+unhydrolyzed/SM
+unhygienic
+Unibus/M
+unicameral
+UNICEF
+unicellular
+Unicode/M
+unicorn/SM
+unicycle/MGSD
+unicyclist/MS
+unideal
+unidimensional
+unidiomatic
+unidirectionality
+unidirectional/Y
+unidolized/MS
+unifiable
+unification/MA
+unifier/MS
+unifilar
+uniformity/MS
+uniformness/M
+uniform/TGSRDYMP
+unify/AXDSNG
+unilateralism/M
+unilateralist
+unilateral/Y
+unimodal
+unimpeachably
+unimportance
+unimportant
+unimpressive
+unindustrialized/MS
+uninhibited/YP
+uninominal
+uninsured
+unintellectual
+unintended
+uninteresting
+uninterruptedness/M
+uninterrupted/YP
+unintuitive
+uninviting
+union/AEMS
+unionism/SM
+unionist/SM
+Unionist/SM
+unionize
+Union/MS
+UniPlus/M
+unipolar
+uniprocessor/SM
+uniqueness/S
+unique/TYSRP
+Uniroyal/M
+unisex/S
+UniSoft/M
+unison/MS
+Unisys/M
+unitarianism/M
+Unitarianism/SM
+unitarian/MS
+Unitarian/MS
+unitary
+unite/AEDSG
+united/Y
+uniter/M
+unitize/GDS
+unit/VGRD
+unity/SEM
+univ
+Univac/M
+univalent/S
+univalve/MS
+univariate
+universalism/M
+universalistic
+universality/SM
+universalize/DSRZG
+universalizer/M
+universal/YSP
+universe/MS
+university/MS
+Unix/M
+UNIX/M
+unjam
+unkempt
+unkind/TP
+unkink
+unknightly
+unknowable/S
+unknowing
+unlabored
+unlace/G
+unlearn/G
+unlikeable
+unlikeliness/S
+unlimber/G
+unlimited
+unlit
+unliterary
+unloose/G
+unlucky/TP
+unmagnetized/MS
+unmanageably
+unmannered/Y
+unmask/G
+unmeaning
+unmeasured
+unmeetable
+unmelodious
+unmemorable
+unmemorialized/MS
+unmentionable/S
+unmerciful
+unmeritorious
+unmethodical
+unmineralized/MS
+unmissable
+unmistakably
+unmitigated/YP
+unmnemonic
+unmobilized/SM
+unmoral
+unmount/B
+unmovable
+unmoving
+unnaturalness/M
+unnavigable
+unnerving/Y
+unobliging
+unoffensive
+unofficial
+unorganized/YP
+unorthodox
+unpack/G
+unpaintable
+unpalatability
+unpalatable
+unpartizan
+unpatronizing
+unpeople
+unperceptive
+unperson
+unperturbed/Y
+unphysical
+unpick/G
+unpicturesque
+unpinning
+unpleasing
+unploughed
+unpolarized/SM
+unpopular
+unpractical
+unprecedented/Y
+unpredictable/S
+unpreemphasized
+unpremeditated
+unpretentiousness/M
+unprincipled/P
+unproblematic
+unproductive
+unpropitious
+unprovable
+unproven
+unprovocative
+unpunctual
+unquestionable
+unraisable
+unravellings
+unreadability
+unread/B
+unreal
+unrealizable
+unreasoning/Y
+unreceptive
+unrecordable
+unreflective
+unrelenting/Y
+unremitting/Y
+unrepeatability
+unrepeated
+unrepentant
+unreported
+unrepresentative
+unreproducible
+unrest/G
+unrestrained/P
+unrewarding
+unriddle
+unripe/P
+unromantic
+unruliness/SM
+unruly/PTR
+unsaleable
+unsanitary
+unsavored/YP
+unsavoriness/M
+unseal/GB
+unsearchable
+unseasonal
+unseeing/Y
+unseen/S
+unselfconsciousness/M
+unselfconscious/P
+unselfishness/M
+unsellable
+unsentimental
+unset
+unsettledness/M
+unsettled/P
+unsettling/Y
+unshapely
+unshaven
+unshorn
+unsighted
+unsightliness/S
+unskilful
+unsociability
+unsociable/P
+unsocial
+unsound/PT
+unspeakably
+unspecific
+unspectacular
+unspoilt
+unspoke
+unsporting
+unstable/P
+unstigmatized/SM
+unstilted
+unstinting/Y
+unstopping
+unstrapping
+unstudied
+unstuffy
+unsubdued
+unsubstantial
+unsubtle
+unsuitable
+unsuspecting/Y
+unswerving/Y
+unsymmetrical
+unsympathetic
+unsystematic
+unsystematized/Y
+untactful
+untalented
+untaxing
+unteach/B
+untellable
+untenable
+unthinking
+until/G
+untiring/Y
+unto
+untouchable/MS
+untowardness/M
+untoward/P
+untraceable
+untrue
+untruthfulness/M
+untwist/G
+Unukalhai/M
+unusualness/M
+unutterable
+unutterably
+unvocalized/MS
+unvulcanized/SM
+unwaivering
+unwarrantable
+unwarrantably
+unwashed/PS
+unwearable
+unwearied/Y
+unwed
+unwedge
+unwelcome
+unwell/M
+unwieldiness/MS
+unwieldy/TPR
+unwind/B
+unwomanly
+unworkable/S
+unworried
+unwrap
+unwrapping
+unyielding/Y
+unyoke
+unzip
+up
+Upanishads
+uparrow
+upbeat/SM
+upbraid/GDRS
+upbringing/M
+upbring/JG
+UPC
+upchuck/SDG
+upcome/G
+upcountry/S
+updatability
+updater/M
+update/RSDG
+Updike/M
+updraft/SM
+upend/SDG
+upfield
+upfront
+upgradeable
+upgrade/DSJG
+upheaval/MS
+upheld
+uphill/S
+upholder/M
+uphold/RSGZ
+upholster/ADGS
+upholsterer/SM
+upholstery/MS
+UPI
+upkeep/SM
+uplander/M
+upland/MRS
+uplifter/M
+uplift/SJDRG
+upload/GSD
+upmarket
+upon
+upped
+uppercase/GSD
+upperclassman/M
+upperclassmen
+uppercut/S
+uppercutting
+uppermost
+upper/S
+upping
+uppish
+uppity
+upraise/GDS
+uprated
+uprating
+uprear/DSG
+upright/DYGSP
+uprightness/S
+uprise/RGJ
+uprising/M
+upriver/S
+uproariousness/M
+uproarious/PY
+uproar/MS
+uproot/DRGS
+uprooter/M
+ups
+UPS
+upscale/GDS
+upset/S
+upsetting/MS
+upshot/SM
+upside/MS
+upsilon/MS
+upslope
+upstage/DSRG
+upstairs
+upstandingness/M
+upstanding/P
+upstart/MDGS
+upstate/SR
+upstream/DSG
+upstroke/MS
+upsurge/DSG
+upswing/GMS
+upswung
+uptake/SM
+upthrust/GMS
+uptight
+uptime
+Upton/M
+uptown/RS
+uptrend/M
+upturn/GDS
+upwardness/M
+upward/SYP
+upwelling
+upwind/S
+uracil/MS
+Ural/MS
+Urania/M
+uranium/MS
+Uranus/M
+uranyl/M
+Urbain/M
+Urbana/M
+urbane/Y
+urbanism/M
+urbanite/SM
+urbanity/SM
+urbanization/MS
+urbanize/DSG
+Urban/M
+urbanologist/S
+urbanology/S
+Urbano/M
+urban/RT
+Urbanus/M
+urchin/SM
+Urdu/M
+urea/SM
+uremia/MS
+uremic
+ureter/MS
+urethane/MS
+urethrae
+urethral
+urethra/M
+urethritis/M
+Urey/M
+urge/GDRSJ
+urgency/SM
+urgent/Y
+urger/M
+Uriah/M
+uric
+Uriel/M
+urinal/MS
+urinalyses
+urinalysis/M
+urinary/MS
+urinate/XDSNG
+urination/M
+urine/MS
+Uri/SM
+URL
+Ur/M
+urning/M
+urn/MDGS
+urogenital
+urological
+urologist/S
+urology/MS
+Urquhart/M
+Ursala/M
+Ursa/M
+ursine
+Ursola/M
+Urson/M
+Ursula/M
+Ursulina/M
+Ursuline/M
+urticaria/MS
+Uruguayan/S
+Uruguay/M
+Urumqi
+US
+USA
+usability/S
+usable/U
+usably/U
+USAF
+usage/SM
+USART
+USCG
+USC/M
+USDA
+us/DRSBZG
+used/U
+use/ESDAG
+usefulness/SM
+useful/YP
+uselessness/MS
+useless/PY
+Usenet/M
+Usenix/M
+user/M
+USG/M
+usherette/SM
+usher/SGMD
+USIA
+USMC
+USN
+USO
+USP
+USPS
+USS
+USSR
+Ustinov/M
+usu
+usuals
+usual/UPY
+usurer/SM
+usuriousness/M
+usurious/PY
+usurpation/MS
+usurper/M
+usurp/RDZSG
+usury/SM
+UT
+Utahan/SM
+Utah/M
+Uta/M
+Ute/M
+utensil/SM
+uteri
+uterine
+uterus/M
+Utica/M
+utile/I
+utilitarianism/MS
+utilitarian/S
+utility/MS
+utilization/MS
+utilization's/A
+utilize/GZDRS
+utilizer/M
+utilizes/A
+utmost/S
+Utopia/MS
+utopianism/M
+utopian's
+Utopian/S
+utopia/S
+Utrecht/M
+Utrillo/M
+utterance/MS
+uttered/U
+utterer/M
+uttermost/S
+utter/TRDYGS
+uucp/M
+UV
+uvula/MS
+uvular/S
+uxorious
+Uzbekistan
+Uzbek/M
+Uzi/M
+V
+VA
+vacancy/MS
+vacantness/M
+vacant/PY
+vacate/NGXSD
+vacationist/SM
+vacationland
+vacation/MRDZG
+vaccinate/NGSDX
+vaccination/M
+vaccine/SM
+vaccinial
+vaccinia/M
+Vachel/M
+vacillate/XNGSD
+vacillating/Y
+vacillation/M
+vacillator/SM
+Vaclav/M
+vacua's
+vacuity/MS
+vacuo
+vacuolated/U
+vacuolate/SDGN
+vacuole/SM
+vacuolization/SM
+vacuousness/MS
+vacuous/PY
+vacuum/GSMD
+Vader/M
+Vaduz/M
+vagabondage/MS
+vagabond/DMSG
+vagarious
+vagary/MS
+vaginae
+vaginal/Y
+vagina/M
+vagrancy/MS
+vagrant/SMY
+vagueing
+vagueness/MS
+vague/TYSRDP
+Vail/M
+vaingloriousness/M
+vainglorious/YP
+vainglory/MS
+vain/TYRP
+val
+valance/SDMG
+Valaree/M
+Valaria/M
+Valarie/M
+Valdemar/M
+Valdez/M
+Valeda/M
+valediction/MS
+valedictorian/MS
+valedictory/MS
+Vale/M
+valence/SM
+Valencia/MS
+valency/MS
+Valene/M
+Valenka/M
+Valentia/M
+Valentijn/M
+Valentina/M
+Valentine/M
+valentine/SM
+Valentin/M
+Valentino/M
+Valenzuela/M
+Valera/M
+Valeria/M
+Valerian/M
+Valerie/M
+Valerye/M
+Valéry/M
+vale/SM
+valet/GDMS
+valetudinarianism/MS
+valetudinarian/MS
+Valhalla/M
+valiance/S
+valiantness/M
+valiant/SPY
+Valida/M
+validated/AU
+validate/INGSDX
+validates/A
+validation/AMI
+validity/IMS
+validnesses
+validness/MI
+valid/PIY
+Valina/M
+valise/MS
+Valium/S
+Valkyrie/SM
+Vallejo
+Valle/M
+Valletta/M
+valley/SM
+Vallie/M
+Valli/M
+Vally/M
+Valma/M
+Val/MY
+Valois/M
+valor/MS
+valorous/Y
+Valparaiso/M
+Valry/M
+valuable/IP
+valuableness/IM
+valuables
+valuably/I
+valuate/NGXSD
+valuation/CSAM
+valuator/SM
+value/CGASD
+valued/U
+valuelessness/M
+valueless/P
+valuer/SM
+value's
+values/E
+valve/GMSD
+valveless
+valvular
+Va/M
+vamoose/GSD
+vamp/ADSG
+vamper
+vampire/MGSD
+vamp's
+vanadium/MS
+Vance/M
+Vancouver/M
+vandalism/MS
+vandalize/GSD
+vandal/MS
+Vandal/MS
+Vanda/M
+Vandenberg/M
+Vanderbilt/M
+Vanderburgh/M
+Vanderpoel/M
+Vandyke/SM
+vane/MS
+Vanessa/M
+Vang/M
+vanguard/MS
+Vania/M
+vanilla/MS
+vanisher/M
+vanish/GRSDJ
+vanishing/Y
+vanity/SM
+Van/M
+Vanna/M
+vanned
+Vannie/M
+Vanni/M
+vanning
+Vanny/M
+vanquisher/M
+vanquish/RSDGZ
+van/SMD
+vantage/MS
+Vanuatu
+Vanya/M
+Vanzetti/M
+vapidity/MS
+vapidness/SM
+vapid/PY
+vaporer/M
+vaporing/MY
+vaporisation
+vaporise/DSG
+vaporization/AMS
+vaporize/DRSZG
+vaporizer/M
+vapor/MRDJGZS
+vaporous
+vapory
+vaquero/SM
+VAR
+Varanasi/M
+Varese/M
+Vargas/M
+variability/IMS
+variableness/IM
+variable/PMS
+variables/I
+variably/I
+variance/I
+variances
+variance's
+Varian/M
+variant/ISY
+variate/MGNSDX
+variational
+variation/M
+varicolored/MS
+varicose/S
+variedly
+varied/U
+variegate/NGXSD
+variegation/M
+varier/M
+varietal/S
+variety/MS
+various/PY
+varistor/M
+Varityping/M
+varlet/MS
+varmint/SM
+varnished/U
+varnisher/M
+varnish/ZGMDRS
+var/S
+varsity/MS
+varying/UY
+vary/SRDJG
+vascular
+vasectomy/SM
+Vaseline/DSMG
+vase/SM
+Vasili/MS
+Vasily/M
+vasomotor
+Vasquez/M
+vassalage/MS
+vassal/GSMD
+Vassar/M
+Vassili/M
+Vassily/M
+vastness/MS
+vast/PTSYR
+v/ASV
+VAT
+Vatican/M
+vat/SM
+vatted
+vatting
+vaudeville/SM
+vaudevillian/SM
+Vaudois
+Vaughan/M
+Vaughn/M
+vaulter/M
+vaulting/M
+vault/ZSRDMGJ
+vaunter/M
+vaunt/GRDS
+VAXes
+Vax/M
+VAX/M
+Vazquez/M
+vb
+VCR
+VD
+VDT
+VDU
+vealed/A
+vealer/MA
+veal/MRDGS
+veals/A
+Veblen/M
+vectorial
+vectorization
+vectorized
+vectorizing
+vector's/F
+vector/SGDM
+Veda/MS
+Vedanta/M
+veejay/S
+veep/S
+veer/DSG
+veering/Y
+vegan/SM
+Vega/SM
+Vegemite/M
+veges
+vegetable/MS
+vegetarianism/MS
+vegetarian/SM
+vegetate/DSNGVX
+vegetation/M
+vegetative/PY
+vegged
+veggie/S
+vegging
+veg/M
+vehemence/MS
+vehemency/S
+vehement/Y
+vehicle/SM
+vehicular
+veiling/MU
+veil's
+veil/UGSD
+vein/GSRDM
+veining/M
+vela/M
+Vela/M
+velarize/SDG
+velar/S
+Velásquez/M
+Velázquez
+Velcro/SM
+veld/SM
+veldt's
+Velez/M
+Vella/M
+vellum/MS
+Velma/M
+velocipede/SM
+velocity/SM
+velor/S
+velour's
+velum/M
+Velveeta/M
+velveteen/MS
+velvet/GSMD
+Velvet/M
+velvety/RT
+venality/MS
+venal/Y
+venation/SM
+vend/DSG
+vender's/K
+vendetta/MS
+vendible/S
+vendor/MS
+veneerer/M
+veneer/GSRDM
+veneering/M
+venerability/S
+venerable/P
+venerate/XNGSD
+veneration/M
+venereal
+venetian
+Venetian/SM
+Venezuela/M
+Venezuelan/S
+vengeance/MS
+vengeful/APY
+vengefulness/AM
+venialness/M
+venial/YP
+Venice/M
+venireman/M
+veniremen
+venison/SM
+Venita/M
+Venn/M
+venomousness/M
+venomous/YP
+venom/SGDM
+venous/Y
+venter/M
+ventilated/U
+ventilate/XSDVGN
+ventilation/M
+ventilator/MS
+vent/ISGFD
+ventral/YS
+ventricle/MS
+ventricular
+ventriloquies
+ventriloquism/MS
+ventriloquist/MS
+ventriloquy
+vent's/F
+Ventura/M
+venture/RSDJZG
+venturesomeness/SM
+venturesome/YP
+venturi/S
+venturousness/MS
+venturous/YP
+venue/MAS
+Venusian/S
+Venus/S
+veraciousness/M
+veracious/YP
+veracities
+veracity/IM
+Veracruz/M
+Veradis
+Vera/M
+verandahed
+veranda/SDM
+verbalization/MS
+verbalized/U
+verbalizer/M
+verbalize/ZGRSD
+verballed
+verballing
+verbal/SY
+verbatim
+verbena/MS
+verbiage/SM
+verb/KSM
+verbose/YP
+verbosity/SM
+verboten
+verdant/Y
+Verde/M
+Verderer/M
+verdict/SM
+verdigris/GSDM
+Verdi/M
+verdure/SDM
+Vere/M
+Verena/M
+Verene/M
+verge/FGSD
+Verge/M
+verger/SM
+verge's
+Vergil's
+veridical/Y
+Veriee/M
+verifiability/M
+verifiableness/M
+verifiable/U
+verification/S
+verified/U
+verifier/MS
+verify/GASD
+Verile/M
+verily
+Verina/M
+Verine/M
+verisimilitude/SM
+veritableness/M
+veritable/P
+veritably
+verity/MS
+Verlag/M
+Verlaine/M
+Verla/M
+Vermeer/M
+vermicelli/MS
+vermiculite/MS
+vermiform
+vermilion/MS
+vermin/M
+verminous
+Vermonter/M
+Vermont/ZRM
+vermouth/M
+vermouths
+vernacular/YS
+vernal/Y
+Verna/M
+Verne/M
+Vernen/M
+Verney/M
+Vernice/M
+vernier/SM
+Vern/NM
+Vernon/M
+Vernor/M
+Verona/M
+Veronese/M
+Veronica/M
+veronica/SM
+Veronika/M
+Veronike/M
+Veronique/M
+verrucae
+verruca/MS
+versa
+Versailles/M
+Versatec/M
+versatileness/M
+versatile/YP
+versatility/SM
+versed/UI
+verse's
+verses/I
+verse/XSRDAGNF
+versicle/M
+versification/M
+versifier/M
+versify/GDRSZXN
+versing/I
+version/MFISA
+verso/SM
+versus
+vertebrae
+vertebral/Y
+vertebra/M
+vertebrate/IMS
+vertebration/M
+vertex/SM
+vertical/YPS
+vertices's
+vertiginous
+vertigoes
+vertigo/M
+verve/SM
+very/RT
+Vesalius/M
+vesicle/SM
+vesicular/Y
+vesiculate/GSD
+Vespasian/M
+vesper/SM
+Vespucci/M
+vessel/MS
+vestal/YS
+Vesta/M
+vest/DIGSL
+vestibular
+vestibule/SDM
+vestige/SM
+vestigial/Y
+vesting/SM
+vestment/ISM
+vestryman/M
+vestrymen
+vestry/MS
+vest's
+vesture/SDMG
+Vesuvius/M
+vetch/SM
+veteran/SM
+veterinarian/MS
+veterinary/S
+veter/M
+veto/DMG
+vetoes
+vet/SMR
+vetted
+vetting/A
+Vevay/M
+vexation/SM
+vexatiousness/M
+vexatious/PY
+vexed/Y
+vex/GFSD
+VF
+VFW
+VG
+VGA
+vhf
+VHF
+VHS
+VI
+via
+viability/SM
+viable/I
+viably
+viaduct/MS
+Viagra/M
+vial/MDGS
+viand/SM
+vibe/S
+vibraharp/MS
+vibrancy/MS
+vibrant/YS
+vibraphone/MS
+vibraphonist/SM
+vibrate/XNGSD
+vibrational/Y
+vibration/M
+vibrato/MS
+vibrator/SM
+vibratory
+vibrio/M
+vibrionic
+viburnum/SM
+vicarage/SM
+vicariousness/MS
+vicarious/YP
+vicar/SM
+vice/CMS
+viced
+vicegerent/MS
+vicennial
+Vicente/M
+viceregal
+viceroy/SM
+Vichy/M
+vichyssoise/MS
+vicing
+vicinity/MS
+viciousness/S
+vicious/YP
+vicissitude/MS
+Vickers/M
+Vickie/M
+Vicki/M
+Vicksburg/M
+Vicky/M
+Vick/ZM
+Vic/M
+victimization/SM
+victimized/U
+victimizer/M
+victimize/SRDZG
+victim/SM
+Victoir/M
+Victoria/M
+Victorianism/S
+Victorian/S
+victoriousness/M
+victorious/YP
+Victor/M
+victor/SM
+victory/MS
+Victrola/SM
+victualer/M
+victual/ZGSDR
+vicuña/S
+Vidal/M
+Vida/M
+videlicet
+videocassette/S
+videoconferencing
+videodisc/S
+videodisk/SM
+video/GSMD
+videophone/SM
+videotape/SDGM
+Vidovic/M
+Vidovik/M
+Vienna/M
+Viennese/M
+Vientiane/M
+vier/M
+vie/S
+Vietcong/M
+Viet/M
+Vietminh/M
+Vietnamese/M
+Vietnam/M
+viewed/A
+viewer/AS
+viewer's
+viewfinder/MS
+viewgraph/SM
+viewing/M
+viewless/Y
+view/MBGZJSRD
+viewpoint/SM
+views/A
+vigesimal
+vigilance/MS
+vigilante/SM
+vigilantism/MS
+vigilantist
+vigilant/Y
+vigil/SM
+vignette/MGDRS
+vignetter/M
+vignetting/M
+vignettist/MS
+vigor/MS
+vigorousness/M
+vigorous/YP
+vii
+viii
+Vijayawada/M
+Viki/M
+Viking/MS
+viking/S
+Vikki/M
+Vikky/M
+Vikram/M
+Vila
+vile/AR
+vilely
+vileness/MS
+vilest
+Vilhelmina/M
+vilification/M
+vilifier/M
+vilify/GNXRSD
+villager/M
+village/RSMZ
+villainousness/M
+villainous/YP
+villain/SM
+villainy/MS
+Villa/M
+villa/MS
+Villarreal/M
+ville
+villeinage/SM
+villein/MS
+villi
+Villon/M
+villus/M
+Vilma/M
+Vilnius/M
+Vilyui/M
+Vi/M
+vi/MDR
+vim/MS
+vinaigrette/MS
+Vina/M
+Vince/M
+Vincent/MS
+Vincenty/M
+Vincenz/M
+vincible/I
+Vinci/M
+Vindemiatrix/M
+vindicate/XSDVGN
+vindication/M
+vindicator/SM
+vindictiveness/MS
+vindictive/PY
+vinegar/DMSG
+vinegary
+vine/MGDS
+vineyard/SM
+Vinita/M
+Vin/M
+Vinnie/M
+Vinni/M
+Vinny/M
+vino/MS
+vinous
+Vinson/M
+vintage/MRSDG
+vintager/M
+vintner/MS
+vinyl/SM
+violable/I
+Viola/M
+Violante/M
+viola/SM
+violate/VNGXSD
+violator/MS
+Viole/M
+violence/SM
+violent/Y
+Violet/M
+violet/SM
+Violetta/M
+Violette/M
+violinist/SM
+violin/MS
+violist/MS
+viol/MSB
+violoncellist/S
+violoncello/MS
+viper/MS
+viperous
+VIP/S
+viragoes
+virago/M
+viral/Y
+vireo/SM
+Virge/M
+Virgie/M
+Virgilio/M
+Virgil/M
+virginal/YS
+Virgina/M
+Virginia/M
+Virginian/S
+Virginie/M
+virginity/SM
+virgin/SM
+Virgo/MS
+virgule/MS
+virile
+virility/MS
+virologist/S
+virology/SM
+virtual/Y
+virtue/SM
+virtuosity/MS
+virtuosoes
+virtuoso/MS
+virtuousness/SM
+virtuous/PY
+virulence/SM
+virulent/Y
+virus/MS
+visage/MSD
+Visakhapatnam's
+Visa/M
+visa/SGMD
+Visayans
+viscera
+visceral/Y
+viscid/Y
+viscoelastic
+viscoelasticity
+viscometer/SM
+viscose/MS
+viscosity/MS
+viscountcy/MS
+viscountess/SM
+viscount/MS
+viscousness/M
+viscous/PY
+viscus/M
+vise/CAXNGSD
+viselike
+vise's
+Vishnu/M
+visibility/ISM
+visible/PI
+visibly/I
+Visigoth/M
+Visigoths
+visionariness/M
+visionary/PS
+vision/KMDGS
+vision's/A
+visitable/U
+visitant/SM
+visitation/SM
+visited/U
+visit/GASD
+visitor/MS
+vis/MDSGV
+visor/SMDG
+VISTA
+vista/GSDM
+Vistula/M
+visualization/AMS
+visualized/U
+visualizer/M
+visualizes/A
+visualize/SRDZG
+visual/SY
+vitae
+vitality/MS
+vitalization/AMS
+vitalize/ASDGC
+vital/SY
+vita/M
+Vita/M
+vitamin/SM
+Vite/M
+Vitia/M
+vitiate/XGNSD
+vitiation/M
+viticulture/SM
+viticulturist/S
+Vitim/M
+Vito/M
+Vitoria/M
+vitreous/YSP
+vitrifaction/S
+vitrification/M
+vitrify/XDSNG
+vitrine/SM
+vitriolic
+vitriol/MDSG
+vitro
+vittles
+Vittoria/M
+Vittorio/M
+vituperate/SDXVGN
+vituperation/M
+vituperative/Y
+Vitus/M
+vivace/S
+vivaciousness/MS
+vivacious/YP
+vivacity/SM
+viva/DGS
+Vivaldi
+Viva/M
+vivaria
+vivarium/MS
+vivaxes
+Vivekananda/M
+vive/Z
+Vivia/M
+Viviana/M
+Vivian/M
+Vivianna/M
+Vivianne/M
+vividness/SM
+vivid/PTYR
+Vivie/M
+Viviene/M
+Vivien/M
+Vivienne/M
+vivifier
+vivify/NGASD
+Vivi/MN
+viviparous
+vivisect/DGS
+vivisectional
+vivisectionist/SM
+vivisection/MS
+Viviyan/M
+Viv/M
+vivo
+Vivyan/M
+Vivyanne/M
+vixenish/Y
+vixen/SM
+viz
+vizier/MS
+vizor's
+VJ
+Vladamir/M
+Vladimir/M
+Vladivostok/M
+Vlad/M
+VLF
+VLSI
+VMS/M
+VOA
+vocable/SM
+vocab/S
+vocabularian
+vocabularianism
+vocabulary/MS
+vocalic/S
+vocalise's
+vocalism/M
+vocalist/MS
+vocalization/SM
+vocalized/U
+vocalizer/M
+vocalize/ZGDRS
+vocal/SY
+vocation/AKMISF
+vocational/Y
+vocative/KYS
+vociferate/NGXSD
+vociferation/M
+vociferousness/MS
+vociferous/YP
+vocoded
+vocoder
+vodka/MS
+voe/S
+Vogel/M
+vogue/GMSRD
+vogueing
+voguish
+voiceband
+voiced/CU
+voice/IMGDS
+voicelessness/SM
+voiceless/YP
+voicer/S
+voices/C
+voicing/C
+voidable
+void/C
+voided
+voider/M
+voiding
+voidness/M
+voids
+voilà
+voile/MS
+volar
+volatileness/M
+volatile/PS
+volatility/MS
+volatilization/MS
+volatilize/SDG
+volcanically
+volcanic/S
+volcanism/M
+volcanoes
+volcano/M
+vole/MS
+Volga/M
+Volgograd/M
+vol/GSD
+volitionality
+volitional/Y
+volition/MS
+Volkswagen/SM
+volleyball/MS
+volleyer/M
+volley/SMRDG
+Vol/M
+Volstead/M
+voltage/SM
+voltaic
+Voltaire/M
+Volta/M
+volt/AMS
+Volterra/M
+voltmeter/MS
+volubility/S
+voluble/P
+volubly
+volume/SDGM
+volumetric
+volumetrically
+voluminousness/MS
+voluminous/PY
+voluntarily/I
+voluntariness/MI
+voluntarism/MS
+voluntary/PS
+volunteer/DMSG
+voluptuary/SM
+voluptuousness/S
+voluptuous/YP
+volute/S
+Volvo/M
+vomit/GRDS
+Vonda/M
+Von/M
+Vonnegut/M
+Vonnie/M
+Vonni/M
+Vonny/M
+voodoo/GDMS
+voodooism/S
+voraciousness/MS
+voracious/YP
+voracity/MS
+Voronezh/M
+Vorster/M
+vortex/SM
+vortices's
+vorticity/M
+votary/MS
+vote/CSDG
+voter/SM
+vote's
+votive/YP
+voucher/GMD
+vouchsafe/SDG
+vouch/SRDGZ
+vowelled
+vowelling
+vowel/MS
+vower/M
+vow/SMDRG
+voyage/GMZJSRD
+voyager/M
+voyageur/SM
+voyeurism/MS
+voyeuristic
+voyeur/MS
+VP
+vs
+V's
+VT
+Vt/M
+VTOL
+vulcanization/SM
+vulcanized/U
+vulcanize/SDG
+Vulcan/M
+vulgarian/MS
+vulgarism/MS
+vulgarity/MS
+vulgarization/S
+vulgarize/GZSRD
+vulgar/TSYR
+Vulgate/SM
+Vulg/M
+vulnerability/SI
+vulnerable/IP
+vulnerably/I
+vulpine
+vulturelike
+vulture/SM
+vulturous
+vulvae
+vulva/M
+vying
+Vyky/M
+WA
+Waals
+Wabash/M
+WAC
+Wacke/M
+wackes
+wackiness/MS
+wacko/MS
+wacky/RTP
+Waco/M
+Wac/S
+wadded
+wadding/SM
+waddle/GRSD
+Wade/M
+wader/M
+wade/S
+wadi/SM
+wad/MDRZGS
+Wadsworth/M
+wafer/GSMD
+waffle/GMZRSD
+Wafs
+wafter/M
+waft/SGRD
+wag/DRZGS
+waged/U
+wager/GZMRD
+wage/SM
+wagged
+waggery/MS
+wagging
+waggishness/SM
+waggish/YP
+waggle/SDG
+waggly
+Wagnerian
+Wagner/M
+wagoner/M
+wagon/SGZMRD
+wagtail/SM
+Wahl/M
+waif/SGDM
+Waikiki/M
+wailer/M
+wail/SGZRD
+wain/GSDM
+Wain/M
+wainscot/SGJD
+Wainwright/M
+wainwright/SM
+waistband/MS
+waistcoat/GDMS
+waister/M
+waist/GSRDM
+waistline/MS
+Waite/M
+waiter/DMG
+Waiter/M
+wait/GSZJRD
+Wait/MR
+waitpeople
+waitperson/S
+waitress/GMSD
+waiver/MB
+waive/SRDGZ
+Wakefield/M
+wakefulness/MS
+wakeful/PY
+Wake/M
+wake/MGDRSJ
+waken/SMRDG
+waker/M
+wakeup
+Waksman/M
+Walbridge/M
+Walcott/M
+Waldemar/M
+Walden/M
+Waldensian
+Waldheim/M
+Wald/MN
+Waldo/M
+Waldon/M
+Waldorf/M
+wale/DRSMG
+Wales
+Walesa/M
+Walford/M
+Walgreen/M
+waling/M
+walkabout/M
+walkaway/SM
+walker/M
+Walker/M
+walk/GZSBJRD
+walkie
+Walkman/S
+walkout/SM
+walkover/SM
+walkway/MS
+wallaby/MS
+Wallace/M
+Wallache/M
+wallah/M
+Wallas/M
+wallboard/MS
+Wallenstein/M
+Waller/M
+wallet/SM
+walleye/MSD
+wallflower/MS
+Wallie/M
+Wallis
+Walliw/M
+Walloon/SM
+walloper/M
+walloping/M
+wallop/RDSJG
+wallower/M
+wallow/RDSG
+wallpaper/DMGS
+wall/SGMRD
+Wall/SMR
+Wally/M
+wally/S
+walnut/SM
+Walpole/M
+Walpurgisnacht
+walrus/SM
+Walsh/M
+Walter/M
+Walther/M
+Walton/M
+waltzer/M
+Walt/ZMR
+waltz/MRSDGZ
+Walworth/M
+Waly/M
+wampum/SM
+Wanamaker/M
+Wanda/M
+wanderer/M
+wander/JZGRD
+wanderlust/SM
+Wandie/M
+Wandis/M
+wand/MRSZ
+wane/S
+Waneta/M
+wangler/M
+wangle/RSDGZ
+Wang/M
+Wanids/M
+Wankel/M
+wanna
+wannabe/S
+wanned
+wanner
+wanness/S
+wannest
+wanning
+wan/PGSDY
+Wansee/M
+Wansley/M
+wanted/U
+wanter/M
+want/GRDSJ
+wantonness/S
+wanton/PGSRDY
+wapiti/MS
+warble/GZRSD
+warbler/M
+warbonnet/S
+ward/AGMRDS
+Warde/M
+warden/DMGS
+Warden/M
+warder/DMGS
+Ward/MN
+wardrobe/MDSG
+wardroom/MS
+wardship/M
+wards/I
+warehouseman/M
+warehouse/MGSRD
+Ware/MG
+ware/MS
+warfare/SM
+Warfield/M
+war/GSMD
+warhead/MS
+Warhol/M
+warhorse/SM
+warily/U
+warinesses/U
+wariness/MS
+Waring/M
+warless
+warlike
+warlock/SM
+warlord/MS
+warmblooded
+warmed/A
+warmer/M
+warmheartedness/SM
+warmhearted/PY
+warmish
+warmness/MS
+warmongering/M
+warmonger/JGSM
+warms/A
+warmth/M
+warmths
+warm/YRDHPGZTS
+warned/U
+warner/M
+Warner/M
+warn/GRDJS
+warning/YM
+Warnock/M
+warpaint
+warpath/M
+warpaths
+warper/M
+warplane/MS
+warp/MRDGS
+warranted/U
+warranter/M
+warrant/GSMDR
+warranty/SDGM
+warred/M
+warrener/M
+Warren/M
+warren/SZRM
+warring/M
+warrior/MS
+Warsaw/M
+wars/C
+warship/MS
+warthog/S
+wartime/SM
+wart/MDS
+warty/RT
+Warwick/M
+wary/URPT
+Wasatch/M
+washable/S
+wash/AGSD
+washbasin/SM
+washboard/SM
+washbowl/SM
+Washburn/M
+washcloth/M
+washcloths
+washday/M
+washed/U
+washer/GDMS
+washerwoman/M
+washerwomen
+washing/SM
+Washingtonian/S
+Washington/M
+Wash/M
+Washoe/M
+washout/SM
+washrag/SM
+washroom/MS
+washstand/SM
+washtub/MS
+washy/RT
+wasn't
+WASP
+waspishness/SM
+waspish/PY
+Wasp's
+wasp/SM
+was/S
+wassail/GMDS
+Wasserman/M
+Wassermann/M
+wastage/SM
+wastebasket/SM
+wastefulness/S
+wasteful/YP
+wasteland/MS
+wastepaper/MS
+waster/DG
+waste/S
+wastewater
+wast/GZSRD
+wasting/Y
+wastrel/MS
+Watanabe/M
+watchable/U
+watchband/SM
+watchdogged
+watchdogging
+watchdog/SM
+watched/U
+watcher/M
+watchfulness/MS
+watchful/PY
+watch/JRSDGZB
+watchmake/JRGZ
+watchmaker/M
+watchman/M
+watchmen
+watchpoints
+watchtower/MS
+watchword/MS
+waterbird/S
+waterborne
+Waterbury/M
+watercolor/DMGS
+watercolorist/SM
+watercourse/SM
+watercraft/M
+watercress/SM
+waterer/M
+waterfall/SM
+waterfowl/M
+waterfront/SM
+Watergate/M
+waterhole/S
+Waterhouse/M
+wateriness/SM
+watering/M
+water/JGSMRD
+waterless
+waterlily/S
+waterline/S
+waterlogged
+waterloo
+Waterloo/SM
+waterman/M
+watermark/GSDM
+watermelon/SM
+watermill/S
+waterproof/PGRDSJ
+watershed/SM
+waterside/MSR
+watersider/M
+Waters/M
+waterspout/MS
+watertightness/M
+watertight/P
+Watertown/M
+waterway/MS
+waterwheel/S
+waterworks/M
+watery/PRT
+Watkins
+WATS
+Watson/M
+wattage/SM
+Watteau/M
+Wattenberg/M
+Watterson/M
+wattle/SDGM
+Watt/MS
+watt/TMRS
+Watusi/M
+Wat/ZM
+Waugh/M
+Waukesha/M
+Waunona/M
+Waupaca/M
+Waupun/M
+Wausau/M
+Wauwatosa/M
+waveband/MS
+waveform/SM
+wavefront/MS
+waveguide/MS
+Waveland/M
+wavelength/M
+wavelengths
+wavelet/SM
+wavelike
+wavenumber
+waver/GZRD
+wavering/YU
+Waverley/M
+Waverly/M
+Wave/S
+wave/ZGDRS
+wavily
+waviness/MS
+wavy/SRTP
+waxer/M
+waxiness/MS
+wax/MNDRSZG
+waxwing/MS
+waxwork/MS
+waxy/PRT
+wayfarer/MS
+wayfaring/S
+waylaid
+Wayland/M
+Waylan/M
+waylayer/M
+waylay/GRSZ
+wayleave/MS
+Waylen/M
+Waylin/M
+Waylon/M
+Way/M
+waymarked
+way/MS
+Wayne/M
+Waynesboro/M
+wayside/MS
+waywardness/S
+wayward/YP
+WC
+we
+weakener/M
+weaken/ZGRD
+weakfish/SM
+weakish
+weakliness/M
+weakling/SM
+weakly/RTP
+weakness/MS
+weak/TXPYRN
+weal/MHS
+wealthiness/MS
+wealth/M
+wealths
+wealthy/PTR
+weaner/M
+weanling/M
+wean/RDGS
+weapon/GDMS
+weaponless
+weaponry/MS
+wearable/S
+wearer/M
+wearied/U
+wearily
+weariness/MS
+wearing/Y
+wearisomeness/M
+wearisome/YP
+wear/RBSJGZ
+wearying/Y
+weary/TGPRSD
+weasel/SGMDY
+weatherbeaten
+weathercock/SDMG
+weatherer/M
+Weatherford/M
+weathering/M
+weatherize/GSD
+weatherman/M
+weather/MDRYJGS
+weathermen
+weatherperson/S
+weatherproof/SGPD
+weatherstripped
+weatherstripping/S
+weatherstrip/S
+weaver/M
+Weaver/M
+weaves/A
+weave/SRDGZ
+weaving/A
+webbed
+Webber/M
+webbing/MS
+Webb/RM
+weber/M
+Weber/M
+Webern/M
+webfeet
+webfoot/M
+Web/MR
+website/S
+web/SMR
+Webster/MS
+Websterville/M
+we'd
+wedded/A
+Weddell/M
+wedder
+wedding/SM
+wedge/SDGM
+wedgie/RST
+Wedgwood/M
+wedlock/SM
+Wed/M
+Wednesday/SM
+wed/SA
+weeder/M
+weediness/M
+weedkiller/M
+weedless
+wee/DRST
+weed/SGMRDZ
+weedy/TRP
+weeing
+weekday/MS
+weekender/M
+weekend/SDRMG
+weekly/S
+weeknight/SM
+Weeks/M
+week/SYM
+weenie/M
+ween/SGD
+weeny/RSMT
+weeper/M
+weep/SGZJRD
+weepy/RST
+weevil/MS
+weft/SGMD
+Wehr/M
+Weibull/M
+Weidar/M
+Weider/M
+Weidman/M
+Weierstrass/M
+weighed/UA
+weigher/M
+weigh/RDJG
+weighs/A
+weighted/U
+weighter/M
+weightily
+weightiness/SM
+weighting/M
+weight/JMSRDG
+weightlessness/SM
+weightless/YP
+weightlifter/S
+weightlifting/MS
+weighty/TPR
+Weill/M
+Wei/M
+Weinberg/M
+Weiner/M
+Weinstein/M
+weirdie/SM
+weirdness/MS
+weirdo/SM
+weird/YRDPGTS
+weir/SDMG
+Weisenheimer/M
+Weiss/M
+Weissman/M
+Weissmuller/M
+Weizmann/M
+Welbie/M
+Welby/M
+Welcher/M
+Welches
+welcomeness/M
+welcome/PRSDYG
+welcoming/U
+welder/M
+Weldon/M
+weld/SBJGZRD
+Weldwood/M
+welfare/SM
+welkin/SM
+we'll
+Welland/M
+wellbeing/M
+Weller/M
+Wellesley/M
+Welles/M
+wellhead/SM
+Wellington/MS
+wellington/S
+Wellman/M
+wellness/MS
+well/SGPD
+Wells/M
+wellspring/SM
+Wellsville/M
+Welmers/M
+Welsh
+welsher/M
+Welshman/M
+Welshmen
+welsh/RSDGZ
+Welshwoman/M
+Welshwomen
+welter/GD
+welterweight/MS
+welt/GZSMRD
+wencher/M
+wench/GRSDM
+Wendall/M
+Wenda/M
+wend/DSG
+Wendeline/M
+Wendell/M
+Wendel/M
+Wendie/M
+Wendi/M
+Wendye/M
+Wendy/M
+wen/M
+Wenonah/M
+Wenona/M
+went
+Wentworth/M
+wept/U
+were
+we're
+weren't
+werewolf/M
+werewolves
+Werner/M
+Wernher/M
+Werther/M
+werwolf's
+Wes
+Wesleyan
+Wesley/M
+Wessex/M
+Wesson/M
+westbound
+Westbrooke/M
+Westbrook/M
+Westchester/M
+wester/DYG
+westerly/S
+westerner/M
+westernization/MS
+westernize/GSD
+westernmost
+Western/ZRS
+western/ZSR
+Westfield/M
+Westhampton/M
+Westinghouse/M
+westing/M
+Westleigh/M
+Westley/M
+Westminster/M
+Westmore/M
+West/MS
+Weston/M
+Westphalia/M
+Westport/M
+west/RDGSM
+westward/S
+Westwood/M
+wetback/MS
+wetland/S
+wetness/MS
+wet/SPY
+wettable
+wetter/S
+wettest
+wetting
+we've
+Weyden/M
+Weyerhauser/M
+Weylin/M
+Wezen/M
+WFF
+whacker/M
+whack/GZRDS
+whaleboat/MS
+whalebone/SM
+whale/GSRDZM
+Whalen/M
+whaler/M
+whaling/M
+whammed
+whamming/M
+wham/MS
+whammy/S
+wharf/SGMD
+Wharton/M
+wharves
+whatchamacallit/MS
+what'd
+whatever
+what/MS
+whatnot/MS
+what're
+whatsoever
+wheal/MS
+wheatgerm
+Wheaties/M
+Wheatland/M
+wheat/NMXS
+Wheaton/M
+Wheatstone/M
+wheedle/ZDRSG
+wheelbarrow/GSDM
+wheelbase/MS
+wheelchair/MS
+wheeler/M
+Wheeler/M
+wheelhouse/SM
+wheelie/MS
+wheeling/M
+Wheeling/M
+Wheelock/M
+wheel/RDMJSGZ
+wheelwright/MS
+whee/S
+wheeze/SDG
+wheezily
+wheeziness/SM
+wheezy/PRT
+Whelan/M
+whelk/MDS
+Wheller/M
+whelm/DGS
+whelp/DMGS
+whence/S
+whenever
+when/S
+whensoever
+whereabout/S
+whereas/S
+whereat
+whereby
+where'd
+wherefore/MS
+wherein
+where/MS
+whereof
+whereon
+where're
+wheresoever
+whereto
+whereupon
+wherever
+wherewith
+wherewithal/SM
+wherry/DSGM
+whether
+whet/S
+whetstone/MS
+whetted
+whetting
+whew/GSD
+whey/MS
+which
+whichever
+whiff/GSMD
+whiffle/DRSG
+whiffler/M
+whiffletree/SM
+whig/S
+Whig/SM
+while/GSD
+whilom
+whilst
+whimmed
+whimming
+whimper/DSG
+whimsey's
+whimsicality/MS
+whimsical/YP
+whim/SM
+whimsy/TMDRS
+whine/GZMSRD
+whining/Y
+whinny/GTDRS
+whiny/RT
+whipcord/SM
+whiplash/SDMG
+Whippany/M
+whipped
+whipper/MS
+whippersnapper/MS
+whippet/MS
+whipping/SM
+Whipple/M
+whippletree/SM
+whippoorwill/SM
+whipsaw/GDMS
+whips/M
+whip/SM
+whirligig/MS
+whirlpool/MS
+whirl/RDGS
+whirlwind/MS
+whirlybird/MS
+whirly/MS
+whirred
+whirring
+whir/SY
+whisker/DM
+whiskery
+whiskey/SM
+whisk/GZRDS
+whisperer/M
+whisper/GRDJZS
+whispering/YM
+whist/GDMS
+whistleable
+whistle/DRSZG
+whistler/M
+Whistler/M
+whistling/M
+Whitaker/M
+Whitby/M
+Whitcomb/M
+whitebait/M
+whitecap/MS
+whiteface/M
+Whitefield/M
+whitefish/SM
+Whitehall/M
+Whitehead/M
+whitehead/S
+Whitehorse/M
+Whiteleaf/M
+Whiteley/M
+White/MS
+whitener/M
+whiteness/MS
+whitening/M
+whiten/JZDRG
+whiteout/S
+white/PYS
+whitespace
+whitetail/S
+whitewall/SM
+whitewash/GRSDM
+whitewater
+Whitewater/M
+whitey/MS
+Whitfield/M
+whither/DGS
+whitier
+whitiest
+whiting/M
+whitish
+Whitley/M
+Whitlock/M
+Whit/M
+Whitman/M
+Whitney/M
+whit/SJGTXMRND
+Whitsunday/MS
+Whittaker/M
+whitter
+Whittier
+whittle/JDRSZG
+whittler/M
+whiz
+whizkid
+whizzbang/S
+whizzed
+whizzes
+whizzing
+WHO
+whoa/S
+who'd
+whodunit/SM
+whoever
+wholegrain
+wholeheartedness/MS
+wholehearted/PY
+wholemeal
+wholeness/S
+wholesale/GZMSRD
+wholesaler/M
+wholesomeness/USM
+wholesome/UYP
+whole/SP
+wholewheat
+who'll
+wholly
+whom
+who/M
+whomever
+whomsoever
+whoopee/S
+whooper/M
+whoop/SRDGZ
+whoosh/DSGM
+whop
+whopper/MS
+whopping/S
+who're
+whorehouse/SM
+whoreish
+whore/SDGM
+whorish
+whorl/SDM
+whose
+whoso
+whosoever
+who've
+why
+whys
+WI
+Wiatt/M
+Wichita/M
+wickedness/MS
+wicked/RYPT
+wicker/M
+wickerwork/MS
+wicketkeeper/SM
+wicket/SM
+wick/GZRDMS
+wicking/M
+widemouthed
+widener/M
+wideness/S
+widen/SGZRD
+wide/RSYTP
+widespread
+widgeon's
+widget/SM
+widower/M
+widowhood/S
+widow/MRDSGZ
+width/M
+widths
+widthwise
+Wieland/M
+wielder/M
+wield/GZRDS
+Wiemar/M
+wiener/SM
+wienie/SM
+Wier/M
+Wiesel/M
+wife/DSMYG
+wifeless
+wifely/RPT
+wigeon/MS
+wigged
+wigging/M
+Wiggins
+wiggler/M
+wiggle/RSDGZ
+wiggly/RT
+wight/SGDM
+wiglet/S
+wigmaker
+wig/MS
+Wigner/M
+wigwagged
+wigwagging
+wigwag/S
+wigwam/MS
+Wilberforce/M
+Wilbert/M
+Wilbur/M
+Wilburn/M
+Wilburt/M
+Wilcox/M
+Wilda/M
+wildcat/SM
+wildcatted
+wildcatter/MS
+wildcatting
+wildebeest/SM
+Wilde/MR
+Wilden/M
+Wilder/M
+wilderness/SM
+wilder/P
+wildfire/MS
+wildflower/S
+wildfowl/M
+wilding/M
+wildlife/M
+wildness/MS
+Wildon/M
+wild/SPGTYRD
+wile/DSMG
+Wileen/M
+Wilek/M
+Wiley/M
+Wilford/M
+Wilfred/M
+Wilfredo/M
+Wilfrid/M
+wilfulness's
+Wilhelmina/M
+Wilhelmine/M
+Wilhelm/M
+Wilie/M
+wilily
+wiliness/MS
+Wilkerson/M
+Wilkes/M
+Wilkins/M
+Wilkinson/M
+Willabella/M
+Willa/M
+Willamette/M
+Willamina/M
+Willard/M
+Willcox/M
+Willdon/M
+willed/U
+Willem/M
+Willemstad/M
+willer/M
+Willetta/M
+Willette/M
+Willey/M
+willfulness/S
+willful/YP
+Williamsburg/M
+William/SM
+Williamson/M
+Willied/M
+Willie/M
+willies
+Willi/MS
+willinger
+willingest
+willingness's
+willingness/US
+willing/UYP
+Willisson/M
+williwaw/MS
+Will/M
+Willoughby/M
+willower/M
+Willow/M
+willow/RDMSG
+willowy/TR
+willpower/MS
+will/SGJRD
+Willy/SDM
+Willyt/M
+Wilma/M
+Wilmar/M
+Wilmer/M
+Wilmette/M
+Wilmington/M
+Wilona/M
+Wilone/M
+Wilow/M
+Wilshire/M
+Wilsonian
+Wilson/M
+wilt/DGS
+Wilt/M
+Wilton/M
+wily/PTR
+Wimbledon/M
+wimp/GSMD
+wimpish
+wimple/SDGM
+wimpy/RT
+wince/SDG
+Winchell/M
+wincher/M
+winchester/M
+Winchester/MS
+winch/GRSDM
+windbag/SM
+windblown
+windbreak/MZSR
+windburn/GSMD
+winded
+winder/UM
+windfall/SM
+windflower/MS
+Windham/M
+Windhoek/M
+windily
+windiness/SM
+winding/MS
+windjammer/SM
+windlass/GMSD
+windless/YP
+windmill/GDMS
+window/DMGS
+windowless
+windowpane/SM
+Windows
+windowsill/SM
+windpipe/SM
+windproof
+windrow/GDMS
+wind's
+winds/A
+windscreen/MS
+windshield/SM
+windsock/MS
+Windsor/MS
+windstorm/MS
+windsurf/GZJSRD
+windswept
+windup/MS
+wind/USRZG
+Windward/M
+windward/SY
+Windy/M
+windy/TPR
+wineglass/SM
+winegrower/SM
+Winehead/M
+winemake
+winemaster
+wine/MS
+winery/MS
+Winesap/M
+wineskin/M
+Winfield/M
+Winfred/M
+Winfrey/M
+wingback/M
+wingding/MS
+wingeing
+winger/M
+wing/GZRDM
+wingless
+winglike
+wingman
+wingmen
+wingspan/SM
+wingspread/MS
+wingtip/S
+Winifield/M
+Winifred/M
+Wini/M
+winker/M
+wink/GZRDS
+winking/U
+Winkle/M
+winkle/SDGM
+winless
+Win/M
+winnable
+Winnah/M
+Winna/M
+Winnebago/M
+Winne/M
+winner/MS
+Winnetka/M
+Winnie/M
+Winnifred/M
+Winni/M
+winning/SY
+Winnipeg/M
+Winn/M
+winnow/SZGRD
+Winny/M
+Winograd/M
+wino/MS
+Winonah/M
+Winona/M
+Winooski/M
+Winsborough/M
+Winsett/M
+Winslow/M
+winsomeness/SM
+winsome/PRTY
+Winston/M
+winterer/M
+wintergreen/SM
+winterize/GSD
+Winters
+winter/SGRDYM
+wintertime/MS
+Winthrop/M
+wintriness/M
+wintry/TPR
+winy/RT
+win/ZGDRS
+wipe/DRSZG
+wiper/M
+wirehair/MS
+wireless/MSDG
+wireman/M
+wiremen
+wirer/M
+wire's
+wires/A
+wiretap/MS
+wiretapped
+wiretapper/SM
+wiretapping
+wire/UDA
+wiriness/S
+wiring/SM
+wiry/RTP
+Wisc
+Wisconsinite/SM
+Wisconsin/M
+wisdoms
+wisdom/UM
+wiseacre/MS
+wisecrack/GMRDS
+wised
+wisely/TR
+Wise/M
+wiseness
+wisenheimer/M
+Wisenheimer/M
+wises
+wise/URTY
+wishbone/MS
+wishfulness/M
+wishful/PY
+wish/GZSRD
+wishy
+wising
+Wis/M
+wisp/MDGS
+wispy/RT
+wist/DGS
+wisteria/SM
+wistfulness/MS
+wistful/PY
+witchcraft/SM
+witchdoctor/S
+witchery/MS
+witch/SDMG
+withal
+withdrawal/MS
+withdrawer/M
+withdrawnness/M
+withdrawn/P
+withdraw/RGS
+withdrew
+withe/M
+wither/GDJ
+withering/Y
+Witherspoon/M
+with/GSRDZ
+withheld
+withholder/M
+withhold/SJGZR
+within/S
+without/S
+withs
+withstand/SG
+withstood
+witlessness/MS
+witless/PY
+Wit/M
+witness/DSMG
+witnessed/U
+wit/PSM
+witted
+witter/G
+Wittgenstein/M
+witticism/MS
+Wittie/M
+wittily
+wittiness/SM
+wittings
+witting/UY
+Witt/M
+Witty/M
+witty/RTP
+Witwatersrand/M
+wive/GDS
+wives/M
+wizard/MYS
+wizardry/MS
+wizen/D
+wiz's
+wk/Y
+Wm/M
+WNW
+woad/MS
+wobble/GSRD
+wobbler/M
+wobbliness/S
+wobbly/PRST
+Wodehouse/M
+woebegone/P
+woefuller
+woefullest
+woefulness/SM
+woeful/PY
+woe/PSM
+woke
+wok/SMN
+Wolcott/M
+wold/MS
+Wolfe/M
+wolfer/M
+Wolff/M
+Wolfgang/M
+wolfhound/MS
+Wolfie/M
+wolfishness/M
+wolfish/YP
+Wolf/M
+wolfram/MS
+wolf/RDMGS
+Wolfy/M
+Wollongong/M
+Wollstonecraft/M
+Wolsey/M
+Wolverhampton/M
+wolverine/SM
+Wolverton/M
+wolves/M
+woman/GSMYD
+womanhood/MS
+womanish
+womanized/U
+womanizer/M
+womanize/RSDZG
+womanizes/U
+womankind/M
+womanlike
+womanliness/SM
+womanly/PRT
+wombat/MS
+womb/SDM
+womenfolk/MS
+women/MS
+wonderer/M
+wonderfulness/SM
+wonderful/PY
+wonder/GLRDMS
+wondering/Y
+wonderland/SM
+wonderment/SM
+wondrousness/M
+wondrous/YP
+Wong/M
+wonk/S
+wonky/RT
+wonned
+wonning
+won/SG
+won't
+wontedness/MU
+wonted/PUY
+wont/SGMD
+Woodard/M
+Woodberry/M
+woodbine/SM
+woodblock/S
+Woodbury/M
+woodcarver/S
+woodcarving/MS
+woodchopper/SM
+woodchuck/MS
+woodcock/MS
+woodcraft/MS
+woodcut/SM
+woodcutter/MS
+woodcutting/MS
+woodenness/SM
+wooden/TPRY
+woodgrain/G
+woodhen
+Woodhull/M
+Woodie/M
+woodiness/MS
+woodland/SRM
+Woodlawn/M
+woodlice
+woodlot/S
+woodlouse/M
+woodman/M
+Woodman/M
+woodmen
+woodpecker/SM
+woodpile/SM
+Woodrow/M
+woodruff/M
+woo/DRZGS
+woodshedded
+woodshedding
+woodshed/SM
+woodside
+Wood/SM
+woodsman/M
+woodsmen
+wood/SMNDG
+woodsmoke
+woods/R
+Woodstock/M
+woodsy/TRP
+Woodward/MS
+woodwind/S
+woodworker/M
+woodworking/M
+woodwork/SMRGZJ
+woodworm/M
+woodyard
+Woody/M
+woody/TPSR
+woofer/M
+woof/SRDMGZ
+Woolf/M
+woolgatherer/M
+woolgathering/M
+woolgather/RGJ
+woolliness/MS
+woolly/RSPT
+Woolongong/M
+wool/SMYNDX
+Woolworth/M
+Woonsocket/M
+Wooster/M
+Wooten/M
+woozily
+wooziness/MS
+woozy/RTP
+wop/MS!
+Worcestershire/M
+Worcester/SM
+wordage/SM
+word/AGSJD
+wordbook/MS
+Worden/M
+wordily
+wordiness/SM
+wording/AM
+wordless/Y
+wordplay/SM
+word's
+Wordsworth/M
+wordy/TPR
+wore
+workability's
+workability/U
+workableness/M
+workable/U
+workably
+workaday
+workaholic/S
+workaround/SM
+workbench/MS
+workbook/SM
+workday/SM
+worked/A
+worker/M
+workfare/S
+workforce/S
+work/GZJSRDMB
+workhorse/MS
+workhouse/SM
+working/M
+workingman/M
+workingmen
+workingwoman/M
+workingwomen
+workload/SM
+workmanlike
+Workman/M
+workman/MY
+workmanship/MS
+workmate/S
+workmen/M
+workout/SM
+workpiece/SM
+workplace/SM
+workroom/MS
+works/A
+worksheet/S
+workshop/MS
+workspace/S
+workstation/MS
+worktable/SM
+worktop/S
+workup/S
+workweek/SM
+worldlier
+worldliest
+worldliness/USM
+worldly/UP
+worldwide
+world/ZSYM
+wormer/M
+wormhole/SM
+worm/SGMRD
+Worms/M
+wormwood/SM
+wormy/RT
+worn/U
+worried/Y
+worrier/M
+worriment/MS
+worrisome/YP
+worrying/Y
+worrywart/SM
+worry/ZGSRD
+worsen/GSD
+worse/SR
+worshiper/M
+worshipfulness/M
+worshipful/YP
+worship/ZDRGS
+worsted/MS
+worst/SGD
+worth/DG
+worthily/U
+worthinesses/U
+worthiness/SM
+Worthington/M
+worthlessness/SM
+worthless/PY
+Worth/M
+worths
+worthwhile/P
+Worthy/M
+worthy/UTSRP
+wort/SM
+wost
+wot
+Wotan/M
+wouldn't
+would/S
+wouldst
+would've
+wound/AU
+wounded/U
+wounder
+wounding
+wounds
+wound's
+wove/A
+woven/AU
+wovens
+wow/SDG
+Wozniak/M
+WP
+wpm
+wrack/SGMD
+wraith/M
+wraiths
+Wrangell/M
+wrangle/GZDRS
+wrangler/M
+wraparound/S
+wrap/MS
+wrapped/U
+wrapper/MS
+wrapping/SM
+wraps/U
+wrasse/SM
+wrathful/YP
+wrath/GDM
+wraths
+wreak/SDG
+wreathe
+wreath/GMDS
+wreaths
+wreckage/MS
+wrecker/M
+wreck/GZRDS
+wrenching/Y
+wrench/MDSG
+wren/MS
+Wren/MS
+Wrennie/M
+wrester/M
+wrestle/JGZDRS
+wrestler/M
+wrestling/M
+wrest/SRDG
+wretchedness/SM
+wretched/TPYR
+wretch/MDS
+wriggle/DRSGZ
+wriggler/M
+wriggly/RT
+Wright/M
+wright/MS
+Wrigley/M
+wringer/M
+wring/GZRS
+wrinkled/U
+wrinkle/GMDS
+wrinkly/RST
+wristband/SM
+wrist/MS
+wristwatch/MS
+writable/U
+write/ASBRJG
+writer/MA
+writeup
+writhe/SDG
+writing/M
+writ/MRSBJGZ
+written/UA
+Wroclaw
+wrongdoer/MS
+wrongdoing/MS
+wronger/M
+wrongfulness/MS
+wrongful/PY
+wrongheadedness/MS
+wrongheaded/PY
+wrongness/MS
+wrong/PSGTYRD
+Wronskian/M
+wrote/A
+wroth
+wrought/I
+wrung
+wry/DSGY
+wryer
+wryest
+wryness/SM
+W's
+WSW
+wt
+W/T
+Wuhan/M
+Wu/M
+Wurlitzer/M
+wurst/SM
+wuss/S
+wussy/TRS
+WV
+WW
+WWI
+WWII
+WWW
+w/XTJGV
+WY
+Wyatan/M
+Wyatt/M
+Wycherley/M
+Wycliffe/M
+Wye/MH
+Wyeth/M
+Wylie/M
+Wylma/M
+Wyman/M
+Wyndham/M
+Wyn/M
+Wynne/M
+Wynnie/M
+Wynn/M
+Wynny/M
+Wyo/M
+Wyomingite/SM
+Wyoming/M
+WYSIWYG
+x
+X
+Xanadu
+Xanthippe/M
+Xanthus/M
+Xaviera/M
+Xavier/M
+Xebec/M
+Xe/M
+XEmacs/M
+Xenakis/M
+Xena/M
+Xenia/M
+Xenix/M
+xenon/SM
+xenophobe/MS
+xenophobia/SM
+xenophobic
+Xenophon/M
+Xenos
+xerographic
+xerography/MS
+xerox/GSD
+Xerox/MGSD
+Xerxes/M
+Xever/M
+Xhosa/M
+Xi'an
+Xian/S
+Xiaoping/M
+xii
+xiii
+xi/M
+Ximenes/M
+Ximenez/M
+Ximian/SM
+Xingu/M
+xis
+xiv
+xix
+XL
+Xmas/SM
+XML
+Xochipilli/M
+XOR
+X's
+XS
+xterm/M
+Xuzhou/M
+xv
+xvi
+xvii
+xviii
+xx
+XXL
+xylem/SM
+xylene/M
+Xylia/M
+Xylina/M
+xylophone/MS
+xylophonist/S
+Xymenes/M
+Y
+ya
+yacc/M
+Yacc/M
+yachting/M
+yachtsman
+yachtsmen
+yachtswoman/M
+yachtswomen
+yacht/ZGJSDM
+yack's
+Yagi/M
+yahoo/MS
+Yahweh/M
+Yakima/M
+yakked
+yakking
+yak/SM
+Yakut/M
+Yakutsk/M
+Yale/M
+Yalies/M
+y'all
+Yalonda/M
+Yalow/M
+Yalta/M
+Yalu/M
+Yamaha/M
+yammer/RDZGS
+Yamoussoukro
+yam/SM
+Yanaton/M
+Yance/M
+Yancey/M
+Yancy/M
+Yang/M
+Yangon
+yang/S
+Yangtze/M
+Yankee/SM
+yank/GDS
+Yank/MS
+Yaounde/M
+yapped
+yapping
+yap/S
+Yaqui/M
+yardage/SM
+yardarm/SM
+Yardley/M
+Yard/M
+yardman/M
+yardmaster/S
+yardmen
+yard/SMDG
+yardstick/SM
+yarmulke/SM
+yarn/SGDM
+Yaroslavl/M
+yarrow/MS
+Yasmeen/M
+Yasmin/M
+Yates
+yaw/DSG
+yawl/SGMD
+yawner/M
+yawn/GZSDR
+yawning/Y
+Yb/M
+yd
+Yeager/M
+yeah
+yeahs
+yearbook/SM
+yearling/M
+yearlong
+yearly/S
+yearner/M
+yearning/MY
+yearn/JSGRD
+year/YMS
+yea/S
+yeastiness/M
+yeast/SGDM
+yeasty/PTR
+Yeats/M
+yecch
+yegg/MS
+Yehudi/M
+Yehudit/M
+Yekaterinburg/M
+Yelena/M
+yell/GSDR
+yellowhammers
+yellowish
+Yellowknife/M
+yellowness/MS
+Yellowstone/M
+yellow/TGPSRDM
+yellowy
+yelper/M
+yelp/GSDR
+Yeltsin
+Yemeni/S
+Yemenite/SM
+Yemen/M
+Yenisei/M
+yenned
+yenning
+yen/SM
+Yentl/M
+yeomanry/MS
+yeoman/YM
+yeomen
+yep/S
+Yerevan/M
+Yerkes/M
+Yesenia/M
+yeshiva/SM
+yes/S
+yessed
+yessing
+yesterday/MS
+yesteryear/SM
+yet
+ye/T
+yeti/SM
+Yetta/M
+Yettie/M
+Yetty/M
+Yevette/M
+Yevtushenko/M
+yew/SM
+y/F
+Yggdrasil/M
+Yiddish/M
+yielded/U
+yielding/U
+yield/JGRDS
+yikes
+yin/S
+yipe/S
+yipped
+yippee/S
+yipping
+yip/S
+YMCA
+YMHA
+Ymir/M
+YMMV
+Ynes/M
+Ynez/M
+yo
+Yoda/M
+yodeler/M
+yodel/SZRDG
+Yoder/M
+yoga/MS
+yoghurt's
+yogi/MS
+yogurt/SM
+yoke/DSMG
+yoked/U
+yokel/SM
+yokes/U
+yoking/U
+Yoknapatawpha/M
+Yokohama/M
+Yoko/M
+Yolanda/M
+Yolande/M
+Yolane/M
+Yolanthe/M
+yolk/DMS
+yon
+yonder
+Yong/M
+Yonkers/M
+yore/MS
+Yorgo/MS
+Yorick/M
+Yorke/M
+Yorker/M
+yorker/SM
+Yorkshire/MS
+Yorktown/M
+York/ZRMS
+Yoruba/M
+Yosemite/M
+Yoshiko/M
+Yoshi/M
+Yost/M
+you'd
+you'll
+youngish
+Young/M
+youngster/MS
+Youngstown/M
+young/TRYP
+you're
+your/MS
+yourself
+yourselves
+you/SH
+youthfulness/SM
+youthful/YP
+youths
+youth/SM
+you've
+Yovonnda/M
+yow
+yowl/GSD
+Ypres/M
+Ypsilanti/M
+yr
+yrs
+Y's
+Ysabel/M
+YT
+ytterbium/MS
+yttrium/SM
+yuan/M
+Yuba/M
+Yucatan
+yucca/MS
+yuck/GSD
+yucky/RT
+Yugo/M
+Yugoslavia/M
+Yugoslavian/S
+Yugoslav/M
+Yuh/M
+Yuki/M
+yukked
+yukking
+Yukon/M
+yuk/S
+yule/MS
+Yule/MS
+yuletide/MS
+Yuletide/S
+Yul/M
+Yulma/M
+yum
+Yuma/M
+yummy/TRS
+Yunnan/M
+yuppie/SM
+yup/S
+Yurik/M
+Yuri/M
+yurt/SM
+Yves/M
+Yvette/M
+Yvon/M
+Yvonne/M
+Yvor/M
+YWCA
+YWHA
+Zabrina/M
+Zaccaria/M
+Zachariah/M
+Zacharia/SM
+Zacharie/M
+Zachary/M
+Zacherie/M
+Zachery/M
+Zach/M
+Zackariah/M
+Zack/M
+zagging
+Zagreb/M
+zag/S
+Zahara/M
+Zaire/M
+Zairian/S
+Zak/M
+Zambezi/M
+Zambia/M
+Zambian/S
+Zamboni
+Zamenhof/M
+Zamora/M
+Zandra/M
+Zane/M
+Zaneta/M
+zaniness/MS
+Zan/M
+Zanuck/M
+zany/PDSRTG
+Zanzibar/M
+Zapata/M
+Zaporozhye/M
+Zappa/M
+zapped
+zapper/S
+zapping
+zap/S
+Zarah/M
+Zara/M
+Zared/M
+Zaria/M
+Zarla/M
+Zealand/M
+zeal/MS
+zealot/MS
+zealotry/MS
+zealousness/SM
+zealous/YP
+Zea/M
+Zebadiah/M
+Zebedee/M
+Zeb/M
+zebra/MS
+Zebulen/M
+Zebulon/M
+zebu/SM
+Zechariah/M
+Zedekiah/M
+Zed/M
+Zedong/M
+zed/SM
+Zeffirelli/M
+Zeiss/M
+zeitgeist/S
+Zeke/M
+Zelda/M
+Zelig/M
+Zellerbach/M
+Zelma/M
+Zena/M
+Zenger/M
+Zenia/M
+zenith/M
+zeniths
+Zen/M
+Zennist/M
+Zeno/M
+Zephaniah/M
+zephyr/MS
+Zephyrus/M
+Zeppelin's
+zeppelin/SM
+Zerk/M
+zeroed/M
+zeroing/M
+zero/SDHMG
+zestfulness/MS
+zestful/YP
+zest/MDSG
+zesty/RT
+zeta/SM
+zeugma/M
+Zeus/M
+Zhdanov/M
+Zhengzhou
+Zhivago/M
+Zhukov/M
+Zia/M
+Zibo/M
+Ziegfeld/MS
+Ziegler/M
+zig
+zigged
+zigging
+Ziggy/M
+zigzagged
+zigzagger
+zigzagging
+zigzag/MS
+zilch/S
+zillion/MS
+Zilvia/M
+Zimbabwean/S
+Zimbabwe/M
+Zimmerman/M
+zincked
+zincking
+zinc/MS
+zing/GZDRM
+zingy/RT
+zinnia/SM
+Zionism/MS
+Zionist/MS
+Zion/SM
+zip/MS
+zipped/U
+zipper/GSDM
+zipping/U
+zippy/RT
+zips/U
+zirconium/MS
+zircon/SM
+Zita/M
+Zitella/M
+zither/SM
+zit/S
+zloty/SM
+Zn/M
+zodiacal
+zodiac/SM
+Zoe/M
+Zola/M
+Zollie/M
+Zolly/M
+Zomba/M
+zombie/SM
+zombi's
+zonal/Y
+Zonda/M
+Zondra/M
+zoned/A
+zone/MYDSRJG
+zones/A
+zoning/A
+zonked
+Zonnya/M
+zookeepers
+zoological/Y
+zoologist/SM
+zoology/MS
+zoom/DGS
+zoophyte/SM
+zoophytic
+zoo/SM
+Zorah/M
+Zora/M
+Zorana/M
+Zorina/M
+Zorine/M
+Zorn/M
+Zoroaster/M
+Zoroastrianism/MS
+Zoroastrian/S
+Zorro/M
+Zosma/M
+zounds/S
+Zr/M
+Zs
+Zsazsa/M
+Zsigmondy/M
+z/TGJ
+Zubenelgenubi/M
+Zubeneschamali/M
+zucchini/SM
+Zukor/M
+Zulema/M
+Zululand/M
+Zulu/MS
+Zuni/S
+Zürich/M
+Zuzana/M
+zwieback/MS
+Zwingli/M
+Zworykin/M
+Z/X
+zydeco/S
+zygote/SM
+zygotic
+zymurgy/S
diff --git a/core/src/test/resources/jmeter/index-count.jmx b/core/src/test/resources/jmeter/index-count.jmx
new file mode 100644
index 0000000000..09a563f3ec
--- /dev/null
+++ b/core/src/test/resources/jmeter/index-count.jmx
@@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Count Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ term : { age : ${personAge} } }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/_count</stringProp>
+ <stringProp name="HTTPSampler.method">POST</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/core/src/test/resources/jmeter/index-get.jmx b/core/src/test/resources/jmeter/index-get.jmx
new file mode 100644
index 0000000000..c8d7914f60
--- /dev/null
+++ b/core/src/test/resources/jmeter/index-get.jmx
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Get Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">GET</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree>
+ <ResponseAssertion guiclass="AssertionGui" testclass="ResponseAssertion" testname="Response Assertion" enabled="true">
+ <collectionProp name="Asserion.test_strings"/>
+ <stringProp name="Assertion.test_field">Assertion.response_code</stringProp>
+ <boolProp name="Assertion.assume_success">false</boolProp>
+ <intProp name="Assertion.test_type">2</intProp>
+ </ResponseAssertion>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/core/src/test/resources/jmeter/index-search.jmx b/core/src/test/resources/jmeter/index-search.jmx
new file mode 100644
index 0000000000..dc7428545c
--- /dev/null
+++ b/core/src/test/resources/jmeter/index-search.jmx
@@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Search Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ query : { term : { age : ${personAge} } } }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/_search</stringProp>
+ <stringProp name="HTTPSampler.method">POST</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/core/src/test/resources/jmeter/index.jmx b/core/src/test/resources/jmeter/index.jmx
new file mode 100644
index 0000000000..64a6849239
--- /dev/null
+++ b/core/src/test/resources/jmeter/index.jmx
@@ -0,0 +1,210 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/core/src/test/resources/jmeter/ping-single.jmx b/core/src/test/resources/jmeter/ping-single.jmx
new file mode 100644
index 0000000000..64a6849239
--- /dev/null
+++ b/core/src/test/resources/jmeter/ping-single.jmx
@@ -0,0 +1,210 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<jmeterTestPlan version="1.2" properties="2.1">
+ <hashTree>
+ <TestPlan guiclass="TestPlanGui" testclass="TestPlan" testname="Test Plan" enabled="true">
+ <stringProp name="TestPlan.comments"></stringProp>
+ <boolProp name="TestPlan.functional_mode">false</boolProp>
+ <boolProp name="TestPlan.serialize_threadgroups">false</boolProp>
+ <elementProp name="TestPlan.user_defined_variables" elementType="Arguments" guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="TestPlan.user_define_classpath"></stringProp>
+ </TestPlan>
+ <hashTree>
+ <Arguments guiclass="ArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="host" elementType="Argument">
+ <stringProp name="Argument.name">host</stringProp>
+ <stringProp name="Argument.value">localhost</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfThreads" elementType="Argument">
+ <stringProp name="Argument.name">numberOfThreads</stringProp>
+ <stringProp name="Argument.value">20</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ <elementProp name="numberOfLoops" elementType="Argument">
+ <stringProp name="Argument.name">numberOfLoops</stringProp>
+ <stringProp name="Argument.value">10000</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ </elementProp>
+ </collectionProp>
+ </Arguments>
+ <hashTree/>
+ <ThreadGroup guiclass="ThreadGroupGui" testclass="ThreadGroup" testname="Elasticsearch Users" enabled="true">
+ <elementProp name="ThreadGroup.main_controller" elementType="LoopController" guiclass="LoopControlPanel" testclass="LoopController" testname="Loop Controller" enabled="true">
+ <boolProp name="LoopController.continue_forever">false</boolProp>
+ <stringProp name="LoopController.loops">${numberOfLoops}</stringProp>
+ </elementProp>
+ <stringProp name="ThreadGroup.num_threads">${numberOfThreads}</stringProp>
+ <stringProp name="ThreadGroup.ramp_time">0</stringProp>
+ <longProp name="ThreadGroup.start_time">1260471148000</longProp>
+ <longProp name="ThreadGroup.end_time">1260471148000</longProp>
+ <boolProp name="ThreadGroup.scheduler">false</boolProp>
+ <stringProp name="ThreadGroup.on_sample_error">continue</stringProp>
+ <stringProp name="ThreadGroup.duration"></stringProp>
+ <stringProp name="ThreadGroup.delay"></stringProp>
+ </ThreadGroup>
+ <hashTree>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Id" enabled="true">
+ <stringProp name="maximumValue">10000</stringProp>
+ <stringProp name="minimumValue">0</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ <stringProp name="randomSeed"></stringProp>
+ <stringProp name="variableName">personId</stringProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <RandomVariableConfig guiclass="TestBeanGUI" testclass="RandomVariableConfig" testname="Random Person Age" enabled="true">
+ <stringProp name="TestPlan.comments"> </stringProp>
+ <stringProp name="variableName">personAge</stringProp>
+ <stringProp name="outputFormat"></stringProp>
+ <stringProp name="minimumValue">1</stringProp>
+ <stringProp name="maximumValue">50</stringProp>
+ <stringProp name="randomSeed"></stringProp>
+ <boolProp name="perThread">true</boolProp>
+ </RandomVariableConfig>
+ <hashTree/>
+ <ConfigTestElement guiclass="HttpDefaultsGui" testclass="ConfigTestElement" testname="HTTP Request Defaults" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments"/>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain">${host}</stringProp>
+ <stringProp name="HTTPSampler.port">9200</stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path"></stringProp>
+ </ConfigTestElement>
+ <hashTree/>
+ <HTTPSampler guiclass="HttpTestSampleGui" testclass="HTTPSampler" testname="Index Request" enabled="true">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler>
+ <hashTree/>
+ <HTTPSampler2 guiclass="HttpTestSampleGui2" testclass="HTTPSampler2" testname="Index Request HTTPClient" enabled="false">
+ <elementProp name="HTTPsampler.Arguments" elementType="Arguments" guiclass="HTTPArgumentsPanel" testclass="Arguments" testname="User Defined Variables" enabled="true">
+ <collectionProp name="Arguments.arguments">
+ <elementProp name="" elementType="HTTPArgument">
+ <boolProp name="HTTPArgument.always_encode">false</boolProp>
+ <stringProp name="Argument.value">{ name : &quot;person${personId}&quot;, age : ${personAge} }</stringProp>
+ <stringProp name="Argument.metadata">=</stringProp>
+ <boolProp name="HTTPArgument.use_equals">true</boolProp>
+ </elementProp>
+ </collectionProp>
+ </elementProp>
+ <stringProp name="HTTPSampler.domain"></stringProp>
+ <stringProp name="HTTPSampler.port"></stringProp>
+ <stringProp name="HTTPSampler.connect_timeout"></stringProp>
+ <stringProp name="HTTPSampler.response_timeout"></stringProp>
+ <stringProp name="HTTPSampler.protocol"></stringProp>
+ <stringProp name="HTTPSampler.contentEncoding"></stringProp>
+ <stringProp name="HTTPSampler.path">/test/person/${personId}</stringProp>
+ <stringProp name="HTTPSampler.method">PUT</stringProp>
+ <boolProp name="HTTPSampler.follow_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.auto_redirects">false</boolProp>
+ <boolProp name="HTTPSampler.use_keepalive">true</boolProp>
+ <boolProp name="HTTPSampler.DO_MULTIPART_POST">false</boolProp>
+ <stringProp name="HTTPSampler.FILE_NAME"></stringProp>
+ <stringProp name="HTTPSampler.FILE_FIELD"></stringProp>
+ <stringProp name="HTTPSampler.mimetype"></stringProp>
+ <boolProp name="HTTPSampler.monitor">false</boolProp>
+ <stringProp name="HTTPSampler.embedded_url_re"></stringProp>
+ </HTTPSampler2>
+ <hashTree/>
+ </hashTree>
+ <ResultCollector guiclass="SummaryReport" testclass="ResultCollector" testname="Summary Report" enabled="true">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ <Summariser guiclass="SummariserGui" testclass="Summariser" testname="Generate Summary Results" enabled="true"/>
+ <hashTree/>
+ <ResultCollector guiclass="ViewResultsFullVisualizer" testclass="ResultCollector" testname="View Results Tree" enabled="false">
+ <boolProp name="ResultCollector.error_logging">false</boolProp>
+ <objProp>
+ <name>saveConfig</name>
+ <value class="SampleSaveConfiguration">
+ <time>true</time>
+ <latency>true</latency>
+ <timestamp>true</timestamp>
+ <success>true</success>
+ <label>true</label>
+ <code>true</code>
+ <message>true</message>
+ <threadName>true</threadName>
+ <dataType>true</dataType>
+ <encoding>false</encoding>
+ <assertions>true</assertions>
+ <subresults>true</subresults>
+ <responseData>false</responseData>
+ <samplerData>false</samplerData>
+ <xml>true</xml>
+ <fieldNames>false</fieldNames>
+ <responseHeaders>false</responseHeaders>
+ <requestHeaders>false</requestHeaders>
+ <responseDataOnError>false</responseDataOnError>
+ <saveAssertionResultsFailureMessage>false</saveAssertionResultsFailureMessage>
+ <assertionsResultsToSave>0</assertionsResultsToSave>
+ <bytes>true</bytes>
+ </value>
+ </objProp>
+ <stringProp name="filename"></stringProp>
+ </ResultCollector>
+ <hashTree/>
+ </hashTree>
+ </hashTree>
+</jmeterTestPlan>
diff --git a/core/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8 b/core/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8
new file mode 100644
index 0000000000..d60b6fa15d
--- /dev/null
+++ b/core/src/test/resources/org/apache/lucene/search/postingshighlight/CambridgeMA.utf8
@@ -0,0 +1 @@
+{{Distinguish|Cambridge, England}} {{primary sources|date=June 2012}} {{Use mdy dates|date=January 2011}} {{Infobox settlement |official_name = Cambridge, Massachusetts |nickname = |motto = "Boston's Left Bank"<ref>{{cite web|url= http://www.epodunk.com/cgi-bin/genInfo.php?locIndex=2894|title=Profile for Cambridge, Massachusetts, MA|publisher= ePodunk |accessdate= November 1, 2012}}</ref> |image_skyline = CambridgeMACityHall2.jpg |imagesize = 175px |image_caption = Cambridge City Hall |image_seal = |image_flag = |image_map = Cambridge ma highlight.png |mapsize = 250px |map_caption = Location in Middlesex County in Massachusetts |image_map1 = |mapsize1 = |map_caption1 = |coordinates_region = US-MA |subdivision_type = Country |subdivision_name = United States |subdivision_type1 = State |subdivision_name1 = [[Massachusetts]] |subdivision_type2 = [[List of counties in Massachusetts|County]] |subdivision_name2 = [[Middlesex County, Massachusetts|Middlesex]] |established_title = Settled |established_date = 1630 |established_title2 = Incorporated |established_date2 = 1636 |established_title3 = |established_date3 = |government_type = [[Council-manager government|Council-City Manager]] |leader_title = Mayor |leader_name = Henrietta Davis |leader_title1 = [[City manager|City Manager]] |leader_name1 = [[Robert W. 
Healy]] |area_magnitude = |area_total_km2 = 18.47 |area_total_sq_mi = 7.13 |area_land_km2 = 16.65 |area_land_sq_mi = 6.43 |area_water_km2 = 1.81 |area_water_sq_mi = 0.70 |population_as_of = 2010 |population_blank2_title = [[Demonym]] |population_blank2 = [[Cantabrigian]] |settlement_type = City |population_total = 105,162 |population_density_km2 = 6,341.98 |population_density_sq_mi = 16,422.08 |elevation_m = 12 |elevation_ft = 40 |timezone = [[Eastern Time Zone|Eastern]] |utc_offset = -5 |timezone_DST = [[Eastern Time Zone|Eastern]] |utc_offset_DST = -4 |coordinates_display = display=inline,title |latd = 42 |latm = 22 |lats = 25 |latNS = N |longd = 71 |longm = 06 |longs = 38 |longEW = W |website = [http://www.cambridgema.gov/ www.cambridgema.gov] |postal_code_type = ZIP code |postal_code = 02138, 02139, 02140, 02141, 02142 |area_code = [[Area code 617|617]] / [[Area code 857|857]] |blank_name = [[Federal Information Processing Standard|FIPS code]] |blank_info = 25-11000 |blank1_name = [[Geographic Names Information System|GNIS]] feature ID |blank1_info = 0617365 |footnotes = }} '''Cambridge''' is a city in [[Middlesex County, Massachusetts|Middlesex County]], [[Massachusetts]], [[United States]], in the [[Greater Boston]] area. It was named in honor of the [[University of Cambridge]] in [[England]], an important center of the [[Puritan]] theology embraced by the town's founders.<ref>{{cite book|last=Degler|first=Carl Neumann|title=Out of Our Pasts: The Forces That Shaped Modern America|publisher=HarperCollins|location=New York|year=1984|url=http://books.google.com/books?id=NebLe1ueuGQC&pg=PA18&lpg=PA18&dq=cambridge+university+puritans+newtowne#v=onepage&q=&f=false|accessdate=September 9, 2009 | isbn=978-0-06-131985-3}}</ref> Cambridge is home to two of the world's most prominent universities, [[Harvard University]] and the [[Massachusetts Institute of Technology]]. 
According to the [[2010 United States Census]], the city's population was 105,162.<ref name="2010.census.gov">{{cite web|url=http://2010.census.gov/news/releases/operations/cb11-cn104.html |title=Census 2010 News &#124; U.S. Census Bureau Delivers Massachusetts' 2010 Census Population Totals, Including First Look at Race and Hispanic Origin Data for Legislative Redistricting |publisher=2010.census.gov |date=2011-03-22 |accessdate=2012-04-28}}</ref> It is the fifth most populous city in the state, behind [[Boston]], [[Worcester, MA|Worcester]], [[Springfield, MA|Springfield]], and [[Lowell, Massachusetts|Lowell]].<ref name="2010.census.gov"/> Cambridge was one of the two [[county seat]]s of Middlesex County prior to the abolition of county government in 1997; [[Lowell, Massachusetts|Lowell]] was the other. ==History== {{See also|Timeline of Cambridge, Massachusetts history}} [[File:Formation of Massachusetts towns.svg|thumb|A map showing the original boundaries of Cambridge]] The site for what would become Cambridge was chosen in December 1630, because it was located safely upriver from Boston Harbor, which made it easily defensible from attacks by enemy ships. Also, the water from the local spring was so good that the local Native Americans believed it had medicinal properties.{{Citation needed|date=November 2009}} [[Thomas Dudley]], his daughter [[Anne Bradstreet]] and her husband Simon were among the first settlers of the town. The first houses were built in the spring of 1631. 
The settlement was initially referred to as "the newe towne".<ref name=drake>{{cite book|last=Drake|first=Samuel Adams|title=History of Middlesex County, Massachusetts|publisher=Estes and Lauriat|location=Boston|year=1880|volume=1|pages=305–16|url=http://books.google.com/books?id=QGolOAyd9RMC&pg=PA316&lpg=PA305&dq=newetowne&ct=result#PPA305,M1|accessdate=December 26, 2008}}</ref> Official Massachusetts records show the name capitalized as '''Newe Towne''' by 1632.<ref name=public>{{cite book|title=Report on the Custody and Condition of the Public Records of Parishes|publisher=Massachusetts Secretary of the Commonwealth|url=http://books.google.com/books?id=IyYWAAAAYAAJ&pg=RA1-PA298&lpg=RA1-PA298&dq=%22Ordered+That+Newtowne+shall+henceforward+be+called%22|location=Boston|year=1889|page=298|accessdate=December 24, 2008}}</ref> Located at the first convenient [[Charles River]] crossing west of [[Boston]], Newe Towne was one of a number of towns (including Boston, [[Dorchester, Massachusetts|Dorchester]], [[Watertown, Massachusetts|Watertown]], and [[Weymouth, Massachusetts|Weymouth]]) founded by the 700 original [[Puritan]] colonists of the [[Massachusetts Bay Colony]] under governor [[John Winthrop]]. The original village site is in the heart of today's [[Harvard Square]]. The marketplace where farmers brought in crops from surrounding towns to sell survives today as the small park at the corner of John F. Kennedy (J.F.K.) and Winthrop Streets, then at the edge of a salt marsh, since filled. The town included a much larger area than the present city, with various outlying parts becoming independent towns over the years: [[Newton, Massachusetts|Newton (originally Cambridge Village, then Newtown)]] in 1688,<ref>{{cite book |last= Ritter |first= Priscilla R. 
|coauthors= Thelma Fleishman |title= Newton, Massachusetts 1679–1779: A Biographical Directory |year= 1982 |publisher= New England Historic Genealogical Society }}</ref> [[Lexington, Massachusetts|Lexington (Cambridge Farms)]] in 1712, and both [[Arlington, Massachusetts|West Cambridge (originally Menotomy)]] and [[Brighton, Massachusetts|Brighton (Little Cambridge)]] in 1807.<ref>{{cite web |url=http://www.brightonbot.com/history.php |title=A Short History of Allston-Brighton |first=Marchione |last=William P. |author= |authorlink= |coauthors= |date= |month= |year=2011 |work=Brighton-Allston Historical Society |publisher=Brighton Board of Trade |location= |page= |pages= |at= |language= |trans_title= |arxiv= |asin= |bibcode= |doi= |doibroken= |isbn= |issn= |jfm= |jstor= |lccn= |mr= |oclc= |ol= |osti= |pmc = |pmid= |rfc= |ssrn= |zbl= |id= |archiveurl= |archivedate= |deadurl= |accessdate=December 21, 2011 |quote= |ref= |separator= |postscript=}}</ref> Part of West Cambridge joined the new town of [[Belmont, Massachusetts|Belmont]] in 1859, and the rest of West Cambridge was renamed Arlington in 1867; Brighton was annexed by Boston in 1874. 
In the late 19th century, various schemes for annexing Cambridge itself to the City of Boston were pursued and rejected.<ref>{{cite news |title=ANNEXATION AND ITS FRUITS |author=Staff writer |first= |last= |authorlink= |url=http://query.nytimes.com/gst/abstract.html?res=9901E4DC173BEF34BC4D52DFB766838F669FDE |agency= |newspaper=[[The New York Times]] |publisher= |isbn= |issn= |pmid= |pmd= |bibcode= |doi= |date=January 15, 1874, Wednesday |page= 4 |pages= |accessdate=|archiveurl=http://query.nytimes.com/mem/archive-free/pdf?res=9901E4DC173BEF34BC4D52DFB766838F669FDE |archivedate=January 15, 1874 |ref= }}</ref><ref>{{cite news |title=BOSTON'S ANNEXATION SCHEMES.; PROPOSAL TO ABSORB CAMBRIDGE AND OTHER NEAR-BY TOWNS |author=Staff writer |first= |last= |authorlink= |url=http://query.nytimes.com/gst/abstract.html?res=9C05E1DC1F39E233A25754C2A9659C94639ED7CF |agency= |newspaper=[[The New York Times]] |publisher= |isbn= |issn= |pmid= |pmd= |bibcode= |doi= |date=March 26, 1892, Wednesday |page= 11 |pages= |accessdate=August 21, 2010|archiveurl=http://query.nytimes.com/mem/archive-free/pdf?res=9C05E1DC1F39E233A25754C2A9659C94639ED7CF |archivedate=March 27, 1892 |ref= }}</ref> In 1636, [[Harvard College]] was founded by the colony to train [[minister (religion)|ministers]] and the new town was chosen for its site by [[Thomas Dudley]]. 
By 1638, the name "Newe Towne" had "compacted by usage into 'Newtowne'."<ref name=drake /> In May 1638<ref>{{cite book|title=The Cambridge of Eighteen Hundred and Ninety-six|editor=Arthur Gilman, ed.|publisher=Committee on the Memorial Volume|location=Cambridge|year=1896|page=8}}</ref><ref>{{cite web|author=Harvard News Office |url=http://news.harvard.edu/gazette/2002/05.02/02-history.html |title='&#39;Harvard Gazette'&#39; historical calendar giving May 12, 1638 as date of name change; certain other sources say May 2, 1638 or late 1637 |publisher=News.harvard.edu |date=2002-05-02 |accessdate=2012-04-28}}</ref> the name was changed to '''Cambridge''' in honor of the [[University of Cambridge|university]] in [[Cambridge, England]].<ref>{{cite book |last= Hannah Winthrop Chapter, D.A.R. |title= Historic Guide to Cambridge |edition= Second |year= 1907 |publisher= Hannah Winthrop Chapter, D.A.R. |location= Cambridge, Mass. |pages= 20–21 |quote= On October&nbsp;15, 1637, the Great and General Court passed a vote that: "The college is ordered to bee at Newetowne." In this same year the name of Newetowne was changed to Cambridge, ("It is ordered that Newetowne shall henceforward be called Cambridge") in honor of the university in Cambridge, England, where many of the early settlers were educated. }}</ref> The first president ([[Henry Dunster]]), the first benefactor ([[John Harvard (clergyman)|John Harvard]]), and the first schoolmaster ([[Nathaniel Eaton]]) of Harvard were all Cambridge University alumni, as was the then ruling (and first) governor of the [[Massachusetts Bay Colony]], John Winthrop. 
In 1629, Winthrop had led the signing of the founding document of the city of Boston, which was known as the [[Cambridge Agreement]], after the university.<ref>{{cite web|url=http://www.winthropsociety.org/doc_cambr.php|publisher=The Winthrop Society|title=Descendants of the Great Migration|accessdate=September 8, 2008}}</ref> It was Governor Thomas Dudley who, in 1650, signed the charter creating the corporation which still governs Harvard College.<ref>{{cite web|url=http://hul.harvard.edu/huarc/charter.html |title=Harvard Charter of 1650, Harvard University Archives, Harvard University, harvard.edu |publisher=Hul.harvard.edu |date= |accessdate=2012-04-28}}</ref><ref>{{cite book |last1= |first1= |authorlink1= |editor1-first= |editor1-last= |editor1-link= |others= |title=Constitution of the Commonwealth of Massachusetts|url=http://www.mass.gov/legis/const.htm |accessdate=December 13, 2009 |edition= |series= |volume= |date=September 1, 1779 |publisher=The General Court of Massachusetts |location= |isbn= |oclc= |doi= |page= |pages=|chapter=Chapter V: The University at Cambridge, and encouragement of literature, etc. |chapterurl= |ref= |bibcode= }}</ref> [[Image:Washington taking command of the American Army at Cambridge, 1775 - NARA - 532874.tif|thumb|right|George Washington in Cambridge, 1775]] Cambridge grew slowly as an agricultural village eight miles (13&nbsp;km) by road from Boston, the capital of the colony. By the [[American Revolution]], most residents lived near the [[Cambridge Common|Common]] and Harvard College, with farms and estates comprising most of the town. Most of the inhabitants were descendants of the original Puritan colonists, but there was also a small elite of [[Anglicans|Anglican]] "worthies" who were not involved in village life, who made their livings from estates, investments, and trade, and lived in mansions along "the Road to Watertown" (today's [[Brattle Street (Cambridge, Massachusetts)|Brattle Street]], still known as [[Tory Row]]). 
In 1775, [[George Washington]] came up from [[Virginia]] to take command of fledgling volunteer American soldiers camped on the [[Cambridge Common]]—today called the birthplace of the [[U.S. Army]]. (The name of today's nearby Sheraton Commander Hotel refers to that event.) Most of the Tory estates were confiscated after the Revolution. On January 24, 1776, [[Henry Knox]] arrived with artillery captured from [[Fort Ticonderoga]], which enabled Washington to drive the British army out of Boston. [[File:Cambridge 1873 WardMap.jpg|thumb|300px|left|A map of Cambridge from 1873]] Between 1790 and 1840, Cambridge began to grow rapidly, with the construction of the [[West Boston Bridge]] in 1792, that connected Cambridge directly to Boston, making it no longer necessary to travel eight miles (13&nbsp;km) through the [[Boston Neck]], [[Roxbury, Massachusetts|Roxbury]], and [[Brookline, Massachusetts|Brookline]] to cross the [[Charles River]]. A second bridge, the Canal Bridge, opened in 1809 alongside the new [[Middlesex Canal]]. The new bridges and roads made what were formerly estates and [[marsh]]land into prime industrial and residential districts. In the mid-19th century, Cambridge was the center of a literary revolution when it gave the country a new identity through poetry and literature. Cambridge was home to the famous Fireside Poets—so called because their poems would often be read aloud by families in front of their evening fires. In their day, the [[Fireside Poets]]—[[Henry Wadsworth Longfellow]], [[James Russell Lowell]], and [[Oliver Wendell Holmes, Sr.|Oliver Wendell Holmes]]—were as popular and influential as rock stars are today.{{Citation needed|date=November 2009}} Soon after, [[Toll road|turnpikes]] were built: the [[Cambridge and Concord Turnpike]] (today's Broadway and Concord Ave.), the [[Middlesex Turnpike (Massachusetts)|Middlesex Turnpike]] (Hampshire St. 
and [[Massachusetts Avenue (Boston)|Massachusetts Ave.]] northwest of [[Porter Square]]), and what are today's Cambridge, Main, and Harvard Streets were roads to connect various areas of Cambridge to the bridges. In addition, railroads crisscrossed the town during the same era, leading to the development of Porter Square as well as the creation of neighboring town [[Somerville, Massachusetts|Somerville]] from the formerly rural parts of [[Charlestown, Massachusetts|Charlestown]]. [[File:Middlesex Canal (Massachusetts) map, 1852.jpg|thumb|1852 Map of Boston area showing Cambridge and rail lines.]] Cambridge was incorporated as a city in 1846. This was despite noticeable tensions between East Cambridge, Cambridgeport, and Old Cambridge that stemmed from differences in in each area's culture, sources of income, and the national origins of the residents.<ref>Cambridge Considered: A Very Brief History of Cambridge, 1800-1900, Part I. http://cambridgeconsidered.blogspot.com/2011/01/very-brief-history-of-cambridge-1800.html</ref> The city's commercial center began to shift from Harvard Square to Central Square, which became the downtown of the city around this time. Between 1850 and 1900, Cambridge took on much of its present character—[[streetcar suburb]]an development along the turnpikes, with working-class and industrial neighborhoods focused on East Cambridge, comfortable middle-class housing being built on old estates in Cambridgeport and Mid-Cambridge, and upper-class enclaves near Harvard University and on the minor hills of the city. The coming of the railroad to North Cambridge and Northwest Cambridge then led to three major changes in the city: the development of massive brickyards and brickworks between Massachusetts Ave., Concord Ave. 
and [[Alewife Brook]]; the ice-cutting industry launched by [[Frederic Tudor]] on [[Fresh Pond, Cambridge, Massachusetts|Fresh Pond]]; and the carving up of the last estates into residential subdivisions to provide housing to the thousands of immigrants that arrived to work in the new industries. For many years, the city's largest employer was the [[New England Glass Company]], founded in 1818. By the middle of the 19th century it was the largest and most modern glassworks in the world. In 1888, all production was moved, by [[Edward Libbey|Edward Drummond Libbey]], to [[Toledo, Ohio]], where it continues today under the name Owens Illinois. Flint glassware with heavy lead content, produced by that company, is prized by antique glass collectors. There is none on public display in Cambridge, but there is a large collection in the [[Toledo Museum of Art]]. Among the largest businesses located in Cambridge was the firm of [[Carter's Ink Company]], whose neon sign long adorned the [[Charles River]] and which was for many years the largest manufacturer of ink in the world. By 1920, Cambridge was one of the main industrial cities of [[New England]], with nearly 120,000 residents. As industry in New England began to decline during the [[Great Depression]] and after World War II, Cambridge lost much of its industrial base. It also began the transition to being an intellectual, rather than an industrial, center. Harvard University had always been important in the city (both as a landowner and as an institution), but it began to play a more dominant role in the city's life and culture. Also, the move of the [[Massachusetts Institute of Technology]] from Boston in 1916 ensured Cambridge's status as an intellectual center of the United States. After the 1950s, the city's population began to decline slowly, as families tended to be replaced by single people and young couples. 
The 1980s brought a wave of high-technology startups, creating software such as [[Visicalc]] and [[Lotus 1-2-3]], and advanced computers, but many of these companies fell into decline with the fall of the minicomputer and [[DOS]]-based systems. However, the city continues to be home to many startups as well as a thriving biotech industry. By the end of the 20th century, Cambridge had one of the most expensive housing markets in the Northeastern United States. While maintaining much diversity in class, race, and age, it became harder and harder for those who grew up in the city to be able to afford to stay. The end of [[rent control]] in 1994 prompted many Cambridge renters to move to housing that was more affordable, in Somerville and other communities. In 2005, a reassessment of residential property values resulted in a disproportionate number of houses owned by non-affluent people jumping in value relative to other houses, with hundreds having their property tax increased by over 100%; this forced many homeowners in Cambridge to move elsewhere.<ref>Cambridge Chronicle, October 6, 13, 20, 27, 2005</ref> As of 2012, Cambridge's mix of amenities and proximity to Boston has kept housing prices relatively stable. ==Geography== [[File:Charles River Cambridge USA.jpg|thumb|upright|A view from Boston of Harvard's [[Weld Boathouse]] and Cambridge in winter. The [[Charles River]] is in the foreground.]] According to the [[United States Census Bureau]], Cambridge has a total area of {{convert|7.1|sqmi|km2}}, of which {{convert|6.4|sqmi|km2}} of it is land and {{convert|0.7|sqmi|km2}} of it (9.82%) is water. 
===Adjacent municipalities=== Cambridge is located in eastern Massachusetts, bordered by: *the city of [[Boston]] to the south (across the [[Charles River]]) and east *the city of [[Somerville, Massachusetts|Somerville]] to the north *the town of [[Arlington, Massachusetts|Arlington]] to the northwest *the town of [[Belmont, Massachusetts|Belmont]] and *the city of [[Watertown, Massachusetts|Watertown]] to the west The border between Cambridge and the neighboring city of [[Somerville, Massachusetts|Somerville]] passes through densely populated neighborhoods which are connected by the [[Red Line (MBTA)|MBTA Red Line]]. Some of the main squares, [[Inman Square|Inman]], [[Porter Square|Porter]], and to a lesser extent, [[Harvard Square|Harvard]], are very close to the city line, as are Somerville's [[Union Square (Somerville)|Union]] and [[Davis Square]]s. ===Neighborhoods=== ====Squares==== [[File:Centralsquarecambridgemass.jpg|thumb|[[Central Square (Cambridge)|Central Square]]]] [[File:Harvard square 2009j.JPG|thumb|[[Harvard Square]]]] [[File:Cambridge MA Inman Square.jpg|thumb|[[Inman Square]]]] Cambridge has been called the "City of Squares" by some,<ref>{{cite web|author=No Writer Attributed |url=http://www.thecrimson.com/article/1969/9/18/cambridge-a-city-of-squares-pcambridge/ |title="Cambridge: A City of Squares" Harvard Crimson, Sept. 18, 1969 |publisher=Thecrimson.com |date=1969-09-18 |accessdate=2012-04-28}}</ref><ref>{{cite web|url=http://www.travelwritersmagazine.com/RonBernthal/Cambridge.html |title=Cambridge Journal: Massachusetts City No Longer in Boston's Shadow |publisher=Travelwritersmagazine.com |date= |accessdate=2012-04-28}}</ref> as most of its commercial districts are major street intersections known as [[Town square|squares]]. Each of the squares acts as a neighborhood center. 
These include: * [[Kendall Square]], formed by the junction of Broadway, Main Street, and Third Street, is also known as '''Technology Square''', a name shared with an office and laboratory building cluster in the neighborhood. Just over the [[Longfellow Bridge]] from Boston, at the eastern end of the [[Massachusetts Institute of Technology|MIT]] campus, it is served by the [[Kendall (MBTA station)|Kendall/MIT]] station on the [[Massachusetts Bay Transportation Authority|MBTA]] [[Red Line (MBTA)|Red Line]] subway. Most of Cambridge's large office towers are located here, giving the area somewhat of an office park feel. A flourishing [[biotech]] industry has grown up around this area. The "One Kendall Square" complex is nearby, but—confusingly—not actually in Kendall Square. Also, the "Cambridge Center" office complex is located here, and not at the actual center of Cambridge. * [[Central Square (Cambridge)|Central Square]], formed by the junction of Massachusetts Avenue, Prospect Street, and Western Avenue, is well known for its wide variety of ethnic restaurants. As recently as the late 1990s it was rather run-down; it underwent a controversial [[gentrification]] in recent years (in conjunction with the development of the nearby [[University Park at MIT]]), and continues to grow more expensive. It is served by the [[Central (MBTA station)|Central Station]] stop on the MBTA Red Line subway. '''Lafayette Square''', formed by the junction of Massachusetts Avenue, Columbia Street, Sidney Street, and Main Street, is considered part of the Central Square area. [[Cambridgeport]] is south of Central Square along Magazine Street and Brookline Street. * [[Harvard Square]], formed by the junction of Massachusetts Avenue, Brattle Street, and JFK Street. This is the primary site of [[Harvard University]], and is a major Cambridge shopping area. It is served by a [[Harvard (MBTA station)|Red Line station]]. 
Harvard Square was originally the northwestern terminus of the Red Line and a major transfer point to streetcars that also operated in a short [[Harvard Bus Tunnel|tunnel]]—which is still a major bus terminal, although the area under the Square was reconfigured dramatically in the 1980s when the Red Line was extended. The Harvard Square area includes '''Brattle Square''' and '''Eliot Square'''. A short distance away from the square lies the [[Cambridge Common]], while the neighborhood north of Harvard and east of Massachusetts Avenue is known as Agassiz in honor of the famed scientist [[Louis Agassiz]]. * [[Porter Square]], about a mile north on Massachusetts Avenue from Harvard Square, is formed by the junction of Massachusetts and Somerville Avenues, and includes part of the city of [[Somerville, Massachusetts|Somerville]]. It is served by the [[Porter (MBTA station)|Porter Square Station]], a complex housing a [[Red Line (MBTA)|Red Line]] stop and a [[Fitchburg Line]] [[MBTA commuter rail|commuter rail]] stop. [[Lesley University]]'s University Hall and Porter campus are located at Porter Square. * [[Inman Square]], at the junction of Cambridge and Hampshire streets in Mid-Cambridge. Inman Square is home to many diverse restaurants, bars, music venues and boutiques. The funky street scene still holds some urban flair, but was dressed up recently with Victorian streetlights, benches and bus stops. A new community park was installed and is a favorite place to enjoy some takeout food from the nearby restaurants and ice cream parlor. * [[Lechmere Square]], at the junction of Cambridge and First streets, adjacent to the CambridgeSide Galleria shopping mall. Perhaps best known as the northern terminus of the [[Massachusetts Bay Transportation Authority|MBTA]] [[Green Line (MBTA)|Green Line]] subway, at [[Lechmere (MBTA station)|Lechmere Station]]. 
====Other neighborhoods==== The residential neighborhoods ([http://www.cambridgema.gov/CPD/publications/neighborhoods.cfm map]) in Cambridge border, but are not defined by the squares. These include: * [[East Cambridge, Massachusetts|East Cambridge]] (Area 1) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the east by the Charles River, on the south by Broadway and Main Street, and on the west by the [[Grand Junction Railroad]] tracks. It includes the [[NorthPoint (Cambridge, Massachusetts)|NorthPoint]] development. * [[Massachusetts Institute of Technology|MIT]] Campus ([[MIT Campus (Area 2), Cambridge|Area 2]]) is bordered on the north by Broadway, on the south and east by the Charles River, and on the west by the Grand Junction Railroad tracks. * [[Wellington-Harrington]] (Area 3) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the south and west by Hampshire Street, and on the east by the Grand Junction Railroad tracks. Referred to as "Mid-Block".{{clarify|What is? By whom? A full sentence would help.|date=September 2011}} * [[Area 4, Cambridge|Area 4]] is bordered on the north by Hampshire Street, on the south by Massachusetts Avenue, on the west by Prospect Street, and on the east by the Grand Junction Railroad tracks. Residents of Area 4 often refer to their neighborhood simply as "The Port", and refer to the area of Cambridgeport and Riverside as "The Coast". * [[Cambridgeport]] (Area 5) is bordered on the north by Massachusetts Avenue, on the south by the Charles River, on the west by River Street, and on the east by the Grand Junction Railroad tracks. * [[Mid-Cambridge]] (Area 6) is bordered on the north by Kirkland and Hampshire Streets and the [[Somerville, Massachusetts|Somerville]] border, on the south by Massachusetts Avenue, on the west by Peabody Street, and on the east by Prospect Street. 
* [[Riverside, Cambridge|Riverside]] (Area 7), an area sometimes referred to as "The Coast," is bordered on the north by Massachusetts Avenue, on the south by the Charles River, on the west by JFK Street, and on the east by River Street. * [[Agassiz, Cambridge, Massachusetts|Agassiz (Harvard North)]] (Area 8) is bordered on the north by the [[Somerville, Massachusetts|Somerville]] border, on the south and east by Kirkland Street, and on the west by Massachusetts Avenue. * [[Peabody, Cambridge, Massachusetts|Peabody]] (Area 9) is bordered on the north by railroad tracks, on the south by Concord Avenue, on the west by railroad tracks, and on the east by Massachusetts Avenue. The Avon Hill sub-neighborhood consists of the higher elevations bounded by Upland Road, Raymond Street, Linnaean Street and Massachusetts Avenue. * Brattle area/[[West Cambridge (neighborhood)|West Cambridge]] (Area 10) is bordered on the north by Concord Avenue and Garden Street, on the south by the Charles River and the [[Watertown, Massachusetts|Watertown]] border, on the west by Fresh Pond and the Collins Branch Library, and on the east by JFK Street. It includes the sub-neighborhoods of Brattle Street (formerly known as [[Tory Row]]) and Huron Village. * [[North Cambridge, Massachusetts|North Cambridge]] (Area 11) is bordered on the north by the [[Arlington, Massachusetts|Arlington]] and [[Somerville, Massachusetts|Somerville]] borders, on the south by railroad tracks, on the west by the [[Belmont, Massachusetts|Belmont]] border, and on the east by the [[Somerville, Massachusetts|Somerville]] border. * [[Cambridge Highlands]] (Area 12) is bordered on the north and east by railroad tracks, on the south by Fresh Pond, and on the west by the [[Belmont, Massachusetts|Belmont]] border. 
* [[Strawberry Hill, Cambridge|Strawberry Hill]] (Area 13) is bordered on the north by Fresh Pond, on the south by the [[Watertown, Massachusetts|Watertown]] border, on the west by the [[Belmont, Massachusetts|Belmont]] border, and on the east by railroad tracks. ===Parks and outdoors=== [[File:Alewife Brook Reservation.jpg|thumb|Alewife Brook Reservation]] Consisting largely of densely built residential space, Cambridge lacks significant tracts of public parkland. This is partly compensated for, however, by the presence of easily accessible open space on the university campuses, including [[Harvard Yard]] and MIT's Great Lawn, as well as the considerable open space of [[Mount Auburn Cemetery]]. At the western edge of Cambridge, the cemetery is well known as the first garden cemetery, for its distinguished inhabitants, for its superb landscaping (the oldest planned landscape in the country), and as a first-rate [[arboretum]]. Although known as a Cambridge landmark, much of the cemetery lies within the bounds of Watertown.<ref>http://www2.cambridgema.gov/CityOfCambridge_Content/documents/CambridgeStreetMap18x24_032007.pdf</ref> It is also a significant [[Important Bird Area]] (IBA) in the Greater Boston area. Public parkland includes the esplanade along the Charles River, which mirrors its [[Charles River Esplanade|Boston counterpart]], [[Cambridge Common]], a busy and historic public park immediately adjacent to the Harvard campus, and the [[Alewife Brook Reservation]] and [[Fresh Pond, Cambridge, Massachusetts|Fresh Pond]] in the western part of the city. 
==Demographics== {{Historical populations | type=USA | align=right | 1790|2115 | 1800|2453 | 1810|2323 | 1820|3295 | 1830|6072 | 1840|8409 | 1850|15215 | 1860|26060 | 1870|39634 | 1880|52669 | 1890|70028 | 1900|91886 | 1910|104839 | 1920|109694 | 1930|113643 | 1940|110879 | 1950|120740 | 1960|107716 | 1970|100361 | 1980|95322 | 1990|95802 | 2000|101355 | 2010|105162 | footnote= {{Historical populations/Massachusetts municipalities references}}<ref name="1950_Census_Urban_populations_since_1790">{{cite journal | title=1950 Census of Population | volume=1: Number of Inhabitants | at=Section 6, Pages 21-7 through 21-09, Massachusetts Table 4. Population of Urban Places of 10,000 or more from Earliest Census to 1920 | publisher=Bureau of the Census | accessdate=July 12, 2011 | year=1952 | url=http://www2.census.gov/prod2/decennial/documents/23761117v1ch06.pdf}}</ref> }} As of the census{{GR|2}} of 2010, there were 105,162 people, 44,032 households, and 17,420 families residing in the city. The population density was 16,422.08 people per square mile (6,341.98/km²), making Cambridge the fifth most densely populated city in the US<ref name=CountyCityDataBook>County and City Data Book: 2000. Washington, DC: US Department of Commerce, Bureau of the Census. Table C-1.</ref> and the second most densely populated city in [[Massachusetts]] behind neighboring [[Somerville, Massachusetts|Somerville]].<ref>[http://www.boston.com/realestate/news/articles/2008/07/13/highest_population_density/ Highest Population Density, The Boston Globe]</ref> There were 47,291 housing units at an average density of 7,354.7 per square mile (2,840.3/km²). The racial makeup of the city was 66.60% [[White (U.S. Census)|White]], 11.70% [[Black (people)|Black]] or [[Race (United States Census)|African American]], 0.20% [[Native American (U.S. Census)|Native American]], 15.10% [[Asian (U.S. Census)|Asian]], 0.01% [[Pacific Islander (U.S. 
Census)|Pacific Islander]], 2.10% from [[Race (United States Census)|other races]], and 4.30% from two or more races. 7.60% of the population were [[Hispanics in the United States|Hispanic]] or [[Latino (U.S. Census)|Latino]] of any race. [[Non-Hispanic Whites]] were 62.1% of the population in 2010,<ref>{{cite web |url=http://quickfacts.census.gov/qfd/states/25/2511000.html |title=Cambridge (city), Massachusetts |work=State & County QuickFacts |publisher=U.S. Census Bureau}}</ref> down from 89.7% in 1970.<ref>{{cite web|title=Massachusetts - Race and Hispanic Origin for Selected Cities and Other Places: Earliest Census to 1990|publisher=U.S. Census Bureau|url=http://www.census.gov/population/www/documentation/twps0076/twps0076.html}}</ref> This rather closely parallels the average [[racial demographics of the United States]] as a whole, although Cambridge has significantly more Asians than the average, and fewer Hispanics and Caucasians. 11.0% were of [[irish people|Irish]], 7.2% English, 6.9% [[italians|Italian]], 5.5% [[West Indian]] and 5.3% [[germans|German]] ancestry according to [[Census 2000]]. 69.4% spoke English, 6.9% Spanish, 3.2% [[Standard Mandarin|Chinese]] or [[Standard Mandarin|Mandarin]], 3.0% [[portuguese language|Portuguese]], 2.9% [[French-based creole languages|French Creole]], 2.3% French, 1.5% [[korean language|Korean]], and 1.0% [[italian language|Italian]] as their first language. There were 44,032 households out of which 16.9% had children under the age of 18 living with them, 28.9% were married couples living together, 8.4% had a female householder with no husband present, and 60.4% were non-families. 40.7% of all households were made up of individuals and 9.6% had someone living alone who was 65 years of age or older. The average household size was 2.00 and the average family size was 2.76. 
In the city the population was spread out with 13.3% under the age of 18, 21.2% from 18 to 24, 38.6% from 25 to 44, 17.8% from 45 to 64, and 9.2% who were 65 years of age or older. The median age was 30.5 years. For every 100 females, there were 96.1 males. For every 100 females age 18 and over, there were 94.7 males. The median income for a household in the city was $47,979, and the median income for a family was $59,423 (these figures had risen to $58,457 and $79,533 respectively {{as of|2007|alt=as of a 2007 estimate}}<ref>{{cite web|url=http://factfinder.census.gov/servlet/ACSSAFFFacts?_event=Search&geo_id=16000US2418750&_geoContext=01000US%7C04000US24%7C16000US2418750&_street=&_county=cambridge&_cityTown=cambridge&_state=04000US25&_zip=&_lang=en&_sse=on&ActiveGeoDiv=geoSelect&_useEV=&pctxt=fph&pgsl=160&_submenuId=factsheet_1&ds_name=ACS_2007_3YR_SAFF&_ci_nbr=null&qr_name=null&reg=null%3Anull&_keyword=&_industry= |title=U.S. Census, 2000 |publisher=Factfinder.census.gov |date= |accessdate=2012-04-28}}</ref>). Males had a median income of $43,825 versus $38,489 for females. The per capita income for the city was $31,156. About 8.7% of families and 12.9% of the population were below the poverty line, including 15.1% of those under age 18 and 12.9% of those age 65 or over. 
Cambridge was ranked as one of the most liberal cities in America.<ref>{{cite web|author=Aug 16, 2005 12:00 AM |url=http://www.govpro.com/News/Article/31439/ |title=Study Ranks America’s Most Liberal and Conservative Cities |publisher=Govpro.com |date=2005-08-16 |accessdate=2012-04-28}}</ref> Locals living in and near the city jokingly refer to it as "The People's Republic of Cambridge."<ref>[http://www.universalhub.com/glossary/peoples_republic_the.html Wicked Good Guide to Boston English] Accessed February 2, 2009</ref> For 2012, the residential property tax rate in Cambridge is $8.48 per $1,000.<ref>{{cite web|url=http://www.cambridgema.gov/finance/propertytaxinformation/fy12propertytaxinformation.aspx |title=FY12 Property Tax Information - City of Cambridge, Massachusetts |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Cambridge enjoys the highest possible [[bond credit rating]], AAA, with all three Wall Street rating agencies.<ref>http://www.cambridgema.gov/CityOfCambridge_Content/documents/Understanding_Your_Taxes_2007.pdf</ref> Cambridge is noted for its diverse population, both racially and economically. Residents, known as ''Cantabrigians'', include affluent [[MIT]] and Harvard professors. The first legal applications in America for same-sex marriage licenses were issued at Cambridge's City Hall.<ref>{{cite web|url=http://www.boston.com/news/local/articles/2004/05/17/free_to_marry/ |title=Free to Marry |work=[[The Boston Globe]] |date=2004-05-17 |accessdate=2012-07-18}}</ref> Cambridge is also the birthplace of [[Thailand|Thai]] king [[Bhumibol Adulyadej|Bhumibol Adulyadej (Rama IX)]], who is the world's longest reigning monarch at age 82 (2010), as well as the longest reigning monarch in Thai history. He is also the first king of a foreign country to be born in the United States. ==Government== ===Federal and state representation=== {| class=wikitable ! 
colspan = 6 | Voter registration and party enrollment {{as of|lc=y|df=US|2008|10|15}}<ref>{{cite web|title = 2008 State Party Election Party Enrollment Statistics | publisher = Massachusetts Elections Division | format = PDF | accessdate = July 7, 2010 | url = http://www.sec.state.ma.us/ele/elepdf/st_county_town_enroll_breakdown_08.pdf}}</ref> |- ! colspan = 2 | Party ! Number of voters ! Percentage {{American politics/party colors/Democratic/row}} | [[Democratic Party (United States)|Democratic]] | style="text-align:center;"| 37,822 | style="text-align:center;"| 58.43% {{American politics/party colors/Republican/row}} | [[Republican Party (United States)|Republican]] | style="text-align:center;"| 3,280 | style="text-align:center;"| 5.07% {{American politics/party colors/Independent/row}} | Unaffiliated | style="text-align:center;"| 22,935 | style="text-align:center;"| 35.43% {{American politics/party colors/Libertarian/row}} | Minor Parties | style="text-align:center;"| 690 | style="text-align:center;"| 1.07% |- ! colspan = 2 | Total ! style="text-align:center;"| 64,727 ! style="text-align:center;"| 100% |} Cambridge is part of [[Massachusetts's 8th congressional district]], represented by Democrat [[Mike Capuano]], elected in 1998. The state's senior member of the [[United States Senate]] is Democrat [[John Kerry]], elected in 1984. The state's junior member is Republican [[Scott Brown]], [[United States Senate special election in Massachusetts, 2010|elected in 2010]] to fill the vacancy caused by the death of long-time Democratic Senator [[Ted Kennedy]]. The Governor of Massachusetts is Democrat [[Deval Patrick]], elected in 2006 and re-elected in 2010. 
On the state level, Cambridge is represented in six districts in the [[Massachusetts House of Representatives]]: the 24th Middlesex (which includes parts of Belmont and Arlington), the 25th and 26th Middlesex (the latter which includes a portion of Somerville), the 29th Middlesex (which includes a small part of Watertown), and the Eighth and Ninth Suffolk (both including parts of the City of Boston). The city is represented in the [[Massachusetts Senate]] as a part of the "First Suffolk and Middlesex" district (this contains parts of Boston, Revere and Winthrop each in Suffolk County); the "Middlesex, Suffolk and Essex" district, which includes Everett and Somerville, with Boston, Chelsea, and Revere of Suffolk, and Saugus in Essex; and the "Second Suffolk and Middlesex" district, containing parts of the City of Boston in Suffolk county, and Cambridge, Belmont and Watertown in Middlesex county.<ref>{{cite web|url=http://www.malegislature.gov/ |title=Index of Legislative Representation by City and Town, from |publisher=Mass.gov |date= |accessdate=2012-04-28}}</ref> In addition to the [[Cambridge Police Department (Massachusetts)|Cambridge Police Department]], the city is patrolled by the Fifth (Brighton) Barracks of Troop H of the [[Massachusetts State Police]].<ref>[http://www.mass.gov/?pageID=eopsterminal&L=5&L0=Home&L1=Law+Enforcement+%26+Criminal+Justice&L2=Law+Enforcement&L3=State+Police+Troops&L4=Troop+H&sid=Eeops&b=terminalcontent&f=msp_divisions_field_services_troops_troop_h_msp_field_troop_h_station_h5&csid=Eeops Station H-5, SP Brighton]{{dead link|date=April 2012}}</ref> Due, however, to close proximity, the city also practices functional cooperation with the Fourth (Boston) Barracks of Troop H, as 
well.<ref>[http://www.mass.gov/?pageID=eopsterminal&L=5&L0=Home&L1=Law+Enforcement+%26+Criminal+Justice&L2=Law+Enforcement&L3=State+Police+Troops&L4=Troop+H&sid=Eeops&b=terminalcontent&f=msp_divisions_field_services_troops_troop_h_msp_field_troop_h_station_h4&csid=Eeops Station H-4, SP Boston]{{dead link|date=April 2012}}</ref> ===City government=== [[File:CambridgeMACityHall1.jpg|thumb|right|[[Cambridge, Massachusetts City Hall|Cambridge City Hall]] in the 1980s]] Cambridge has a city government led by a [[List of mayors of Cambridge, Massachusetts|Mayor]] and nine-member City Council. There is also a six-member School Committee which functions alongside the Superintendent of public schools. The councilors and school committee members are elected every two years using the [[single transferable vote]] (STV) system.<ref>{{cite web|url=http://www.cambridgema.gov/election/Proportional_Representation.cfm |title=Proportional Representation Voting in Cambridge |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Once a laborious process that took several days to complete by hand, ballot sorting and calculations to determine the outcome of elections are now quickly performed by computer, after the ballots have been [[Optical scan voting system|optically scanned]]. The mayor is elected by the city councilors from amongst themselves, and serves as the chair of City Council meetings. The mayor also sits on the School Committee. However, the Mayor is not the Chief Executive of the City. Rather, the City Manager, who is appointed by the City Council, serves in that capacity. Under the City's Plan E form of government the city council does not have the power to appoint or remove city officials who are under direction of the city manager. The city council and its individual members are also forbidden from giving orders to any subordinate of the city manager.<ref>http://www.cambridgema.gov/CityOfCambridge_Content/documents/planE.pdf</ref> [[Robert W. 
Healy]] is the City Manager; he has served in the position since 1981. In recent history, the media has highlighted the salary of the City Manager as being one of the highest in the State of Massachusetts.<ref>{{cite news |title=Cambridge city manager's salary almost as much as Obama's pay |url=http://www.wickedlocal.com/cambridge/features/x1837730973/Cambridge-city-managers-salary-almost-as-much-as-Obamas |agency= |newspaper=Wicked Local: Cambridge |publisher= |date=August 11, 2011 |accessdate=December 30, 2011 |quote= |archiveurl= |archivedate= |deadurl= |ref=}}</ref> The city council consists of:<ref>{{cite web|url=http://www.cambridgema.gov/ccouncil/citycouncilmembers.aspx |title=City of Cambridge – City Council Members |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref>{{Refbegin|3}} *[[Leland Cheung]] (Jan. 2010–present) *Henrietta Davis (Jan. 1996–present)* *Marjorie C. Decker (Jan. 2000–present)<ref>{{cite web |url= http://www.wickedlocal.com/cambridge/news/x738245499/Marjorie-Decker-announces-she-will-run-for-Alice-Wolfs-Cambridge-State-Representative-seat |title= Marjorie Decker announces she will run for Alice Wolf's Cambridge State Representative seat |date= 22 March 2012 |work= Wicked Local Cambridge |publisher= GateHouse Media, Inc. |accessdate= 4 April 2012 }}</ref> *Craig A. Kelley (Jan. 2006–present) *David Maher (Jan. 2000-Jan. 2006, Sept. 2007–present<ref>{{cite web|author=By ewelin, on September 5th, 2007 |url=http://www.cambridgehighlands.com/2007/09/david-p-maher-elected-to-fill-michael-sullivans-vacated-city-council-seat |title=David P. Maher Elected to fill Michael Sullivan’s Vacated City Council Seat • Cambridge Highlands Neighborhood Association |publisher=Cambridgehighlands.com |date=2007-09-05 |accessdate=2012-04-28}}</ref>)** *[[Kenneth Reeves]] (Jan. 1990–present)** *[[E. Denise Simmons]] (Jan. 2002–present)** *[[Timothy J. Toomey, Jr.]] (Jan. 1990–present) *Minka vanBeuzekom (Jan. 
2012–present){{Refend}} ''* = Current Mayor''<br> ''** = former Mayor'' ===Fire Department=== The city of Cambridge is protected full-time by the 274 professional firefighters of the Cambridge Fire Department. The current Chief of Department is Gerald R. Reardon. The Cambridge Fire Department operates out of eight fire stations, located throughout the city, under the command of two divisions. The CFD also maintains and operates a front-line fire apparatus fleet of eight engines, four ladders, two Non-Transport Paramedic EMS units, a Haz-Mat unit, a Tactical Rescue unit, a Dive Rescue unit, two Marine units, and numerous special, support, and reserve units. John J. Gelinas, Chief of Operations, is in charge of day to day operation of the department.<ref>{{cite web|url=http://www2.cambridgema.gov/cfd/ |title=City of Cambridge Fire Department |publisher=.cambridgema.gov |date=2005-03-13 |accessdate=2012-06-26}}</ref> The CFD is rated as a Class 1 fire department by the [[Insurance Services Office]] (ISO), and is one of only 32 fire departments so rated, out of 37,000 departments in the United States. The other class 1 departments in New England are in [[Hartford, Connecticut]] and [[Milford, Connecticut]]. Class 1 signifies the highest level of fire protection according to various criteria.<ref>{{cite web|url=http://www2.cambridgema.gov/CFD/Class1FD.cfm |title=Class 1 Fire Department |publisher=.cambridgema.gov |date=1999-07-01 |accessdate=2012-06-26}}</ref> The CFD responds to approximately 15,000 emergency calls annually. {| class=wikitable |- valign=bottom ! Engine Company ! Ladder Company ! Special Unit ! Division ! Address ! Neighborhood |- | Engine 1 || Ladder 1 || || || 491 Broadway || Harvard Square |- | Engine 2 || Ladder 3 || Squad 2 || || 378 Massachusetts Ave. || Lafayette Square |- | Engine 3 || Ladder 2 || || || 175 Cambridge St. || East Cambridge |- | Engine 4 || || Squad 4 || || 2029 Massachusetts Ave. 
|| Porter Square |- | Engine 5 || || || Division 1 || 1384 Cambridge St. || Inman Square |- | Engine 6 || || || || 176 River St. || Cambridgeport |- | Engine 8 || Ladder 4 || || Division 2 || 113 Garden St. || Taylor Square |- | Engine 9 || || || || 167 Lexington Ave || West Cambridge |- | Maintenance Facility || || || || 100 Smith Pl. || |} ===Water Department=== Cambridge is unusual among cities inside Route 128 in having a non-[[MWRA]] water supply. City water is obtained from [[Hobbs Brook]] (in [[Lincoln, Massachusetts|Lincoln]] and [[Waltham, Massachusetts|Waltham]]), [[Stony Brook (Boston)|Stony Brook]] (Waltham and [[Weston, Massachusetts|Weston]]), and [[Fresh Pond (Cambridge, Massachusetts)|Fresh Pond]] (Cambridge). The city owns over 1,200 acres of land in other towns that includes these reservoirs and portions of their watershed.<ref>{{cite web|url=http://www2.cambridgema.gov/CWD/wat_lands.cfm |title=Cambridge Watershed Lands & Facilities |publisher=.cambridgema.gov |date= |accessdate=2012-04-28}}</ref> Water is treated at Fresh Pond, then pumped uphill to an elevation of {{convert|176|ft|m}} [[above sea level]] at the Payson Park Reservoir ([[Belmont, Massachusetts|Belmont]]); from there, the water is redistributed downhill via gravity to individual users in the city.<ref>{{cite web|url=http://www.cambridgema.gov/CityOfCambridge_Content/documents/CWD_March_2010.pdf |title=Water supply system |format=PDF |date= |accessdate=2012-04-28}}</ref><ref>[http://www.cambridgema.gov/CWD/fpfaqs.cfm Is Fresh Pond really used for drinking water?], Cambridge Water Department</ref> ===County government=== Cambridge is a [[county seat]] of [[Middlesex County, Massachusetts]], along with [[Lowell, Massachusetts|Lowell]]. Though the county government was abolished in 1997, the county still exists as a geographical and political region. The employees of Middlesex County courts, jails, registries, and other county agencies now work directly for the state. 
At present, the county's registrars of [[Deed]]s and Probate remain in Cambridge; however, the Superior Court and District Attorney have had their base of operations transferred to [[Woburn, Massachusetts|Woburn]]. Third District court has shifted operations to [[Medford, Massachusetts|Medford]], and the Sheriff's office for the county is still awaiting a near-term relocation.<ref>{{cite news | url=http://www.boston.com/news/local/massachusetts/articles/2008/02/14/court_move_a_hassle_for_commuters/ |title=Court move a hassle for commuters |accessdate=July 25, 2009 |first=Eric |last=Moskowitz |authorlink= |coauthors= |date=February 14, 2008 |work=[[Boston Globe|The Boston Globe]] |pages= |archiveurl= |archivedate= |quote=In a little more than a month, Middlesex Superior Court will open in Woburn after nearly four decades at the Edward J. Sullivan Courthouse in Cambridge. With it, the court will bring the roughly 500 people who pass through its doors each day – the clerical staff, lawyers, judges, jurors, plaintiffs, defendants, and others who use or work in the system.}}</ref><ref>{{cite news | url=http://www.wickedlocal.com/cambridge/homepage/x135741754/Cambridges-Middlesex-Jail-courts-may-be-shuttered-for-good |title=Cambridge's Middlesex Jail, courts may be shuttered for good |accessdate=July 25, 2009 |first=Charlie |last=Breitrose |authorlink= |coauthors= |date=July 7, 2009 |work=Wicked Local News: Cambridge |pages= |archiveurl= |archivedate= |quote=The courts moved out of the building to allow workers to remove asbestos. 
Superior Court moved to Woburn in March 2008, and in February, the Third District Court moved to Medford.}}</ref> ==Education== [[File:MIT Main Campus Aerial.jpg|thumb|Aerial view of part of [[MIT]]'s main campus]] [[File:Dunster House.jpg|thumb|[[Dunster House]], Harvard]] ===Higher education=== Cambridge is perhaps best known as an academic and intellectual center, owing to its colleges and universities, which include: *[[Cambridge College]] *[[Cambridge School of Culinary Arts]] *[[Episcopal Divinity School]] *[[Harvard University]] *[[Hult International Business School]] *[[Lesley University]] *[[Longy School of Music]] *[[Massachusetts Institute of Technology]] *[[Le Cordon Bleu College of Culinary Arts in Boston]] [[Nobel laureates by university affiliation|At least 129]] of the world's total 780 [[Nobel Prize]] winners have been, at some point in their careers, affiliated with universities in Cambridge. The [[American Academy of Arts and Sciences]] is also based in Cambridge. ===Primary and secondary public education=== The Cambridge Public School District encompasses 12 elementary schools that follow a variety of different educational systems and philosophies. All but one of the elementary schools extend up to the [[middle school]] grades as well. The 12 elementary schools are: *[[Amigos School]] *Baldwin School *Cambridgeport School *Fletcher-Maynard Academy *Graham and Parks Alternative School *Haggerty School *Kennedy-Longfellow School *King Open School *Martin Luther King, Jr. 
School *Morse School (a [[Core Knowledge Foundation|Core Knowledge]] school) *Peabody School *Tobin School (a [[Montessori school]]) There are three public high schools serving Cambridge students, including the [[Cambridge Rindge and Latin School]]<ref>{{cite web|url=http://www.cpsd.us/Web/PubInfo/SchoolsAtAGlance06-07.pdf|title=Cambridge Public Schools at a Glance|format=PDF}}{{dead link|date=June 2012}}</ref> and Community Charter School of Cambridge (www.ccscambridge.org). In 2003, the CRLS, also known as Rindge, came close to losing its educational accreditation when it was placed on probation by the [[New England Association of Schools and Colleges]].<ref name="Crimson MCAS">{{cite web|url=http://www.thecrimson.com/article.aspx?ref=512061|title=School Fights Achievement Gap|publisher=The Harvard Crimson|accessdate=May 14, 2009}}</ref> The school has improved under Principal Chris Saheed: graduation rates hover around 98%, and 70% of students gain college admission. Community Charter School of Cambridge serves 350 students, primarily from Boston and Cambridge, and is a tuition-free public charter school with a college preparatory curriculum. All students from the classes of 2009 and 2010 gained admission to college. 
Outside of the main public schools are public charter schools including: [[Benjamin Banneker Charter School]], which serves students in grades K-6,<ref>{{cite web|url=http://www.banneker.org/ |title=The Benjamin Banneker Charter Public School |publisher=Banneker.org |date=2012-03-01 |accessdate=2012-04-28}}</ref> [[Community Charter School of Cambridge]],<ref>{{cite web|url=http://www.ccscambridge.org/ |title=Community Charter School of Cambridge |publisher=Ccscambridge.org |date= |accessdate=2012-04-28}}</ref> which is located in Kendall Square and serves students in grades 7–12, and [[Prospect Hill Academy]], a [[charter school]] whose upper school is in [[Central Square (Cambridge)|Central Square]], though it is not a part of the Cambridge Public School District. ===Primary and secondary private education=== [[File:Cambridge Public Library, Cambridge, Massachusetts.JPG|thumb|right|[[Cambridge Public Library]] original building, part of an expanded facility]] There are also many private schools in the city including: <!-- please keep alphabetical --> *[[Boston Archdiocesan Choir School]] (BACS) *[[Buckingham Browne & Nichols]] (BB&N) *[[Cambridge montessori school|Cambridge Montessori School]] (CMS) *Cambridge [[Religious Society of Friends|Friends]] School. Thomas Waring served as founding headmaster of the school. *Fayerweather Street School (FSS)[http://www.fayerweather.org/ ] *[[International School of Boston]] (ISB, formerly École Bilingue) *[[Matignon High School]] *[[North Cambridge Catholic High School]] (re-branded as Cristo Rey Boston and relocated to Dorchester, MA in 2010) *[[Shady Hill School]] *St. Peter School ==Economy== [[File:Cambridge Skyline.jpg|thumb|Buildings of [[Kendall Square]], center of Cambridge's [[biotech]] economy, seen from the [[Charles River]]]] Manufacturing was an important part of the economy in the late 19th and early 20th century, but educational institutions are the city's biggest employers today. 
Harvard and [[Massachusetts Institute of Technology|MIT]] together employ about 20,000.<ref name="2008top25">[http://www2.cambridgema.gov/cdd/data/labor/top25/top25_2008.html Top 25 Cambridge Employers: 2008], City of Cambridge</ref> As a cradle of technological innovation, Cambridge was home to technology firms [[Analog Devices]], [[Akamai Technologies|Akamai]], [[BBN Technologies|Bolt, Beranek, and Newman (BBN Technologies)]] (now part of Raytheon), [[General Radio|General Radio (later GenRad)]], [[Lotus Development Corporation]] (now part of [[IBM]]), [[Polaroid Corporation|Polaroid]], [[Symbolics]], and [[Thinking Machines]]. In 1996, [[Polaroid Corporation|Polaroid]], [[Arthur D. Little]], and [[Lotus Development Corporation|Lotus]] were top employers with over 1,000 employees in Cambridge, but faded out a few years later. Health care and biotechnology firms such as [[Genzyme]], [[Biogen Idec]], [[Millennium Pharmaceuticals]], [[Sanofi]], [[Pfizer]] and [[Novartis]]<ref>{{cite news |title=Novartis doubles plan for Cambridge |author=Casey Ross and Robert Weisman |first= |last= |authorlink= |authorlink2= |url=http://articles.boston.com/2010-10-27/business/29323650_1_french-drug-maker-astrazeneca-plc-research-operations |agency= |newspaper=[[The Boston Globe]] |publisher= |isbn= |issn= |pmid= |pmd= |bibcode= |doi= |date=October 27, 2010 |page= |pages= |accessdate=April 12, 2011|quote=Already Cambridge’s largest corporate employer, the Swiss firm expects to hire an additional 200 to 300 employees over the next five years, bringing its total workforce in the city to around 2,300. Novartis’s global research operations are headquartered in Cambridge, across Massachusetts Avenue from the site of the new four-acre campus. |archiveurl= |archivedate= |ref=}}</ref> have significant presences in the city. Though headquartered in Switzerland, Novartis continues to expand its operations in Cambridge. 
Other major biotech and pharmaceutical firms expanding their presence in Cambridge include [[GlaxoSmithKline]], [[AstraZeneca]], [[Shire plc|Shire]], and [[Pfizer]].<ref>{{cite news|title=Novartis Doubles Plan for Cambridge|url=http://www.boston.com/business/healthcare/articles/2010/10/27/novartis_doubles_plan_for_cambridge/|accessdate=23 February 2012 | work=The Boston Globe|first1=Casey|last1=Ross|first2=Robert|last2=Weisman|date=October 27, 2010}}</ref> Most Biotech firms in Cambridge are located around [[Kendall Square]] and [[East Cambridge, Massachusetts|East Cambridge]], which decades ago were the city's center of manufacturing. A number of biotechnology companies are also located in [[University Park at MIT]], a new development in another former manufacturing area. None of the high technology firms that once dominated the economy was among the 25 largest employers in 2005, but by 2008 high tech companies [[Akamai Technologies|Akamai]] and [[ITA Software]] had grown to be among the largest 25 employers.<ref name="2008top25" /> [[Google]],<ref>{{cite web|url=http://www.google.com/corporate/address.html |title=Google Offices |publisher=Google.com |date= |accessdate=2012-07-18}}</ref> [[IBM Research]], and [[Microsoft Research]] maintain offices in Cambridge. 
In late January 2012—less than a year after acquiring [[Billerica, Massachusetts|Billerica]]-based analytic database management company, [[Vertica]]—[[Hewlett-Packard]] announced it would also be opening its first offices in Cambridge.<ref>{{cite web|last=Huang|first=Gregory|title=Hewlett-Packard Expands to Cambridge via Vertica’s "Big Data" Center|url=http://www.xconomy.com/boston/2012/01/23/hewlett-packard-expands-to-cambridge-via-verticas-big-data-center/?single_page=true}}</ref> Around this same time, e-commerce giants [[Staples Inc.|Staples]]<ref>{{cite web|title=Staples to bring e-commerce office to Cambridge's Kendall Square|work=Cambridge Chronicle|url=http://www.wickedlocal.com/cambridge/news/x690035936/Staples-to-bring-E-commerce-office-to-Cambridges-Kendall-Square#axzz1kg3no7Zg}}</ref> and [[Amazon.com]]<ref>{{cite web|title=Amazon Seeks Brick-And-Mortar Presence In Boston Area|url=http://www.wbur.org/2011/12/22/amazon-boston}}</ref> said they would be opening research and innovation centers in Kendall Square. Video game developer [[Harmonix Music Systems]] is based in [[Central Square (Cambridge)|Central Square]]. The proximity of Cambridge's universities has also made the city a center for nonprofit groups and think tanks, including the [[National Bureau of Economic Research]], the [[Smithsonian Astrophysical Observatory]], the [[Lincoln Institute of Land Policy]], [[Cultural Survival]], and [[One Laptop per Child]]. In September 2011, an initiative by the City of Cambridge called the "[[Entrepreneur Walk of Fame]]" was launched. 
It seeks to highlight individuals who have made contributions to innovation in the global business community.<ref>{{cite news |title=Stars of invention |author= |first=Kathleen |last=Pierce |url=http://articles.boston.com/2011-09-16/business/30165912_1_gates-and-jobs-microsoft-granite-stars |agency= |newspaper=The Boston Globe|date=September 16, 2011 |page= |pages= |at= |accessdate=October 1, 2011}}</ref> ===Top employers=== The top ten employers in the city are:<ref>{{cite web|url=http://cambridgema.gov/citynewsandpublications/news/2012/01/fy11comprehensiveannualfinancialreportnowavailable.aspx |title=City of Cambridge, Massachusetts Comprehensive Annual Financial Report July 1, 2010—June 30, 2011 |publisher=Cambridgema.gov |date=2011-06-30 |accessdate=2012-04-28}}</ref> {| class="wikitable" |- ! # ! Employer ! # of employees |- | 1 |[[Harvard University]] |10,718 |- |2 |[[Massachusetts Institute of Technology]] |7,604 |- |3 |City of Cambridge |2,922 |- |4 |[[Novartis]] Institutes for BioMedical Research |2,095 |- |5 |[[Mount Auburn Hospital]] |1,665 |- |6 |[[Vertex Pharmaceuticals]] |1,600 |- |7 |[[Genzyme]] |1,504 |- |8 |[[Biogen Idec]] |1,350 |- |9 |[[Federal government of the United States|Federal Government]] |1,316 |- |10 |[[Pfizer]] |1,300 |} ==Transportation== {{See also|Boston transportation}} ===Road=== [[File:Harvard Square at Peabody Street and Mass Avenue.jpg|thumb|[[Massachusetts Avenue (Boston)|Massachusetts Avenue]] in [[Harvard Square]]]] Several major roads lead to Cambridge, including [[Massachusetts State Highway 2|Route 2]], [[Massachusetts State Highway 16|Route 16]] and the [[Massachusetts State Highway 28|McGrath Highway (Route 28)]]. The [[Massachusetts Turnpike]] does not pass through Cambridge, but provides access by an exit in nearby [[Allston, Massachusetts|Allston]]. Both [[U.S. Route 1]] and [[I-93 (MA)]] also provide additional access on the eastern end of Cambridge at Leverett Circle in [[Boston]]. 
[[Massachusetts State Highway 2A|Route 2A]] runs the length of the city, chiefly along Massachusetts Avenue. The Charles River forms the southern border of Cambridge and is crossed by 11 bridges connecting Cambridge to Boston, including the [[Longfellow Bridge]] and the [[Harvard Bridge]], eight of which are open to motorized road traffic. Cambridge has an irregular street network because many of the roads date from the colonial era. Contrary to popular belief, the road system did not evolve from longstanding cow-paths. Roads connected various village settlements with each other and nearby towns, and were shaped by geographic features, most notably streams, hills, and swampy areas. Today, the major "squares" are typically connected by long, mostly straight roads, such as Massachusetts Avenue between [[Harvard Square]] and [[Central Square (Cambridge)|Central Square]], or Hampshire Street between [[Kendall Square]] and [[Inman Square]]. ===Mass transit=== [[File:Central MBTA station.jpg|thumb|[[Central (MBTA)|Central station on the MBTA Red Line]]]] Cambridge is well served by the [[MBTA]], including the [[Porter (MBTA station)|Porter Square stop]] on the regional [[MBTA Commuter Rail|Commuter Rail]], the [[Lechmere (MBTA station)|Lechmere stop]] on the [[Green Line (MBTA)|Green Line]], and five stops on the [[Red Line (MBTA)|Red Line]] ([[Alewife Station (MBTA)|Alewife]], [[Porter (MBTA)|Porter Square]], [[Harvard (MBTA station)|Harvard Square]], [[Central (MBTA station)|Central Square]], and [[Kendall/MIT (MBTA station)|Kendall Square/MIT]]). 
Alewife Station, the current terminus of the Red Line, has a large multi-story parking garage (at a rate of $7 per day {{as of|lc=y|2009}}).<ref>{{cite web|url=http://www.mbta.com/schedules_and_maps/subway/lines/stations/?stopId=10029 |title=> Schedules & Maps > Subway > Alewife Station |publisher=MBTA |date= |accessdate=2012-04-28}}</ref> The [[Harvard Bus Tunnel]], under Harvard Square, reduces traffic congestion on the surface, and connects to the Red Line underground. This tunnel was originally opened for streetcars in 1912, and served trackless trolleys and buses as the routes were converted. The tunnel was partially reconfigured when the Red Line was extended to Alewife in the early 1980s. Outside of the state-owned transit agency, the city is also served by the Charles River Transportation Management Agency (CRTMA) shuttles which are supported by some of the largest companies operating in city, in addition to the municipal government itself.<ref>{{cite web |url=http://www.charlesrivertma.org/members.htm |title=Charles River TMA Members |author=Staff writer |date=(As of) January 1, 2013 |work=CRTMA |publisher= |language= |trans_title= |type= |archiveurl= |archivedate= |deadurl= |accessdate=January 1, 2013 |quote= |ref= |separator= |postscript=}} </ref> ===Cycling=== Cambridge has several [[bike path]]s, including one along the Charles River,<ref>{{cite web|url=http://www.mass.gov/dcr/parks/metroboston/maps/bikepaths_dudley.gif |title=Dr. Paul Dudley White Bikepath |date= |accessdate=2012-04-28}}</ref> and the [[Cambridge Linear Park|Linear Park]] connecting the [[Minuteman Bikeway]] at Alewife with the [[Somerville Community Path]]. Bike parking is common and there are bike lanes on many streets, although concerns have been expressed regarding the suitability of many of the lanes. On several central MIT streets, bike lanes transfer onto the sidewalk. 
Cambridge bans cycling on certain sections of sidewalk where pedestrian traffic is heavy.<ref>{{cite web|url=http://www.cambridgema.gov/cdd/et/bike/bike_ban.html |title=Sidewalk Bicycling Banned Areas – Cambridge Massachusetts |publisher=Cambridgema.gov |date= |accessdate=2012-04-28}}</ref><ref>{{cite web|url=http://www.cambridgema.gov/cdd/et/bike/bike_reg.html |title=Traffic Regulations for Cyclists – Cambridge Massachusetts |publisher=Cambridgema.gov |date=1997-05-01 |accessdate=2012-04-28}}</ref> While ''[[Bicycling Magazine]]'' has rated Boston as one of the worst cities in the nation for bicycling (In their words, for "lousy roads, scarce and unconnected bike lanes and bike-friendly gestures from City Hall that go nowhere—such as hiring a bike coordinator in 2001, only to cut the position two years later"),<ref>[http://www.bicycling.com/article/1,6610,s1-2-16-14593-11,00.html Urban Treasures – bicycling.com]{{dead link|date=April 2012}}</ref> it has listed Cambridge as an honorable mention as one of the best<ref>[http://www.bicycling.com/article/1,6610,s1-2-16-14593-9,00.html Urban Treasures – bicycling.com]{{dead link|date=April 2012}}</ref> and was called by the magazine "Boston's Great Hope." Cambridge has an active, official bicycle committee. ===Walking=== [[File:Weeks Footbridge Cambridge, MA.jpg|thumb|The [[John W. Weeks Bridge|Weeks Bridge]] provides a pedestrian-only connection between Boston's Allston-Brighton neighborhood and Cambridge over the Charles River]] Walking is a popular activity in Cambridge. Per year 2000 data, of the communities in the U.S. 
with more than 100,000 residents, Cambridge has the highest percentage of commuters who walk to work.<ref>{{cite web|url=http://www.bikesatwork.com/carfree/census-lookup.php?state_select=ALL_STATES&lower_pop=100000&upper_pop=99999999&sort_num=2&show_rows=25&first_row=0 |title=The Carfree Census Database: Result of search for communities in any state with population over 100,000, sorted in descending order by % Pedestrian Commuters |publisher=Bikesatwork.com |date= |accessdate=2012-04-28}}</ref> Cambridge receives a "Walk Score" of 100 out of 100 possible points.<ref>[http://www.walkscore.com/get-score.php?street=cambridge%2C+ma&go=Go Walk Score site] Accessed July 28, 2009</ref> Cambridge's major historic squares have been recently changed into a modern walking landscape, which has sparked a traffic calming program based on the needs of pedestrians rather than of motorists. ===Intercity=== The Boston intercity bus and train stations at [[South Station]], Boston, and [[Logan International Airport]] in [[East Boston]], are accessible by [[Red Line (MBTA)|subway]]. The [[Fitchburg Line]] rail service from [[Porter (MBTA station)|Porter Square]] connects to some western suburbs. Since October 2010, there has also been intercity bus service between [[Alewife (MBTA station)|Alewife Station]] (Cambridge) and [[New York City]].<ref>{{cite web|last=Thomas |first=Sarah |url=http://www.boston.com/yourtown/news/cambridge/2010/10/warren_mbta_welcome_world_wide.html |title=NYC-bound buses will roll from Newton, Cambridge |publisher=Boston.com |date=2010-10-19 |accessdate=2012-04-28}}</ref> ==Media== ===Newspapers=== Cambridge is served by several weekly newspapers. The most prominent is the ''[[Cambridge Chronicle]]'', which is also the oldest surviving weekly paper in the United States. ===Radio=== Cambridge is home to the following commercially licensed and student-run radio stations: {| class=wikitable |- ! [[Callsign]] !! Frequency !! City/town !! Licensee !! 
Format |- | [[WHRB]] || align=right | 95.3 FM || Cambridge (Harvard) || Harvard Radio Broadcasting Co., Inc. || [[Variety (US radio)|Musical variety]] |- | [[WJIB]] || align=right | 740&nbsp;AM || Cambridge || Bob Bittner Broadcasting || [[Adult Standards]]/Pop |- | [[WMBR]] || align=right | 88.1 FM || Cambridge (MIT) || Technology Broadcasting Corporation || [[College radio]] |} ===Television=== Cambridge Community Television (CCTV) has served the Cambridge community since its inception in 1988. CCTV operates Cambridge's public access television facility and programs three television channels, 8, 9, and 96 on the Cambridge cable system (Comcast). ===Social media=== As of 2011, a growing number of social media efforts provide means for participatory engagement with the locality of Cambridge, such as Localocracy<ref>"Localocracy is an online town common where registered voters using real names can weigh in on local issues." [http://cambridge.localocracy.com/ Localocracy Cambridge, Massachusetts]. Accessed 2011-10-01</ref> and [[foursquare (website)|Foursquare]]. ==Culture, art and architecture== [[File:Fogg.jpg|thumb|[[Fogg Museum]], Harvard]] ===Museums=== * [[Harvard Art Museum]], including the [[Busch-Reisinger Museum]], a collection of Germanic art, the [[Fogg Art Museum]], a comprehensive collection of Western art, and the [[Arthur M. 
Sackler Museum]], a collection of Middle East and Asian art * [[Harvard Museum of Natural History]], including the [[Glass Flowers]] collection * [[Peabody Museum of Archaeology and Ethnology]], Harvard *[[Semitic Museum]], Harvard * [[MIT Museum]] * [[List Visual Arts Center]], MIT ===Public art=== Cambridge has a large and varied collection of permanent public art, both on city property (managed by the Cambridge Arts Council),<ref>{{cite web|url=http://www.cambridgema.gov/CAC/Public/overview.cfm |title=CAC Public Art Program |publisher=Cambridgema.gov |date=2007-03-13 |accessdate=2012-04-28}}</ref> and on the campuses of Harvard<ref>{{cite web|url=http://ofa.fas.harvard.edu/visualarts/pubart.php |title=Office for the Arts at Harvard: Public Art |publisher=Ofa.fas.harvard.edu |date= |accessdate=2012-04-28}}</ref> and MIT.<ref>{{cite web|url=http://listart.mit.edu/map |title=MIT Public Art Collection Map |publisher=Listart.mit.edu |date= |accessdate=2012-04-28}}</ref> Temporary public artworks are displayed as part of the annual Cambridge River Festival on the banks of the Charles River, during winter celebrations in Harvard and Central Squares, and at university campus sites. Experimental forms of public artistic and cultural expression include the Central Square World's Fair, the Somerville-based annual Honk! Festival,<ref>{{cite web|url=http://honkfest.org/ |title= Honk Fest}}</ref> and [[If This House Could Talk]],<ref>{{cite web|url=http://cambridgehistory.org/discover/ifthishousecouldtalk/index.html |title=The Cambridge Historical Society}}</ref> a neighborhood art and history event. {{or|date=April 2012}} {{Citation needed|date=April 2012}} An active tradition of street musicians and other performers in Harvard Square entertains an audience of tourists and local residents during the warmer months of the year. 
The performances are coordinated through a public process that has been developed collaboratively by the performers,<ref>{{cite web|url=http://www.buskersadvocates.org/ | title= Street Arts & Buskers Advocates}}</ref> city administrators, private organizations and business groups.<ref>{{cite web|url=http://harvardsquare.com/Home/Arts-and-Entertainment/Street-Arts-and-Buskers-Advocates.aspx |title=Street Arts and Buskers Advocates |publisher=Harvardsquare.com |date= |accessdate=2012-04-28}}</ref> [[File:Longfellow National Historic Site, Cambridge, Massachusetts.JPG|thumb|right|The [[Longfellow National Historic Site]]]] [[File:Wfm stata center.jpg|thumb|[[Stata Center]], MIT]] [[File:Simmons Hall, MIT, Cambridge, Massachusetts.JPG|thumb|[[List of MIT undergraduate dormitories|Simmons Hall]], MIT]] ===Architecture=== Despite intensive urbanization during the late 19th century and 20th century, Cambridge has preserved an unusual number of historic buildings, including some dating to the 17th century. The city also contains an abundance of innovative contemporary architecture, largely built by Harvard and MIT. 
;Notable historic buildings in the city include: * The [[Asa Gray House]] (1810) * [[Austin Hall, Harvard University]] (1882–84) * [[Cambridge, Massachusetts City Hall|Cambridge City Hall]] (1888–89) * [[Cambridge Public Library]] (1888) * [[Christ Church, Cambridge]] (1761) * [[Cooper-Frost-Austin House]] (1689–1817) * [[Elmwood (Cambridge, Massachusetts)|Elmwood House]] (1767), residence of the [[President of Harvard University]] * [[First Church of Christ, Scientist (Cambridge, Massachusetts)|First Church of Christ, Scientist]] (1924–30) * [[The First Parish in Cambridge]] (1833) * [[Harvard-Epworth United Methodist Church]] (1891–93) * [[Harvard Lampoon Building]] (1909) * The [[Hooper-Lee-Nichols House]] (1685–1850) * [[Longfellow National Historic Site]] (1759), former home of poet [[Henry Wadsworth Longfellow]] * [[The Memorial Church of Harvard University]] (1932) * [[Memorial Hall, Harvard University]] (1870–77) * [[Middlesex County Courthouse (Massachusetts)|Middlesex County Courthouse]] (1814–48) * [[Urban Rowhouse (40-48 Pearl Street, Cambridge, Massachusetts)|Urban Rowhouse]] (1875) * [[spite house|O'Reilly Spite House]] (1908), built to spite a neighbor who would not sell his adjacent land<ref name="existing">Bloom, Jonathan. (February 2, 2003) [[Boston Globe]] ''[http://nl.newsbank.com/nl-search/we/Archives?p_product=BG&p_theme=bg&p_action=search&p_maxdocs=200&p_topdoc=1&p_text_direct-0=0F907F2342522B5D&p_field_direct-0=document_id&p_perpage=10&p_sort=YMD_date:D Existing by the Thinnest of Margins. A Concord Avenue Landmark Gives New Meaning to Cozy.]'' Section: City Weekly; Page 11. 
Location: 260 Concord Ave, Cambridge, MA 02138.</ref> {{See also|List of Registered Historic Places in Cambridge, Massachusetts}} ;Contemporary architecture: * [[List of MIT undergraduate dormitories#Baker House|Baker House]] dormitory, MIT, by Finnish architect [[Alvar Aalto]], one of only two buildings by Aalto in the US * Harvard Graduate Center/Harkness Commons, by [[The Architects Collaborative]] (TAC, with [[Walter Gropius]]) * [[Carpenter Center for the Visual Arts]], Harvard, the only building in North America by [[Le Corbusier]] * [[Kresge Auditorium]], MIT, by [[Eero Saarinen]] * [[MIT Chapel]], by [[Eero Saarinen]] * [[Design Research Building]], by [[Benjamin Thompson and Associates]] * [[American Academy of Arts and Sciences]], by [[Kallmann McKinnell and Wood]], also architects of Boston City Hall * [[Arthur M. Sackler Museum]], Harvard, one of the few buildings in the U.S. by [[James Stirling (architect)|James Stirling]], winner of the [[Pritzker Prize]] * [[Stata Center]], MIT, by [[Frank Gehry]] * [[List of MIT undergraduate dormitories#Simmons Hall|Simmons Hall]], MIT, by [[Steven Holl]] ===Music=== <!-- make section generic. NEEDS MORE WORK. remove marketing fluff for Ryles. --> The city has an active music scene from classical performances to the latest popular bands. 
==Sister cities== Cambridge has 8 active, official [[Twin towns and sister cities|sister cities]], and an unofficial relationship with [[Cambridge]], England:<ref name="peacom">"A message from the Peace Commission" [http://www.cambridgema.gov/peace/newsandpublications/news/detail.aspx?path=%2fsitecore%2fcontent%2fhome%2fpeace%2fnewsandpublications%2fnews%2f2008%2f02%2finformationoncambridgessistercities].</ref> *{{Flagicon|PRT}} [[Coimbra]], [[Portugal]] *{{Flagicon|CUB}} [[Cienfuegos]], [[Cuba]] *{{Flagicon|ITA}} [[Gaeta]], [[Italy]] *{{Flagicon|IRL}} [[Galway]], [[Republic of Ireland|Ireland]] *{{Flagicon|ARM}} [[Yerevan]], [[Armenia]]<ref>{{cite web|url=http://www.cysca.org/ |title=Cambridge-Yerevan Sister City Association |publisher=Cysca.org |date= |accessdate=2012-04-28}}</ref> *{{Flagicon|SLV}} [[San José Las Flores, Chalatenango|San José Las Flores]], [[El Salvador]] *{{Flagicon|JPN}} [[Tsukuba, Ibaraki|Tsukuba Science City]], Japan *{{Flagicon|POL}} [[Kraków]], [[Poland]] *{{Flagicon|CHN}} [[Haidian District]], [[China]] Ten other official sister city relationships are inactive: [[Dublin]], Ireland; [[Ischia]], [[Catania]], and [[Florence]], Italy; [[Kraków]], Poland; [[Santo Domingo Oeste]], Dominican Republic; [[Southwark]], London, England; [[Yuseong]], Daejeon, Korea; and [[Haidian District|Haidian]], Beijing, China.<ref name="peacom"/> There has also been an unofficial relationship with: *{{Flagicon|GBR}} [[Cambridge]], England, UK<ref>{{cite web|url=http://www.cambridgema.gov/peace/newsandpublications/news/detail.aspx?path=%2fsitecore%2fcontent%2fhome%2fpeace%2fnewsandpublications%2fnews%2f2008%2f02%2finformationoncambridgessistercities |title="Sister Cities", Cambridge Peace Commission |publisher=Cambridgema.gov |date=2008-02-15 |accessdate=2012-07-18}}</ref> ==Zip codes== *02138—Harvard Square/West Cambridge *02139—Central Square/Inman Square/MIT *02140—Porter Square/North Cambridge *02141—East Cambridge *02142—Kendall Square ==References== 
{{reflist|30em}} ==General references== * ''History of Middlesex County, Massachusetts'', [http://books.google.com/books?id=QGolOAyd9RMC&dq=intitle:History+intitle:of+intitle:Middlesex+intitle:County+intitle:Massachusetts&lr=&num=50&as_brr=0&source=gbs_other_versions_sidebar_s&cad=5 Volume 1 (A-H)], [http://books.google.com/books?id=hNaAnwRMedUC&pg=PA506&dq=intitle:History+intitle:of+intitle:Middlesex+intitle:County+intitle:Massachusetts&lr=&num=50&as_brr=0#PPA3,M1 Volume 2 (L-W)] compiled by Samuel Adams Drake, published 1879–1880. ** [http://books.google.com/books?id=QGolOAyd9RMC&printsec=titlepage#PPA305,M1 Cambridge article] by Rev. Edward Abbott in volume 1, pages 305–358. *Eliot, Samuel Atkins. ''A History of Cambridge, Massachusetts: 1630–1913''. Cambridge: The Cambridge Tribune, 1913. *Hiestand, Emily. "Watershed: An Excursion in Four Parts" The Georgia Review Spring 1998 pages 7–28 *[[Lucius Robinson Paige|Paige, Lucius]]. ''History of Cambridge, Massachusetts: 1630–1877''. Cambridge: The Riverside Press, 1877. *Survey of Architectural History in Cambridge: Mid Cambridge, 1967, Cambridge Historical Commission, Cambridge, Mass.{{ISBN missing}} *Survey of Architectural History in Cambridge: Cambridgeport, 1971 ISBN 0-262-53013-9, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: Old Cambridge, 1973 ISBN 0-262-53014-7, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: Northwest Cambridge, 1977 ISBN 0-262-53032-5, Cambridge Historical Commission, Cambridge, Mass. *Survey of Architectural History in Cambridge: East Cambridge, 1988 (revised) ISBN 0-262-53078-3, Cambridge Historical Commission, Cambridge, Mass. 
*{{cite book|last=Sinclair|first=Jill|title=Fresh Pond: The History of a Cambridge Landscape|publisher=MIT Press|location=Cambridge, Mass.|date=April 2009|isbn=978-0-262-19591-1 }} *{{cite book|last=Seaburg|first=Alan|title=Cambridge on the Charles|url=http://books.google.com/books?id=c7_oCS782-8C|publisher=Anne Miniver Press|location=Billerica, Mass.|year=2001|author=Seaburg, A. and Dahill, T. and Rose, C.H.|isbn=978-0-9625794-9-3}} ==External links== {{Commons category}} <!-- for current and future use if material is uploaded --> {{Wikivoyage|Cambridge (Massachusetts)}} {{Portal|Boston}} {{Commons category|Cambridge, Massachusetts}} *{{Official website|http://www.cambridgema.gov/}} *[http://www.cambridge-usa.org/ Cambridge Office for Tourism] *[http://www.city-data.com/city/Cambridge-Massachusetts.html City-Data.com] *[http://www.epodunk.com/cgi-bin/genInfo.php?locIndex=2894 ePodunk: Profile for Cambridge, Massachusetts] *{{dmoz|Regional/North_America/United_States/Massachusetts/Localities/C/Cambridge}} <br/><!--this break is to put visual space between the last information and the following template if needed--> ===Maps=== *[http://www.cambridgema.gov/GIS/FindMapAtlas.cfm Cambridge Maps] *[http://www.cambridgema.gov/GIS City of Cambridge Geographic Information System (GIS)] *[http://www.salemdeeds.com/atlases_results.asp?ImageType=index&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871 ''1871 Atlas of Massachusetts''.] by Wall & Gray. [http://www.salemdeeds.com/atlases_pages.asp?ImageName=PAGE_0010_0011.jpg&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871&pageprefix= Map of Massachusetts.] [http://www.salemdeeds.com/atlases_pages.asp?ImageName=PAGE_0044_0045.jpg&atlastype=MassWorld&atlastown=&atlas=MASSACHUSETTS+1871&atlas_desc=MASSACHUSETTS+1871&pageprefix= Map of Middlesex County.] *Dutton, E.P. 
[http://maps.bpl.org/details_10717/?srch_query=Dutton%2C+E.P.&srch_fields=all&srch_author=on&srch_style=exact&srch_fa=save&srch_ok=Go+Search Chart of Boston Harbor and Massachusetts Bay with Map of Adjacent Country.] Published 1867. A good map of roads and rail lines around Cambridge. *[http://www.citymap.com/cambridge/index.htm Cambridge Citymap – Community, Business, and Visitor Map.] *[http://docs.unh.edu/towns/CambridgeMassachusettsMapList.htm Old USGS maps of Cambridge area.] {{Greater Boston}} {{Middlesex County, Massachusetts}} {{Massachusetts}} {{New England}} {{Massachusetts cities and mayors of 100,000 population}} [[Category:Cambridge, Massachusetts| ]] [[Category:University towns in the United States]] [[Category:County seats in Massachusetts]] [[Category:Populated places established in 1630]] [[Category:Charles River]] [[Category:Place names of English origin in the United States]] [[af:Cambridge, Massachusetts]] [[ar:كامبريدج، ماساتشوستس]] [[zh-min-nan:Cambridge, Massachusetts]] [[be:Горад Кембрыдж, МаÑачуÑетÑ]] [[be-x-old:Кембрыдж (МаÑачуÑÑÑ‚Ñ)]] [[bg:Кеймбридж (МаÑачузетÑ)]] [[br:Cambridge (Massachusetts)]] [[ca:Cambridge (Massachusetts)]] [[cs:Cambridge (Massachusetts)]] [[cy:Cambridge, Massachusetts]] [[da:Cambridge (Massachusetts)]] [[de:Cambridge (Massachusetts)]] [[et:Cambridge (Massachusetts)]] [[es:Cambridge (Massachusetts)]] [[eo:KembriÄo (Masaĉuseco)]] [[eu:Cambridge (Massachusetts)]] [[fa:کمبریج (ماساچوست)]] [[fr:Cambridge (Massachusetts)]] [[gd:Cambridge (MA)]] [[ko:케임브리지 (매사추세츠 주)]] [[hy:Õ”Õ¥Õ´Õ¢Ö€Õ«Õ» (Õ„Õ¡Õ½Õ¡Õ¹Õ¸Ö‚Õ½Õ¥Õ©Õ½)]] [[id:Cambridge, Massachusetts]] [[it:Cambridge (Massachusetts)]] [[he:קיימברידג' (מסצ'וסטס)]] [[jv:Cambridge, Massachusetts]] [[kk:КÑмбридж (МаÑÑачуÑетÑ)]] [[kw:Cambridge, Massachusetts]] [[sw:Cambridge, Massachusetts]] [[ht:Cambridge, Massachusetts]] [[la:Cantabrigia (Massachusetta)]] [[lv:Keimbridža]] [[lb:Cambridge (Massachusetts)]] [[hu:Cambridge (Massachusetts)]] [[mr:केंबà¥à¤°à¤¿à¤œ, 
मॅसेचà¥à¤¯à¥à¤¸à¥‡à¤Ÿà¥à¤¸]] [[ms:Cambridge, Massachusetts]] [[nl:Cambridge (Massachusetts)]] [[ja:ケンブリッジ (マサãƒãƒ¥ãƒ¼ã‚»ãƒƒãƒ„å·ž)]] [[no:Cambridge (Massachusetts)]] [[pl:Cambridge (Massachusetts)]] [[pt:Cambridge (Massachusetts)]] [[ro:Cambridge, Massachusetts]] [[ru:Кембридж (МаÑÑачуÑетÑ)]] [[scn:Cambridge (Massachusetts), USA]] [[simple:Cambridge, Massachusetts]] [[sk:Cambridge (Massachusetts)]] [[sl:Cambridge, Massachusetts]] [[sr:Кембриџ (МаÑачуÑетÑ)]] [[fi:Cambridge (Massachusetts)]] [[sv:Cambridge, Massachusetts]] [[tl:Cambridge, Massachusetts]] [[ta:கேமà¯à®ªà®¿à®°à®¿à®œà¯, மாசசூசெடà¯à®¸à¯]] [[th:เคมบริดจ์ (รัà¸à¹à¸¡à¸ªà¸‹à¸²à¸Šà¸¹à¹€à¸‹à¸•à¸ªà¹Œ)]] [[tg:Кембриҷ (МаÑÑачуÑетÑ)]] [[tr:Cambridge, Massachusetts]] [[uk:Кембридж (МаÑÑачуÑетÑ)]] [[vi:Cambridge, Massachusetts]] [[vo:Cambridge (Massachusetts)]] [[war:Cambridge, Massachusetts]] [[yi:קעמברידזש, מ×ס×טשוסעטס]] [[zh:剑桥 (马è¨è¯¸å¡žå·ž)]] \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip
new file mode 100644
index 0000000000..5bbdea4a96
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.Beta1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip
new file mode 100644
index 0000000000..d9072ce465
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip
new file mode 100644
index 0000000000..dce299b7d6
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.RC2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip
new file mode 100644
index 0000000000..3ec908ddc2
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip
new file mode 100644
index 0000000000..67db98fc3c
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip
new file mode 100644
index 0000000000..6bdb9f27c8
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.10.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip
new file mode 100644
index 0000000000..b5253f9a67
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.11.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip
new file mode 100644
index 0000000000..0392049bb9
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.12.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip
new file mode 100644
index 0000000000..025b4c38b1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.13.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip
new file mode 100644
index 0000000000..413e08e658
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip
new file mode 100644
index 0000000000..c31d4de7c5
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip
new file mode 100644
index 0000000000..8b07a92493
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip
new file mode 100644
index 0000000000..dfd0fd09e7
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.5.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip
new file mode 100644
index 0000000000..1f3cff28e7
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip
new file mode 100644
index 0000000000..6d0e65c28c
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.7.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip
new file mode 100644
index 0000000000..8ff8ac3ddf
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.8.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip
new file mode 100644
index 0000000000..4445b3905a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-0.90.9.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip
new file mode 100644
index 0000000000..167dde888d
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip
new file mode 100644
index 0000000000..95fbfefb87
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.Beta2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip
new file mode 100644
index 0000000000..3ced97aa2a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip
new file mode 100644
index 0000000000..1298cfbfd1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.RC2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip
new file mode 100644
index 0000000000..2cb9abc43d
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip
new file mode 100644
index 0000000000..844271b58c
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip
new file mode 100644
index 0000000000..dd8e39388d
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip
new file mode 100644
index 0000000000..e4437effa2
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.0.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip
new file mode 100644
index 0000000000..4f05370b12
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip
new file mode 100644
index 0000000000..effeb94287
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip
new file mode 100644
index 0000000000..bedffa44f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.1.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.0.zip
new file mode 100644
index 0000000000..4644a38933
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip
new file mode 100644
index 0000000000..553b46b06a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip
new file mode 100644
index 0000000000..3f51a47534
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip
new file mode 100644
index 0000000000..8c8bfbd908
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip
new file mode 100644
index 0000000000..e3a151930e
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.2.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip
new file mode 100644
index 0000000000..d98958dea3
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip
new file mode 100644
index 0000000000..167d0f4c94
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip
new file mode 100644
index 0000000000..756eaf68ac
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip
new file mode 100644
index 0000000000..8470deefd8
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip
new file mode 100644
index 0000000000..2175012fc6
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip
new file mode 100644
index 0000000000..19d1e568d8
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.5.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip
new file mode 100644
index 0000000000..ad8e8bd003
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip
new file mode 100644
index 0000000000..3a645a9dd5
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.7.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip
new file mode 100644
index 0000000000..f8ab0a26dc
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.8.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip
new file mode 100644
index 0000000000..5ef35b21ce
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.3.9.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.Beta1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.Beta1.zip
new file mode 100644
index 0000000000..4546f5d4aa
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.Beta1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip
new file mode 100644
index 0000000000..467d19aa5e
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip
new file mode 100644
index 0000000000..2adbb28cae
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip
new file mode 100644
index 0000000000..4fac2086e5
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip
new file mode 100644
index 0000000000..1a0d66745a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip
new file mode 100644
index 0000000000..0328a9e2d5
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip
new file mode 100644
index 0000000000..eeb25ab6ed
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.0.zip
new file mode 100644
index 0000000000..f1dab08831
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.1.zip
new file mode 100644
index 0000000000..342e311e56
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip
new file mode 100644
index 0000000000..fb36b197c4
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip
new file mode 100644
index 0000000000..020f6f4e59
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.Beta2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip
new file mode 100644
index 0000000000..a84c50759a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip
new file mode 100644
index 0000000000..e5d65ebaf4
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.RC2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip
new file mode 100644
index 0000000000..13f778d965
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip
new file mode 100644
index 0000000000..76ed278fad
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip
new file mode 100644
index 0000000000..762eabf828
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip
new file mode 100644
index 0000000000..ba79dea345
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.0.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip
new file mode 100644
index 0000000000..cbf84c717a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip
new file mode 100644
index 0000000000..00ea0447b2
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip
new file mode 100644
index 0000000000..18abd99625
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.1.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.0.zip
new file mode 100644
index 0000000000..f5e62a1a94
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip
new file mode 100644
index 0000000000..935e71b0c1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip
new file mode 100644
index 0000000000..d69b22a088
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip
new file mode 100644
index 0000000000..295f9f758c
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip
new file mode 100644
index 0000000000..e9efc00c97
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.2.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip
new file mode 100644
index 0000000000..5a59e21930
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip
new file mode 100644
index 0000000000..2ae1d7c0ad
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip
new file mode 100644
index 0000000000..c67b99717a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip
new file mode 100644
index 0000000000..64e3235142
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip
new file mode 100644
index 0000000000..55e6744554
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip
new file mode 100644
index 0000000000..35a5fd4836
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.5.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip
new file mode 100644
index 0000000000..f1eb21ca4b
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip
new file mode 100644
index 0000000000..543c13c2ba
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.7.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip
new file mode 100644
index 0000000000..93abac386f
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.8.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip
new file mode 100644
index 0000000000..7dc194df58
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.3.9.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.Beta1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.Beta1.zip
new file mode 100644
index 0000000000..5adf788623
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.Beta1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip
new file mode 100644
index 0000000000..21f867c85e
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip
new file mode 100644
index 0000000000..18166eabbd
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip
new file mode 100644
index 0000000000..f03625a1ce
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip
new file mode 100644
index 0000000000..d78fbb1975
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip
new file mode 100644
index 0000000000..1e8944618e
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.4.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip
new file mode 100644
index 0000000000..fcae43982a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.0.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.0.zip
new file mode 100644
index 0000000000..a55e6c4c35
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.0.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.1.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.1.zip
new file mode 100644
index 0000000000..7cccbbe25a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip
new file mode 100644
index 0000000000..cee5783e10
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/unsupported-0.20.6.zip b/core/src/test/resources/org/elasticsearch/bwcompat/unsupported-0.20.6.zip
new file mode 100644
index 0000000000..753b577d3a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/unsupported-0.20.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/bwcompat/unsupportedrepo-0.20.6.zip b/core/src/test/resources/org/elasticsearch/bwcompat/unsupportedrepo-0.20.6.zip
new file mode 100644
index 0000000000..fab2f20fe6
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/bwcompat/unsupportedrepo-0.20.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip b/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip
new file mode 100644
index 0000000000..5772361979
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/cluster/routing/custom_routing_1_x.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip b/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip
new file mode 100644
index 0000000000..2fffc0b401
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/cluster/routing/default_routing_1_x.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/cluster/routing/issue_9023.zip b/core/src/test/resources/org/elasticsearch/cluster/routing/issue_9023.zip
new file mode 100644
index 0000000000..951573d5f2
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/cluster/routing/issue_9023.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/cluster/routing/shard_routes.txt b/core/src/test/resources/org/elasticsearch/cluster/routing/shard_routes.txt
new file mode 100644
index 0000000000..90889bf4e8
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/cluster/routing/shard_routes.txt
@@ -0,0 +1,217 @@
+# Index num_shards _type _id _routing pre_2.0_shard_id current_shard_id
+index 2 type1 foo null 1 1
+index 2 type1 foo 42 1 0
+index 2 type1 foo my_routing_key 0 1
+index 2 type1 bar null 0 1
+index 2 type1 bar 42 1 0
+index 2 type1 bar my_routing_key 0 1
+index 2 type1 foobar null 0 1
+index 2 type1 foobar 42 1 0
+index 2 type1 foobar my_routing_key 0 1
+index 2 type1 elasticsearch null 0 0
+index 2 type1 elasticsearch 42 1 0
+index 2 type1 elasticsearch my_routing_key 0 1
+index 2 type1 0956317778 null 0 1
+index 2 type1 0956317778 42 1 0
+index 2 type1 0956317778 my_routing_key 0 1
+index 2 type1 0 null 1 0
+index 2 type1 0 42 1 0
+index 2 type1 0 my_routing_key 0 1
+index 2 type2 foo null 1 1
+index 2 type2 foo 42 1 0
+index 2 type2 foo my_routing_key 0 1
+index 2 type2 bar null 0 1
+index 2 type2 bar 42 1 0
+index 2 type2 bar my_routing_key 0 1
+index 2 type2 foobar null 0 1
+index 2 type2 foobar 42 1 0
+index 2 type2 foobar my_routing_key 0 1
+index 2 type2 elasticsearch null 0 0
+index 2 type2 elasticsearch 42 1 0
+index 2 type2 elasticsearch my_routing_key 0 1
+index 2 type2 0956317778 null 0 1
+index 2 type2 0956317778 42 1 0
+index 2 type2 0956317778 my_routing_key 0 1
+index 2 type2 0 null 1 0
+index 2 type2 0 42 1 0
+index 2 type2 0 my_routing_key 0 1
+index 5 type1 foo null 4 1
+index 5 type1 foo 42 0 1
+index 5 type1 foo my_routing_key 3 1
+index 5 type1 bar null 4 3
+index 5 type1 bar 42 0 1
+index 5 type1 bar my_routing_key 3 1
+index 5 type1 foobar null 4 1
+index 5 type1 foobar 42 0 1
+index 5 type1 foobar my_routing_key 3 1
+index 5 type1 elasticsearch null 0 0
+index 5 type1 elasticsearch 42 0 1
+index 5 type1 elasticsearch my_routing_key 3 1
+index 5 type1 0956317778 null 3 4
+index 5 type1 0956317778 42 0 1
+index 5 type1 0956317778 my_routing_key 3 1
+index 5 type1 0 null 1 0
+index 5 type1 0 42 0 1
+index 5 type1 0 my_routing_key 3 1
+index 5 type2 foo null 4 1
+index 5 type2 foo 42 0 1
+index 5 type2 foo my_routing_key 3 1
+index 5 type2 bar null 4 3
+index 5 type2 bar 42 0 1
+index 5 type2 bar my_routing_key 3 1
+index 5 type2 foobar null 4 1
+index 5 type2 foobar 42 0 1
+index 5 type2 foobar my_routing_key 3 1
+index 5 type2 elasticsearch null 0 0
+index 5 type2 elasticsearch 42 0 1
+index 5 type2 elasticsearch my_routing_key 3 1
+index 5 type2 0956317778 null 3 4
+index 5 type2 0956317778 42 0 1
+index 5 type2 0956317778 my_routing_key 3 1
+index 5 type2 0 null 1 0
+index 5 type2 0 42 0 1
+index 5 type2 0 my_routing_key 3 1
+index 100 type1 foo null 49 81
+index 100 type1 foo 42 75 6
+index 100 type1 foo my_routing_key 38 1
+index 100 type1 bar null 34 53
+index 100 type1 bar 42 75 6
+index 100 type1 bar my_routing_key 38 1
+index 100 type1 foobar null 74 41
+index 100 type1 foobar 42 75 6
+index 100 type1 foobar my_routing_key 38 1
+index 100 type1 elasticsearch null 20 90
+index 100 type1 elasticsearch 42 75 6
+index 100 type1 elasticsearch my_routing_key 38 1
+index 100 type1 0956317778 null 18 39
+index 100 type1 0956317778 42 75 6
+index 100 type1 0956317778 my_routing_key 38 1
+index 100 type1 0 null 21 40
+index 100 type1 0 42 75 6
+index 100 type1 0 my_routing_key 38 1
+index 100 type2 foo null 49 81
+index 100 type2 foo 42 75 6
+index 100 type2 foo my_routing_key 38 1
+index 100 type2 bar null 34 53
+index 100 type2 bar 42 75 6
+index 100 type2 bar my_routing_key 38 1
+index 100 type2 foobar null 74 41
+index 100 type2 foobar 42 75 6
+index 100 type2 foobar my_routing_key 38 1
+index 100 type2 elasticsearch null 20 90
+index 100 type2 elasticsearch 42 75 6
+index 100 type2 elasticsearch my_routing_key 38 1
+index 100 type2 0956317778 null 18 39
+index 100 type2 0956317778 42 75 6
+index 100 type2 0956317778 my_routing_key 38 1
+index 100 type2 0 null 21 40
+index 100 type2 0 42 75 6
+index 100 type2 0 my_routing_key 38 1
+index2 2 type1 foo null 1 1
+index2 2 type1 foo 42 1 0
+index2 2 type1 foo my_routing_key 0 1
+index2 2 type1 bar null 0 1
+index2 2 type1 bar 42 1 0
+index2 2 type1 bar my_routing_key 0 1
+index2 2 type1 foobar null 0 1
+index2 2 type1 foobar 42 1 0
+index2 2 type1 foobar my_routing_key 0 1
+index2 2 type1 elasticsearch null 0 0
+index2 2 type1 elasticsearch 42 1 0
+index2 2 type1 elasticsearch my_routing_key 0 1
+index2 2 type1 0956317778 null 0 1
+index2 2 type1 0956317778 42 1 0
+index2 2 type1 0956317778 my_routing_key 0 1
+index2 2 type1 0 null 1 0
+index2 2 type1 0 42 1 0
+index2 2 type1 0 my_routing_key 0 1
+index2 2 type2 foo null 1 1
+index2 2 type2 foo 42 1 0
+index2 2 type2 foo my_routing_key 0 1
+index2 2 type2 bar null 0 1
+index2 2 type2 bar 42 1 0
+index2 2 type2 bar my_routing_key 0 1
+index2 2 type2 foobar null 0 1
+index2 2 type2 foobar 42 1 0
+index2 2 type2 foobar my_routing_key 0 1
+index2 2 type2 elasticsearch null 0 0
+index2 2 type2 elasticsearch 42 1 0
+index2 2 type2 elasticsearch my_routing_key 0 1
+index2 2 type2 0956317778 null 0 1
+index2 2 type2 0956317778 42 1 0
+index2 2 type2 0956317778 my_routing_key 0 1
+index2 2 type2 0 null 1 0
+index2 2 type2 0 42 1 0
+index2 2 type2 0 my_routing_key 0 1
+index2 5 type1 foo null 4 1
+index2 5 type1 foo 42 0 1
+index2 5 type1 foo my_routing_key 3 1
+index2 5 type1 bar null 4 3
+index2 5 type1 bar 42 0 1
+index2 5 type1 bar my_routing_key 3 1
+index2 5 type1 foobar null 4 1
+index2 5 type1 foobar 42 0 1
+index2 5 type1 foobar my_routing_key 3 1
+index2 5 type1 elasticsearch null 0 0
+index2 5 type1 elasticsearch 42 0 1
+index2 5 type1 elasticsearch my_routing_key 3 1
+index2 5 type1 0956317778 null 3 4
+index2 5 type1 0956317778 42 0 1
+index2 5 type1 0956317778 my_routing_key 3 1
+index2 5 type1 0 null 1 0
+index2 5 type1 0 42 0 1
+index2 5 type1 0 my_routing_key 3 1
+index2 5 type2 foo null 4 1
+index2 5 type2 foo 42 0 1
+index2 5 type2 foo my_routing_key 3 1
+index2 5 type2 bar null 4 3
+index2 5 type2 bar 42 0 1
+index2 5 type2 bar my_routing_key 3 1
+index2 5 type2 foobar null 4 1
+index2 5 type2 foobar 42 0 1
+index2 5 type2 foobar my_routing_key 3 1
+index2 5 type2 elasticsearch null 0 0
+index2 5 type2 elasticsearch 42 0 1
+index2 5 type2 elasticsearch my_routing_key 3 1
+index2 5 type2 0956317778 null 3 4
+index2 5 type2 0956317778 42 0 1
+index2 5 type2 0956317778 my_routing_key 3 1
+index2 5 type2 0 null 1 0
+index2 5 type2 0 42 0 1
+index2 5 type2 0 my_routing_key 3 1
+index2 100 type1 foo null 49 81
+index2 100 type1 foo 42 75 6
+index2 100 type1 foo my_routing_key 38 1
+index2 100 type1 bar null 34 53
+index2 100 type1 bar 42 75 6
+index2 100 type1 bar my_routing_key 38 1
+index2 100 type1 foobar null 74 41
+index2 100 type1 foobar 42 75 6
+index2 100 type1 foobar my_routing_key 38 1
+index2 100 type1 elasticsearch null 20 90
+index2 100 type1 elasticsearch 42 75 6
+index2 100 type1 elasticsearch my_routing_key 38 1
+index2 100 type1 0956317778 null 18 39
+index2 100 type1 0956317778 42 75 6
+index2 100 type1 0956317778 my_routing_key 38 1
+index2 100 type1 0 null 21 40
+index2 100 type1 0 42 75 6
+index2 100 type1 0 my_routing_key 38 1
+index2 100 type2 foo null 49 81
+index2 100 type2 foo 42 75 6
+index2 100 type2 foo my_routing_key 38 1
+index2 100 type2 bar null 34 53
+index2 100 type2 bar 42 75 6
+index2 100 type2 bar my_routing_key 38 1
+index2 100 type2 foobar null 74 41
+index2 100 type2 foobar 42 75 6
+index2 100 type2 foobar my_routing_key 38 1
+index2 100 type2 elasticsearch null 20 90
+index2 100 type2 elasticsearch 42 75 6
+index2 100 type2 elasticsearch my_routing_key 38 1
+index2 100 type2 0956317778 null 18 39
+index2 100 type2 0956317778 42 75 6
+index2 100 type2 0956317778 my_routing_key 38 1
+index2 100 type2 0 null 21 40
+index2 100 type2 0 42 75 6
+index2 100 type2 0 my_routing_key 38 1
diff --git a/core/src/test/resources/org/elasticsearch/common/cli/tool-cmd1.help b/core/src/test/resources/org/elasticsearch/common/cli/tool-cmd1.help
new file mode 100644
index 0000000000..d083e3a653
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/cli/tool-cmd1.help
@@ -0,0 +1 @@
+cmd1 help \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/common/cli/tool.help b/core/src/test/resources/org/elasticsearch/common/cli/tool.help
new file mode 100644
index 0000000000..023b1accdf
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/cli/tool.help
@@ -0,0 +1 @@
+tool help \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/dir/file2.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/dir/file2.txt
new file mode 100644
index 0000000000..5bdcfc19f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/dir/file2.txt
@@ -0,0 +1 @@
+version1
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/file1.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/file1.txt
new file mode 100644
index 0000000000..5bdcfc19f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v1/file1.txt
@@ -0,0 +1 @@
+version1
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/file2.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/file2.txt
new file mode 100644
index 0000000000..df7af2c382
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/file2.txt
@@ -0,0 +1 @@
+version2
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/subdir/file4.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/subdir/file4.txt
new file mode 100644
index 0000000000..5bdcfc19f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/dir/subdir/file4.txt
@@ -0,0 +1 @@
+version1
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file1.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file1.txt
new file mode 100644
index 0000000000..df7af2c382
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file1.txt
@@ -0,0 +1 @@
+version2
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file3.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file3.txt
new file mode 100644
index 0000000000..5bdcfc19f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v2/file3.txt
@@ -0,0 +1 @@
+version1
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/file2.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/file2.txt
new file mode 100644
index 0000000000..777d3c2b51
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/file2.txt
@@ -0,0 +1 @@
+version3
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file4.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file4.txt
new file mode 100644
index 0000000000..df7af2c382
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file4.txt
@@ -0,0 +1 @@
+version2
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file5.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file5.txt
new file mode 100644
index 0000000000..5bdcfc19f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/dir/subdir/file5.txt
@@ -0,0 +1 @@
+version1
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file1.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file1.txt
new file mode 100644
index 0000000000..777d3c2b51
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file1.txt
@@ -0,0 +1 @@
+version3
diff --git a/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file3.txt b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file3.txt
new file mode 100644
index 0000000000..df7af2c382
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/io/copyappend/v3/file3.txt
@@ -0,0 +1 @@
+version2
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/logging.yml
new file mode 100644
index 0000000000..bd7a15f443
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/logging.yml
@@ -0,0 +1,12 @@
+# you can override this using by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console
+logger:
+ test: TRACE, console
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/logging.yml
new file mode 100644
index 0000000000..71fbce639a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/logging.yml
@@ -0,0 +1,10 @@
+logger:
+ # log action execution errors for easier debugging
+ second: DEBUG, console2
+
+appender:
+ console2:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/test3/logging.yml b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/test3/logging.yml
new file mode 100644
index 0000000000..edfe0c9ed4
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/common/logging/log4j/config/test2/test3/logging.yml
@@ -0,0 +1,10 @@
+logger:
+ # log action execution errors for easier debugging
+ third: DEBUG, console3
+
+appender:
+ console3:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/gateway/global-3.st b/core/src/test/resources/org/elasticsearch/gateway/global-3.st
new file mode 100644
index 0000000000..b6b4e8d762
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/gateway/global-3.st
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/query/config/scripts/full-query-template.mustache b/core/src/test/resources/org/elasticsearch/index/query/config/scripts/full-query-template.mustache
new file mode 100644
index 0000000000..519141472f
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/query/config/scripts/full-query-template.mustache
@@ -0,0 +1,6 @@
+{
+ "query": {
+ "match": { "{{myField}}" : "{{myValue}}" }
+ },
+ "size" : {{mySize}}
+}
diff --git a/core/src/test/resources/org/elasticsearch/index/query/config/scripts/storedTemplate.mustache b/core/src/test/resources/org/elasticsearch/index/query/config/scripts/storedTemplate.mustache
new file mode 100644
index 0000000000..a779da7c46
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/query/config/scripts/storedTemplate.mustache
@@ -0,0 +1,3 @@
+{
+ "match_{{template}}": {}
+}
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/legacy_translogs.zip b/core/src/test/resources/org/elasticsearch/index/translog/legacy_translogs.zip
new file mode 100644
index 0000000000..c77e678801
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/legacy_translogs.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-invalid-first-byte.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-invalid-first-byte.binary
new file mode 100644
index 0000000000..2eb76cf956
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-invalid-first-byte.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-v0.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-v0.binary
new file mode 100644
index 0000000000..303bb2ef50
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-v0.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary
new file mode 100644
index 0000000000..d74970f18b
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary
new file mode 100644
index 0000000000..9f23966a41
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-truncated.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-truncated.binary
new file mode 100644
index 0000000000..30e06c0e38
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1-truncated.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/index/translog/translog-v1.binary b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1.binary
new file mode 100644
index 0000000000..f166c8a571
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/index/translog/translog-v1.binary
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties b/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
new file mode 100644
index 0000000000..4487d7c8de
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
@@ -0,0 +1,21 @@
+################################################################
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+################################################################
+description=This is a description for a dummy test site plugin.
+version=0.0.7-BOND-SITE
+
diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html b/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html
new file mode 100644
index 0000000000..ceb8774542
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html b/core/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html
new file mode 100644
index 0000000000..ceb8774542
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/nodesinfo/node4/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html b/core/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html
new file mode 100644
index 0000000000..ceb8774542
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/nodesinfo/node4/test-no-version-plugin/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin on Node 3</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/plugins/anotherplugin/_site/index.html b/core/src/test/resources/org/elasticsearch/plugins/anotherplugin/_site/index.html
new file mode 100644
index 0000000000..d1735a6585
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/anotherplugin/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Another Site Plugin</title>
+</head>
+<body>
+<p>Welcome to this another elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty b/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty
diff --git a/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/index.html b/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/index.html
new file mode 100644
index 0000000000..320a23cf36
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/dummy/_site/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/plugins/loading/jar/in-jar-plugin.jar b/core/src/test/resources/org/elasticsearch/plugins/loading/jar/in-jar-plugin.jar
new file mode 100644
index 0000000000..6b854823f1
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/loading/jar/in-jar-plugin.jar
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/loading/zip/in-zip-plugin.jar b/core/src/test/resources/org/elasticsearch/plugins/loading/zip/in-zip-plugin.jar
new file mode 100644
index 0000000000..6b35430f34
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/loading/zip/in-zip-plugin.jar
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_file.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_file.zip
new file mode 100644
index 0000000000..0dbf53d89f
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_file.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_site.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_site.zip
new file mode 100644
index 0000000000..823f20c731
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_folder_site.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_single_folder.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_single_folder.zip
new file mode 100644
index 0000000000..39cc092405
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_single_folder.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_and_config.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_and_config.zip
new file mode 100644
index 0000000000..f2bfc35e18
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_and_config.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_only.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_only.zip
new file mode 100644
index 0000000000..caacb0008a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_bin_only.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_classfile.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_classfile.zip
new file mode 100644
index 0000000000..29bedba1fb
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_classfile.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v1.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v1.zip
new file mode 100644
index 0000000000..b3f4d6a330
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v1.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v2.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v2.zip
new file mode 100644
index 0000000000..524f33c026
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v2.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v3.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v3.zip
new file mode 100644
index 0000000000..39b7d8601c
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_config_v3.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_with_sourcefiles.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_sourcefiles.zip
new file mode 100644
index 0000000000..ccd194fc73
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_with_sourcefiles.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/plugin_without_folders.zip b/core/src/test/resources/org/elasticsearch/plugins/plugin_without_folders.zip
new file mode 100644
index 0000000000..7f1eaa28dc
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/plugin_without_folders.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir/index.html b/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir/index.html
new file mode 100644
index 0000000000..f18ae8070e
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir/index.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin (subdir)</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir_without_index/page.html b/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir_without_index/page.html
new file mode 100644
index 0000000000..407ecddfd4
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/plugins/subdir/_site/dir_without_index/page.html
@@ -0,0 +1,9 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Dummy Site Plugin (page)</title>
+</head>
+<body>
+<p>Welcome to this dummy elasticsearch plugin</p>
+</body>
+</html>
diff --git a/core/src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade/index-0.90.6.zip b/core/src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade/index-0.90.6.zip
new file mode 100644
index 0000000000..8a77e4c8ec
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/rest/action/admin/indices/upgrade/index-0.90.6.zip
Binary files differ
diff --git a/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression
new file mode 100644
index 0000000000..9a037142aa
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.expression
@@ -0,0 +1 @@
+10 \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.groovy b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.groovy
new file mode 100644
index 0000000000..d8263ee986
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.groovy
@@ -0,0 +1 @@
+2 \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.mustache b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.mustache
new file mode 100644
index 0000000000..2edeafb09d
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/script/config/scripts/script1.mustache
@@ -0,0 +1 @@
+20 \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/script/config/scripts/script2.groovy b/core/src/test/resources/org/elasticsearch/script/config/scripts/script2.groovy
new file mode 100644
index 0000000000..175e351ac7
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/script/config/scripts/script2.groovy
@@ -0,0 +1 @@
+factor*2 \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy
new file mode 100644
index 0000000000..7178e05efb
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_no_params.groovy
@@ -0,0 +1 @@
+return _subset_freq + _subset_size + _superset_freq + _superset_size
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy
new file mode 100644
index 0000000000..0099a531fd
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/bucket/config/scripts/significance_script_with_params.groovy
@@ -0,0 +1 @@
+return param*(_subset_freq + _subset_size + _superset_freq + _superset_size)/param
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/combine_script.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/combine_script.groovy
new file mode 100644
index 0000000000..da13f60875
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/combine_script.groovy
@@ -0,0 +1 @@
+newaggregation = []; sum = 0;for (a in _agg) { sum += a}; newaggregation.add(sum); return newaggregation \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/init_script.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/init_script.groovy
new file mode 100644
index 0000000000..6cf4f40b6d
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/init_script.groovy
@@ -0,0 +1 @@
+vars.multiplier = 3 \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/map_script.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/map_script.groovy
new file mode 100644
index 0000000000..aece1a7d84
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/map_script.groovy
@@ -0,0 +1 @@
+_agg.add(vars.multiplier) \ No newline at end of file
diff --git a/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/reduce_script.groovy b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/reduce_script.groovy
new file mode 100644
index 0000000000..835dcfbb3a
--- /dev/null
+++ b/core/src/test/resources/org/elasticsearch/search/aggregations/metrics/scripted/conf/scripts/reduce_script.groovy
@@ -0,0 +1 @@
+newaggregation = []; sum = 0;for (aggregation in _aggs) { for (a in aggregation) { sum += a} }; newaggregation.add(sum); return newaggregation \ No newline at end of file
diff --git a/core/src/test/resources/packaging/scripts/20_tar_package.bats b/core/src/test/resources/packaging/scripts/20_tar_package.bats
new file mode 100644
index 0000000000..b5daaece1d
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/20_tar_package.bats
@@ -0,0 +1,95 @@
+#!/usr/bin/env bats
+
+# This file is used to test the tar gz package.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+# Cleans everything for the 1st execution
+setup() {
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ clean_before_test
+ fi
+}
+
+##################################
+# Install TAR GZ package
+##################################
+@test "[TAR] tar command is available" {
+ skip_not_tar_gz
+ run tar --version
+ [ "$status" -eq 0 ]
+}
+
+@test "[TAR] archive is available" {
+ skip_not_tar_gz
+ count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l)
+ [ "$count" -eq 1 ]
+}
+
+@test "[TAR] archive is not installed" {
+ skip_not_tar_gz
+ count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
+ [ "$count" -eq 0 ]
+}
+
+@test "[TAR] install archive" {
+ skip_not_tar_gz
+
+ # Install the archive
+ install_archive
+
+ count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
+ [ "$count" -eq 1 ]
+}
+
+##################################
+# Check that the archive is correctly installed
+##################################
+@test "[TAR] verify archive installation" {
+ skip_not_tar_gz
+
+ verify_archive_installation "/tmp/elasticsearch"
+}
+
+##################################
+# Check that Elasticsearch is working
+##################################
+@test "[TAR] test elasticsearch" {
+ skip_not_tar_gz
+
+ start_elasticsearch_service
+
+ run_elasticsearch_tests
+
+ stop_elasticsearch_service
+
+ run rm -rf "/tmp/elasticsearch"
+ [ "$status" -eq 0 ]
+}
diff --git a/core/src/test/resources/packaging/scripts/25_tar_plugins.bats b/core/src/test/resources/packaging/scripts/25_tar_plugins.bats
new file mode 100644
index 0000000000..d61468be1d
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/25_tar_plugins.bats
@@ -0,0 +1,344 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation and removal
+# of plugins with a tar gz archive.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+setup() {
+ # Cleans everything for every test execution
+ clean_before_test
+
+ # Download Marvel and Shield
+ MARVEL_ZIP="$PWD/marvel.zip"
+ SHIELD_ZIP="$PWD/shield.zip"
+
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ if [ ! -e "$MARVEL_ZIP" ]; then
+ wget --quiet -O "$MARVEL_ZIP" "http://download.elasticsearch.org/elasticsearch/marvel/marvel-latest.zip"
+ fi
+ if [ ! -e "$SHIELD_ZIP" ]; then
+ wget --quiet -O "$SHIELD_ZIP" "http://download.elasticsearch.org/elasticsearch/shield/shield-latest.zip"
+ fi
+ fi
+}
+
+##################################
+# Install plugins with a tar archive
+##################################
+@test "[TAR] install marvel plugin" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Checks that plugin archive is available
+ [ -e "$MARVEL_ZIP" ]
+
+ # Install Marvel
+ run /tmp/elasticsearch/bin/plugin -i elasticsearch/marvel/latest -u "file://$MARVEL_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Marvel is correctly installed
+ assert_file_exist "/tmp/elasticsearch/plugins/marvel"
+
+ start_elasticsearch_service
+
+ run curl -XGET 'http://localhost:9200/_cat/plugins?v=false&h=component'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "marvel"
+
+ stop_elasticsearch_service
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/marvel/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/tmp/elasticsearch/plugins/marvel"
+}
+
+@test "[TAR] install marvel plugin with a custom path.plugins" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Creates a temporary directory
+ TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Modify the path.plugins setting in configuration file
+ echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/tmp/elasticsearch/config/elasticsearch.yml"
+
+ run chown -R elasticsearch:elasticsearch "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$MARVEL_ZIP" ]
+
+ # Install Marvel
+ run /tmp/elasticsearch/bin/plugin -i elasticsearch/marvel/latest -u "file://$MARVEL_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Marvel is correctly installed
+ assert_file_exist "$TEMP_PLUGINS_DIR/marvel"
+
+ start_elasticsearch_service
+
+ run curl -XGET 'http://localhost:9200/_cat/plugins?v=false&h=component'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "marvel"
+
+ stop_elasticsearch_service
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/marvel/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "$TEMP_PLUGINS_DIR/marvel"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[TAR] install shield plugin" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /tmp/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "/tmp/elasticsearch/config/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
+ assert_file_exist "/tmp/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
+ assert_file_not_exist "/tmp/elasticsearch/plugins/shield"
+}
+
+@test "[TAR] install shield plugin with a custom path.plugins" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Creates a temporary directory
+ TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Modify the path.plugins setting in configuration file
+ echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/tmp/elasticsearch/config/elasticsearch.yml"
+
+ run chown -R elasticsearch:elasticsearch "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /tmp/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "/tmp/elasticsearch/config/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
+ assert_file_exist "$TEMP_PLUGINS_DIR/shield"
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield"
+ assert_file_exist "/tmp/elasticsearch/config/shield/role_mapping.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/roles.yml"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users"
+ assert_file_exist "/tmp/elasticsearch/config/shield/users_roles"
+ assert_file_not_exist "$TEMP_PLUGINS_DIR/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[TAR] install shield plugin with a custom CONFIG_DIR" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Creates a temporary directory
+ TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Move configuration files to the new configuration directory
+ run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR
+ [ "$status" -eq 0 ]
+
+ run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml"
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield with the CONF_DIR environment variable
+ run env "CONF_DIR=$TEMP_CONFIG_DIR" /tmp/elasticsearch/bin/plugin -i "elasticsearch/shield/latest" -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_exist "/tmp/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_not_exist "/tmp/elasticsearch/plugins/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[TAR] install shield plugin with a custom ES_JAVA_OPTS" {
+
+ # Install the archive
+ install_archive
+
+ # Checks that the archive is correctly installed
+ verify_archive_installation
+
+ # Creates a temporary directory
+ TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Move configuration files to the new configuration directory
+ run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR
+ [ "$status" -eq 0 ]
+
+ run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml"
+
+ # Export ES_JAVA_OPTS
+ export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /tmp/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/tmp/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_exist "/tmp/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /tmp/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/tmp/elasticsearch/bin/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_not_exist "/tmp/elasticsearch/plugins/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+}
diff --git a/core/src/test/resources/packaging/scripts/30_deb_package.bats b/core/src/test/resources/packaging/scripts/30_deb_package.bats
new file mode 100644
index 0000000000..aa7a370d80
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/30_deb_package.bats
@@ -0,0 +1,177 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation and removal
+# of a Debian package.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+# Cleans everything for the 1st execution
+setup() {
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ clean_before_test
+ fi
+}
+
+##################################
+# Install DEB package
+##################################
+@test "[DEB] dpkg command is available" {
+ skip_not_dpkg
+ run dpkg --version
+ [ "$status" -eq 0 ]
+}
+
+@test "[DEB] package is available" {
+ skip_not_dpkg
+ count=$(find . -type f -name 'elastic*.deb' | wc -l)
+ [ "$count" -eq 1 ]
+}
+
+@test "[DEB] package is not installed" {
+ skip_not_dpkg
+ run dpkg -s 'elasticsearch' >&2
+ [ "$status" -eq 1 ]
+}
+
+@test "[DEB] install package" {
+ skip_not_dpkg
+ run dpkg -i elasticsearch*.deb >&2
+ [ "$status" -eq 0 ]
+}
+
+@test "[DEB] package is installed" {
+ skip_not_dpkg
+ run dpkg -s 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+}
+
+##################################
+# Check that the package is correctly installed
+##################################
+@test "[DEB] verify package installation" {
+ skip_not_dpkg
+
+ verify_package_installation
+}
+
+##################################
+# Check that Elasticsearch is working
+##################################
+@test "[DEB] test elasticsearch" {
+ skip_not_dpkg
+
+ start_elasticsearch_service
+
+ run_elasticsearch_tests
+}
+
+##################################
+# Uninstall DEB package
+##################################
+@test "[DEB] remove package" {
+ skip_not_dpkg
+ run dpkg -r 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+}
+
+@test "[DEB] package has been removed" {
+ skip_not_dpkg
+ run dpkg -s 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -i "status" | grep -i "deinstall ok"
+}
+
+@test "[DEB] verify package removal" {
+ skip_not_dpkg
+
+ # The removal must stop the service
+ count=$(ps | grep Elasticsearch | wc -l)
+ [ "$count" -eq 0 ]
+
+ # The removal must disable the service
+ # see prerm file
+ if is_systemd; then
+        # Debian systemd distros usually return exit code 3
+ run systemctl status elasticsearch.service
+ [ "$status" -eq 3 ]
+
+ run systemctl is-enabled elasticsearch.service
+ [ "$status" -eq 1 ]
+ fi
+
+ # Those directories are deleted when removing the package
+ # see postrm file
+ assert_file_not_exist "/var/log/elasticsearch"
+ assert_file_not_exist "/usr/share/elasticsearch/plugins"
+ assert_file_not_exist "/var/run/elasticsearch"
+
+ # The configuration files are still here
+ assert_file_exist "/etc/elasticsearch"
+ assert_file_exist "/etc/elasticsearch/elasticsearch.yml"
+ assert_file_exist "/etc/elasticsearch/logging.yml"
+
+ # The env file is still here
+ assert_file_exist "/etc/default/elasticsearch"
+
+ # The service files are still here
+ assert_file_exist "/etc/init.d/elasticsearch"
+ assert_file_exist "/usr/lib/systemd/system/elasticsearch.service"
+}
+
+@test "[DEB] purge package" {
+ skip_not_dpkg
+ run dpkg --purge 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+}
+
+@test "[DEB] verify package purge" {
+ skip_not_dpkg
+
+ # all remaining files are deleted by the purge
+ assert_file_not_exist "/etc/elasticsearch"
+ assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
+ assert_file_not_exist "/etc/elasticsearch/logging.yml"
+
+ assert_file_not_exist "/etc/default/elasticsearch"
+
+ assert_file_not_exist "/etc/init.d/elasticsearch"
+ assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
+
+ assert_file_not_exist "/usr/share/elasticsearch"
+
+ assert_file_not_exist "/usr/share/doc/elasticsearch"
+ assert_file_not_exist "/usr/share/doc/elasticsearch/copyright"
+}
+
+@test "[DEB] package has been completely removed" {
+ skip_not_dpkg
+ run dpkg -s 'elasticsearch' >&2
+ [ "$status" -eq 1 ]
+}
diff --git a/core/src/test/resources/packaging/scripts/40_rpm_package.bats b/core/src/test/resources/packaging/scripts/40_rpm_package.bats
new file mode 100644
index 0000000000..6a9007e4bb
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/40_rpm_package.bats
@@ -0,0 +1,142 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation of a RPM package.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+# Cleans everything for the 1st execution
+setup() {
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ clean_before_test
+ fi
+}
+
+##################################
+# Install RPM package
+##################################
+@test "[RPM] rpm command is available" {
+ skip_not_rpm
+ run rpm --version
+ [ "$status" -eq 0 ]
+}
+
+@test "[RPM] package is available" {
+ skip_not_rpm
+ count=$(find . -type f -name 'elastic*.rpm' | wc -l)
+ [ "$count" -eq 1 ]
+}
+
+@test "[RPM] package is not installed" {
+ skip_not_rpm
+ run rpm -qe 'elasticsearch' >&2
+ [ "$status" -eq 1 ]
+}
+
+@test "[RPM] install package" {
+ skip_not_rpm
+ run rpm -i elasticsearch*.rpm >&2
+ [ "$status" -eq 0 ]
+}
+
+@test "[RPM] package is installed" {
+ skip_not_rpm
+ run rpm -qe 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+}
+
+##################################
+# Check that the package is correctly installed
+##################################
+@test "[RPM] verify package installation" {
+ skip_not_rpm
+
+ verify_package_installation
+}
+
+##################################
+# Check that Elasticsearch is working
+##################################
+@test "[RPM] test elasticsearch" {
+ skip_not_rpm
+
+ start_elasticsearch_service
+
+ run_elasticsearch_tests
+}
+
+##################################
+# Uninstall RPM package
+##################################
+@test "[RPM] remove package" {
+ skip_not_rpm
+ run rpm -e 'elasticsearch' >&2
+ [ "$status" -eq 0 ]
+}
+
+@test "[RPM] package has been removed" {
+ skip_not_rpm
+ run rpm -qe 'elasticsearch' >&2
+ [ "$status" -eq 1 ]
+}
+
+@test "[RPM] verify package removal" {
+ skip_not_rpm
+
+ # The removal must stop the service
+ count=$(ps | grep Elasticsearch | wc -l)
+ [ "$count" -eq 0 ]
+
+ # The removal must disable the service
+ # see prerm file
+ if is_systemd; then
+        # Redhat based systemd distros usually return exit code 1
+ # OpenSUSE13 returns 0
+ run systemctl status elasticsearch.service
+ [ "$status" -eq 1 ] || [ "$status" -eq 0 ]
+
+ run systemctl is-enabled elasticsearch.service
+ [ "$status" -eq 1 ]
+ fi
+
+ # Those directories are deleted when removing the package
+ # see postrm file
+ assert_file_not_exist "/var/log/elasticsearch"
+ assert_file_not_exist "/usr/share/elasticsearch/plugins"
+ assert_file_not_exist "/var/run/elasticsearch"
+
+ assert_file_not_exist "/etc/elasticsearch"
+ assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml"
+ assert_file_not_exist "/etc/elasticsearch/logging.yml"
+
+ assert_file_not_exist "/etc/init.d/elasticsearch"
+ assert_file_not_exist "/usr/lib/systemd/system/elasticsearch.service"
+
+ assert_file_not_exist "/etc/sysconfig/elasticsearch"
+}
diff --git a/core/src/test/resources/packaging/scripts/50_plugins.bats b/core/src/test/resources/packaging/scripts/50_plugins.bats
new file mode 100644
index 0000000000..bb78a4c37f
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/50_plugins.bats
@@ -0,0 +1,380 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation and removal
+# of plugins when Elasticsearch is installed as a DEB/RPM
+# package.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+setup() {
+ # Cleans everything for every test execution
+ clean_before_test
+
+ # Download Marvel and Shield
+ MARVEL_ZIP="$PWD/marvel.zip"
+ SHIELD_ZIP="$PWD/shield.zip"
+
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ if [ ! -e "$MARVEL_ZIP" ]; then
+ wget --quiet -O "$MARVEL_ZIP" "http://download.elasticsearch.org/elasticsearch/marvel/marvel-latest.zip"
+ fi
+ if [ ! -e "$SHIELD_ZIP" ]; then
+ wget --quiet -O "$SHIELD_ZIP" "http://download.elasticsearch.org/elasticsearch/shield/shield-latest.zip"
+ fi
+ fi
+}
+
+# Install a deb or rpm package
+install_package() {
+ if is_rpm; then
+ run rpm -i elasticsearch*.rpm >&2
+ [ "$status" -eq 0 ]
+
+ elif is_dpkg; then
+ run dpkg -i elasticsearch*.deb >&2
+ [ "$status" -eq 0 ]
+ fi
+}
+
+##################################
+# Install plugins with DEB/RPM package
+##################################
+@test "[PLUGINS] install marvel plugin" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Checks that plugin archive is available
+ [ -e "$MARVEL_ZIP" ]
+
+ # Install Marvel
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/marvel/latest -u "file://$MARVEL_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Marvel is correctly installed
+ assert_file_exist "/usr/share/elasticsearch/plugins/marvel"
+
+ start_elasticsearch_service
+
+ run curl -XGET 'http://localhost:9200/_cat/plugins?v=false&h=component'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "marvel"
+
+ stop_elasticsearch_service
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/marvel/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/usr/share/elasticsearch/plugins/marvel"
+}
+
+@test "[PLUGINS] install marvel plugin with a custom path.plugins" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Creates a temporary directory
+ TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Modify the path.plugins setting in configuration file
+ echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/etc/elasticsearch/elasticsearch.yml"
+
+ # Sets privileges
+ run chown -R root:elasticsearch "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ run chmod -R 750 "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$MARVEL_ZIP" ]
+
+ # Install Marvel
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/marvel/latest -u "file://$MARVEL_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Marvel is correctly installed
+ assert_file_exist "$TEMP_PLUGINS_DIR/marvel"
+
+ start_elasticsearch_service
+
+ run curl -XGET 'http://localhost:9200/_cat/plugins?v=false&h=component'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "marvel"
+
+ stop_elasticsearch_service
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/marvel/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "$TEMP_PLUGINS_DIR/marvel"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[PLUGINS] install shield plugin" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "/etc/elasticsearch/shield"
+ assert_file_exist "/etc/elasticsearch/shield/role_mapping.yml"
+ assert_file_exist "/etc/elasticsearch/shield/roles.yml"
+ assert_file_exist "/etc/elasticsearch/shield/users"
+ assert_file_exist "/etc/elasticsearch/shield/users_roles"
+ assert_file_exist "/usr/share/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/etc/elasticsearch/shield"
+ assert_file_exist "/etc/elasticsearch/shield/role_mapping.yml"
+ assert_file_exist "/etc/elasticsearch/shield/roles.yml"
+ assert_file_exist "/etc/elasticsearch/shield/users"
+ assert_file_exist "/etc/elasticsearch/shield/users_roles"
+ assert_file_not_exist "/usr/share/elasticsearch/plugins/shield"
+}
+
+@test "[PLUGINS] install shield plugin with a custom path.plugins" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Creates a temporary directory
+ TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Modify the path.plugins setting in configuration file
+ echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/etc/elasticsearch/elasticsearch.yml"
+
+ # Sets privileges
+ run chown -R root:elasticsearch "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ run chmod -R 750 "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "/etc/elasticsearch/shield"
+ assert_file_exist "/etc/elasticsearch/shield/role_mapping.yml"
+ assert_file_exist "/etc/elasticsearch/shield/roles.yml"
+ assert_file_exist "/etc/elasticsearch/shield/users"
+ assert_file_exist "/etc/elasticsearch/shield/users_roles"
+ assert_file_exist "$TEMP_PLUGINS_DIR/shield"
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/etc/elasticsearch/shield"
+ assert_file_exist "/etc/elasticsearch/shield/role_mapping.yml"
+ assert_file_exist "/etc/elasticsearch/shield/roles.yml"
+ assert_file_exist "/etc/elasticsearch/shield/users"
+ assert_file_exist "/etc/elasticsearch/shield/users_roles"
+    assert_file_not_exist "$TEMP_PLUGINS_DIR/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_PLUGINS_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[PLUGINS] install shield plugin with a custom CONFIG_DIR" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Creates a temporary directory
+ TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Modify the CONF_DIR variable in environment file
+ if is_rpm; then
+ echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/sysconfig/elasticsearch"
+ elif is_dpkg; then
+ echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/default/elasticsearch"
+ fi
+
+ # Move configuration files to the new configuration directory
+ run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR
+ [ "$status" -eq 0 ]
+
+ assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml"
+
+ # Sets privileges
+ run chown -R root:elasticsearch "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ run chmod -R 750 "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+    assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_exist "/usr/share/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+    assert_file_not_exist "/usr/share/elasticsearch/plugins/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+}
+
+@test "[PLUGINS] install shield plugin with a custom ES_JAVA_OPTS" {
+
+ # Install the package
+ install_package
+
+ # Checks that the package is correctly installed
+ verify_package_installation
+
+ # Creates a temporary directory
+ TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'`
+
+ # Move configuration files to the new configuration directory
+ run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR
+ [ "$status" -eq 0 ]
+
+ assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml"
+
+ # Sets privileges
+ run chown -R root:elasticsearch "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ run chmod -R 750 "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ # Export ES_JAVA_OPTS
+ export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+
+ # Checks that plugin archive is available
+ [ -e "$SHIELD_ZIP" ]
+
+ # Install Shield
+ run /usr/share/elasticsearch/bin/plugin -i elasticsearch/shield/latest -u "file://$SHIELD_ZIP"
+ [ "$status" -eq 0 ]
+
+ # Checks that Shield is correctly installed
+ assert_file_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/esusers"
+ assert_file_exist "/usr/share/elasticsearch/bin/shield/syskeygen"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_exist "/usr/share/elasticsearch/plugins/shield"
+
+ # Remove the plugin
+ run /usr/share/elasticsearch/bin/plugin -r elasticsearch/shield/latest
+ [ "$status" -eq 0 ]
+
+ # Checks that the plugin is correctly removed
+ assert_file_not_exist "/usr/share/elasticsearch/bin/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/role_mapping.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/roles.yml"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users"
+ assert_file_exist "$TEMP_CONFIG_DIR/shield/users_roles"
+ assert_file_not_exist "/usr/share/elasticsearch/plugins/shield"
+
+ # Delete the custom plugins directory
+ run rm -rf "$TEMP_CONFIG_DIR"
+ [ "$status" -eq 0 ]
+}
diff --git a/core/src/test/resources/packaging/scripts/50_systemd.bats b/core/src/test/resources/packaging/scripts/50_systemd.bats
new file mode 100644
index 0000000000..addd4ff1ac
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/50_systemd.bats
@@ -0,0 +1,146 @@
+#!/usr/bin/env bats
+
+# This file is used to test the elasticsearch Systemd setup.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Load test utilities
+load packaging_test_utils
+
+# Cleans everything for the 1st execution
+setup() {
+ if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
+ clean_before_test
+ fi
+
+
+ # Installs a package before test
+ if is_dpkg; then
+ dpkg -i elasticsearch*.deb >&2 || true
+ fi
+ if is_rpm; then
+ rpm -i elasticsearch*.rpm >&2 || true
+ fi
+}
+
+@test "[SYSTEMD] daemon reload" {
+ skip_not_systemd
+
+ run systemctl daemon-reload
+ [ "$status" -eq 0 ]
+}
+
+@test "[SYSTEMD] enable" {
+ skip_not_systemd
+
+ run systemctl enable elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl is-enabled elasticsearch.service
+ [ "$status" -eq 0 ]
+}
+
+@test "[SYSTEMD] start" {
+ skip_not_systemd
+
+ run systemctl start elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ wait_for_elasticsearch_status
+
+ assert_file_exist "/var/run/elasticsearch/elasticsearch.pid"
+}
+
+@test "[SYSTEMD] start (running)" {
+ skip_not_systemd
+
+ run systemctl start elasticsearch.service
+ [ "$status" -eq 0 ]
+}
+
+@test "[SYSTEMD] is active (running)" {
+ skip_not_systemd
+
+ run systemctl is-active elasticsearch.service
+ [ "$status" -eq 0 ]
+ [ "$output" = "active" ]
+}
+
+@test "[SYSTEMD] status (running)" {
+ skip_not_systemd
+
+ run systemctl status elasticsearch.service
+ [ "$status" -eq 0 ]
+}
+
+##################################
+# Check that Elasticsearch is working
+##################################
+@test "[SYSTEMD] test elasticsearch" {
+ skip_not_systemd
+
+ run_elasticsearch_tests
+}
+
+@test "[SYSTEMD] restart" {
+ skip_not_systemd
+
+ run systemctl restart elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ wait_for_elasticsearch_status
+
+ run service elasticsearch status
+ [ "$status" -eq 0 ]
+}
+
+@test "[SYSTEMD] stop (running)" {
+ skip_not_systemd
+
+ run systemctl stop elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl status elasticsearch.service
+ echo "$output" | grep "Active:" | grep "inactive"
+}
+
+@test "[SYSTEMD] stop (stopped)" {
+ skip_not_systemd
+
+ run systemctl stop elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl status elasticsearch.service
+ echo "$output" | grep "Active:" | grep "inactive"
+}
+
+@test "[SYSTEMD] status (stopped)" {
+ skip_not_systemd
+
+ run systemctl status elasticsearch.service
+ echo "$output" | grep "Active:" | grep "inactive"
+}
diff --git a/core/src/test/resources/packaging/scripts/packaging_test_utils.bash b/core/src/test/resources/packaging/scripts/packaging_test_utils.bash
new file mode 100644
index 0000000000..b5fe262efd
--- /dev/null
+++ b/core/src/test/resources/packaging/scripts/packaging_test_utils.bash
@@ -0,0 +1,464 @@
+#!/bin/sh
+
+# This file contains some utilities to test the elasticsearch scripts,
+# the .deb/.rpm packages and the SysV/Systemd scripts.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+# Checks if necessary commands are available to run the tests.
+# Aborts immediately when a mandatory tool is missing so later test
+# failures are not misdiagnosed.
+
+if [ ! -x /usr/bin/which ]; then
+ echo "'which' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which wget 2>/dev/null`" ]; then
+ echo "'wget' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which curl 2>/dev/null`" ]; then
+ echo "'curl' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which pgrep 2>/dev/null`" ]; then
+ echo "'pgrep' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which unzip 2>/dev/null`" ]; then
+ echo "'unzip' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which tar 2>/dev/null`" ]; then
+ echo "'tar' command is mandatory to run the tests"
+ exit 1
+fi
+
+if [ ! -x "`which java 2>/dev/null`" ]; then
+ echo "'java' command is mandatory to run the tests"
+ exit 1
+fi
+
+# Returns 0 if the 'dpkg' command is available
+# (used throughout to branch between Debian- and RedHat-style setups)
+is_dpkg() {
+ [ -x "`which dpkg 2>/dev/null`" ]
+}
+
+# Returns 0 if the 'rpm' command is available
+# (used to decide whether the RPM-specific checks/cleanup apply)
+is_rpm() {
+ [ -x "`which rpm 2>/dev/null`" ]
+}
+
+# Skip test if the 'dpkg' command is not supported
+# Reuses is_dpkg so the detection logic lives in one place.
+skip_not_dpkg() {
+ if ! is_dpkg; then
+ skip "dpkg is not supported"
+ fi
+}
+
+# Skip test if the 'rpm' command is not supported
+# Reuses is_rpm so the detection logic lives in one place.
+skip_not_rpm() {
+ if ! is_rpm; then
+ skip "rpm is not supported"
+ fi
+}
+
+# Returns 0 if the system supports Systemd
+# NOTE(review): tests the hardcoded path /bin/systemctl; on distros
+# that ship systemctl only under /usr/bin this would report "no
+# systemd" — confirm against the target test platforms
+is_systemd() {
+ [ -x /bin/systemctl ]
+}
+
+# Skip test if Systemd is not supported
+# Reuses is_systemd so the detection logic lives in one place.
+skip_not_systemd() {
+ if ! is_systemd; then
+ skip "systemd is not supported"
+ fi
+}
+
+# Returns 0 if the system supports SysV
+# (detected via the presence of the 'service' command)
+is_sysvinit() {
+ [ -x "`which service 2>/dev/null`" ]
+}
+
+# Skip test if SysV is not supported
+# Reuses is_sysvinit/is_systemd so the detection logic lives in one
+# place. When both init systems are present the systemd tests win.
+skip_not_sysvinit() {
+ if is_sysvinit && is_systemd; then
+ skip "sysvinit is supported, but systemd too"
+ fi
+ if ! is_sysvinit; then
+ skip "sysvinit is not supported"
+ fi
+}
+
+# Skip the calling test when no executable 'tar' binary is on the PATH
+skip_not_tar_gz() {
+ local tar_cmd
+ tar_cmd="`which tar 2>/dev/null`"
+ if [ ! -x "$tar_cmd" ]; then
+ skip "tar is not supported"
+ fi
+}
+
+# Skip the calling test when no executable 'unzip' binary is on the PATH
+skip_not_zip() {
+ local unzip_cmd
+ unzip_cmd="`which unzip 2>/dev/null`"
+ if [ ! -x "$unzip_cmd" ]; then
+ skip "unzip is not supported"
+ fi
+}
+
+# Asserts that the path given as $1 exists (any file type)
+assert_file_exist() {
+ [ -e "$1" ]
+}
+
+# Asserts that the path given as $1 does not exist
+assert_file_not_exist() {
+ [ ! -e "$1" ]
+}
+
+# Asserts that a path exists with the expected type, owner and
+# permissions. Each bare test command fails the calling bats test
+# when it returns non-zero.
+#   $1 - path (mandatory)
+#   $2 - "d" for directory; any other value is checked as a regular file
+#   $3 - expected owner name (optional, skipped when empty)
+#   $4 - expected octal permissions, e.g. 755 (optional, skipped when empty)
+assert_file() {
+ local file=$1
+ local type=$2
+ local user=$3
+ local privileges=$4
+
+ [ -n "$file" ] && [ -e "$file" ]
+
+ if [ "$type" = "d" ]; then
+ [ -d "$file" ]
+ else
+ [ -f "$file" ]
+ fi
+
+ if [ "x$user" != "x" ]; then
+ # Owner name is column 3 of 'ls -ld'
+ realuser=$(ls -ld "$file" | awk '{print $3}')
+ [ "$realuser" = "$user" ]
+ fi
+
+ if [ "x$privileges" != "x" ]; then
+ # Octal mode via GNU find's %m printf directive
+ realprivileges=$(find "$file" -maxdepth 0 -printf "%m")
+ [ "$realprivileges" = "$privileges" ]
+ fi
+}
+
+# Asserts that the bats-captured $output matches the extended regex $1
+assert_output() {
+ echo "$output" | grep -E "$1"
+}
+
+# Checks that all directories & files are correctly installed
+# after a package (deb/rpm) install
+# Asserts the elasticsearch user/group exist, checks the common
+# filesystem layout (type/owner/permissions via assert_file), then the
+# dpkg-, rpm- and systemd-specific files.
+verify_package_installation() {
+
+ # User & group must have been created by the package scripts
+ run id elasticsearch
+ [ "$status" -eq 0 ]
+
+ run getent group elasticsearch
+ [ "$status" -eq 0 ]
+
+ # Home dir
+ assert_file "/usr/share/elasticsearch" d root 755
+ # Bin dir
+ assert_file "/usr/share/elasticsearch/bin" d root 755
+ assert_file "/usr/share/elasticsearch/lib" d root 755
+ # Conf dir
+ assert_file "/etc/elasticsearch" d root 755
+ assert_file "/etc/elasticsearch/elasticsearch.yml" f root 644
+ assert_file "/etc/elasticsearch/logging.yml" f root 644
+ # Data dir
+ assert_file "/var/lib/elasticsearch" d elasticsearch 755
+ # Log dir
+ assert_file "/var/log/elasticsearch" d elasticsearch 755
+ # Plugins dir
+ assert_file "/usr/share/elasticsearch/plugins" d elasticsearch 755
+ # PID dir
+ assert_file "/var/run/elasticsearch" d elasticsearch 755
+ # Readme files
+ assert_file "/usr/share/elasticsearch/NOTICE.txt" f root 644
+ assert_file "/usr/share/elasticsearch/README.textile" f root 644
+
+ if is_dpkg; then
+ # Env file
+ assert_file "/etc/default/elasticsearch" f root 644
+
+ # Doc files
+ assert_file "/usr/share/doc/elasticsearch" d root 755
+ assert_file "/usr/share/doc/elasticsearch/copyright" f root 644
+
+ fi
+
+ if is_rpm; then
+ # Env file
+ assert_file "/etc/sysconfig/elasticsearch" f root 644
+ # License file
+ assert_file "/usr/share/elasticsearch/LICENSE.txt" f root 644
+ fi
+
+ if is_systemd; then
+ assert_file "/usr/lib/systemd/system/elasticsearch.service" f root 644
+ assert_file "/usr/lib/tmpfiles.d/elasticsearch.conf" f root 644
+ assert_file "/usr/lib/sysctl.d/elasticsearch.conf" f root 644
+ fi
+}
+
+
+# Install the tar.gz archive
+# Extracts elasticsearch*.tar.gz (from the current directory) into the
+# directory given as $1 (defaults to /tmp), renames the versioned
+# directory to a fixed "$eshome/elasticsearch" path and creates the
+# elasticsearch user/group, because ES cannot run as root.
+install_archive() {
+ local eshome="/tmp"
+ if [ "x$1" != "x" ]; then
+ eshome="$1"
+ fi
+
+ # NOTE(review): '>&2' redirects the invocation's stdout to stderr —
+ # presumably to keep tar's verbose listing out of the captured
+ # output; confirm against the bats 'run' semantics
+ run tar -xzvf elasticsearch*.tar.gz -C "$eshome" >&2
+ [ "$status" -eq 0 ]
+
+ # Rename the extracted elasticsearch-<version> directory to a
+ # version-independent path
+ run find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \;
+ [ "$status" -eq 0 ]
+
+ # ES cannot run as root so create elasticsearch user & group if needed
+ if ! getent group "elasticsearch" > /dev/null 2>&1 ; then
+ if is_dpkg; then
+ run addgroup --system "elasticsearch"
+ [ "$status" -eq 0 ]
+ else
+ run groupadd -r "elasticsearch"
+ [ "$status" -eq 0 ]
+ fi
+ fi
+ if ! id "elasticsearch" > /dev/null 2>&1 ; then
+ if is_dpkg; then
+ run adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
+ [ "$status" -eq 0 ]
+ else
+ run useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
+ [ "$status" -eq 0 ]
+ fi
+ fi
+
+ # The whole tree must belong to the runtime user
+ run chown -R elasticsearch:elasticsearch "$eshome/elasticsearch"
+ [ "$status" -eq 0 ]
+}
+
+
+# Checks that all directories & files are correctly installed
+# after a archive (tar.gz/zip) install
+#   $1 - optional install dir, defaults to /tmp/elasticsearch
+verify_archive_installation() {
+ local eshome="/tmp/elasticsearch"
+ if [ "x$1" != "x" ]; then
+ eshome="$1"
+ fi
+
+ assert_file "$eshome" d
+ assert_file "$eshome/bin" d
+ assert_file "$eshome/bin/elasticsearch" f
+ assert_file "$eshome/bin/elasticsearch.in.sh" f
+ assert_file "$eshome/bin/plugin" f
+ assert_file "$eshome/config" d
+ assert_file "$eshome/config/elasticsearch.yml" f
+ assert_file "$eshome/config/logging.yml" f
+ assert_file "$eshome/lib" d
+ assert_file "$eshome/NOTICE.txt" f
+ assert_file "$eshome/LICENSE.txt" f
+ assert_file "$eshome/README.textile" f
+}
+
+# Deletes everything before running a test file
+clean_before_test() {
+
+ # List of files to be deleted
+ ELASTICSEARCH_TEST_FILES=("/usr/share/elasticsearch" \
+ "/etc/elasticsearch" \
+ "/var/lib/elasticsearch" \
+ "/var/log/elasticsearch" \
+ "/tmp/elasticsearch" \
+ "/etc/default/elasticsearch" \
+ "/etc/sysconfig/elasticsearch" \
+ "/var/run/elasticsearch" \
+ "/usr/share/doc/elasticsearch" \
+ "/tmp/elasticsearch" \
+ "/usr/lib/systemd/system/elasticsearch.conf" \
+ "/usr/lib/tmpfiles.d/elasticsearch.conf" \
+ "/usr/lib/sysctl.d/elasticsearch.conf")
+
+ if [ "$ES_CLEAN_BEFORE_TEST" = "true" ]; then
+ # Kills all processes of user elasticsearch
+ if id elasticsearch > /dev/null 2>&1; then
+ pkill -u elasticsearch 2>/dev/null || true
+ fi
+
+ # Kills all running Elasticsearch processes
+ ps aux | grep -i "org.elasticsearch.bootstrap.Elasticsearch" | awk {'print $2'} | xargs kill -9 > /dev/null 2>&1 || true
+
+ # Removes RPM package
+ if is_rpm; then
+ rpm --quiet -e elasticsearch > /dev/null 2>&1 || true
+ fi
+
+ if [ -x "`which yum 2>/dev/null`" ]; then
+ yum remove -y elasticsearch > /dev/null 2>&1 || true
+ fi
+
+ # Removes DEB package
+ if is_dpkg; then
+ dpkg --purge elasticsearch > /dev/null 2>&1 || true
+ fi
+
+ if [ -x "`which apt-get 2>/dev/null`" ]; then
+ apt-get --quiet --yes purge elasticsearch > /dev/null 2>&1 || true
+ fi
+
+ # Removes user & group
+ userdel elasticsearch > /dev/null 2>&1 || true
+ groupdel elasticsearch > /dev/null 2>&1 || true
+
+
+ # Removes all files
+ for d in "${ELASTICSEARCH_TEST_FILES[@]}"; do
+ if [ -e "$d" ]; then
+ rm -rf "$d"
+ fi
+ done
+ fi
+
+ # Checks that all files are deleted
+ for d in "${ELASTICSEARCH_TEST_FILES[@]}"; do
+ if [ -e "$d" ]; then
+ echo "$d should not exist before running the tests" >&2
+ exit 1
+ fi
+ done
+}
+
+# Starts Elasticsearch using whichever install is present — a /tmp
+# archive install (launched directly as the elasticsearch user),
+# systemd, or SysV init — waits for the cluster to answer, then
+# verifies the process/service really is running.
+start_elasticsearch_service() {
+
+ if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then
+ # Archive install: daemonize (-d) as the elasticsearch user and
+ # record the PID (-p) so it can be checked and stopped later
+ run /bin/su -s /bin/sh -c '/tmp/elasticsearch/bin/elasticsearch -d -p /tmp/elasticsearch/elasticsearch.pid' elasticsearch
+ [ "$status" -eq 0 ]
+
+ elif is_systemd; then
+ run systemctl daemon-reload
+ [ "$status" -eq 0 ]
+
+ run systemctl enable elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl is-enabled elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl start elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ elif is_sysvinit; then
+ run service elasticsearch start
+ [ "$status" -eq 0 ]
+ fi
+
+ wait_for_elasticsearch_status
+
+ # Verify the node is up, matching the start method used above
+ if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
+ pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
+ # PID file must contain a positive number
+ [ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
+
+ run ps $pid
+ [ "$status" -eq 0 ]
+
+ elif is_systemd; then
+ run systemctl is-active elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ run systemctl status elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ elif is_sysvinit; then
+ run service elasticsearch status
+ [ "$status" -eq 0 ]
+ fi
+}
+
+# Stops Elasticsearch via the same mechanism used to start it
+# (PID file / systemd / SysV init) and verifies it is down.
+stop_elasticsearch_service() {
+
+ if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
+ pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
+ [ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
+
+ # Graceful shutdown via SIGTERM
+ run kill -SIGTERM $pid
+ [ "$status" -eq 0 ]
+
+ elif is_systemd; then
+ run systemctl stop elasticsearch.service
+ [ "$status" -eq 0 ]
+
+ # 'systemctl is-active' exits 3 for an inactive unit
+ run systemctl is-active elasticsearch.service
+ [ "$status" -eq 3 ]
+
+ echo "$output" | grep -E 'inactive|failed'
+
+ elif is_sysvinit; then
+ run service elasticsearch stop
+ [ "$status" -eq 0 ]
+
+ # A stopped SysV service reports a non-zero status
+ run service elasticsearch status
+ [ "$status" -ne 0 ]
+ fi
+}
+
+# Waits for Elasticsearch to reach a given status (defaults to "green")
+#   $1 - optional cluster health status to wait for
+# Exits the test run with an error if the node never becomes reachable.
+wait_for_elasticsearch_status() {
+ local status="green"
+ if [ "x$1" != "x" ]; then
+ status="$1"
+ fi
+
+ # Try to connect to elasticsearch and wait for expected status
+ # '|| true': the wget poll is best-effort; the curl call below does
+ # the authoritative reachability check
+ wget --quiet --retry-connrefused --waitretry=1 --timeout=60 \
+ --output-document=/dev/null "http://localhost:9200/_cluster/health?wait_for_status=$status&timeout=60s" || true
+
+ # Checks the cluster health
+ curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
+ if [ $? -ne 0 ]; then
+ echo "error when checking cluster health" >&2
+ exit 1
+ fi
+}
+
+# Executes some very basic Elasticsearch tests
+# Indexes one document, checks the document count, then deletes all
+# indices so the node is left clean for the next test.
+run_elasticsearch_tests() {
+ # Cluster must report green before indexing
+ run curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "green"
+
+ # 'refresh=true' makes the document immediately visible to _cat/count
+ run curl -XPOST 'http://localhost:9200/library/book/1?refresh=true' -d '{"title": "Elasticsearch - The Definitive Guide"}' 2>&1
+ [ "$status" -eq 0 ]
+
+ run curl -XGET 'http://localhost:9200/_cat/count?h=count&v=false'
+ [ "$status" -eq 0 ]
+ echo "$output" | grep -w "1"
+
+ # Clean up everything that was indexed
+ run curl -XDELETE 'http://localhost:9200/_all'
+ [ "$status" -eq 0 ]
+}